Dataset schema:

| column | dtype | range / classes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
hexsha: be321954e8763a4c51039e9d44e9a937f9446fbf | size: 6,448 | ext: py | lang: Python
path: manual/Alfred.alfredpreferences/workflows/user.workflow.4A7BFB75-AB54-42FE-B325-22358306BE4A/info.py
repo: oscu0/orpheus @ 966a2dce37cef7d447e063ac5c2cd91a4388d2e0 | licenses: ["Unlicense"]
stars: 1 (2019-02-22T16:06:03.000Z to 2019-02-22T16:06:03.000Z) | issues: null | forks: null (same path, repo, licenses)
content:
#!/usr/bin/python
# encoding: utf-8
#
# Copyright (c) 2014 deanishe@deanishe.net
#
# MIT Licence. See http://opensource.org/licenses/MIT
#
# Created on 2014-12-26
#
"""info.py [options] [<query>]
View/manage workflow settings.
Usage:
info.py [<query>]
info.py (-h|--help)
info.py --openhelp
info.py --openactive
info.py --openunits
info.py --openapi
info.py --currencies [<query>]
Options:
-h, --help Show this message
--openhelp Open help file in default browser
--openactive Open active currency file in default editor
--openunits Open custom units file in default editor
--openapi Open the openexchangerates.org signup page
--currencies View/search supported currencies
"""
from __future__ import absolute_import
from datetime import timedelta
import subprocess
import sys
from docopt import docopt
from workflow import (
ICON_INFO,
ICON_WARNING,
ICON_WEB,
MATCH_ALL,
MATCH_ALLCHARS,
Workflow3,
)
from config import (
bootstrap,
ACTIVE_CURRENCIES_FILENAME,
CURRENCIES,
CRYPTO_CURRENCIES,
CURRENCY_CACHE_NAME,
CUSTOM_DEFINITIONS_FILENAME,
ICON_CURRENCY,
ICON_HELP,
KEYWORD_SETTINGS,
README_URL,
)
# Signup page for free API key
SIGNUP_URL = 'https://openexchangerates.org/signup/free'
log = None
DELIMITER = u'\u203a' # SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
ALFRED_AS = 'tell application "Alfred 3" to search "{}"'.format(
KEYWORD_SETTINGS)
def human_timedelta(td):
"""Return relative time (past) in human-readable format.
Example: "10 minutes ago"
Args:
td (datetime.timedelta): Time delta to convert.
Returns:
unicode: Human-readable time delta.
"""
output = []
d = {'day': td.days}
d['hour'], rem = divmod(td.seconds, 3600)
d['minute'], d['second'] = divmod(rem, 60)
for unit in ('day', 'hour', 'minute', 'second'):
i = d[unit]
if unit == 'second' and len(output):
# no seconds unless last update was < 1m ago
break
if i == 1:
output.append('1 %s' % unit)
elif i > 1:
output.append('%d %ss' % (i, unit))
output.append('ago')
return ' '.join(output)
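# Illustrative examples of the formatting above (not in the original workflow source):
#   human_timedelta(timedelta(minutes=12, seconds=34)) -> '12 minutes ago'
#   human_timedelta(timedelta(seconds=42))              -> '42 seconds ago'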
def handle_delimited_query(query):
"""Process sub-commands.
Args:
query (str): User query
"""
# Currencies or decimal places
if query.endswith(DELIMITER): # User deleted trailing space
subprocess.call(['osascript', '-e', ALFRED_AS])
return
mode, query = [s.strip() for s in query.split(DELIMITER)]
if mode == 'currencies':
currencies = sorted([(name, symbol) for (symbol, name)
in CURRENCIES.items()] +
[(name, symbol) for (symbol, name)
in CRYPTO_CURRENCIES.items()])
if query:
currencies = wf.filter(query, currencies,
key=lambda t: ' '.join(t),
match_on=MATCH_ALL ^ MATCH_ALLCHARS,
min_score=30)
else: # Show last update time
age = wf.cached_data_age(CURRENCY_CACHE_NAME)
if age > 0: # Exchange rates in cache
td = timedelta(seconds=age)
wf.add_item('Exchange rates updated {}'.format(
human_timedelta(td)),
icon=ICON_INFO)
if not currencies:
wf.add_item('No matching currencies',
'Try a different query',
icon=ICON_WARNING)
for name, symbol in currencies:
wf.add_item(u'{} // {}'.format(name, symbol),
u'Use `{}` in conversions'.format(symbol),
copytext=symbol,
valid=False,
icon=ICON_CURRENCY)
wf.send_feedback()
def main(wf):
"""Run Script Filter.
Args:
wf (workflow.Workflow): Workflow object.
"""
args = docopt(__doc__, wf.args)
log.debug('args : {!r}'.format(args))
query = args.get('<query>')
bootstrap(wf)
# Alternative actions ----------------------------------------------
if args.get('--openapi'):
subprocess.call(['open', SIGNUP_URL])
return
if args.get('--openhelp'):
subprocess.call(['open', README_URL])
return
if args.get('--openunits'):
path = wf.datafile(CUSTOM_DEFINITIONS_FILENAME)
subprocess.call(['open', path])
return
if args.get('--openactive'):
path = wf.datafile(ACTIVE_CURRENCIES_FILENAME)
subprocess.call(['open', path])
return
# Parse query ------------------------------------------------------
if DELIMITER in query:
return handle_delimited_query(query)
# Filter options ---------------------------------------------------
query = query.strip()
options = [
dict(title='View Help File',
subtitle='Open help file in your browser',
valid=True,
arg='--openhelp',
icon=ICON_HELP),
dict(title='View All Supported Currencies',
subtitle='View and search list of supported currencies',
autocomplete=u'currencies {} '.format(DELIMITER),
icon=ICON_CURRENCY),
dict(title='Edit Active Currencies',
subtitle='Edit the list of active currencies',
valid=True,
arg='--openactive',
icon='icon.png'),
dict(title='Edit Custom Units',
subtitle='Add and edit your own custom units',
valid=True,
arg='--openunits',
icon='icon.png'),
dict(title='Get API key',
subtitle='Sign up for free openexchangerates.org account',
valid=True,
arg='--openapi',
icon=ICON_WEB),
]
if query:
options = wf.filter(query, options, key=lambda d: d['title'],
min_score=30)
if not options:
wf.add_item('No matching options', 'Try a different query?',
icon=ICON_WARNING)
for d in options:
wf.add_item(**d)
wf.send_feedback()
return
if __name__ == '__main__':
wf = Workflow3()
log = wf.logger
sys.exit(wf.run(main))
avg_line_length: 25.486166 | max_line_length: 72 | alphanum_fraction: 0.550093
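The Script Filter above leans on docopt: the module docstring doubles as the CLI specification, and every flag becomes a key in the parsed dictionary. A minimal standalone sketch of that pattern (the usage string and argv below are illustrative, not taken from the workflow):

```python
from docopt import docopt

USAGE = """Usage:
    prog [<query>]
    prog --openhelp
"""

# Every option and positional from the usage patterns appears as a key,
# whether or not it was supplied on the command line.
args = docopt(USAGE, argv=["--openhelp"])
assert args["--openhelp"] is True
assert args["<query>"] is None
```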
hexsha: 71f2858c2a22a32559492a82e92531f79e910e69 | size: 87 | ext: py | lang: Python
path: py_tdlib/constructors/get_option.py | head: 2e2d21a742ebcd439971a32357f2d0abd0ce61eb | licenses: ["MIT"]
stars: 24 on Mr-TelegramBot/python-tdlib (2018-10-05T13:04:30.000Z to 2020-05-12T08:45:34.000Z)
issues: 3 on MrMahdi313/python-tdlib (2019-06-26T07:20:20.000Z to 2021-05-24T13:06:56.000Z)
forks: 5 on MrMahdi313/python-tdlib (2018-10-05T14:29:28.000Z to 2020-08-11T15:04:10.000Z)
content:
from ..factory import Method
class getOption(Method):
name = None # type: "string"
avg_line_length: 14.5 | max_line_length: 30 | alphanum_fraction: 0.701149
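For context on the tiny constructor above: TDLib requests are JSON objects whose `@type` names the method, so a `getOption` call is ultimately just `{"@type": "getOption", "name": "..."}`. How the `Method` factory assembles that payload is not shown in this snippet; the sketch below only illustrates the wire format, and the `"version"` option name is an example value, not something taken from the repo.

```python
import json

# Hypothetical getOption request payload as TDLib expects it on the wire.
request = {"@type": "getOption", "name": "version"}
print(json.dumps(request))  # {"@type": "getOption", "name": "version"}
```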
hexsha: 7627936785cfaabab9d56a811bca204641d76f5c | size: 8,247 | ext: py | lang: Python
path: themes/minimal/base16-gigavolt.config.py
repo: dgmulf/base16-qutebrowser @ 3d71ea89adfb3ede9eee2f9764d4a59d26fe4f9b | licenses: ["MIT"]
stars: null | issues: null | forks: null (same path, repo, licenses)
content:
# base16-qutebrowser (https://github.com/theova/base16-qutebrowser)
# Base16 qutebrowser template by theova and Daniel Mulford
# Gigavolt scheme by Aidan Swope (http://github.com/Whillikers)
base00 = "#202126"
base01 = "#2d303d"
base02 = "#706d60"
base03 = "#a1d2e6"
base04 = "#cad3ff"
base05 = "#e9e7e1"
base06 = "#eff0f9"
base07 = "#f2fbff"
base08 = "#ff661a"
base09 = "#19f988"
base0A = "#ffdc2d"
base0B = "#f2e6a9"
base0C = "#fb6acb"
base0D = "#40bfff"
base0E = "#ae94f9"
base0F = "#6187ff"
# set qutebrowser colors
# Text color of the completion widget. May be a single color to use for
# all columns or a list of three colors, one for each column.
c.colors.completion.fg = base05
# Background color of the completion widget for odd rows.
c.colors.completion.odd.bg = base00
# Background color of the completion widget for even rows.
c.colors.completion.even.bg = base00
# Foreground color of completion widget category headers.
c.colors.completion.category.fg = base0D
# Background color of the completion widget category headers.
c.colors.completion.category.bg = base00
# Top border color of the completion widget category headers.
c.colors.completion.category.border.top = base00
# Bottom border color of the completion widget category headers.
c.colors.completion.category.border.bottom = base00
# Foreground color of the selected completion item.
c.colors.completion.item.selected.fg = base00
# Background color of the selected completion item.
c.colors.completion.item.selected.bg = base0D
# Top border color of the selected completion item.
c.colors.completion.item.selected.border.top = base0D
# Bottom border color of the selected completion item.
c.colors.completion.item.selected.border.bottom = base0D
# Foreground color of the matched text in the selected completion item.
c.colors.completion.item.selected.match.fg = base00
# Foreground color of the matched text in the completion.
c.colors.completion.match.fg = base09
# Color of the scrollbar handle in the completion view.
c.colors.completion.scrollbar.fg = base05
# Color of the scrollbar in the completion view.
c.colors.completion.scrollbar.bg = base00
# Background color for the download bar.
c.colors.downloads.bar.bg = base00
# Color gradient start for download text.
c.colors.downloads.start.fg = base00
# Color gradient start for download backgrounds.
c.colors.downloads.start.bg = base0D
# Color gradient end for download text.
c.colors.downloads.stop.fg = base00
# Color gradient stop for download backgrounds.
c.colors.downloads.stop.bg = base0C
# Foreground color for downloads with errors.
c.colors.downloads.error.fg = base08
# Font color for hints.
c.colors.hints.fg = base00
# Background color for hints. Note that you can use a `rgba(...)` value
# for transparency.
c.colors.hints.bg = base0A
# Font color for the matched part of hints.
c.colors.hints.match.fg = base05
# Text color for the keyhint widget.
c.colors.keyhint.fg = base05
# Highlight color for keys to complete the current keychain.
c.colors.keyhint.suffix.fg = base05
# Background color of the keyhint widget.
c.colors.keyhint.bg = base00
# Foreground color of an error message.
c.colors.messages.error.fg = base00
# Background color of an error message.
c.colors.messages.error.bg = base08
# Border color of an error message.
c.colors.messages.error.border = base08
# Foreground color of a warning message.
c.colors.messages.warning.fg = base00
# Background color of a warning message.
c.colors.messages.warning.bg = base0E
# Border color of a warning message.
c.colors.messages.warning.border = base0E
# Foreground color of an info message.
c.colors.messages.info.fg = base05
# Background color of an info message.
c.colors.messages.info.bg = base00
# Border color of an info message.
c.colors.messages.info.border = base00
# Foreground color for prompts.
c.colors.prompts.fg = base05
# Border used around UI elements in prompts.
c.colors.prompts.border = base00
# Background color for prompts.
c.colors.prompts.bg = base00
# Background color for the selected item in filename prompts.
c.colors.prompts.selected.bg = base0A
# Foreground color of the statusbar.
c.colors.statusbar.normal.fg = base05
# Background color of the statusbar.
c.colors.statusbar.normal.bg = base00
# Foreground color of the statusbar in insert mode.
c.colors.statusbar.insert.fg = base0C
# Background color of the statusbar in insert mode.
c.colors.statusbar.insert.bg = base00
# Foreground color of the statusbar in passthrough mode.
c.colors.statusbar.passthrough.fg = base0A
# Background color of the statusbar in passthrough mode.
c.colors.statusbar.passthrough.bg = base00
# Foreground color of the statusbar in private browsing mode.
c.colors.statusbar.private.fg = base0E
# Background color of the statusbar in private browsing mode.
c.colors.statusbar.private.bg = base00
# Foreground color of the statusbar in command mode.
c.colors.statusbar.command.fg = base04
# Background color of the statusbar in command mode.
c.colors.statusbar.command.bg = base01
# Foreground color of the statusbar in private browsing + command mode.
c.colors.statusbar.command.private.fg = base0E
# Background color of the statusbar in private browsing + command mode.
c.colors.statusbar.command.private.bg = base01
# Foreground color of the statusbar in caret mode.
c.colors.statusbar.caret.fg = base0D
# Background color of the statusbar in caret mode.
c.colors.statusbar.caret.bg = base00
# Foreground color of the statusbar in caret mode with a selection.
c.colors.statusbar.caret.selection.fg = base0D
# Background color of the statusbar in caret mode with a selection.
c.colors.statusbar.caret.selection.bg = base00
# Background color of the progress bar.
c.colors.statusbar.progress.bg = base0D
# Default foreground color of the URL in the statusbar.
c.colors.statusbar.url.fg = base05
# Foreground color of the URL in the statusbar on error.
c.colors.statusbar.url.error.fg = base08
# Foreground color of the URL in the statusbar for hovered links.
c.colors.statusbar.url.hover.fg = base09
# Foreground color of the URL in the statusbar on successful load
# (http).
c.colors.statusbar.url.success.http.fg = base0B
# Foreground color of the URL in the statusbar on successful load
# (https).
c.colors.statusbar.url.success.https.fg = base0B
# Foreground color of the URL in the statusbar when there's a warning.
c.colors.statusbar.url.warn.fg = base0E
# Background color of the tab bar.
c.colors.tabs.bar.bg = base00
# Color gradient start for the tab indicator.
c.colors.tabs.indicator.start = base0D
# Color gradient end for the tab indicator.
c.colors.tabs.indicator.stop = base0C
# Color for the tab indicator on errors.
c.colors.tabs.indicator.error = base08
# Foreground color of unselected odd tabs.
c.colors.tabs.odd.fg = base05
# Background color of unselected odd tabs.
c.colors.tabs.odd.bg = base00
# Foreground color of unselected even tabs.
c.colors.tabs.even.fg = base05
# Background color of unselected even tabs.
c.colors.tabs.even.bg = base00
# Background color of pinned unselected even tabs.
c.colors.tabs.pinned.even.bg = base0B
# Foreground color of pinned unselected even tabs.
c.colors.tabs.pinned.even.fg = base00
# Background color of pinned unselected odd tabs.
c.colors.tabs.pinned.odd.bg = base0B
# Foreground color of pinned unselected odd tabs.
c.colors.tabs.pinned.odd.fg = base00
# Background color of pinned selected even tabs.
c.colors.tabs.pinned.selected.even.bg = base0D
# Foreground color of pinned selected even tabs.
c.colors.tabs.pinned.selected.even.fg = base00
# Background color of pinned selected odd tabs.
c.colors.tabs.pinned.selected.odd.bg = base0D
# Foreground color of pinned selected odd tabs.
c.colors.tabs.pinned.selected.odd.fg = base00
# Foreground color of selected odd tabs.
c.colors.tabs.selected.odd.fg = base00
# Background color of selected odd tabs.
c.colors.tabs.selected.odd.bg = base0D
# Foreground color of selected even tabs.
c.colors.tabs.selected.even.fg = base00
# Background color of selected even tabs.
c.colors.tabs.selected.even.bg = base0D
# Background color for webpages if unset (or empty to use the theme's
# color).
c.colors.webpage.bg = base00
avg_line_length: 29.453571 | max_line_length: 71 | alphanum_fraction: 0.771796
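These base16 theme files are plain qutebrowser config fragments, so applying one is just a matter of sourcing it from the user's own config.py. A minimal sketch, assuming the theme file has been copied under the qutebrowser config directory at the path shown:

```python
# ~/.config/qutebrowser/config.py (sketch; the theme path is an assumption).
# `config` is injected by qutebrowser when it loads config.py.
config.load_autoconfig(False)  # manage settings from config.py only
config.source('themes/minimal/base16-gigavolt.config.py')
```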
hexsha: 0fa1f7efb2de97801e7d0fd423d190cadf795d98 | size: 200 | ext: py | lang: Python
path: cellsimulation/triangle.py | head: b2f6e203a0fd46a8dcfc0b7f1fa0f5b514145489 | licenses: ["Apache-2.0"]
stars: 1 on marcbperez/cell-simulation-python (2019-04-15T09:33:29.000Z to 2019-04-15T09:33:29.000Z)
issues: null | forks: null (repo mbpez/cell-simulation-python)
content:
from mesh import Mesh
from OpenGL.GL import *
class Triangle(Mesh):
def render(self):
self.render_position()
glBegin(GL_TRIANGLES)
self.render_vertices()
glEnd()
avg_line_length: 18.181818 | max_line_length: 30 | alphanum_fraction: 0.64
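Triangle only makes sense against its Mesh base class, which this snippet does not include. Below is a hedged sketch of what that interface plausibly looks like, built on the same legacy PyOpenGL calls; the attribute names and behaviour are assumptions for illustration, not the cellsimulation source.

```python
from OpenGL.GL import glTranslatef, glVertex3fv

class Mesh:
    """Assumed minimal base class: a position plus a list of vertices."""

    def __init__(self, position=(0.0, 0.0, 0.0), vertices=()):
        self.position = position
        self.vertices = vertices

    def render_position(self):
        # Translate to the mesh position (legacy fixed-function pipeline).
        glTranslatef(*self.position)

    def render_vertices(self):
        # Emit one vertex per entry; Triangle wraps this in glBegin/glEnd.
        for vertex in self.vertices:
            glVertex3fv(vertex)
```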
hexsha: 91fafa7c7fe4dd89070be0f5413dd1740352055e | size: 2,055 | ext: py | lang: Python
path: tests/tests_integration/test_api/test_data_sets.py
repo: sakshi87/cognite-sdk-python @ eb3d569fd058dfd8e3c0c29dee2a635deabad1ac | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null (same path, repo, licenses)
content:
from unittest import mock
import pytest
from cognite.client import CogniteClient
from cognite.client.data_classes import DataSet, DataSetFilter, DataSetUpdate
from cognite.client.exceptions import CogniteNotFoundError
from tests.utils import set_request_limit
COGNITE_CLIENT = CogniteClient()
DS_API = COGNITE_CLIENT.data_sets
@pytest.fixture(scope="class")
def new_dataset():
dataset = DS_API.create(DataSet())
yield dataset
# todo: uncomment when delete is implemented
# DS_API.delete(id=dataset.id)
# assert DS_API.retrieve(dataset.id) is None
@pytest.fixture
def post_spy():
with mock.patch.object(DS_API, "_post", wraps=DS_API._post) as _:
yield
class TestDataSetsAPI:
def test_retrieve(self, new_dataset):
assert new_dataset.id == DS_API.retrieve(new_dataset.id).id
def test_retrieve_multiple(self):
res_listed_ids = [e.id for e in DS_API.list(limit=2)]
res_lookup_ids = [e.id for e in DS_API.retrieve_multiple(res_listed_ids)]
for listed_id in res_listed_ids:
assert listed_id in res_lookup_ids
def test_retrieve_unknown(self, new_dataset):
invalid_external_id = "this does not exist"
with pytest.raises(CogniteNotFoundError) as error:
DS_API.retrieve_multiple(ids=[new_dataset.id], external_ids=[invalid_external_id])
assert error.value.not_found[0]["externalId"] == invalid_external_id
def test_list(self, post_spy):
with set_request_limit(DS_API, 1):
res = DS_API.list(limit=2)
assert 2 == len(res)
assert 2 == COGNITE_CLIENT.data_sets._post.call_count
def test_aggregate(self):
res = COGNITE_CLIENT.data_sets.aggregate(filter=DataSetFilter(metadata={"1": "1"}))
assert res[0].count > 0
def test_update(self, new_dataset):
update_asset = DataSetUpdate(new_dataset.id).metadata.set({"1": "1"}).name.set("newname")
res = DS_API.update(update_asset)
assert {"1": "1"} == res.metadata
assert "newname" == res.name
avg_line_length: 33.688525 | max_line_length: 97 | alphanum_fraction: 0.705109
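The `post_spy` fixture above uses a pattern worth calling out: `mock.patch.object(..., wraps=...)` keeps the real method working while recording every call, which the request-limit test then asserts on. A self-contained sketch of that pattern (the class and method names here are made up for illustration):

```python
from unittest import mock


class FakeClient:
    def _post(self, payload):
        # Real behaviour still runs when wrapped with wraps=...
        return {"echo": payload}


client = FakeClient()
with mock.patch.object(client, "_post", wraps=client._post) as spy:
    client._post({"limit": 1})
    client._post({"limit": 1, "cursor": "next"})

assert spy.call_count == 2                       # calls were counted...
assert spy.call_args[0][0]["cursor"] == "next"   # ...and arguments recorded
```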
hexsha: ee4751d8f716877ee27de29cb2792055480b3eaf | size: 101,980 | ext: py | lang: Python
path: wandb/sdk/data_types.py | repo: theodumont/client @ 7402ac67ada5bc8078078a49fd3e0cb4b6172307 | licenses: ["MIT"]
stars: null | issues: 1 (2021-04-27T20:13:45.000Z to 2021-04-27T20:13:45.000Z) | forks: null
content:
import codecs
import hashlib
import json
import logging
import numbers
import os
import re
import shutil
import sys
from typing import (
Any,
cast,
ClassVar,
Dict,
List,
Optional,
Sequence,
Set,
Tuple,
Type,
TYPE_CHECKING,
Union,
)
from pkg_resources import parse_version
import six
from six.moves.collections_abc import Sequence as SixSequence
import wandb
from wandb import util
from wandb._globals import _datatypes_callback
from wandb.compat import tempfile
from wandb.util import has_num
from .interface import _dtypes
if TYPE_CHECKING: # pragma: no cover
from .wandb_artifacts import Artifact as LocalArtifact
from .wandb_run import Run as LocalRun
from wandb.apis.public import Artifact as PublicArtifact
import numpy as np # type: ignore
import pandas as pd # type: ignore
import matplotlib # type: ignore
import plotly # type: ignore
import PIL # type: ignore
import torch # type: ignore
from typing import TextIO
TypeMappingType = Dict[str, Type["WBValue"]]
NumpyHistogram = Tuple[np.ndarray, np.ndarray]
ValToJsonType = Union[
dict,
"WBValue",
Sequence["WBValue"],
"plotly.Figure",
"matplotlib.artist.Artist",
"pd.DataFrame",
object,
]
ImageDataType = Union[
"matplotlib.artist.Artist", "PIL.Image", "TorchTensorType", "np.ndarray"
]
ImageDataOrPathType = Union[str, "Image", ImageDataType]
TorchTensorType = Union["torch.Tensor", "torch.Variable"]
_MEDIA_TMP = tempfile.TemporaryDirectory("wandb-media")
_DATA_FRAMES_SUBDIR = os.path.join("media", "data_frames")
def _get_max_cli_version() -> Union[str, None]:
_, server_info = wandb.api.viewer_server_info()
max_cli_version = server_info.get("cliVersionInfo", {}).get("max_cli_version", None)
return str(max_cli_version) if max_cli_version is not None else None
def _is_offline() -> bool:
return (
wandb.run is not None and wandb.run._settings.mode == "offline" # type: ignore
) or str(wandb.setup().settings.mode) == "offline"
def _server_accepts_client_ids() -> bool:
# First, if we are offline, assume the backend server cannot
# accept client IDs. Unfortunately, this is the best we can do
# until we are sure that all local versions are > "0.11.0" max_cli_version.
# The practical implication is that tables logged in offline mode
# will not show up in the workspace (but will still show up in artifacts). This
# means we never lose data, and we can still view using weave. If we decided
# to use client ids in offline mode, then the manifests and artifact data
# would never be resolvable and would lead to failed uploads. Our position
# is to never lose data - and instead take the tradeoff in the UI.
if _is_offline():
return False
# If the script is online, request the max_cli_version and ensure the server
# is of a high enough version.
max_cli_version = _get_max_cli_version()
if max_cli_version is None:
return False
return parse_version("0.11.0") <= parse_version(max_cli_version)
class _WBValueArtifactSource(object):
artifact: "PublicArtifact"
name: Optional[str]
def __init__(self, artifact: "PublicArtifact", name: Optional[str] = None) -> None:
self.artifact = artifact
self.name = name
class _WBValueArtifactTarget(object):
artifact: "LocalArtifact"
name: Optional[str]
def __init__(self, artifact: "LocalArtifact", name: Optional[str] = None) -> None:
self.artifact = artifact
self.name = name
class WBValue(object):
"""
Abstract parent class for things that can be logged by `wandb.log()` and
visualized by wandb.
The objects will be serialized as JSON and always have a _type attribute
that indicates how to interpret the other fields.
"""
# Class Attributes
_type_mapping: ClassVar[Optional["TypeMappingType"]] = None
# override _log_type to indicate the type which the subclass deserializes
_log_type: ClassVar[Optional[str]] = None
# Instance Attributes
_artifact_source: Optional[_WBValueArtifactSource]
_artifact_target: Optional[_WBValueArtifactTarget]
def __init__(self) -> None:
self._artifact_source = None
self._artifact_target = None
def to_json(self, run_or_artifact: Union["LocalRun", "LocalArtifact"]) -> dict:
"""Serializes the object into a JSON blob, using a run or artifact to store additional data.
Args:
run_or_artifact (wandb.Run | wandb.Artifact): the Run or Artifact for which this object should be generating
JSON for - this is useful to store additional data if needed.
Returns:
dict: JSON representation
"""
raise NotImplementedError
@classmethod
def from_json(
cls: Type["WBValue"], json_obj: dict, source_artifact: "PublicArtifact"
) -> "WBValue":
"""Deserialize a `json_obj` into it's class representation. If additional resources were stored in the
`run_or_artifact` artifact during the `to_json` call, then those resources are expected to be in
the `source_artifact`.
Args:
json_obj (dict): A JSON dictionary to deserialize
source_artifact (wandb.Artifact): An artifact which will hold any additional resources which were stored
during the `to_json` function.
"""
raise NotImplementedError
@classmethod
def with_suffix(cls: Type["WBValue"], name: str, filetype: str = "json") -> str:
"""Helper function to return the name with suffix added if not already
Args:
name (str): the name of the file
filetype (str, optional): the filetype to use. Defaults to "json".
Returns:
str: a filename which is suffixed with its `_log_type` followed by the filetype
"""
if cls._log_type is not None:
suffix = cls._log_type + "." + filetype
else:
suffix = filetype
if not name.endswith(suffix):
return name + "." + suffix
return name
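    # Illustration (not part of the original source): with Histogram._log_type set to
    # "histogram", Histogram.with_suffix("stats") returns "stats.histogram.json", while
    # a name that already ends in ".histogram.json" is returned unchanged.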
@staticmethod
def init_from_json(
json_obj: dict, source_artifact: "PublicArtifact"
) -> "Optional[WBValue]":
"""Looks through all subclasses and tries to match the json obj with the class which created it. It will then
call that subclass' `from_json` method. Importantly, this function will set the return object's `source_artifact`
attribute to the passed in source artifact. This is critical for artifact bookkeeping. If you choose to create
a wandb.Value via its `from_json` method, make sure to properly set this `artifact_source` to avoid data duplication.
Args:
json_obj (dict): A JSON dictionary to deserialize. It must contain a `_type` key. The value of
this key is used to lookup the correct subclass to use.
source_artifact (wandb.Artifact): An artifact which will hold any additional resources which were stored
during the `to_json` function.
Returns:
wandb.Value: a newly created instance of a subclass of wandb.Value
"""
class_option = WBValue.type_mapping().get(json_obj["_type"])
if class_option is not None:
obj = class_option.from_json(json_obj, source_artifact)
obj._set_artifact_source(source_artifact)
return obj
return None
@staticmethod
def type_mapping() -> "TypeMappingType":
"""Returns a map from `_log_type` to subclass. Used to lookup correct types for deserialization.
Returns:
dict: dictionary of str:class
"""
if WBValue._type_mapping is None:
WBValue._type_mapping = {}
frontier = [WBValue]
explored = set([])
while len(frontier) > 0:
class_option = frontier.pop()
explored.add(class_option)
if class_option._log_type is not None:
WBValue._type_mapping[class_option._log_type] = class_option
for subclass in class_option.__subclasses__():
if subclass not in explored:
frontier.append(subclass)
return WBValue._type_mapping
def __eq__(self, other: object) -> bool:
return id(self) == id(other)
def __ne__(self, other: object) -> bool:
return not self.__eq__(other)
def to_data_array(self) -> List[Any]:
"""Converts the object to a list of primitives representing the underlying data"""
raise NotImplementedError
def _set_artifact_source(
self, artifact: "PublicArtifact", name: Optional[str] = None
) -> None:
assert (
self._artifact_source is None
), "Cannot update artifact_source. Existing source: {}/{}".format(
self._artifact_source.artifact, self._artifact_source.name
)
self._artifact_source = _WBValueArtifactSource(artifact, name)
def _set_artifact_target(
self, artifact: "LocalArtifact", name: Optional[str] = None
) -> None:
assert (
self._artifact_target is None
), "Cannot update artifact_target. Existing target: {}/{}".format(
self._artifact_target.artifact, self._artifact_target.name
)
self._artifact_target = _WBValueArtifactTarget(artifact, name)
def _get_artifact_entry_ref_url(self) -> Optional[str]:
# If the object is coming from another artifact
if self._artifact_source and self._artifact_source.name:
ref_entry = self._artifact_source.artifact.get_path(
type(self).with_suffix(self._artifact_source.name)
)
return str(ref_entry.ref_url())
# Else, if the object is destined for another artifact and we support client IDs
elif (
self._artifact_target
and self._artifact_target.name
and self._artifact_target.artifact._client_id is not None
and self._artifact_target.artifact._final
and _server_accepts_client_ids()
):
return "wandb-client-artifact://{}/{}".format(
self._artifact_target.artifact._client_id,
type(self).with_suffix(self._artifact_target.name),
)
# Else if we do not support client IDs, but online, then block on upload
# Note: this is old behavior just to stay backwards compatible
# with older server versions. This code path should be removed
# once those versions are no longer supported. This path uses a .wait
# which blocks the user process on artifact upload.
elif (
self._artifact_target
and self._artifact_target.name
and self._artifact_target.artifact._logged_artifact is not None
and not _is_offline()
and not _server_accepts_client_ids()
):
self._artifact_target.artifact.wait()
ref_entry = self._artifact_target.artifact.get_path(
type(self).with_suffix(self._artifact_target.name)
)
return str(ref_entry.ref_url())
return None
def _get_artifact_entry_latest_ref_url(self) -> Optional[str]:
if (
self._artifact_target
and self._artifact_target.name
and self._artifact_target.artifact._client_id is not None
and self._artifact_target.artifact._final
and _server_accepts_client_ids()
):
return "wandb-client-artifact://{}:latest/{}".format(
self._artifact_target.artifact._sequence_client_id,
type(self).with_suffix(self._artifact_target.name),
)
# Else if we do not support client IDs, then block on upload
# Note: this is old behavior just to stay backwards compatible
# with older server versions. This code path should be removed
# once those versions are no longer supported. This path uses a .wait
# which blocks the user process on artifact upload.
elif (
self._artifact_target
and self._artifact_target.name
and self._artifact_target.artifact._logged_artifact is not None
and not _is_offline()
and not _server_accepts_client_ids()
):
self._artifact_target.artifact.wait()
ref_entry = self._artifact_target.artifact.get_path(
type(self).with_suffix(self._artifact_target.name)
)
return str(ref_entry.ref_url())
return None
class Histogram(WBValue):
"""wandb class for histograms.
This object works just like numpy's histogram function
https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html
Examples:
Generate histogram from a sequence
```python
wandb.Histogram([1,2,3])
```
Efficiently initialize from np.histogram.
```python
hist = np.histogram(data)
wandb.Histogram(np_histogram=hist)
```
Arguments:
sequence: (array_like) input data for histogram
np_histogram: (numpy histogram) alternative input of a precomputed histogram
num_bins: (int) Number of bins for the histogram. The default number of bins
is 64. The maximum number of bins is 512
Attributes:
bins: ([float]) edges of bins
histogram: ([int]) number of elements falling in each bin
"""
MAX_LENGTH: int = 512
_log_type = "histogram"
def __init__(
self,
sequence: Optional[Sequence] = None,
np_histogram: Optional["NumpyHistogram"] = None,
num_bins: int = 64,
) -> None:
if np_histogram:
if len(np_histogram) == 2:
self.histogram = (
np_histogram[0].tolist()
if hasattr(np_histogram[0], "tolist")
else np_histogram[0]
)
self.bins = (
np_histogram[1].tolist()
if hasattr(np_histogram[1], "tolist")
else np_histogram[1]
)
else:
raise ValueError(
"Expected np_histogram to be a tuple of (values, bin_edges) or sequence to be specified"
)
else:
np = util.get_module(
"numpy", required="Auto creation of histograms requires numpy"
)
self.histogram, self.bins = np.histogram(sequence, bins=num_bins)
self.histogram = self.histogram.tolist()
self.bins = self.bins.tolist()
if len(self.histogram) > self.MAX_LENGTH:
raise ValueError(
"The maximum length of a histogram is %i" % self.MAX_LENGTH
)
if len(self.histogram) + 1 != len(self.bins):
raise ValueError("len(bins) must be len(histogram) + 1")
def to_json(self, run: Union["LocalRun", "LocalArtifact"] = None) -> dict:
return {"_type": self._log_type, "values": self.histogram, "bins": self.bins}
def __sizeof__(self) -> int:
"""This returns an estimated size in bytes, currently the factor of 1.7
is used to account for the JSON encoding. We use this in tb_watcher.TBHistory
"""
return int((sys.getsizeof(self.histogram) + sys.getsizeof(self.bins)) * 1.7)
class Media(WBValue):
"""A WBValue that we store as a file outside JSON and show in a media panel
on the front end.
If necessary, we move or copy the file into the Run's media directory so that it gets
uploaded.
"""
_path: Optional[str]
_run: Optional["LocalRun"]
_caption: Optional[str]
_is_tmp: Optional[bool]
_extension: Optional[str]
_sha256: Optional[str]
_size: Optional[int]
def __init__(self, caption: Optional[str] = None) -> None:
super(Media, self).__init__()
self._path = None
# The run under which this object is bound, if any.
self._run = None
self._caption = caption
def _set_file(
self, path: str, is_tmp: bool = False, extension: Optional[str] = None
) -> None:
self._path = path
self._is_tmp = is_tmp
self._extension = extension
if extension is not None and not path.endswith(extension):
raise ValueError(
'Media file extension "{}" must occur at the end of path "{}".'.format(
extension, path
)
)
with open(self._path, "rb") as f:
self._sha256 = hashlib.sha256(f.read()).hexdigest()
self._size = os.path.getsize(self._path)
@classmethod
def get_media_subdir(cls: Type["Media"]) -> str:
raise NotImplementedError
@staticmethod
def captions(
media_items: Sequence["Media"],
) -> Union[bool, Sequence[Optional[str]]]:
if media_items[0]._caption is not None:
return [m._caption for m in media_items]
else:
return False
def is_bound(self) -> bool:
return self._run is not None
def file_is_set(self) -> bool:
return self._path is not None and self._sha256 is not None
def bind_to_run(
self,
run: "LocalRun",
key: Union[int, str],
step: Union[int, str],
id_: Optional[Union[int, str]] = None,
) -> None:
"""Bind this object to a particular Run.
Calling this function is necessary so that we have somewhere specific to
put the file associated with this object, from which other Runs can
refer to it.
"""
if not self.file_is_set():
raise AssertionError("bind_to_run called before _set_file")
# The following two assertions are guaranteed to pass
# by definition file_is_set, but are needed for
# mypy to understand that these are strings below.
assert isinstance(self._path, six.string_types)
assert isinstance(self._sha256, six.string_types)
if run is None:
raise TypeError('Argument "run" must not be None.')
self._run = run
# Following assertion required for mypy
assert self._run is not None
if self._extension is None:
_, extension = os.path.splitext(os.path.basename(self._path))
else:
extension = self._extension
if id_ is None:
id_ = self._sha256[:20]
file_path = _wb_filename(key, step, id_, extension)
media_path = os.path.join(self.get_media_subdir(), file_path)
new_path = os.path.join(self._run.dir, media_path)
util.mkdir_exists_ok(os.path.dirname(new_path))
if self._is_tmp:
shutil.move(self._path, new_path)
self._path = new_path
self._is_tmp = False
_datatypes_callback(media_path)
else:
shutil.copy(self._path, new_path)
self._path = new_path
_datatypes_callback(media_path)
def to_json(self, run: Union["LocalRun", "LocalArtifact"]) -> dict:
"""Serializes the object into a JSON blob, using a run or artifact to store additional data. If `run_or_artifact`
is a wandb.Run then `self.bind_to_run()` must have previously been called.
Args:
run_or_artifact (wandb.Run | wandb.Artifact): the Run or Artifact for which this object should be generating
JSON for - this is useful to store additional data if needed.
Returns:
dict: JSON representation
"""
# NOTE: uses of Audio in this class are a temporary hack -- when Ref support moves up
# into Media itself we should get rid of them
from wandb.data_types import Audio
json_obj = {}
if isinstance(run, wandb.wandb_sdk.wandb_run.Run):
if not self.is_bound():
raise RuntimeError(
"Value of type {} must be bound to a run with bind_to_run() before being serialized to JSON.".format(
type(self).__name__
)
)
assert (
self._run is run
), "We don't support referring to media files across runs."
# The following two assertions are guaranteed to pass
# by definition is_bound, but are needed for
# mypy to understand that these are strings below.
assert isinstance(self._path, six.string_types)
json_obj.update(
{
"_type": "file", # TODO(adrian): This isn't (yet) a real media type we support on the frontend.
"path": util.to_forward_slash_path(
os.path.relpath(self._path, self._run.dir)
),
"sha256": self._sha256,
"size": self._size,
}
)
artifact_entry_url = self._get_artifact_entry_ref_url()
if artifact_entry_url is not None:
json_obj["artifact_path"] = artifact_entry_url
artifact_entry_latest_url = self._get_artifact_entry_latest_ref_url()
if artifact_entry_latest_url is not None:
json_obj["_latest_artifact_path"] = artifact_entry_latest_url
elif isinstance(run, wandb.wandb_sdk.wandb_artifacts.Artifact):
if self.file_is_set():
# The following two assertions are guaranteed to pass
# by definition of the call above, but are needed for
# mypy to understand that these are strings below.
assert isinstance(self._path, six.string_types)
assert isinstance(self._sha256, six.string_types)
artifact = run # Checks if the concrete image has already been added to this artifact
name = artifact.get_added_local_path_name(self._path)
if name is None:
if self._is_tmp:
name = os.path.join(
self.get_media_subdir(), os.path.basename(self._path)
)
else:
# If the file is not temporary, include the first 20 characters of the file's SHA256 to
# avoid name collisions. This way, if there are two images `dir1/img.png` and `dir2/img.png`
# we end up with a unique path for each.
name = os.path.join(
self.get_media_subdir(),
self._sha256[:20],
os.path.basename(self._path),
)
# if not, check to see if there is a source artifact for this object
if (
self._artifact_source
is not None
# and self._artifact_source.artifact != artifact
):
default_root = self._artifact_source.artifact._default_root()
# if there is, get the name of the entry (this might make sense to move to a helper off artifact)
if self._path.startswith(default_root):
name = self._path[len(default_root) :]
name = name.lstrip(os.sep)
# Add this image as a reference
path = self._artifact_source.artifact.get_path(name)
artifact.add_reference(path.ref_url(), name=name)
elif isinstance(self, Audio) and Audio.path_is_reference(
self._path
):
artifact.add_reference(self._path, name=name)
else:
entry = artifact.add_file(
self._path, name=name, is_tmp=self._is_tmp
)
name = entry.path
json_obj["path"] = name
json_obj["sha256"] = self._sha256
json_obj["_type"] = self._log_type
return json_obj
@classmethod
def from_json(
cls: Type["Media"], json_obj: dict, source_artifact: "PublicArtifact"
) -> "Media":
"""Likely will need to override for any more complicated media objects"""
return cls(source_artifact.get_path(json_obj["path"]).download())
def __eq__(self, other: object) -> bool:
"""Likely will need to override for any more complicated media objects"""
return (
isinstance(other, self.__class__)
and hasattr(self, "_sha256")
and hasattr(other, "_sha256")
and self._sha256 == other._sha256
)
class BatchableMedia(Media):
"""Parent class for Media we treat specially in batches, like images and
thumbnails.
Apart from images, we just use these batches to help organize files by name
in the media directory.
"""
def __init__(self) -> None:
super(BatchableMedia, self).__init__()
@classmethod
def seq_to_json(
cls: Type["BatchableMedia"],
seq: Sequence["BatchableMedia"],
run: "LocalRun",
key: str,
step: Union[int, str],
) -> dict:
raise NotImplementedError
class Object3D(BatchableMedia):
"""
Wandb class for 3D point clouds.
Arguments:
data_or_path: (numpy array, string, io)
Object3D can be initialized from a file or a numpy array.
The file types supported are obj, gltf, babylon, stl. You can pass a path to
a file or an io object and a file_type which must be one of `'obj', 'gltf', 'babylon', 'stl'`.
The shape of the numpy array must be one of either:
```python
[[x y z], ...] nx3
[[x y z c], ...] nx4 where c is a category with supported range [1, 14]
[[x y z r g b], ...] nx6 where r, g, b are color values
```
"""
SUPPORTED_TYPES: ClassVar[Set[str]] = set(
["obj", "gltf", "glb", "babylon", "stl", "pts.json"]
)
_log_type: ClassVar[str] = "object3D-file"
def __init__(
self, data_or_path: Union["np.ndarray", str, "TextIO"], **kwargs: str
) -> None:
super(Object3D, self).__init__()
if hasattr(data_or_path, "name"):
# if the file has a path, we just detect the type and copy it from there
data_or_path = data_or_path.name # type: ignore
if hasattr(data_or_path, "read"):
if hasattr(data_or_path, "seek"):
data_or_path.seek(0) # type: ignore
object_3d = data_or_path.read() # type: ignore
extension = kwargs.pop("file_type", None)
if extension is None:
raise ValueError(
"Must pass file type keyword argument when using io objects."
)
if extension not in Object3D.SUPPORTED_TYPES:
raise ValueError(
"Object 3D only supports numpy arrays or files of the type: "
+ ", ".join(Object3D.SUPPORTED_TYPES)
)
tmp_path = os.path.join(
_MEDIA_TMP.name, util.generate_id() + "." + extension
)
with open(tmp_path, "w") as f:
f.write(object_3d)
self._set_file(tmp_path, is_tmp=True)
elif isinstance(data_or_path, six.string_types):
path = data_or_path
extension = None
for supported_type in Object3D.SUPPORTED_TYPES:
if path.endswith(supported_type):
extension = supported_type
break
if not extension:
raise ValueError(
"File '"
+ path
+ "' is not compatible with Object3D: supported types are: "
+ ", ".join(Object3D.SUPPORTED_TYPES)
)
self._set_file(data_or_path, is_tmp=False)
# Supported different types and scene for 3D scenes
elif isinstance(data_or_path, dict) and "type" in data_or_path:
if data_or_path["type"] == "lidar/beta":
data = {
"type": data_or_path["type"],
"vectors": data_or_path["vectors"].tolist()
if "vectors" in data_or_path
else [],
"points": data_or_path["points"].tolist()
if "points" in data_or_path
else [],
"boxes": data_or_path["boxes"].tolist()
if "boxes" in data_or_path
else [],
}
else:
raise ValueError(
"Type not supported, only 'lidar/beta' is currently supported"
)
tmp_path = os.path.join(_MEDIA_TMP.name, util.generate_id() + ".pts.json")
json.dump(
data,
codecs.open(tmp_path, "w", encoding="utf-8"),
separators=(",", ":"),
sort_keys=True,
indent=4,
)
self._set_file(tmp_path, is_tmp=True, extension=".pts.json")
elif _is_numpy_array(data_or_path):
np_data = data_or_path
# The following assertion is required for numpy to trust that
# np_data is numpy array. The reason it is behind a False
# guard is to ensure that this line does not run at runtime,
# which would cause a runtime error if the user's machine did
# not have numpy installed.
if TYPE_CHECKING:
assert isinstance(np_data, np.ndarray)
if len(np_data.shape) != 2 or np_data.shape[1] not in {3, 4, 6}:
raise ValueError(
"""The shape of the numpy array must be one of either
[[x y z], ...] nx3
[[x y z c], ...] nx4 where c is a category with supported range [1, 14]
[[x y z r g b], ...] nx6 where r, g, b are color values"""
)
list_data = np_data.tolist()
tmp_path = os.path.join(_MEDIA_TMP.name, util.generate_id() + ".pts.json")
json.dump(
list_data,
codecs.open(tmp_path, "w", encoding="utf-8"),
separators=(",", ":"),
sort_keys=True,
indent=4,
)
self._set_file(tmp_path, is_tmp=True, extension=".pts.json")
else:
raise ValueError("data must be a numpy array, dict or a file object")
@classmethod
def get_media_subdir(cls: Type["Object3D"]) -> str:
return os.path.join("media", "object3D")
def to_json(self, run_or_artifact: Union["LocalRun", "LocalArtifact"]) -> dict:
json_dict = super(Object3D, self).to_json(run_or_artifact)
json_dict["_type"] = Object3D._log_type
if isinstance(run_or_artifact, wandb.wandb_sdk.wandb_artifacts.Artifact):
if self._path is None or not self._path.endswith(".pts.json"):
raise ValueError(
"Non-point cloud 3D objects are not yet supported with Artifacts"
)
return json_dict
@classmethod
def seq_to_json(
cls: Type["Object3D"],
seq: Sequence["BatchableMedia"],
run: "LocalRun",
key: str,
step: Union[int, str],
) -> dict:
seq = list(seq)
jsons = [obj.to_json(run) for obj in seq]
for obj in jsons:
expected = util.to_forward_slash_path(cls.get_media_subdir())
if not obj["path"].startswith(expected):
raise ValueError(
"Files in an array of Object3D's must be in the {} directory, not {}".format(
expected, obj["path"]
)
)
return {
"_type": "object3D",
"filenames": [
os.path.relpath(j["path"], cls.get_media_subdir()) for j in jsons
],
"count": len(jsons),
"objects": jsons,
}
class Molecule(BatchableMedia):
"""
Wandb class for Molecular data
Arguments:
data_or_path: (string, io)
Molecule can be initialized from a file name or an io object.
"""
SUPPORTED_TYPES = set(
["pdb", "pqr", "mmcif", "mcif", "cif", "sdf", "sd", "gro", "mol2", "mmtf"]
)
_log_type = "molecule-file"
def __init__(self, data_or_path: Union[str, "TextIO"], **kwargs: str) -> None:
super(Molecule, self).__init__()
if hasattr(data_or_path, "name"):
# if the file has a path, we just detect the type and copy it from there
data_or_path = data_or_path.name # type: ignore
if hasattr(data_or_path, "read"):
if hasattr(data_or_path, "seek"):
data_or_path.seek(0) # type: ignore
molecule = data_or_path.read() # type: ignore
extension = kwargs.pop("file_type", None)
if extension is None:
raise ValueError(
"Must pass file type keyword argument when using io objects."
)
if extension not in Molecule.SUPPORTED_TYPES:
raise ValueError(
"Molecule 3D only supports files of the type: "
+ ", ".join(Molecule.SUPPORTED_TYPES)
)
tmp_path = os.path.join(
_MEDIA_TMP.name, util.generate_id() + "." + extension
)
with open(tmp_path, "w") as f:
f.write(molecule)
self._set_file(tmp_path, is_tmp=True)
elif isinstance(data_or_path, six.string_types):
extension = os.path.splitext(data_or_path)[1][1:]
if extension not in Molecule.SUPPORTED_TYPES:
raise ValueError(
"Molecule only supports files of the type: "
+ ", ".join(Molecule.SUPPORTED_TYPES)
)
self._set_file(data_or_path, is_tmp=False)
else:
raise ValueError("Data must be file name or a file object")
@classmethod
def get_media_subdir(cls: Type["Molecule"]) -> str:
return os.path.join("media", "molecule")
def to_json(self, run_or_artifact: Union["LocalRun", "LocalArtifact"]) -> dict:
json_dict = super(Molecule, self).to_json(run_or_artifact)
json_dict["_type"] = self._log_type
if self._caption:
json_dict["caption"] = self._caption
return json_dict
@classmethod
def seq_to_json(
cls: Type["Molecule"],
seq: Sequence["BatchableMedia"],
run: "LocalRun",
key: str,
step: Union[int, str],
) -> dict:
seq = list(seq)
jsons = [obj.to_json(run) for obj in seq]
for obj in jsons:
expected = util.to_forward_slash_path(cls.get_media_subdir())
if not obj["path"].startswith(expected):
raise ValueError(
"Files in an array of Molecule's must be in the {} directory, not {}".format(
cls.get_media_subdir(), obj["path"]
)
)
return {
"_type": "molecule",
"filenames": [obj["path"] for obj in jsons],
"count": len(jsons),
"captions": Media.captions(seq),
}
class Html(BatchableMedia):
"""
Wandb class for arbitrary html
Arguments:
data: (string or io object) HTML to display in wandb
inject: (boolean) Add a stylesheet to the HTML object. If set
to False the HTML will pass through unchanged.
"""
_log_type = "html-file"
def __init__(self, data: Union[str, "TextIO"], inject: bool = True) -> None:
super(Html, self).__init__()
data_is_path = isinstance(data, six.string_types) and os.path.exists(data)
data_path = ""
if data_is_path:
assert isinstance(data, six.string_types)
data_path = data
with open(data_path, "r") as file:
self.html = file.read()
elif isinstance(data, six.string_types):
self.html = data
elif hasattr(data, "read"):
if hasattr(data, "seek"):
data.seek(0)
self.html = data.read()
else:
raise ValueError("data must be a string or an io object")
if inject:
self.inject_head()
if inject or not data_is_path:
tmp_path = os.path.join(_MEDIA_TMP.name, util.generate_id() + ".html")
with open(tmp_path, "w") as out:
out.write(self.html)
self._set_file(tmp_path, is_tmp=True)
else:
self._set_file(data_path, is_tmp=False)
def inject_head(self) -> None:
join = ""
if "<head>" in self.html:
parts = self.html.split("<head>", 1)
parts[0] = parts[0] + "<head>"
elif "<html>" in self.html:
parts = self.html.split("<html>", 1)
parts[0] = parts[0] + "<html><head>"
parts[1] = "</head>" + parts[1]
else:
parts = ["", self.html]
parts.insert(
1,
'<base target="_blank"><link rel="stylesheet" type="text/css" href="https://app.wandb.ai/normalize.css" />',
)
self.html = join.join(parts).strip()
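    # Illustration (not in the original source): inject_head() turns
    # "<html><body>hi</body></html>" into
    # "<html><head><base target="_blank"><link ...></head><body>hi</body></html>",
    # i.e. the stylesheet link is spliced into (or prepended as) a <head> element.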
@classmethod
def get_media_subdir(cls: Type["Html"]) -> str:
return os.path.join("media", "html")
def to_json(self, run_or_artifact: Union["LocalRun", "LocalArtifact"]) -> dict:
json_dict = super(Html, self).to_json(run_or_artifact)
json_dict["_type"] = self._log_type
return json_dict
@classmethod
def from_json(
cls: Type["Html"], json_obj: dict, source_artifact: "PublicArtifact"
) -> "Html":
return cls(source_artifact.get_path(json_obj["path"]).download(), inject=False)
@classmethod
def seq_to_json(
cls: Type["Html"],
seq: Sequence["BatchableMedia"],
run: "LocalRun",
key: str,
step: Union[int, str],
) -> dict:
base_path = os.path.join(run.dir, cls.get_media_subdir())
util.mkdir_exists_ok(base_path)
meta = {
"_type": "html",
"count": len(seq),
"html": [h.to_json(run) for h in seq],
}
return meta
class Video(BatchableMedia):
"""
Wandb representation of video.
Arguments:
data_or_path: (numpy array, string, io)
Video can be initialized with a path to a file or an io object.
The format must be "gif", "mp4", "webm" or "ogg".
The format must be specified with the format argument.
Video can be initialized with a numpy tensor.
The numpy tensor must be either 4 dimensional or 5 dimensional.
Channels should be (time, channel, height, width) or
(batch, time, channel, height, width)
caption: (string) caption associated with the video for display
fps: (int) frames per second for video. Default is 4.
format: (string) format of video, necessary if initializing with path or io object.
"""
_log_type = "video-file"
EXTS = ("gif", "mp4", "webm", "ogg")
_width: Optional[int]
_height: Optional[int]
def __init__(
self,
data_or_path: Union["np.ndarray", str, "TextIO"],
caption: Optional[str] = None,
fps: int = 4,
format: Optional[str] = None,
):
super(Video, self).__init__()
self._fps = fps
self._format = format or "gif"
self._width = None
self._height = None
self._channels = None
self._caption = caption
if self._format not in Video.EXTS:
raise ValueError("wandb.Video accepts %s formats" % ", ".join(Video.EXTS))
if isinstance(data_or_path, six.BytesIO):
filename = os.path.join(
_MEDIA_TMP.name, util.generate_id() + "." + self._format
)
with open(filename, "wb") as f:
f.write(data_or_path.read())
self._set_file(filename, is_tmp=True)
elif isinstance(data_or_path, six.string_types):
_, ext = os.path.splitext(data_or_path)
ext = ext[1:].lower()
if ext not in Video.EXTS:
raise ValueError(
"wandb.Video accepts %s formats" % ", ".join(Video.EXTS)
)
self._set_file(data_or_path, is_tmp=False)
# ffprobe -v error -select_streams v:0 -show_entries stream=width,height -of csv=p=0 data_or_path
else:
if hasattr(data_or_path, "numpy"): # TF data eager tensors
self.data = data_or_path.numpy() # type: ignore
elif _is_numpy_array(data_or_path):
self.data = data_or_path
else:
raise ValueError(
"wandb.Video accepts a file path or numpy like data as input"
)
self.encode()
def encode(self) -> None:
mpy = util.get_module(
"moviepy.editor",
required='wandb.Video requires moviepy and imageio when passing raw data. Install with "pip install moviepy imageio"',
)
tensor = self._prepare_video(self.data)
_, self._height, self._width, self._channels = tensor.shape
# encode sequence of images into gif string
clip = mpy.ImageSequenceClip(list(tensor), fps=self._fps)
filename = os.path.join(
_MEDIA_TMP.name, util.generate_id() + "." + self._format
)
if TYPE_CHECKING:
kwargs: Dict[str, Optional[bool]] = {}
try: # older versions of moviepy do not support logger argument
kwargs = {"logger": None}
if self._format == "gif":
clip.write_gif(filename, **kwargs)
else:
clip.write_videofile(filename, **kwargs)
except TypeError:
try: # even older versions of moviepy do not support progress_bar argument
kwargs = {"verbose": False, "progress_bar": False}
if self._format == "gif":
clip.write_gif(filename, **kwargs)
else:
clip.write_videofile(filename, **kwargs)
except TypeError:
kwargs = {
"verbose": False,
}
if self._format == "gif":
clip.write_gif(filename, **kwargs)
else:
clip.write_videofile(filename, **kwargs)
self._set_file(filename, is_tmp=True)
@classmethod
def get_media_subdir(cls: Type["Video"]) -> str:
return os.path.join("media", "videos")
def to_json(self, run_or_artifact: Union["LocalRun", "LocalArtifact"]) -> dict:
json_dict = super(Video, self).to_json(run_or_artifact)
json_dict["_type"] = self._log_type
if self._width is not None:
json_dict["width"] = self._width
if self._height is not None:
json_dict["height"] = self._height
if self._caption:
json_dict["caption"] = self._caption
return json_dict
def _prepare_video(self, video: "np.ndarray") -> "np.ndarray":
"""This logic was mostly taken from tensorboardX"""
np = util.get_module(
"numpy",
required='wandb.Video requires numpy when passing raw data. To get it, run "pip install numpy".',
)
if video.ndim < 4:
raise ValueError(
"Video must be atleast 4 dimensions: time, channels, height, width"
)
if video.ndim == 4:
video = video.reshape(1, *video.shape)
b, t, c, h, w = video.shape
if video.dtype != np.uint8:
logging.warning("Converting video data to uint8")
video = video.astype(np.uint8)
def is_power2(num: int) -> bool:
return num != 0 and ((num & (num - 1)) == 0)
# pad to nearest power of 2, all at once
if not is_power2(video.shape[0]):
len_addition = int(2 ** video.shape[0].bit_length() - video.shape[0])
video = np.concatenate(
(video, np.zeros(shape=(len_addition, t, c, h, w))), axis=0
)
n_rows = 2 ** ((b.bit_length() - 1) // 2)
n_cols = video.shape[0] // n_rows
video = np.reshape(video, newshape=(n_rows, n_cols, t, c, h, w))
video = np.transpose(video, axes=(2, 0, 4, 1, 5, 3))
video = np.reshape(video, newshape=(t, n_rows * h, n_cols * w, c))
return video
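    # Illustration (not in the original source): a batch of 3 clips (b=3) is
    # zero-padded to 4 and tiled onto a 1 x 4 grid, while b=4 yields a 2 x 2 grid;
    # the returned array has shape (t, n_rows * h, n_cols * w, c).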
@classmethod
def seq_to_json(
cls: Type["Video"],
seq: Sequence["BatchableMedia"],
run: "LocalRun",
key: str,
step: Union[int, str],
) -> dict:
base_path = os.path.join(run.dir, cls.get_media_subdir())
util.mkdir_exists_ok(base_path)
meta = {
"_type": "videos",
"count": len(seq),
"videos": [v.to_json(run) for v in seq],
"captions": Video.captions(seq),
}
return meta
# Allows encoding of arbitrary JSON structures
# as a file
#
# This class should be used as an abstract class
# extended to have validation methods
class JSONMetadata(Media):
"""
JSONMetadata is a type for encoding arbitrary metadata as files.
"""
def __init__(self, val: dict) -> None:
super(JSONMetadata, self).__init__()
self.validate(val)
self._val = val
ext = "." + self.type_name() + ".json"
tmp_path = os.path.join(_MEDIA_TMP.name, util.generate_id() + ext)
util.json_dump_uncompressed(
self._val, codecs.open(tmp_path, "w", encoding="utf-8")
)
self._set_file(tmp_path, is_tmp=True, extension=ext)
@classmethod
def get_media_subdir(cls: Type["JSONMetadata"]) -> str:
return os.path.join("media", "metadata", cls.type_name())
def to_json(self, run_or_artifact: Union["LocalRun", "LocalArtifact"]) -> dict:
json_dict = super(JSONMetadata, self).to_json(run_or_artifact)
json_dict["_type"] = self.type_name()
return json_dict
# These methods should be overridden in the child class
@classmethod
def type_name(cls) -> str:
return "metadata"
def validate(self, val: dict) -> bool:
return True
class ImageMask(Media):
"""
Wandb class for image masks or overlays, useful for tasks like semantic segmentation.
Arguments:
val: (dictionary)
One of these two keys to represent the image:
mask_data : (2D numpy array) The mask containing an integer class label
for each pixel in the image
path : (string) The path to a saved image file of the mask
class_labels : (dictionary of integers to strings, optional) A mapping of the
integer class labels in the mask to readable class names. These will default
to class_0, class_1, class_2, etc.
key: (string)
The readable name or id for this mask type (e.g. predictions, ground_truth)
Examples:
Log a mask overlay for a given image
```python
predicted_mask = np.array([[1, 2, 2, ... , 3, 2, 1], ...])
ground_truth_mask = np.array([[1, 1, 1, ... , 2, 3, 1], ...])
class_labels = {
0: "person",
1: "tree",
2: "car",
3: "road"
}
masked_image = wandb.Image(image, masks={
"predictions": {
"mask_data": predicted_mask,
"class_labels": class_labels
},
"ground_truth": {
"mask_data": ground_truth_mask,
"class_labels": class_labels
}
}
wandb.log({"img_with_masks" : masked_image})
```
Prepare an image mask to be added to a wandb.Table
```python
raw_image_path = "sample_image.png"
predicted_mask_path = "predicted_mask.png"
class_set = wandb.Classes([
{"name" : "person", "id" : 0},
{"name" : "tree", "id" : 1},
{"name" : "car", "id" : 2},
{"name" : "road", "id" : 3}
])
masked_image = wandb.Image(raw_image_path, classes=class_set,
masks={"prediction" : {"path" : predicted_mask_path}})
```
"""
_log_type = "mask"
def __init__(self, val: dict, key: str) -> None:
"""
Arguments:
val: (dictionary)
One of these two keys to represent the image:
mask_data : (2D numpy array) The mask containing an integer class label
for each pixel in the image
path : (string) The path to a saved image file of the mask
class_labels : (dictionary of integers to strings, optional) A mapping of the
integer class labels in the mask to readable class names. These will default
to class_0, class_1, class_2, etc.
key: (string)
The readable name or id for this mask type (e.g. predictions, ground_truth)
"""
super(ImageMask, self).__init__()
if "path" in val:
self._set_file(val["path"])
else:
np = util.get_module("numpy", required="Image mask support requires numpy")
# Add default class mapping
if "class_labels" not in val:
classes = np.unique(val["mask_data"]).astype(np.int32).tolist()
class_labels = dict((c, "class_" + str(c)) for c in classes)
val["class_labels"] = class_labels
self.validate(val)
self._val = val
self._key = key
ext = "." + self.type_name() + ".png"
tmp_path = os.path.join(_MEDIA_TMP.name, util.generate_id() + ext)
pil_image = util.get_module(
"PIL.Image",
required='wandb.Image needs the PIL package. To get it, run "pip install pillow".',
)
image = pil_image.fromarray(val["mask_data"].astype(np.int8), mode="L")
image.save(tmp_path, transparency=None)
self._set_file(tmp_path, is_tmp=True, extension=ext)
def bind_to_run(
self,
run: "LocalRun",
key: Union[int, str],
step: Union[int, str],
id_: Optional[Union[int, str]] = None,
) -> None:
# bind_to_run key argument is the Image parent key
# the self._key value is the mask's sub key
super(ImageMask, self).bind_to_run(run, key, step, id_=id_)
class_labels = self._val["class_labels"]
run._add_singleton(
"mask/class_labels",
str(key) + "_wandb_delimeter_" + self._key,
class_labels,
)
@classmethod
def get_media_subdir(cls: Type["ImageMask"]) -> str:
return os.path.join("media", "images", cls.type_name())
@classmethod
def from_json(
cls: Type["ImageMask"], json_obj: dict, source_artifact: "PublicArtifact"
) -> "ImageMask":
return cls(
{"path": source_artifact.get_path(json_obj["path"]).download()}, key="",
)
def to_json(self, run_or_artifact: Union["LocalRun", "LocalArtifact"]) -> dict:
json_dict = super(ImageMask, self).to_json(run_or_artifact)
if isinstance(run_or_artifact, wandb.wandb_sdk.wandb_run.Run):
json_dict["_type"] = self.type_name()
return json_dict
elif isinstance(run_or_artifact, wandb.wandb_sdk.wandb_artifacts.Artifact):
# Nothing special to add (used to add "digest", but no longer used.)
return json_dict
else:
raise ValueError("to_json accepts wandb_run.Run or wandb_artifact.Artifact")
@classmethod
def type_name(cls: Type["ImageMask"]) -> str:
return cls._log_type
def validate(self, val: dict) -> bool:
np = util.get_module("numpy", required="Image mask support requires numpy")
# TODO: make this work with all 2D tensor(-like) types, not just numpy arrays
if "mask_data" not in val:
raise TypeError(
'Missing key "mask_data": An image mask requires mask data: a 2D array representing the predictions'
)
else:
error_str = "mask_data must be a 2D array"
shape = val["mask_data"].shape
if len(shape) != 2:
raise TypeError(error_str)
if not (
(val["mask_data"] >= 0).all() and (val["mask_data"] <= 255).all()
) and issubclass(val["mask_data"].dtype.type, np.integer):
raise TypeError("Mask data must be integers between 0 and 255")
# Optional argument
if "class_labels" in val:
for k, v in list(val["class_labels"].items()):
if (not isinstance(k, numbers.Number)) or (
not isinstance(v, six.string_types)
):
raise TypeError(
"Class labels must be a dictionary of numbers to strings"
)
return True
class BoundingBoxes2D(JSONMetadata):
"""
Wandb class for logging 2D bounding boxes on images, useful for tasks like object detection
Arguments:
val: (dictionary) A dictionary of the following form:
box_data: (list of dictionaries) One dictionary for each bounding box, containing:
position: (dictionary) the position and size of the bounding box, in one of two formats
Note that boxes need not all use the same format.
{"minX", "minY", "maxX", "maxY"}: (dictionary) A set of coordinates defining
the upper and lower bounds of the box (the bottom left and top right corners)
{"middle", "width", "height"}: (dictionary) A set of coordinates defining the
center and dimensions of the box, with "middle" as a list [x, y] for the
center point and "width" and "height" as numbers
domain: (string) One of two options for the bounding box coordinate domain
null: By default, or if no argument is passed, the coordinate domain
is assumed to be relative to the original image, expressing this box as a fraction
or percentage of the original image. This means all coordinates and dimensions
passed into the "position" argument are floating point numbers between 0 and 1.
"pixel": (string literal) The coordinate domain is set to the pixel space. This means all
coordinates and dimensions passed into "position" are integers within the bounds
of the image dimensions.
class_id: (integer) The class label id for this box
scores: (dictionary of string to number, optional) A mapping of named fields
to numerical values (float or int), can be used for filtering boxes in the UI
based on a range of values for the corresponding field
box_caption: (string, optional) A string to be displayed as the label text above this
box in the UI, often composed of the class label, class name, and/or scores
class_labels: (dictionary, optional) A map of integer class labels to their readable class names
key: (string)
The readable name or id for this set of bounding boxes (e.g. predictions, ground_truth)
Examples:
Log a set of predicted and ground truth bounding boxes for a given image
```python
class_labels = {
0: "person",
1: "car",
2: "road",
3: "building"
}
img = wandb.Image(image, boxes={
"predictions": {
"box_data": [
{
# one box expressed in the default relative/fractional domain
"position": {
"minX": 0.1,
"maxX": 0.2,
"minY": 0.3,
"maxY": 0.4
},
"class_id" : 1,
"box_caption": class_labels[1],
"scores" : {
"acc": 0.2,
"loss": 1.2
}
},
{
# another box expressed in the pixel domain
"position": {
"middle": [150, 20],
"width": 68,
"height": 112
},
"domain" : "pixel",
"class_id" : 3,
"box_caption": "a building",
"scores" : {
"acc": 0.5,
"loss": 0.7
}
},
...
# Log as many boxes as needed
],
"class_labels": class_labels
},
# Log each meaningful group of boxes with a unique key name
"ground_truth": {
...
}
})
wandb.log({"driving_scene": img})
```
Prepare an image with bounding boxes to be added to a wandb.Table
```python
raw_image_path = "sample_image.png"
class_set = wandb.Classes([
{"name" : "person", "id" : 0},
{"name" : "car", "id" : 1},
{"name" : "road", "id" : 2},
{"name" : "building", "id" : 3}
])
image_with_boxes = wandb.Image(raw_image_path, classes=class_set,
boxes=[...identical to previous example...])
```
"""
_log_type = "bounding-boxes"
# TODO: when the change is made to have this produce a dict with a _type, define
# it here as _log_type, associate it in to_json
def __init__(self, val: dict, key: str) -> None:
"""
Arguments:
val: (dictionary) A dictionary of the following form:
box_data: (list of dictionaries) One dictionary for each bounding box, containing:
position: (dictionary) the position and size of the bounding box, in one of two formats
Note that boxes need not all use the same format.
{"minX", "minY", "maxX", "maxY"}: (dictionary) A set of coordinates defining
the upper and lower bounds of the box (the bottom left and top right corners)
{"middle", "width", "height"}: (dictionary) A set of coordinates defining the
center and dimensions of the box, with "middle" as a list [x, y] for the
center point and "width" and "height" as numbers
domain: (string) One of two options for the bounding box coordinate domain
null: By default, or if no argument is passed, the coordinate domain
is assumed to be relative to the original image, expressing this box as a fraction
or percentage of the original image. This means all coordinates and dimensions
passed into the "position" argument are floating point numbers between 0 and 1.
"pixel": (string literal) The coordinate domain is set to the pixel space. This means all
coordinates and dimensions passed into "position" are integers within the bounds
of the image dimensions.
class_id: (integer) The class label id for this box
scores: (dictionary of string to number, optional) A mapping of named fields
to numerical values (float or int), can be used for filtering boxes in the UI
based on a range of values for the corresponding field
box_caption: (string, optional) A string to be displayed as the label text above this
box in the UI, often composed of the class label, class name, and/or scores
class_labels: (dictionary, optional) A map of integer class labels to their readable class names
key: (string)
The readable name or id for this set of bounding boxes (e.g. predictions, ground_truth)
"""
super(BoundingBoxes2D, self).__init__(val)
self._val = val["box_data"]
self._key = key
# Add default class mapping
if "class_labels" not in val:
np = util.get_module(
"numpy", required="Bounding box support requires numpy"
)
classes = (
np.unique(list([box["class_id"] for box in val["box_data"]]))
.astype(np.int32)
.tolist()
)
class_labels = dict((c, "class_" + str(c)) for c in classes)
self._class_labels = class_labels
else:
self._class_labels = val["class_labels"]
def bind_to_run(
self,
run: "LocalRun",
key: Union[int, str],
step: Union[int, str],
id_: Optional[Union[int, str]] = None,
) -> None:
# bind_to_run key argument is the Image parent key
# the self._key value is the mask's sub key
super(BoundingBoxes2D, self).bind_to_run(run, key, step, id_=id_)
run._add_singleton(
"bounding_box/class_labels",
str(key) + "_wandb_delimeter_" + self._key,
self._class_labels,
)
@classmethod
def type_name(cls) -> str:
return "boxes2D"
def validate(self, val: dict) -> bool:
# Optional argument
if "class_labels" in val:
for k, v in list(val["class_labels"].items()):
if (not isinstance(k, numbers.Number)) or (
not isinstance(v, six.string_types)
):
raise TypeError(
"Class labels must be a dictionary of numbers to string"
)
boxes = val["box_data"]
if not isinstance(boxes, list):
raise TypeError("Boxes must be a list")
for box in boxes:
# Required arguments
error_str = "Each box must contain a position with: middle, width, and height or \
\nminX, maxX, minY, maxY."
if "position" not in box:
raise TypeError(error_str)
else:
valid = False
if (
"middle" in box["position"]
and len(box["position"]["middle"]) == 2
and has_num(box["position"], "width")
and has_num(box["position"], "height")
):
valid = True
elif (
has_num(box["position"], "minX")
and has_num(box["position"], "maxX")
and has_num(box["position"], "minY")
and has_num(box["position"], "maxY")
):
valid = True
if not valid:
raise TypeError(error_str)
# Optional arguments
if ("scores" in box) and not isinstance(box["scores"], dict):
raise TypeError("Box scores must be a dictionary")
elif "scores" in box:
for k, v in list(box["scores"].items()):
if not isinstance(k, six.string_types):
raise TypeError("A score key must be a string")
if not isinstance(v, numbers.Number):
raise TypeError("A score value must be a number")
if ("class_id" in box) and not isinstance(
box["class_id"], six.integer_types
):
raise TypeError("A box's class_id must be an integer")
# Optional
if ("box_caption" in box) and not isinstance(
box["box_caption"], six.string_types
):
raise TypeError("A box's caption must be a string")
return True
def to_json(self, run_or_artifact: Union["LocalRun", "LocalArtifact"]) -> dict:
if isinstance(run_or_artifact, wandb.wandb_sdk.wandb_run.Run):
return super(BoundingBoxes2D, self).to_json(run_or_artifact)
elif isinstance(run_or_artifact, wandb.wandb_sdk.wandb_artifacts.Artifact):
# TODO (tim): I would like to log out a proper dictionary representing this object, but don't
# want to mess with the visualizations that are currently available in the UI. This really should output
# an object with a _type key. Will need to push this change to the UI first to ensure backwards compat
return self._val
else:
raise ValueError("to_json accepts wandb_run.Run or wandb_artifact.Artifact")
@classmethod
def from_json(
cls: Type["BoundingBoxes2D"], json_obj: dict, source_artifact: "PublicArtifact"
) -> "BoundingBoxes2D":
return cls({"box_data": json_obj}, "")
class Classes(Media):
_log_type = "classes"
_class_set: Sequence[dict]
def __init__(self, class_set: Sequence[dict]) -> None:
"""Classes is holds class metadata intended to be used in concert with other objects when visualizing artifacts
Args:
class_set (list): list of dicts in the form of {"id":int|str, "name":str}
"""
super(Classes, self).__init__()
for class_obj in class_set:
assert "id" in class_obj and "name" in class_obj
self._class_set = class_set
@classmethod
def from_json(
cls: Type["Classes"],
json_obj: dict,
source_artifact: Optional["PublicArtifact"],
) -> "Classes":
return cls(json_obj.get("class_set")) # type: ignore
def to_json(
self, run_or_artifact: Optional[Union["LocalRun", "LocalArtifact"]]
) -> dict:
json_obj = {}
# This is a bit of a hack to allow _ClassesIdType to
# be able to operate fully without an artifact in play.
# In all other cases, artifact should be a true artifact.
if run_or_artifact is not None:
json_obj = super(Classes, self).to_json(run_or_artifact)
json_obj["_type"] = Classes._log_type
json_obj["class_set"] = self._class_set
return json_obj
def get_type(self) -> "_ClassesIdType":
return _ClassesIdType(self)
def __ne__(self, other: object) -> bool:
return not self.__eq__(other)
def __eq__(self, other: object) -> bool:
if isinstance(other, Classes):
return self._class_set == other._class_set
else:
return False
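# Editor's note: illustrative usage of Classes, mirroring the examples in the
# ImageMask and BoundingBoxes2D docstrings above:
#
#   class_set = wandb.Classes([
#       {"name": "person", "id": 0},
#       {"name": "car", "id": 1},
#   ])
#   # class_set can then be passed to wandb.Image(..., classes=class_set) or
#   # added to an artifact next to the images/tables that reference it.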
class Image(BatchableMedia):
"""
Wandb class for images.
Arguments:
data_or_path: (numpy array, string, io) Accepts numpy array of
image data, or a PIL image. The class attempts to infer
the data format and converts it.
mode: (string) The PIL mode for an image. Most common are "L", "RGB",
"RGBA". Full explanation at https://pillow.readthedocs.io/en/4.2.x/handbook/concepts.html#concept-modes.
caption: (string) Label for display of image.
"""
MAX_ITEMS = 108
# PIL limit
MAX_DIMENSION = 65500
_log_type = "image-file"
format: Optional[str]
_grouping: Optional[str]
_caption: Optional[str]
_width: Optional[int]
_height: Optional[int]
_image: Optional["PIL.Image"]
_classes: Optional["Classes"]
_boxes: Optional[Dict[str, "BoundingBoxes2D"]]
_masks: Optional[Dict[str, "ImageMask"]]
def __init__(
self,
data_or_path: "ImageDataOrPathType",
mode: Optional[str] = None,
caption: Optional[str] = None,
grouping: Optional[str] = None,
classes: Optional[Union["Classes", Sequence[dict]]] = None,
boxes: Optional[Union[Dict[str, "BoundingBoxes2D"], Dict[str, dict]]] = None,
masks: Optional[Union[Dict[str, "ImageMask"], Dict[str, dict]]] = None,
) -> None:
super(Image, self).__init__()
# TODO: We should remove grouping, it's a terrible name and I don't
# think anyone uses it.
self._grouping = None
self._caption = None
self._width = None
self._height = None
self._image = None
self._classes = None
self._boxes = None
self._masks = None
# Allows the user to pass an Image object as the first parameter and have a perfect copy,
# only overriding additional metadata passed in. If this pattern is compelling, we can generalize.
if isinstance(data_or_path, Image):
self._initialize_from_wbimage(data_or_path)
elif isinstance(data_or_path, six.string_types):
self._initialize_from_path(data_or_path)
else:
self._initialize_from_data(data_or_path, mode)
self._set_initialization_meta(grouping, caption, classes, boxes, masks)
def _set_initialization_meta(
self,
grouping: Optional[str] = None,
caption: Optional[str] = None,
classes: Optional[Union["Classes", Sequence[dict]]] = None,
boxes: Optional[Union[Dict[str, "BoundingBoxes2D"], Dict[str, dict]]] = None,
masks: Optional[Union[Dict[str, "ImageMask"], Dict[str, dict]]] = None,
) -> None:
if grouping is not None:
self._grouping = grouping
if caption is not None:
self._caption = caption
if classes is not None:
if not isinstance(classes, Classes):
self._classes = Classes(classes)
else:
self._classes = classes
if boxes:
if not isinstance(boxes, dict):
raise ValueError('Images "boxes" argument must be a dictionary')
boxes_final: Dict[str, BoundingBoxes2D] = {}
for key in boxes:
box_item = boxes[key]
if isinstance(box_item, BoundingBoxes2D):
boxes_final[key] = box_item
elif isinstance(box_item, dict):
boxes_final[key] = BoundingBoxes2D(box_item, key)
self._boxes = boxes_final
if masks:
if not isinstance(masks, dict):
raise ValueError('Images "masks" argument must be a dictionary')
masks_final: Dict[str, ImageMask] = {}
for key in masks:
mask_item = masks[key]
if isinstance(mask_item, ImageMask):
masks_final[key] = mask_item
elif isinstance(mask_item, dict):
masks_final[key] = ImageMask(mask_item, key)
self._masks = masks_final
self._width, self._height = self.image.size # type: ignore
self._free_ram()
def _initialize_from_wbimage(self, wbimage: "Image") -> None:
self._grouping = wbimage._grouping
self._caption = wbimage._caption
self._width = wbimage._width
self._height = wbimage._height
self._image = wbimage._image
self._classes = wbimage._classes
self._path = wbimage._path
self._is_tmp = wbimage._is_tmp
self._extension = wbimage._extension
self._sha256 = wbimage._sha256
self._size = wbimage._size
self.format = wbimage.format
self._artifact_source = wbimage._artifact_source
self._artifact_target = wbimage._artifact_target
# We do not want to implicitly copy boxes or masks, just the image-related data.
# self._boxes = wbimage._boxes
# self._masks = wbimage._masks
def _initialize_from_path(self, path: str) -> None:
pil_image = util.get_module(
"PIL.Image",
required='wandb.Image needs the PIL package. To get it, run "pip install pillow".',
)
self._set_file(path, is_tmp=False)
self._image = pil_image.open(path)
self._image.load()
ext = os.path.splitext(path)[1][1:]
self.format = ext
def _initialize_from_data(self, data: "ImageDataType", mode: str = None,) -> None:
pil_image = util.get_module(
"PIL.Image",
required='wandb.Image needs the PIL package. To get it, run "pip install pillow".',
)
if util.is_matplotlib_typename(util.get_full_typename(data)):
buf = six.BytesIO()
util.ensure_matplotlib_figure(data).savefig(buf)
self._image = pil_image.open(buf)
elif isinstance(data, pil_image.Image):
self._image = data
elif util.is_pytorch_tensor_typename(util.get_full_typename(data)):
vis_util = util.get_module(
"torchvision.utils", "torchvision is required to render images"
)
if hasattr(data, "requires_grad") and data.requires_grad:
data = data.detach()
data = vis_util.make_grid(data, normalize=True)
self._image = pil_image.fromarray(
data.mul(255).clamp(0, 255).byte().permute(1, 2, 0).cpu().numpy()
)
else:
if hasattr(data, "numpy"): # TF data eager tensors
data = data.numpy()
if data.ndim > 2:
data = data.squeeze() # get rid of trivial dimensions as a convenience
self._image = pil_image.fromarray(
self.to_uint8(data), mode=mode or self.guess_mode(data)
)
tmp_path = os.path.join(_MEDIA_TMP.name, util.generate_id() + ".png")
self.format = "png"
self._image.save(tmp_path, transparency=None)
self._set_file(tmp_path, is_tmp=True)
@classmethod
def from_json(
cls: Type["Image"], json_obj: dict, source_artifact: "PublicArtifact"
) -> "Image":
classes = None
if json_obj.get("classes") is not None:
classes = source_artifact.get(json_obj["classes"]["path"])
masks = json_obj.get("masks")
_masks: Optional[Dict[str, ImageMask]] = None
if masks:
_masks = {}
for key in masks:
_masks[key] = ImageMask.from_json(masks[key], source_artifact)
_masks[key]._set_artifact_source(source_artifact)
_masks[key]._key = key
boxes = json_obj.get("boxes")
_boxes: Optional[Dict[str, BoundingBoxes2D]] = None
if boxes:
_boxes = {}
for key in boxes:
_boxes[key] = BoundingBoxes2D.from_json(boxes[key], source_artifact)
_boxes[key]._key = key
return cls(
source_artifact.get_path(json_obj["path"]).download(),
caption=json_obj.get("caption"),
grouping=json_obj.get("grouping"),
classes=classes,
boxes=_boxes,
masks=_masks,
)
@classmethod
def get_media_subdir(cls: Type["Image"]) -> str:
return os.path.join("media", "images")
def bind_to_run(
self,
run: "LocalRun",
key: Union[int, str],
step: Union[int, str],
id_: Optional[Union[int, str]] = None,
) -> None:
super(Image, self).bind_to_run(run, key, step, id_)
if self._boxes is not None:
for i, k in enumerate(self._boxes):
id_ = "{}{}".format(id_, i) if id_ is not None else None
self._boxes[k].bind_to_run(run, key, step, id_)
if self._masks is not None:
for i, k in enumerate(self._masks):
id_ = "{}{}".format(id_, i) if id_ is not None else None
self._masks[k].bind_to_run(run, key, step, id_)
def to_json(self, run_or_artifact: Union["LocalRun", "LocalArtifact"]) -> dict:
json_dict = super(Image, self).to_json(run_or_artifact)
json_dict["_type"] = Image._log_type
json_dict["format"] = self.format
if self._width is not None:
json_dict["width"] = self._width
if self._height is not None:
json_dict["height"] = self._height
if self._grouping:
json_dict["grouping"] = self._grouping
if self._caption:
json_dict["caption"] = self._caption
if isinstance(run_or_artifact, wandb.wandb_sdk.wandb_artifacts.Artifact):
artifact = run_or_artifact
if (
self._masks is not None or self._boxes is not None
) and self._classes is None:
raise ValueError(
"classes must be passed to wandb.Image which have masks or bounding boxes when adding to artifacts"
)
if self._classes is not None:
# Here, rather than give each class definition its own name (and entry), we
# purposely are giving a non-unique class name of /media/cls.classes.json.
# This may create user confusion if multiple different class definitions
# are expected in a single artifact. However, we want to catch this user pattern
# if it exists and dive deeper. The alternative code is provided below.
#
class_name = os.path.join("media", "cls")
#
# class_name = os.path.join(
# "media", "classes", os.path.basename(self._path) + "_cls"
# )
#
classes_entry = artifact.add(self._classes, class_name)
json_dict["classes"] = {
"type": "classes-file",
"path": classes_entry.path,
"digest": classes_entry.digest,
}
elif not isinstance(run_or_artifact, wandb.wandb_sdk.wandb_run.Run):
raise ValueError("to_json accepts wandb_run.Run or wandb_artifact.Artifact")
if self._boxes:
json_dict["boxes"] = {
k: box.to_json(run_or_artifact) for (k, box) in self._boxes.items()
}
if self._masks:
json_dict["masks"] = {
k: mask.to_json(run_or_artifact) for (k, mask) in self._masks.items()
}
return json_dict
def guess_mode(self, data: "np.ndarray") -> str:
"""
Guess what type of image the np.array is representing
"""
# TODO: do we want to support dimensions being at the beginning of the array?
if data.ndim == 2:
return "L"
elif data.shape[-1] == 3:
return "RGB"
elif data.shape[-1] == 4:
return "RGBA"
else:
raise ValueError(
"Un-supported shape for image conversion %s" % list(data.shape)
)
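# Editor's note (illustrative): guess_mode maps array shapes to PIL modes,
# e.g. (64, 64) -> "L", (64, 64, 3) -> "RGB", (64, 64, 4) -> "RGBA";
# any other trailing channel count raises ValueError.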
@classmethod
def to_uint8(cls, data: "np.ndarray") -> "np.ndarray":
"""
Converts floating point image on the range [0,1] and integer images
on the range [0,255] to uint8, clipping if necessary.
"""
np = util.get_module(
"numpy",
required="wandb.Image requires numpy if not supplying PIL Images: pip install numpy",
)
# I think it's better to check the image range vs the data type, since many
# image libraries will return floats between 0 and 255
# some images have range -1...1 or 0-1
dmin = np.min(data)
if dmin < 0:
data = (data - np.min(data)) / np.ptp(data)
if np.max(data) <= 1.0:
data = (data * 255).astype(np.int32)
# assert issubclass(data.dtype.type, np.integer), 'Illegal image format.'
return data.clip(0, 255).astype(np.uint8)
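# Editor's note: two concrete cases of the conversion above (illustrative,
# assuming numpy is installed):
#
#   Image.to_uint8(np.array([[0.0, 0.5, 1.0]]))   # -> [[0, 127, 255]]  floats in [0, 1] are scaled
#   Image.to_uint8(np.array([[-1.0, 0.0, 1.0]]))  # -> [[0, 127, 255]]  shifted by the min and rescaled first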
@classmethod
def seq_to_json(
cls: Type["Image"],
seq: Sequence["BatchableMedia"],
run: "LocalRun",
key: str,
step: Union[int, str],
) -> dict:
"""
Combines a list of images into a meta dictionary object describing the child images.
"""
if TYPE_CHECKING:
seq = cast(Sequence["Image"], seq)
jsons = [obj.to_json(run) for obj in seq]
media_dir = cls.get_media_subdir()
for obj in jsons:
expected = util.to_forward_slash_path(media_dir)
if not obj["path"].startswith(expected):
raise ValueError(
"Files in an array of Image's must be in the {} directory, not {}".format(
cls.get_media_subdir(), obj["path"]
)
)
num_images_to_log = len(seq)
width, height = seq[0].image.size # type: ignore
format = jsons[0]["format"]
def size_equals_image(image: "Image") -> bool:
img_width, img_height = image.image.size # type: ignore
return img_width == width and img_height == height # type: ignore
sizes_match = all(size_equals_image(img) for img in seq)
if not sizes_match:
logging.warning(
"Images sizes do not match. This will causes images to be display incorrectly in the UI."
)
meta = {
"_type": "images/separated",
"width": width,
"height": height,
"format": format,
"count": num_images_to_log,
}
captions = Image.all_captions(seq)
if captions:
meta["captions"] = captions
all_masks = Image.all_masks(seq, run, key, step)
if all_masks:
meta["all_masks"] = all_masks
all_boxes = Image.all_boxes(seq, run, key, step)
if all_boxes:
meta["all_boxes"] = all_boxes
return meta
@classmethod
def all_masks(
cls: Type["Image"],
images: Sequence["Image"],
run: "LocalRun",
run_key: str,
step: Union[int, str],
) -> Union[List[Optional[dict]], bool]:
all_mask_groups: List[Optional[dict]] = []
for image in images:
if image._masks:
mask_group = {}
for k in image._masks:
mask = image._masks[k]
mask_group[k] = mask.to_json(run)
all_mask_groups.append(mask_group)
else:
all_mask_groups.append(None)
if all_mask_groups and not all(x is None for x in all_mask_groups):
return all_mask_groups
else:
return False
@classmethod
def all_boxes(
cls: Type["Image"],
images: Sequence["Image"],
run: "LocalRun",
run_key: str,
step: Union[int, str],
) -> Union[List[Optional[dict]], bool]:
all_box_groups: List[Optional[dict]] = []
for image in images:
if image._boxes:
box_group = {}
for k in image._boxes:
box = image._boxes[k]
box_group[k] = box.to_json(run)
all_box_groups.append(box_group)
else:
all_box_groups.append(None)
if all_box_groups and not all(x is None for x in all_box_groups):
return all_box_groups
else:
return False
@classmethod
def all_captions(
cls: Type["Image"], images: Sequence["Media"]
) -> Union[bool, Sequence[Optional[str]]]:
return cls.captions(images)
def __ne__(self, other: object) -> bool:
return not self.__eq__(other)
def __eq__(self, other: object) -> bool:
if not isinstance(other, Image):
return False
else:
self_image = self.image
other_image = other.image
if self_image is not None:
self_image = list(self_image.getdata())
if other_image is not None:
other_image = list(other_image.getdata())
return (
self._grouping == other._grouping
and self._caption == other._caption
and self._width == other._width
and self._height == other._height
and self_image == other_image
and self._classes == other._classes
)
def to_data_array(self) -> List[Any]:
res = []
if self.image is not None:
data = list(self.image.getdata())
for i in range(self.image.height):
res.append(data[i * self.image.width : (i + 1) * self.image.width])
self._free_ram()
return res
def _free_ram(self) -> None:
if self._path is not None:
self._image = None
@property
def image(self) -> Optional["PIL.Image"]:
if self._image is None:
if self._path is not None:
pil_image = util.get_module(
"PIL.Image",
required='wandb.Image needs the PIL package. To get it, run "pip install pillow".',
)
self._image = pil_image.open(self._path)
self._image.load()
return self._image
class Plotly(Media):
"""
Wandb class for plotly plots.
Arguments:
val: matplotlib or plotly figure
"""
_log_type = "plotly-file"
@classmethod
def make_plot_media(
cls: Type["Plotly"], val: Union["plotly.Figure", "matplotlib.artist.Artist"]
) -> Union[Image, "Plotly"]:
if util.is_matplotlib_typename(util.get_full_typename(val)):
if util.matplotlib_contains_images(val):
return Image(val)
val = util.matplotlib_to_plotly(val)
return cls(val)
def __init__(self, val: Union["plotly.Figure", "matplotlib.artist.Artist"]):
super(Plotly, self).__init__()
# First, check to see if the incoming `val` object is a plotly figure
if not util.is_plotly_figure_typename(util.get_full_typename(val)):
# If it is not, but it is a matplotlib figure, then attempt to convert it to plotly
if util.is_matplotlib_typename(util.get_full_typename(val)):
if util.matplotlib_contains_images(val):
raise ValueError(
"Plotly does not currently support converting matplotlib figures containing images. \
You can convert the plot to a static image with `wandb.Image(plt)` "
)
val = util.matplotlib_to_plotly(val)
else:
raise ValueError(
"Logged plots must be plotly figures, or matplotlib plots convertible to plotly via mpl_to_plotly"
)
tmp_path = os.path.join(_MEDIA_TMP.name, util.generate_id() + ".plotly.json")
val = _numpy_arrays_to_lists(val.to_plotly_json())
util.json_dump_safer(val, codecs.open(tmp_path, "w", encoding="utf-8"))
self._set_file(tmp_path, is_tmp=True, extension=".plotly.json")
@classmethod
def get_media_subdir(cls: Type["Plotly"]) -> str:
return os.path.join("media", "plotly")
def to_json(self, run_or_artifact: Union["LocalRun", "LocalArtifact"]) -> dict:
json_dict = super(Plotly, self).to_json(run_or_artifact)
json_dict["_type"] = self._log_type
return json_dict
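# Editor's note: an illustrative sketch (not from the original module) of how a
# matplotlib figure typically flows through this wrapper, assuming matplotlib
# is installed:
#
#   import matplotlib.pyplot as plt
#   fig, ax = plt.subplots()
#   ax.plot([1, 2, 3], [4, 5, 6])
#   media = Plotly.make_plot_media(fig)   # returns Plotly here; wandb.Image if the figure embeds images
#   wandb.log({"my_plot": fig})           # val_to_json below performs the same conversion automatically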
def history_dict_to_json(
run: "Optional[LocalRun]", payload: dict, step: Optional[int] = None
) -> dict:
# Converts a History row dict's elements so they're friendly for JSON serialization.
if step is None:
# We should be at the top level of the History row; assume this key is set.
step = payload["_step"]
# We use list() here because we were still seeing cases of "RuntimeError: dictionary changed size during iteration"
for key in list(payload):
val = payload[key]
if isinstance(val, dict):
payload[key] = history_dict_to_json(run, val, step=step)
else:
payload[key] = val_to_json(run, key, val, namespace=step)
return payload
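# Editor's note: a small worked example of the conversion above (illustrative).
# Plain scalars pass through unchanged, and the top-level "_step" is propagated
# into nested dicts as the namespace:
#
#   history_dict_to_json(None, {"_step": 3, "loss": 0.5, "metrics": {"acc": 0.9}})
#   # -> {"_step": 3, "loss": 0.5, "metrics": {"acc": 0.9}}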
# TODO: refine this
def val_to_json(
run: "Optional[LocalRun]",
key: str,
val: "ValToJsonType",
namespace: Optional[Union[str, int]] = None,
) -> Union[Sequence, dict]:
# Converts a wandb datatype to its JSON representation.
if namespace is None:
raise ValueError(
"val_to_json must be called with a namespace(a step number, or 'summary') argument"
)
converted = val
typename = util.get_full_typename(val)
if util.is_pandas_data_frame(val):
val = wandb.Table(dataframe=val)
elif util.is_matplotlib_typename(typename) or util.is_plotly_typename(typename):
val = Plotly.make_plot_media(val)
elif isinstance(val, SixSequence) and all(isinstance(v, WBValue) for v in val):
assert run
# This check will break down if Image/Audio/... have child classes.
if (
len(val)
and isinstance(val[0], BatchableMedia)
and all(isinstance(v, type(val[0])) for v in val)
):
if TYPE_CHECKING:
val = cast(Sequence["BatchableMedia"], val)
items = _prune_max_seq(val)
for i, item in enumerate(items):
item.bind_to_run(run, key, namespace, id_=i)
return items[0].seq_to_json(items, run, key, namespace)
else:
# TODO(adrian): Good idea to pass on the same key here? Maybe include
# the array index?
# There is a bug here: if this array contains two arrays of the same type of
# anonymous media objects, their eventual names will collide.
# This used to happen. The frontend doesn't handle heterogeneous arrays
# raise ValueError(
# "Mixed media types in the same list aren't supported")
return [val_to_json(run, key, v, namespace=namespace) for v in val]
if isinstance(val, WBValue):
assert run
if isinstance(val, Media) and not val.is_bound():
if hasattr(val, "_log_type") and val._log_type in [
"table",
"partitioned-table",
"joined-table",
]:
# Special conditional to log tables as artifact entries as well.
# I suspect we will generalize this as we transition to storing all
# files in an artifact
# we sanitize the key to meet the constraints defined in wandb_artifacts.py
# in this case, leaving only alpha numerics or underscores.
sanitized_key = re.sub(r"[^a-zA-Z0-9_]+", "", key)
art = wandb.wandb_sdk.wandb_artifacts.Artifact(
"run-{}-{}".format(run.id, sanitized_key), "run_table"
)
art.add(val, key)
run.log_artifact(art)
# Partitioned tables and joined tables do not support being bound to runs.
if not (
hasattr(val, "_log_type")
and val._log_type in ["partitioned-table", "joined-table"]
):
val.bind_to_run(run, key, namespace)
return val.to_json(run)
return converted # type: ignore
def _is_numpy_array(data: object) -> bool:
np = util.get_module(
"numpy", required="Logging raw point cloud data requires numpy"
)
return isinstance(data, np.ndarray)
def _wb_filename(
key: Union[str, int], step: Union[str, int], id: Union[str, int], extension: str
) -> str:
return "{}_{}_{}{}".format(str(key), str(step), str(id), extension)
def _numpy_arrays_to_lists(
payload: Union[dict, Sequence, "np.ndarray"]
) -> Union[Sequence, dict, str, int, float, bool]:
# Casts all numpy arrays to lists so we don't convert them to histograms, primarily for Plotly
if isinstance(payload, dict):
res = {}
for key, val in six.iteritems(payload):
res[key] = _numpy_arrays_to_lists(val)
return res
elif isinstance(payload, SixSequence) and not isinstance(payload, six.string_types):
return [_numpy_arrays_to_lists(v) for v in payload]
elif util.is_numpy_array(payload):
if TYPE_CHECKING:
payload = cast("np.ndarray", payload)
return [_numpy_arrays_to_lists(v) for v in payload.tolist()]
# Protects against logging non serializable objects
elif isinstance(payload, Media):
return str(payload.__class__.__name__)
return payload
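# Editor's note (illustrative, assuming numpy is installed):
#
#   _numpy_arrays_to_lists({"xs": np.arange(3), "label": "run-a"})
#   # -> {"xs": [0, 1, 2], "label": "run-a"}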
def _prune_max_seq(seq: Sequence["BatchableMedia"]) -> Sequence["BatchableMedia"]:
# If media type has a max respect it
items = seq
if hasattr(seq[0], "MAX_ITEMS") and seq[0].MAX_ITEMS < len(seq): # type: ignore
logging.warning(
"Only %i %s will be uploaded."
% (seq[0].MAX_ITEMS, seq[0].__class__.__name__) # type: ignore
)
items = seq[: seq[0].MAX_ITEMS] # type: ignore
return items
def _data_frame_to_json(
df: "pd.DataFrame", run: "LocalRun", key: str, step: Union[int, str]
) -> dict:
"""!NODOC Encode a Pandas DataFrame into the JSON/backend format.
Writes the data to a file and returns a dictionary that we use to represent
it in `Summary`'s.
Arguments:
df (pandas.DataFrame): The DataFrame. Must not have columns named
"wandb_run_id" or "wandb_data_frame_id". They will be added to the
DataFrame here.
run (wandb_run.Run): The Run the DataFrame is associated with. We need
this because the information we store on the DataFrame is derived
from the Run it's in.
key (str): Name of the DataFrame, i.e. the summary key path in which it's
stored. This is for convenience, so people exploring the
directory tree can have some idea of what is in the Parquet files.
step: History step or "summary".
Returns:
A dict representing the DataFrame that we can store in summaries or
histories. This is the format:
{
'_type': 'data-frame',
# Magic field that indicates that this object is a data frame as
# opposed to a normal dictionary or anything else.
'id': 'asdf',
# ID for the data frame that is unique to this Run.
'format': 'parquet',
# The file format in which the data frame is stored. Currently can
# only be Parquet.
'project': 'wfeas',
# (Current) name of the project that this Run is in. It'd be
# better to store the project's ID because we know it'll never
# change but we don't have that here. We store this just in
# case because we use the project name in identifiers on the
# back end.
'path': 'media/data_frames/sdlk.parquet',
# Path to the Parquet file in the Run directory.
}
"""
pandas = util.get_module("pandas")
fastparquet = util.get_module("fastparquet")
missing_reqs = []
if not pandas:
missing_reqs.append("pandas")
if not fastparquet:
missing_reqs.append("fastparquet")
if len(missing_reqs) > 0:
raise wandb.Error(
"Failed to save data frame. Please run 'pip install %s'"
% " ".join(missing_reqs)
)
data_frame_id = util.generate_id()
df = df.copy() # we don't want to modify the user's DataFrame instance.
for _, series in df.items():
for i, val in enumerate(series):
if isinstance(val, WBValue):
series.iat[i] = six.text_type(
json.dumps(val_to_json(run, key, val, namespace=step))
)
# We have to call this wandb_run_id because that name is treated specially by
# our filtering code
df["wandb_run_id"] = pandas.Series(
[six.text_type(run.id)] * len(df.index), index=df.index
)
df["wandb_data_frame_id"] = pandas.Series(
[six.text_type(data_frame_id)] * len(df.index), index=df.index
)
frames_dir = os.path.join(run.dir, _DATA_FRAMES_SUBDIR)
util.mkdir_exists_ok(frames_dir)
path = os.path.join(frames_dir, "{}-{}.parquet".format(key, data_frame_id))
fastparquet.write(path, df)
return {
"id": data_frame_id,
"_type": "data-frame",
"format": "parquet",
"project": run.project_name(), # we don't have the project ID here
"entity": run.entity,
"run": run.id,
"path": path,
}
class _ClassesIdType(_dtypes.Type):
name = "classesId"
legacy_names = ["wandb.Classes_id"]
types = [Classes]
def __init__(
self,
classes_obj: Optional[Classes] = None,
valid_ids: Optional["_dtypes.UnionType"] = None,
):
if valid_ids is None:
valid_ids = _dtypes.UnionType()
elif isinstance(valid_ids, list):
valid_ids = _dtypes.UnionType(
[_dtypes.ConstType(item) for item in valid_ids]
)
elif isinstance(valid_ids, _dtypes.UnionType):
valid_ids = valid_ids
else:
raise TypeError("valid_ids must be None, list, or UnionType")
if classes_obj is None:
classes_obj = Classes(
[
{"id": _id.params["val"], "name": str(_id.params["val"])}
for _id in valid_ids.params["allowed_types"]
]
)
elif not isinstance(classes_obj, Classes):
raise TypeError("valid_ids must be None, or instance of Classes")
else:
valid_ids = _dtypes.UnionType(
[
_dtypes.ConstType(class_obj["id"])
for class_obj in classes_obj._class_set
]
)
self.wb_classes_obj_ref = classes_obj
self.params.update({"valid_ids": valid_ids})
def assign(self, py_obj: Optional[Any] = None) -> "_dtypes.Type":
return self.assign_type(_dtypes.ConstType(py_obj))
def assign_type(self, wb_type: "_dtypes.Type") -> "_dtypes.Type":
valid_ids = self.params["valid_ids"].assign_type(wb_type)
if not isinstance(valid_ids, _dtypes.InvalidType):
return self
return _dtypes.InvalidType()
@classmethod
def from_obj(cls, py_obj: Optional[Any] = None) -> "_dtypes.Type":
return cls(py_obj)
def to_json(self, artifact: Optional["LocalArtifact"] = None) -> Dict[str, Any]:
cl_dict = super(_ClassesIdType, self).to_json(artifact)
# TODO (tss): Refactor this block with the similar one in wandb.Image.
# This is a bit of a smell that the classes object does not follow
# the same file-pattern as other media types.
if artifact is not None:
class_name = os.path.join("media", "cls")
classes_entry = artifact.add(self.wb_classes_obj_ref, class_name)
cl_dict["params"]["classes_obj"] = {
"type": "classes-file",
"path": classes_entry.path,
"digest": classes_entry.digest, # is this needed really?
}
else:
cl_dict["params"]["classes_obj"] = self.wb_classes_obj_ref.to_json(artifact)
return cl_dict
@classmethod
def from_json(
cls, json_dict: Dict[str, Any], artifact: Optional["PublicArtifact"] = None,
) -> "_dtypes.Type":
classes_obj = None
if (
json_dict.get("params", {}).get("classes_obj", {}).get("type")
== "classes-file"
):
if artifact is not None:
classes_obj = artifact.get(
json_dict.get("params", {}).get("classes_obj", {}).get("path")
)
else:
raise RuntimeError("Expected artifact to be non-null.")
else:
classes_obj = Classes.from_json(
json_dict["params"]["classes_obj"], artifact
)
return cls(classes_obj)
class _VideoFileType(_dtypes.Type):
name = "video-file"
types = [Video]
class _HtmlFileType(_dtypes.Type):
name = "html-file"
types = [Html]
class _Object3DFileType(_dtypes.Type):
name = "object3D-file"
types = [Object3D]
_dtypes.TypeRegistry.add(_ClassesIdType)
_dtypes.TypeRegistry.add(_VideoFileType)
_dtypes.TypeRegistry.add(_HtmlFileType)
_dtypes.TypeRegistry.add(_Object3DFileType)
__all__ = [
"Histogram",
"Object3D",
"Molecule",
"Html",
"Video",
"ImageMask",
"BoundingBoxes2D",
"Classes",
"Image",
"Plotly",
"history_dict_to_json",
"val_to_json",
]
| 38.166168
| 131
| 0.572691
|
6b15151dd779517b88b70fb5e81b165f5547bb68
| 5,016
|
py
|
Python
|
src/python/pants/init/target_roots_calculator.py
|
SergeKireev/pants
|
cd92c65aeb3dfdcee3e0946f2b68a301ef2f4541
|
[
"Apache-2.0"
] | 1
|
2020-08-26T03:30:31.000Z
|
2020-08-26T03:30:31.000Z
|
src/python/pants/init/target_roots_calculator.py
|
SergeKireev/pants
|
cd92c65aeb3dfdcee3e0946f2b68a301ef2f4541
|
[
"Apache-2.0"
] | 1
|
2019-07-29T16:58:21.000Z
|
2019-07-29T16:58:21.000Z
|
src/python/pants/init/target_roots_calculator.py
|
SergeKireev/pants
|
cd92c65aeb3dfdcee3e0946f2b68a301ef2f4541
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import logging
from twitter.common.collections import OrderedSet
from pants.base.build_environment import get_buildroot, get_scm
from pants.base.cmd_line_spec_parser import CmdLineSpecParser
from pants.base.specs import SingleAddress, Specs
from pants.base.target_roots import TargetRoots
from pants.engine.addressable import BuildFileAddresses
from pants.engine.legacy.graph import OwnersRequest
from pants.goal.workspace import ScmWorkspace
from pants.scm.subsystems.changed import ChangedRequest
logger = logging.getLogger(__name__)
class InvalidSpecConstraint(Exception):
"""Raised when invalid constraints are given via target specs and arguments like --changed*."""
class TargetRootsCalculator:
"""Determines the target roots for a given pants run."""
@classmethod
def parse_specs(cls, target_specs, build_root=None, exclude_patterns=None, tags=None):
"""Parse string specs into unique `Spec` objects.
:param iterable target_specs: An iterable of string specs.
:param string build_root: The path to the build root.
:returns: A `Specs` object.
"""
build_root = build_root or get_buildroot()
spec_parser = CmdLineSpecParser(build_root)
dependencies = tuple(OrderedSet(spec_parser.parse_spec(spec_str) for spec_str in target_specs))
return Specs(
dependencies=dependencies,
exclude_patterns=exclude_patterns if exclude_patterns else tuple(),
tags=tags)
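# Editor's note: an illustrative call (the spec strings and build root are
# hypothetical) -- parse_specs turns command-line spec strings into a Specs
# object of parsed address specs:
#
#   specs = TargetRootsCalculator.parse_specs(
#       ['src/python/pants/util:', 'tests/python::'], build_root='/path/to/repo')
#   # specs.dependencies is a tuple of parsed Spec objects (deduplicated via
#   # OrderedSet); exclude_patterns and tags default to empty.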
@classmethod
def changed_files(cls, scm, changes_since=None, diffspec=None):
"""Determines the files changed according to SCM/workspace and options."""
workspace = ScmWorkspace(scm)
if diffspec:
return workspace.changes_in(diffspec)
changes_since = changes_since or scm.current_rev_identifier()
return workspace.touched_files(changes_since)
@classmethod
def create(cls, options, session, build_root=None, exclude_patterns=None, tags=None):
"""
:param Options options: An `Options` instance to use.
:param session: The Scheduler session
:param string build_root: The build root.
"""
# Determine the literal target roots.
spec_roots = cls.parse_specs(
target_specs=options.target_specs,
build_root=build_root,
exclude_patterns=exclude_patterns,
tags=tags)
# Determine `Changed` arguments directly from options to support pre-`Subsystem`
# initialization paths.
changed_options = options.for_scope('changed')
changed_request = ChangedRequest.from_options(changed_options)
# Determine the `--owner-of=` arguments provided from the global options
owned_files = options.for_global_scope().owner_of
logger.debug('spec_roots are: %s', spec_roots)
logger.debug('changed_request is: %s', changed_request)
logger.debug('owned_files are: %s', owned_files)
targets_specified = sum(1 for item
in (changed_request.is_actionable(), owned_files, spec_roots.dependencies)
if item)
if targets_specified > 1:
# We've been provided more than one of: a change request, an owner request, or spec roots.
raise InvalidSpecConstraint(
'Multiple target selection methods provided. Please use only one of '
'--changed-*, --owner-of, or target specs'
)
if changed_request.is_actionable():
scm = get_scm()
if not scm:
raise InvalidSpecConstraint(
'The --changed-* options are not available without a recognized SCM (usually git).'
)
changed_files = cls.changed_files(
scm,
changes_since=changed_request.changes_since,
diffspec=changed_request.diffspec)
# We've been provided no spec roots (e.g. `./pants list`) AND a changed request. Compute
# alternate target roots.
request = OwnersRequest(sources=tuple(changed_files),
include_dependees=str(changed_request.include_dependees))
changed_addresses, = session.product_request(BuildFileAddresses, [request])
logger.debug('changed addresses: %s', changed_addresses)
dependencies = tuple(SingleAddress(a.spec_path, a.target_name) for a in changed_addresses)
return TargetRoots(Specs(dependencies=dependencies, exclude_patterns=exclude_patterns, tags=tags))
if owned_files:
# We've been provided no spec roots (e.g. `./pants list`) AND an owner request. Compute
# alternate target roots.
request = OwnersRequest(sources=tuple(owned_files), include_dependees=str('none'))
owner_addresses, = session.product_request(BuildFileAddresses, [request])
logger.debug('owner addresses: %s', owner_addresses)
dependencies = tuple(SingleAddress(a.spec_path, a.target_name) for a in owner_addresses)
return TargetRoots(Specs(dependencies=dependencies, exclude_patterns=exclude_patterns, tags=tags))
return TargetRoots(spec_roots)
| 41.8
| 104
| 0.730662
|
4eb897de3f2d35893003ebbe213f5293adff5518
| 12,796
|
py
|
Python
|
crossword-gen.py
|
DillionLeeL/crossword-gen
|
58ee4aad86d8e7bdcfc6fb3db7174f5f21c42c19
|
[
"MIT"
] | null | null | null |
crossword-gen.py
|
DillionLeeL/crossword-gen
|
58ee4aad86d8e7bdcfc6fb3db7174f5f21c42c19
|
[
"MIT"
] | null | null | null |
crossword-gen.py
|
DillionLeeL/crossword-gen
|
58ee4aad86d8e7bdcfc6fb3db7174f5f21c42c19
|
[
"MIT"
] | null | null | null |
import random
import re
import string
from copy import deepcopy
import time
import argparse
import api_requests
start_time = time.time()
# ---------------Classes---------------
class Coord(object):
def __init__(self, start, end, vert, num):
self.start = start # tuple coordinate
self.end = end # tuple coordinate
self.length = (end[0] - start[0]) + (end[1] - start[1]) + 1
self.vertical = vert
self.number = num
self.collisions = []
def __str__(self):
return str(self.start)+ str(self.end)+ " "+str(self.number)+" "+self.get_direction()
def get_direction(self):
if self.vertical:
return "down"
else:
return "across"
def get_number(self):
return self.number
class Word(object):
def __init__(self, coord, word_tuple):
self.coord = coord
self.text = word_tuple[0]
self.clue = word_tuple[1]
def __str__(self):
return self.text+"-"+self.clue
class Board(object):
def __init__(self, shape, debug=False):
self.shape = shape # board layout with symbols
self.generated = shape # board with words
self.coords = []
self.width = len(shape[0])
self.height = len(shape)
self.collision_list = []
self.debug = debug
self.iterations = 0 # how many recursions were needed to build
self.wordlist = []
def add_coord(self, coord):
self.coords.append(coord)
def get_coords(self):
return self.coords
def print_coords(self):
for coord in self.coords:
print(coord.start, coord.end, coord.length, coord.number, coord.get_direction())
for ccoord in coord.collisions:
print(ccoord)
def print_board(self):
for row in self.shape:
print("|",end="")
for item in row:
if str(item)[0]=="#":
print(" ",end="|")
else:
print("-", end='|')
print()
print()
def print_solution(self):
#print the board
for row in self.generated:
print("|",end="")
for item in row:
if str(item)[0]=="#":
print(" ",end="|")
else:
print(str(item)[0], end='|')
print()
print()
# print words
for word in self.wordlist[::-1]:
print(word.coord.get_number(), word.coord.get_direction(), "-", word.text)
def print_clues(self):
for word in self.wordlist[::-1]:
print(word.coord.get_number(), word.coord.get_direction(), "-", len(word.text), "letters -",word.clue)
def print_iterations(self):
if self.debug:
print("Number of iterations:",self.iterations)
else:
print("Run in debug mode to track iterations")
def print_complexity(self):
# divide by two because each coord saves its collision with each other ex: 1->2, 2->1
print("Complexity of this crossword:", int(sum(len(coord.collisions) for coord in self.coords)/2)+1)
# ---------------FUNCTIONS---------------
def generate_coordinates(crossword):
num_rows = crossword.height
num_cols = crossword.width
horizontal_num = 1
vertical_num = 1
# find coords by row
for r in range(num_rows):
start = None
end = None
for c in range(num_cols):
# start a new word
if crossword.shape[r][c]=='-' and not start:
# it isn't just one character
if c+1 < num_cols and crossword.shape[r][c+1] != '#':
start = (r,c)
# place the horizontal number to be used for collision checking
crossword.shape[r][c]=horizontal_num
else:
continue
# continue word
if start and not end:
crossword.shape[r][c]=horizontal_num
# This is the last letter
if (c+1==num_cols or crossword.shape[r][c+1]=='#'):
end = (r,c)
h_coord = Coord(start, end, False, horizontal_num)
crossword.add_coord(h_coord)
horizontal_num+=1
start = None
end = None
continue
# find coords by col
for c in range(num_cols):
start = None
end = None
for r in range(num_rows):
# start a new word, either the across number or '-'
if (isinstance(crossword.shape[r][c], int) or crossword.shape[r][c]=='-') and not start:
# it isn't just one character
if r+1 < num_rows and crossword.shape[r+1][c] != '#':
start = (r,c)
else:
continue
# continue word
if start and not end:
# This is the last letter
if (r+1==num_rows or crossword.shape[r+1][c]=='#'):
end = (r,c)
v_coord = Coord(start, end, True, vertical_num)
# collisions
for x in range(start[0],end[0]+1):
# if the number of a row is found (ie collision) add to collision list
if (isinstance(crossword.shape[x][c], int)):
crossword.coords[crossword.shape[x][c]-1].collisions.append(v_coord)
v_coord.collisions.append(crossword.coords[crossword.shape[x][c]-1])
crossword.add_coord(v_coord)
vertical_num+=1
start = None
end = None
continue
# recursively gets collisions, typical usage starts at coords[0]
def generate_collisions(board, coord):
if coord not in board.collision_list:
if board.debug:
print("generating ",coord)
board.collision_list.append(coord)
for coll in coord.collisions:
generate_collisions(board,coll)
else:
return
def generate_crossword(board, wordlist):
board.print_board()
generate_coordinates(board)
generate_collisions(board, board.coords[0])
colls = board.collision_list
if not find_and_place(board,board,colls,wordlist):
print("Could not generate a crossword with the given wordlist")
exit()
board.print_clues()
if board.debug:
board.print_complexity()
return board
# recursively places words down until no more collision exist
def find_and_place(board, new_board, collision_list, wordlist):
if board.debug:
board.iterations +=1
if len(collision_list) > 0:
print("START:",collision_list[0],"LENGTH:",len(collision_list))
found = False
copy = deepcopy(new_board)
# we handled all collisions
if collision_list==[]:
board.generated = new_board.generated
return True
constraints = ""
# only add the first character of the constraint in the case that it is a two+ digit number
# this number is the across word number that was used to make the collision list in generate_coordinates()
for x in range(collision_list[0].length):
if collision_list[0].vertical:
constraints += str(copy.generated[collision_list[0].start[0]+x][collision_list[0].start[1]])[0]
else:
constraints += str(copy.generated[collision_list[0].start[0]][collision_list[0].start[1]+x])[0]
# Generate regex from the constraints
# example1: 1236 -> .... example2: 1CO9T -> .CO.T
regex = ""
for char in constraints:
if char in string.ascii_lowercase:
regex+=char
else:
regex+="."
if board.debug:
print("constraints are:", constraints,"->",regex, "Iteration:", board.iterations)
found_matches = [word for word in wordlist[len(constraints)-2] if re.match(regex, word[0]) is not None]
random.shuffle(found_matches)
for word in found_matches:
# place the word down
for x in range(collision_list[0].length):
if collision_list[0].vertical:
copy.generated[collision_list[0].start[0]+x][collision_list[0].start[1]] = word[0][x]
else:
copy.generated[collision_list[0].start[0]][collision_list[0].start[1]+x] = word[0][x]
if board.debug:
copy.print_solution()
# pass this iteration on and try to fit the next collision
found = find_and_place(board,copy,collision_list[1:], wordlist)
# All iterations down this path worked, so confirm the word
if found:
fitting_word = Word(collision_list[0], word)
board.wordlist.append(fitting_word)
return True
elif board.debug:
print("Returning False for:",word)
return False
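# Editor's note: a tiny worked example (illustrative) of the constraint-to-regex
# step used in find_and_place above:
#
#   constraints = "1co9t"   # digits mark unfilled cells, letters are already placed
#   regex = "".join(c if c in string.ascii_lowercase else "." for c in constraints)
#   # regex == ".co.t"; re.match(regex, "scout") matches, re.match(regex, "count") does not.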
# function to import wordlist from file
def import_words(filename, wordlist, has_definitions):
try:
f = open(filename, "r", encoding="utf-8")
lines = f.readlines()
if has_definitions:
for word, definition in zip(lines[0::2], lines[1::2]):
if len(word) <2 or len(word) > 20:
continue
wordlist[len(word.strip())-2].append((word.strip().lower(),definition.strip()))
else:
definition = "Testing values"
for word in lines:
if len(word) <2 or len(word) > 20:
continue
wordlist[len(word.strip())-2].append((word.strip().lower(),definition))
f.close()
except Exception as e:
raise SystemExit(e)
# function to check import shape of crossword puzzle from file
def import_shape(filename):
try:
with open(filename, encoding="utf-8") as f:
lines = [line.rstrip() for line in f]
if len(lines) < 2 or is_jagged(lines):
print("The Puzzle is incorrectly drawn.")
exit()
if len(lines) > 50:
print("Warning: Puzzles this large may take a very, VERY, long time to finish.")
shape = [[] for x in range(len(lines))]
for x in range(len(lines)):
for char in lines[x]:
#TODO: symbol checking
shape[x].append(char)
return shape
except Exception as e:
print(e)
def is_jagged(twod_list):
try:
for row in twod_list:
if len(row.strip()) != len(twod_list[0].strip()):
return True
return False
except Exception as e:
raise SystemExit(e)
def merge_lists(l1, l2):
return [x+y for x,y in zip(l1,l2)]
def main(args):
# have the shape and either a wordbank or use the API
if (args.s and (args.w or args.xword is not None)):
shapefile = args.s
has_definitions = args.no_defs
debug_mode = args.debug
else:
print("Please include files. See '--help'")
exit()
# Wordlist is a 2d list of tuples
# 0 index is two letter words
wordlist = [[] for x in range(20)]
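# Editor's note (illustrative): entries are bucketed by length, so wordlist[0]
# collects 2-letter words and wordlist[3] collects 5-letter words,
# e.g. ("scout", "A young explorer").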
# word list from file
if args.w:
wordfile = args.w
import_words(wordfile, wordlist, has_definitions)
# word list from xword info API
# start/end, just start, random
if args.xword is not None:
if len(args.xword) > 2:
end = args.xword[2]
else:
end = None
if len(args.xword) > 1:
start = args.xword[1]
else:
start = "random"
xlist = api_requests.xword_get_words(start, end)
wordlist = merge_lists(wordlist, xlist)
shape = import_shape(shapefile)
crossword = Board(shape, debug_mode)
generate_crossword(crossword, wordlist)
crossword.print_solution()
print("Time taken:",time.time() - start_time)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--s', type=str, help="File to import shape from")
parser.add_argument('--w', type=str, help="File to import wordbank from")
parser.add_argument('--no_defs', action='store_false', help="Wordbank does not include definitions")
parser.add_argument('--debug', action='store_true', help="Print additional information, including iterations")
parser.add_argument('--xword', nargs='*', type=str, help="Import words from the XWord Info API. You can put \
a starting/ending date in the format YYYY/MM/DD or just a starting date to get that day + ten days after")
args = parser.parse_args()
main(args)
| 34.031915
| 114
| 0.564708
|
3b1fad3d2ab5564eb0ec6fa295a897f91a805944
| 3,317
|
py
|
Python
|
pytorch_toolkit/instance_segmentation/segmentoly/rcnn/model_zoo/resnet_panet_mask_rcnn.py
|
morkovka1337/openvino_training_extensions
|
846db45c264d6b061505213f51763520b9432ba9
|
[
"Apache-2.0"
] | 3
|
2020-12-29T02:47:32.000Z
|
2021-11-12T08:12:51.000Z
|
pytorch_toolkit/instance_segmentation/segmentoly/rcnn/model_zoo/resnet_panet_mask_rcnn.py
|
morkovka1337/openvino_training_extensions
|
846db45c264d6b061505213f51763520b9432ba9
|
[
"Apache-2.0"
] | 28
|
2020-09-25T22:40:36.000Z
|
2022-03-12T00:37:36.000Z
|
pytorch_toolkit/instance_segmentation/segmentoly/rcnn/model_zoo/resnet_panet_mask_rcnn.py
|
morkovka1337/openvino_training_extensions
|
846db45c264d6b061505213f51763520b9432ba9
|
[
"Apache-2.0"
] | 1
|
2021-03-12T10:08:44.000Z
|
2021-03-12T10:08:44.000Z
|
"""
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import torch
import torch.nn as nn
from .panet_mask_rcnn_base import PANetMaskRCNN
from ..backbones.resnet import ResNet
from ..panet import BboxHead as PANetBboxHead
class ResNeXt101PANetMaskRCNN(PANetMaskRCNN):
def __init__(self, cls_num, force_max_output_size=False, heavier_head=False, deformable_conv=True, **kwargs):
backbone = ResNet(base_arch='ResNet101', num_groups=32, width_per_group=8)
backbone.freeze_stages_params(range(2))
backbone.freeze_stages_bns(range(5))
backbone.set_output_stages((1, 2, 3, 4))
super().__init__(cls_num, backbone, force_max_output_size=force_max_output_size,
heavier_head=heavier_head,
deformable_conv=deformable_conv, **kwargs)
self.mask_head = self.add_segmentation_head(self.bupa.dims_out, self.cls_num, afp_levels_num=4,
fully_connected_fusion=True, group_norm=True)
self.detection_head = self.BboxHead(self.bupa.dims_out[0], 1024, PANetMaskRCNN.detection_roi_featuremap_resolution,
self.cls_num,
cls_agnostic_bbox_regression=False,
afp_levels_num=4,
heavier_head=heavier_head, group_norm=False)
class BboxHead(PANetBboxHead):
"""BboxHead from PANet without ReLu after fc1"""
def __init__(self, dim_in, dim_out, resolution_in, cls_num, cls_agnostic_bbox_regression=False,
afp_levels_num=4, heavier_head=False, conv_head_dim=256, num_convs=4,
group_norm=False):
super().__init__(dim_in, dim_out, resolution_in, cls_num, cls_agnostic_bbox_regression,
afp_levels_num, heavier_head, conv_head_dim, num_convs, group_norm)
def forward(self, x):
batch_size = int(x[0].shape[0])
for i in range(self.levels_num):
if self.heavier_head:
y = self.fc1[i](x[i])
else:
y = self.fc1[i](x[i].view(batch_size, -1))
if i == 0:
pooled_feature = y
else:
pooled_feature = torch.max(pooled_feature, y)
x = self.fc2(pooled_feature)
if self.heavier_head:
x = nn.functional.relu(self.fc(x.view(batch_size, -1)), inplace=True)
cls_score = self.cls_score(x)
if not self.training:
cls_score = nn.functional.softmax(cls_score, dim=1)
bbox_pred = self.bbox_pred(x)
return cls_score, bbox_pred
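# Hedged illustration (not part of the original model): BboxHead.forward() above fuses the
# per-level fc1 outputs with an element-wise maximum. A standalone sketch of that fusion
# pattern, with made-up shapes, is guarded below so it never runs on import.
if __name__ == '__main__':
    _levels = [torch.randn(8, 1024) for _ in range(4)]  # e.g. 4 pyramid levels, batch of 8
    _fused = _levels[0]
    for _feat in _levels[1:]:
        _fused = torch.max(_fused, _feat)  # keep the strongest response per element
    assert _fused.shape == (8, 1024)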
| 44.824324
| 123
| 0.622249
|
3fc3323c2029f01ff00c875c237c5d2fd3ad5385
| 20,361
|
py
|
Python
|
sympy/polys/polyoptions.py
|
goodok/sympy
|
de84ed2139125a755ea7b6ba91d945d9fbbe5ed9
|
[
"BSD-3-Clause"
] | 2
|
2015-05-11T12:26:38.000Z
|
2016-08-19T00:11:03.000Z
|
sympy/polys/polyoptions.py
|
goodok/sympy
|
de84ed2139125a755ea7b6ba91d945d9fbbe5ed9
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/polys/polyoptions.py
|
goodok/sympy
|
de84ed2139125a755ea7b6ba91d945d9fbbe5ed9
|
[
"BSD-3-Clause"
] | null | null | null |
"""Options manager for :class:`Poly` and public API functions. """
from sympy.core import S, Basic, sympify
from sympy.utilities import numbered_symbols, topological_sort
from sympy.polys.polyerrors import (
GeneratorsError,
OptionError,
FlagError,
)
import sympy.polys
import re
class Option(object):
"""Base class for all kinds of options. """
option = None
is_Flag = False
requires = []
excludes = []
after = []
before = []
@classmethod
def default(cls):
return None
@classmethod
def preprocess(cls, option):
return None
@classmethod
def postprocess(cls, options):
pass
class Flag(Option):
"""Base class for all kinds of flags. """
is_Flag = True
class BooleanOption(Option):
"""An option that must have a boolean value or equivalent assigned. """
@classmethod
def preprocess(cls, value):
if value in [True, False]:
return bool(value)
else:
raise OptionError("'%s' must have a boolean value assigned, got %s" % (cls.option, value))
class OptionType(type):
"""Base type for all options that does registers options. """
def __init__(cls, *args, **kwargs):
@property
def getter(self):
try:
return self[cls.option]
except KeyError:
return cls.default()
setattr(Options, cls.option, getter)
Options.__options__[cls.option] = cls
class Options(dict):
"""
Options manager for polynomial manipulation module.
Examples
========
>>> from sympy.polys.polyoptions import Options
>>> from sympy.polys.polyoptions import build_options
>>> from sympy.abc import x, y, z
>>> Options((x, y, z), {'domain': 'ZZ'})
{'auto': False, 'domain': ZZ, 'gens': (x, y, z)}
>>> build_options((x, y, z), {'domain': 'ZZ'})
{'auto': False, 'domain': ZZ, 'gens': (x, y, z)}
**Options**
* Expand --- boolean option
* Gens --- option
* Wrt --- option
* Sort --- option
* Order --- option
* Field --- boolean option
* Greedy --- boolean option
* Domain --- option
* Split --- boolean option
* Gaussian --- boolean option
* Extension --- option
* Modulus --- option
* Symmetric --- boolean option
* Strict --- boolean option
* Repr --- option
**Flags**
* Auto --- boolean flag
* Frac --- boolean flag
* Formal --- boolean flag
* Polys --- boolean flag
* Include --- boolean flag
* All --- boolean flag
* Gen --- flag
"""
__order__ = None
__options__ = {}
def __init__(self, gens, args, flags=None, strict=False):
dict.__init__(self)
if gens and args.get('gens', ()):
raise OptionError("both '*gens' and keyword argument 'gens' supplied")
elif gens:
args = dict(args)
args['gens'] = gens
defaults = args.pop('defaults', {})
def preprocess_options(args):
for option, value in args.iteritems():
try:
cls = self.__options__[option]
except KeyError:
raise OptionError("'%s' is not a valid option" % option)
if issubclass(cls, Flag):
if flags is None or option not in flags:
if strict:
raise OptionError("'%s' flag is not allowed in this context" % option)
if value is not None:
self[option] = cls.preprocess(value)
preprocess_options(args)
for key, value in dict(defaults).iteritems():
if key in self:
del defaults[key]
else:
for option in self.keys():
cls = self.__options__[option]
if key in cls.excludes:
del defaults[key]
break
preprocess_options(defaults)
for option in self.keys():
cls = self.__options__[option]
for require_option in cls.requires:
if self.get(require_option) is None:
raise OptionError("'%s' option is only allowed together with '%s'" % (option, require_option))
for exclude_option in cls.excludes:
if self.get(exclude_option) is not None:
raise OptionError("'%s' option is not allowed together with '%s'" % (option, exclude_option))
for option in self.__order__:
self.__options__[option].postprocess(self)
@classmethod
def _init_dependencies_order(cls):
"""Resolve the order of options' processing. """
if cls.__order__ is None:
vertices, edges = [], set([])
for name, option in cls.__options__.iteritems():
vertices.append(name)
for _name in option.after:
edges.add((_name, name))
for _name in option.before:
edges.add((name, _name))
try:
cls.__order__ = topological_sort((vertices, list(edges)))
except ValueError:
raise RuntimeError("cycle detected in sympy.polys options framework")
def clone(self, updates={}):
"""Clone ``self`` and update specified options. """
obj = dict.__new__(self.__class__)
for option, value in self.iteritems():
obj[option] = value
for option, value in updates.iteritems():
obj[option] = value
return obj
def __setattr__(self, attr, value):
if attr in self.__options__:
self[attr] = value
else:
super(Options, self).__setattr__(attr, value)
@property
def args(self):
args = {}
for option, value in self.iteritems():
if value is not None and option != 'gens':
cls = self.__options__[option]
if not issubclass(cls, Flag):
args[option] = value
return args
@property
def options(self):
options = {}
for option, cls in self.__options__.iteritems():
if not issubclass(cls, Flag):
options[option] = getattr(self, option)
return options
@property
def flags(self):
flags = {}
for option, cls in self.__options__.iteritems():
if issubclass(cls, Flag):
flags[option] = getattr(self, option)
return flags
class Expand(BooleanOption):
"""``expand`` option to polynomial manipulation functions. """
__metaclass__ = OptionType
option = 'expand'
requires = []
excludes = []
@classmethod
def default(cls):
return True
class Gens(Option):
"""``gens`` option to polynomial manipulation functions. """
__metaclass__ = OptionType
option = 'gens'
requires = []
excludes = []
@classmethod
def default(cls):
return ()
@classmethod
def preprocess(cls, gens):
if isinstance(gens, Basic):
gens = (gens,)
elif len(gens) == 1 and hasattr(gens[0], '__iter__'):
gens = gens[0]
if gens == (None,):
gens = ()
elif len(set(gens)) != len(gens):
raise GeneratorsError("duplicated generators: %s" % str(gens))
elif any(gen.is_commutative is False for gen in gens):
raise GeneratorsError("non-commutative generators: %s" % str(gens))
return tuple(gens)
class Wrt(Option):
"""``wrt`` option to polynomial manipulation functions. """
__metaclass__ = OptionType
option = 'wrt'
requires = []
excludes = []
_re_split = re.compile(r"\s*,\s*|\s+")
@classmethod
def preprocess(cls, wrt):
if isinstance(wrt, Basic):
return [str(wrt)]
elif isinstance(wrt, str):
wrt = wrt.strip()
if wrt.endswith(','):
raise OptionError('Bad input: missing parameter.')
if not wrt:
return []
return [ gen for gen in cls._re_split.split(wrt) ]
elif hasattr(wrt, '__getitem__'):
return list(map(str, wrt))
else:
raise OptionError("invalid argument for 'wrt' option")
class Sort(Option):
"""``sort`` option to polynomial manipulation functions. """
__metaclass__ = OptionType
option = 'sort'
requires = []
excludes = []
@classmethod
def default(cls):
return []
@classmethod
def preprocess(cls, sort):
if isinstance(sort, str):
return [ gen.strip() for gen in sort.split('>') ]
elif hasattr(sort, '__getitem__'):
return list(map(str, sort))
else:
raise OptionError("invalid argument for 'sort' option")
class Order(Option):
"""``order`` option to polynomial manipulation functions. """
__metaclass__ = OptionType
option = 'order'
requires = []
excludes = []
@classmethod
def default(cls):
return sympy.polys.monomialtools.lex
@classmethod
def preprocess(cls, order):
return sympy.polys.monomialtools.monomial_key(order)
class Field(BooleanOption):
"""``field`` option to polynomial manipulation functions. """
__metaclass__ = OptionType
option = 'field'
requires = []
excludes = ['domain', 'split', 'gaussian']
class Greedy(BooleanOption):
"""``greedy`` option to polynomial manipulation functions. """
__metaclass__ = OptionType
option = 'greedy'
requires = []
excludes = ['domain', 'split', 'gaussian', 'extension', 'modulus', 'symmetric']
class Composite(BooleanOption):
""" """
__metaclass__ = OptionType
option = 'composite'
@classmethod
def default(cls):
return True
requires = []
excludes = ['domain', 'split', 'gaussian', 'extension', 'modulus', 'symmetric']
class Domain(Option):
"""``domain`` option to polynomial manipulation functions. """
__metaclass__ = OptionType
option = 'domain'
requires = []
excludes = ['field', 'greedy', 'split', 'gaussian', 'extension']
    _re_finitefield = re.compile(r"^(FF|GF)\((\d+)\)$")
    _re_polynomial = re.compile(r"^(Z|ZZ|Q|QQ)\[(.+)\]$")
    _re_fraction = re.compile(r"^(Z|ZZ|Q|QQ)\((.+)\)$")
    _re_algebraic = re.compile(r"^(Q|QQ)\<(.+)\>$")
@classmethod
def preprocess(cls, domain):
if not isinstance(domain, str):
return domain
else:
if domain in ['Z', 'ZZ']:
return sympy.polys.domains.ZZ
if domain in ['Q', 'QQ']:
return sympy.polys.domains.QQ
if domain in ['R', 'RR']:
return sympy.polys.domains.RR
if domain == 'EX':
return sympy.polys.domains.EX
r = cls._re_finitefield.match(domain)
if r is not None:
return sympy.polys.domains.FF(int(r.groups()[1]))
r = cls._re_polynomial.match(domain)
if r is not None:
ground, gens = r.groups()
gens = map(sympify, gens.split(','))
if ground in ['Z', 'ZZ']:
return sympy.polys.domains.ZZ.poly_ring(*gens)
else:
return sympy.polys.domains.QQ.poly_ring(*gens)
r = cls._re_fraction.match(domain)
if r is not None:
ground, gens = r.groups()
gens = map(sympify, gens.split(','))
if ground in ['Z', 'ZZ']:
return sympy.polys.domains.ZZ.frac_field(*gens)
else:
return sympy.polys.domains.QQ.frac_field(*gens)
r = cls._re_algebraic.match(domain)
if r is not None:
gens = map(sympify, r.groups()[1].split(','))
return sympy.polys.domains.QQ.algebraic_field(*gens)
raise OptionError('expected a valid domain specification, got %s' % domain)
@classmethod
def postprocess(cls, options):
if 'gens' in options and 'domain' in options and options['domain'].is_Composite and \
(set(options['domain'].gens) & set(options['gens'])):
raise GeneratorsError("ground domain and generators interferes together")
class Split(BooleanOption):
"""``split`` option to polynomial manipulation functions. """
__metaclass__ = OptionType
option = 'split'
requires = []
excludes = ['field', 'greedy', 'domain', 'gaussian', 'extension', 'modulus', 'symmetric']
@classmethod
def postprocess(cls, options):
if 'split' in options:
raise NotImplementedError("'split' option is not implemented yet")
class Gaussian(BooleanOption):
"""``gaussian`` option to polynomial manipulation functions. """
__metaclass__ = OptionType
option = 'gaussian'
requires = []
excludes = ['field', 'greedy', 'domain', 'split', 'extension', 'modulus', 'symmetric']
@classmethod
def postprocess(cls, options):
if 'gaussian' in options and options['gaussian'] is True:
options['extension'] = set([S.ImaginaryUnit])
Extension.postprocess(options)
class Extension(Option):
"""``extension`` option to polynomial manipulation functions. """
__metaclass__ = OptionType
option = 'extension'
requires = []
excludes = ['greedy', 'domain', 'split', 'gaussian', 'modulus', 'symmetric']
@classmethod
def preprocess(cls, extension):
if extension == 1:
return bool(extension)
elif extension == 0:
raise OptionError("'False' is an invalid argument for 'extension'")
else:
if not hasattr(extension, '__iter__'):
extension = set([extension])
else:
if not extension:
extension = None
else:
extension = set(extension)
return extension
@classmethod
def postprocess(cls, options):
if 'extension' in options and options['extension'] is not True:
options['domain'] = sympy.polys.domains.QQ.algebraic_field(*options['extension'])
class Modulus(Option):
"""``modulus`` option to polynomial manipulation functions. """
__metaclass__ = OptionType
option = 'modulus'
requires = []
excludes = ['greedy', 'split', 'domain', 'gaussian', 'extension']
@classmethod
def preprocess(cls, modulus):
modulus = sympify(modulus)
if modulus.is_Integer and modulus > 0:
return int(modulus)
else:
raise OptionError("'modulus' must a positive integer, got %s" % modulus)
@classmethod
def postprocess(cls, options):
if 'modulus' in options:
modulus = options['modulus']
symmetric = options.get('symmetric', True)
options['domain'] = sympy.polys.domains.FF(modulus, symmetric)
class Symmetric(BooleanOption):
"""``symmetric`` option to polynomial manipulation functions. """
__metaclass__ = OptionType
option = 'symmetric'
requires = ['modulus']
excludes = ['greedy', 'domain', 'split', 'gaussian', 'extension']
class Strict(BooleanOption):
"""``strict`` option to polynomial manipulation functions. """
__metaclass__ = OptionType
option = 'strict'
@classmethod
def default(cls):
return True
class Repr(Option):
"""``repr`` option to polynomial manipulation functions. """
__metaclass__ = OptionType
option = 'repr'
@classmethod
def default(cls):
return sympy.polys.densepolys.DensePoly
@classmethod
def preprocess(cls, repr):
if isinstance(repr, str):
if repr == 'sparse':
return sympy.polys.sparsepolys.SparsePoly
elif repr == 'dense':
return sympy.polys.densepolys.DensePoly
else:
raise OptionError("'%s' is not a valid value 'repr' option" % repr)
elif isinstance(repr, sympy.polys.polyclasses.GenericPoly):
return repr
else:
raise OptionError("'repr' must a string or a class, got %s" % repr)
class Auto(BooleanOption, Flag):
"""``auto`` flag to polynomial manipulation functions. """
__metaclass__ = OptionType
option = 'auto'
after = ['field', 'domain', 'extension', 'gaussian']
@classmethod
def default(cls):
return True
@classmethod
def postprocess(cls, options):
if ('domain' in options or 'field' in options) and 'auto' not in options:
options['auto'] = False
class Frac(BooleanOption, Flag):
"""``auto`` option to polynomial manipulation functions. """
__metaclass__ = OptionType
option = 'frac'
@classmethod
def default(cls):
return False
class Formal(BooleanOption, Flag):
"""``formal`` flag to polynomial manipulation functions. """
__metaclass__ = OptionType
option = 'formal'
@classmethod
def default(cls):
return False
class Polys(BooleanOption, Flag):
"""``polys`` flag to polynomial manipulation functions. """
__metaclass__ = OptionType
option = 'polys'
class Include(BooleanOption, Flag):
"""``include`` flag to polynomial manipulation functions. """
__metaclass__ = OptionType
option = 'include'
@classmethod
def default(cls):
return False
class All(BooleanOption, Flag):
"""``all`` flag to polynomial manipulation functions. """
__metaclass__ = OptionType
option = 'all'
@classmethod
def default(cls):
return False
class Gen(Flag):
"""``gen`` flag to polynomial manipulation functions. """
__metaclass__ = OptionType
option = 'gen'
@classmethod
def default(cls):
return 0
@classmethod
def preprocess(cls, gen):
if isinstance(gen, (Basic, int)):
return gen
else:
raise OptionError("invalid argument for 'gen' option")
class Symbols(Flag):
"""``symbols`` flag to polynomial manipulation functions. """
__metaclass__ = OptionType
option = 'symbols'
@classmethod
def default(cls):
return numbered_symbols('s', start=1)
@classmethod
def preprocess(cls, symbols):
if hasattr(symbols, '__iter__'):
return iter(symbols)
else:
raise OptionError("expected an iterator or iterable container, got %s" % symbols)
class Method(Flag):
"""``method`` flag to polynomial manipulation functions. """
__metaclass__ = OptionType
option = 'method'
@classmethod
def preprocess(cls, method):
if isinstance(method, str):
return method.lower()
else:
raise OptionError("expected a string, got %s" % method)
def build_options(gens, args=None):
"""Construct options from keyword arguments or ... options. """
if args is None:
gens, args = (), gens
if len(args) != 1 or 'opt' not in args or gens:
return Options(gens, args)
else:
return args['opt']
def allowed_flags(args, flags):
"""
Allow specified flags to be used in the given context.
Examples
========
>>> from sympy.polys.polyoptions import allowed_flags
>>> from sympy.polys.domains import ZZ
>>> allowed_flags({'domain': ZZ}, [])
>>> allowed_flags({'domain': ZZ, 'frac': True}, [])
Traceback (most recent call last):
...
FlagError: 'frac' flag is not allowed in this context
>>> allowed_flags({'domain': ZZ, 'frac': True}, ['frac'])
"""
flags = set(flags)
for arg in args.iterkeys():
try:
if Options.__options__[arg].is_Flag and not arg in flags:
raise FlagError("'%s' flag is not allowed in this context" % arg)
except KeyError:
raise OptionError("'%s' is not a valid option" % arg)
def set_defaults(options, **defaults):
"""Update options with default values. """
if 'defaults' not in options:
options = dict(options)
options['defaults'] = defaults
return options
Options._init_dependencies_order()
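# Hedged usage sketch: this is legacy, Python 2-era SymPy code, so the lines below mirror
# the doctest in the Options docstring above rather than the modern sympy API.
#
#   >>> from sympy.abc import x, y
#   >>> build_options((x, y), {'domain': 'ZZ'})
#   {'auto': False, 'domain': ZZ, 'gens': (x, y)}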
| 26.442857
| 114
| 0.577477
|
457763dd9f6f8f8fcb7e3f7183d20cbc367f073a
| 3,636
|
py
|
Python
|
bread/contrib/reports/models.py
|
basxsoftwareassociation/bread
|
062ec82a565f81eff51ea91dc0f211139bc7fe96
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 13
|
2021-01-19T08:28:08.000Z
|
2022-01-28T03:44:34.000Z
|
bread/contrib/reports/models.py
|
basxsoftwareassociation/bread
|
062ec82a565f81eff51ea91dc0f211139bc7fe96
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 80
|
2020-12-07T04:37:39.000Z
|
2022-03-08T14:42:13.000Z
|
bread/contrib/reports/models.py
|
basxsoftwareassociation/bread
|
062ec82a565f81eff51ea91dc0f211139bc7fe96
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 2
|
2020-12-03T15:06:53.000Z
|
2021-03-16T03:47:29.000Z
|
import htmlgenerator as hg
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.translation import gettext_lazy as _
from bread import layout
from ...layout.components.datatable import DataTableColumn
from .fields.queryfield import QuerysetField, parsequeryexpression
def available_report_filters(modelfield, request, report):
from django.conf import settings
return tuple((i, i) for i in getattr(settings, "REPORT_FILTERS", {}).keys())
class Report(models.Model):
created = models.DateField(_("Created"), auto_now_add=True)
name = models.CharField(_("Name"), max_length=255)
model = models.ForeignKey(
ContentType,
on_delete=models.PROTECT,
)
model.verbose_name = _("Model")
filter = QuerysetField(_("Filter"), modelfieldname="model")
custom_queryset = models.CharField(
_("Custom Filter"),
max_length=255,
help_text=_(
"Key in 'settings.REPORT_FILTERS' must be a function returning a queryset"
),
blank=True,
)
custom_queryset.lazy_choices = available_report_filters
@property
def preview(self):
columns = []
for column in self.columns.all():
columns.append(
DataTableColumn(column.name, layout.FC(f"row.{column.column}"))
)
qs = self.queryset
if qs is None:
            return hg.BaseElement("Model no longer exists!")
return hg.BaseElement(
hg.H3(_("Preview")),
layout.datatable.DataTable.from_queryset(
qs[:25], columns=columns, primary_button=""
),
)
@property
def queryset(self):
if self.custom_queryset and self.custom_queryset in getattr(
settings, "REPORT_FILTERS", {}
):
            # no explicit existence check for the setting; a missing key falls back to an empty queryset
ret = getattr(settings, "REPORT_FILTERS", {}).get(
self.custom_queryset, lambda model: model.objects.none()
)(self.model.model_class())
if not isinstance(ret, models.QuerySet):
raise ValueError(
_(
'settings.REPORT_FILTERS["%s"] did not return a queryset but returned: %s'
)
% (self.custom_queryset, ret)
)
if ret.model != self.model.model_class():
raise ValueError(
_(
'settings.REPORT_FILTERS["%s"] did not return a queryset for %s but for %s'
)
% (self.custom_queryset, self.model.model_class(), ret.model)
)
return parsequeryexpression(ret, self.filter.raw).queryset
return self.filter.queryset
def __str__(self):
return self.name
class Meta:
verbose_name = _("Report")
verbose_name_plural = _("Reports")
ordering = ["name"]
class ReportColumn(models.Model):
AGGREGATIONS = {
"count": "",
"sum": "",
}
report = models.ForeignKey(Report, on_delete=models.CASCADE, related_name="columns")
column = models.CharField(_("Column"), max_length=255)
name = models.CharField(_("Name"), max_length=255)
aggregation = models.CharField(
_("Aggregation"), max_length=64, choices=tuple(AGGREGATIONS.items()), blank=True
)
class Meta:
verbose_name = _("Column")
verbose_name_plural = _("Columns")
order_with_respect_to = "report"
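# Hedged configuration sketch (key and filter are hypothetical): ``custom_queryset`` above
# expects a key of ``settings.REPORT_FILTERS``, a mapping from names to callables that take
# the model class and return a queryset, e.g. in a project's settings module:
#
#   REPORT_FILTERS = {
#       "created_this_year": lambda model: model.objects.filter(created__year=2022),
#   }
#
# Report.queryset then applies the report's own ``filter`` expression on top of that result.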
| 33.981308
| 99
| 0.606161
|
16864db703e37581d3a447d4df7d78cce0e2aeeb
| 2,131
|
py
|
Python
|
driver/excel.py
|
efineshi/medicine_number_count
|
2792b15b78c855fec2af4078b0b97d28e4d4d648
|
[
"Apache-2.0"
] | null | null | null |
driver/excel.py
|
efineshi/medicine_number_count
|
2792b15b78c855fec2af4078b0b97d28e4d4d648
|
[
"Apache-2.0"
] | null | null | null |
driver/excel.py
|
efineshi/medicine_number_count
|
2792b15b78c855fec2af4078b0b97d28e4d4d648
|
[
"Apache-2.0"
] | null | null | null |
import xlrd
from utils import utils
def count_data_from_excel(sheet1,
rows_numbers,
patient_column,
medicine_numbers_column,
project_column,
room_column,
doctor_column):
room_dict = {}
for row in range(rows_numbers):
if row == 0:
continue
        # get the quantity of medicine prescribed by the doctor
doctor_medicine_numbers = float(sheet1.row_values(row)[medicine_numbers_column])
        # get the patient name from this record
patient_name = sheet1.row_values(row)[patient_column]
        # get the doctor name from this record
doctor_name = sheet1.row_values(row)[doctor_column]
        # get the pharmacy name
room = sheet1.row_values(row)[room_column]
        # get the department name
project = sheet1.row_values(row)[project_column]
        # check whether the pharmacy already exists and tally the record
room_dict = utils.count_room(room_dict, room, project, doctor_name, doctor_medicine_numbers, patient_name)
return room_dict
class Data:
def __init__(self, file_path):
        # load the excel workbook
try:
data = xlrd.open_workbook(file_path)
except Exception:
raise Exception("错误:输入的 excel 文件不存在,或文件格式不为 xls 或 xlsx")
        # get the first sheet
sheet1 = data.sheets()[0]
        # number of rows in the first sheet
rows_numbers = sheet1.nrows
        # resolve the actual column indices
line1_info = sheet1.row_values(0)
line2_info = sheet1.row_values(1)
patient_column, medicine_column, medicine_numbers_column, \
project_column, room_column, doctor_column = utils.get_data_column(line1_info)
self.medicine_info, self.medicine_name = utils.get_medicine_info(line2_info, medicine_column)
self.room_dict = count_data_from_excel(sheet1,
rows_numbers,
patient_column,
medicine_numbers_column,
project_column,
room_column,
doctor_column)
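# Hedged usage sketch (the workbook name is hypothetical; the real column layout is
# resolved at runtime by utils.get_data_column):
if __name__ == '__main__':
    data = Data('prescriptions.xls')
    print(data.medicine_name)  # name of the medicine covered by the report
    print(data.room_dict)      # aggregated counts built by utils.count_room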
| 38.745455
| 114
| 0.544815
|
2abf069f89cfcfe0506375aafa285f2e86cc7654
| 3,329
|
py
|
Python
|
about/views.py
|
timptner/farafmb.de
|
2b154278d8b44ea3adecafcb8554c1b0b0055e01
|
[
"MIT"
] | null | null | null |
about/views.py
|
timptner/farafmb.de
|
2b154278d8b44ea3adecafcb8554c1b0b0055e01
|
[
"MIT"
] | 1
|
2022-02-17T20:28:19.000Z
|
2022-02-17T20:28:19.000Z
|
about/views.py
|
timptner/farafmb.de
|
2b154278d8b44ea3adecafcb8554c1b0b0055e01
|
[
"MIT"
] | null | null | null |
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.files.storage import default_storage
from django.shortcuts import render, HttpResponseRedirect
from django.urls import reverse
from django.views import generic
from .forms import ImageForm
@login_required
def upload_file(request):
if request.method == 'POST':
form = ImageForm(request.POST, request.FILES)
if form.is_valid():
form.save()
messages.success(request, "Das Gruppenbild wurde aktualisiert!")
return HttpResponseRedirect(reverse('about:index'))
else:
form = ImageForm()
return render(request, 'about/upload.html', {'form': form})
class AboutView(generic.TemplateView):
template_name = 'about/index.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['image'] = default_storage.url('about/team.jpeg') if default_storage.exists('about/team.jpeg') else None
context['text'] = """
Wenn wir uns gerade mal nicht mit den Professorinnen und Professoren um bessere Noten für uns selbst streiten, kämpfen
wir als Fachschaftsrat mit Händen und Füßen jeden Tag dafür die
[Otto-von-Guericke-Universität Magdeburg](https://www.ovgu.de) und unsere
[Fakultät für Maschinenbau](https://www.fmb.ovgu.de) ein kleines Stück besser zu machen.
Wir sind das offizielle Vertretungsgremium der Studierenden an der Fakultät für Maschinenbau. Das heißt wir setzen uns
dafür ein, dass eure Vorschläge und auch eure Kritik an die Fakultät herangetragen wird. Die weiteren Aufgaben, die wir
bewältigen, sind in unserer Satzung für euch alle online nachzulesen.
Wie versuchen uns kontinuierlich zu verbessern und euch Informationen über Neuerungen an der Universität leichter
zugänglich zu machen. Außerdem setzen wir uns dafür ein euch eine einfachere Kommunikation mit den Professoren zu
ermöglichen und euch bessere und wechselnde Veranstaltungen zu bieten.
Um dies kontinuierlich durchsetzen zu können brauchen wir auch immer neuen Input und freuen uns somit über neue
Mitglieder, die neue Ansichten und Dynamiken bei uns in den Fachschaftsrat bringen.
\#jointhecrew
---
_English_
When we are not fighting with the professors for better grades for ourselves, we as the student council fight tooth and
nail every day to make [Otto von Guericke University Magdeburg](https://www.ovgu.de/en/) and our
[Faculty of Mechanical Engineering](https://www.fmb.ovgu.de/en/) a little bit better.
We are the official representative body of the students at the Faculty of Mechanical Engineering. This means that we
make sure that your suggestions and also your criticism are brought to the attention of the faculty. The other tasks we
deal with can be read online in our constitution.
We try to improve ourselves continuously and to make information about innovations at the university more accessible to
you. We are also committed to making it easier for you to communicate with your professors and to offer you better and
more varied events.
In order to be able to implement this continuously, we always need new input and are therefore happy to welcome new
members who bring new views and dynamics to the student council.
\#jointhecrew
"""
return context
| 46.236111
| 120
| 0.775008
|
baaf45a77dd2cbb2c625585d55d61a71dcb67134
| 280
|
py
|
Python
|
appengine/chrome_infra_packages/cipd/__init__.py
|
mithro/chromium-infra
|
d27ac0b230bedae4bc968515b02927cf9e17c2b7
|
[
"BSD-3-Clause"
] | 1
|
2018-01-02T05:47:07.000Z
|
2018-01-02T05:47:07.000Z
|
appengine/chrome_infra_packages/cipd/__init__.py
|
mithro/chromium-infra
|
d27ac0b230bedae4bc968515b02927cf9e17c2b7
|
[
"BSD-3-Clause"
] | null | null | null |
appengine/chrome_infra_packages/cipd/__init__.py
|
mithro/chromium-infra
|
d27ac0b230bedae4bc968515b02927cf9e17c2b7
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from .api import PackageRepositoryApi
from .handlers import get_frontend_routes
from .impl import get_backend_routes
| 35
| 72
| 0.810714
|
f532f59fe3e5c5342dce54b4d97ea3df2de48774
| 926
|
py
|
Python
|
data/groups.py
|
lev2454/VGA-Web-Edition
|
4d8fb7b93373ee00fb78889cab2213aaa5a4cdc9
|
[
"BSD-3-Clause"
] | 1
|
2020-05-05T16:10:36.000Z
|
2020-05-05T16:10:36.000Z
|
data/groups.py
|
lev2454/VGA-Web-Edition
|
4d8fb7b93373ee00fb78889cab2213aaa5a4cdc9
|
[
"BSD-3-Clause"
] | null | null | null |
data/groups.py
|
lev2454/VGA-Web-Edition
|
4d8fb7b93373ee00fb78889cab2213aaa5a4cdc9
|
[
"BSD-3-Clause"
] | null | null | null |
import datetime
import sqlalchemy
from .db_session import SqlAlchemyBase
from sqlalchemy_serializer import SerializerMixin
class Group(SqlAlchemyBase, SerializerMixin):
__tablename__ = 'groups'
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
name = sqlalchemy.Column(sqlalchemy.String)
screen_name = sqlalchemy.Column(sqlalchemy.String, index=True)
description = sqlalchemy.Column(sqlalchemy.String, nullable=True)
is_closed = sqlalchemy.Column(sqlalchemy.Boolean)
deactivated = sqlalchemy.Column(sqlalchemy.String, nullable=True)
city = sqlalchemy.Column(sqlalchemy.String, nullable=True)
country = sqlalchemy.Column(sqlalchemy.String, nullable=True)
icon = sqlalchemy.Column(sqlalchemy.String, nullable=True)
update_time = sqlalchemy.Column(sqlalchemy.DateTime, nullable=True)
added_date = sqlalchemy.Column(sqlalchemy.DateTime, default=datetime.datetime.now)
| 44.095238
| 86
| 0.790497
|
202961fd5a09c6cbdf1b1b9e23a566564e050d99
| 3,022
|
py
|
Python
|
macropy/test/tracing.py
|
CyberFlameGO/macropy
|
a815f5a58231d8fa65386cd71ff0d15d09fe9fa3
|
[
"Unlicense",
"MIT"
] | 2,061
|
2015-01-02T16:53:18.000Z
|
2022-03-31T12:01:07.000Z
|
macropy/test/tracing.py
|
CyberFlameGO/macropy
|
a815f5a58231d8fa65386cd71ff0d15d09fe9fa3
|
[
"Unlicense",
"MIT"
] | 41
|
2015-02-25T02:54:46.000Z
|
2022-01-28T19:08:45.000Z
|
macropy/test/tracing.py
|
CyberFlameGO/macropy
|
a815f5a58231d8fa65386cd71ff0d15d09fe9fa3
|
[
"Unlicense",
"MIT"
] | 151
|
2015-01-01T22:07:55.000Z
|
2022-03-03T07:55:20.000Z
|
import ast
import unittest
from macropy.tracing import macros, trace, log, require, show_expanded
from macropy.core.quotes import macros, q, u
result = []
def log(x):
result.append(x)
class Tests(unittest.TestCase):
def test_basic(self):
log[1 + 2]
log["omg" * 3]
assert(result[-2:] == [
"1 + 2 -> 3",
"\"omg\" * 3 -> 'omgomgomg'"
])
def test_combo(self):
trace[1 + 2 + 3 + 4]
self.assertEqual(result[-3:], [
"1 + 2 -> 3",
"1 + 2 + 3 -> 6",
"1 + 2 + 3 + 4 -> 10"
])
def test_fancy(self):
trace[[len(x)*3 for x in ['omg', 'wtf', 'b' * 2 + 'q', 'lo' * 3 + 'l']]]
assert(result[-14:] == [
"'b' * 2 -> 'bb'",
"'b' * 2 + 'q' -> 'bbq'",
"'lo' * 3 -> 'lololo'",
"'lo' * 3 + 'l' -> 'lololol'",
"['omg', 'wtf', 'b' * 2 + 'q', 'lo' * 3 + 'l'] -> ['omg', 'wtf', 'bbq', 'lololol']",
"len(x) -> 3",
"len(x)*3 -> 9",
"len(x) -> 3",
"len(x)*3 -> 9",
"len(x) -> 3",
"len(x)*3 -> 9",
"len(x) -> 7",
"len(x)*3 -> 21",
"[len(x)*3 for x in ['omg', 'wtf', 'b' * 2 + 'q', 'lo' * 3 + 'l']] -> [9, 9, 9, 21]"
])
def test_function_call(self):
trace[sum([sum([1, 2, 3]), min(4, 5, 6), max(7, 8, 9)])]
assert(result[-5:] == [
"sum([1, 2, 3]) -> 6",
"min(4, 5, 6) -> 4",
"max(7, 8, 9) -> 9",
"[sum([1, 2, 3]), min(4, 5, 6), max(7, 8, 9)] -> [6, 4, 9]",
"sum([sum([1, 2, 3]), min(4, 5, 6), max(7, 8, 9)]) -> 19"
])
def test_require(self):
with self.assertRaises(AssertionError) as cm:
require[1 == 10]
assert str(cm.exception) == "Require Failed\n1 == 10 -> False"
require[1 == 1]
with self.assertRaises(AssertionError) as cm:
require[3**2 + 4**2 != 5**2]
require[3**2 + 4**2 == 5**2]
def test_require_block(self):
with self.assertRaises(AssertionError) as cm:
a = 10
b = 2
with require:
a > 5
a * b == 20
a < 2
assert str(cm.exception) == "Require Failed\na < 2 -> False"
def test_show_expanded(self):
from macropy.core import ast_repr
show_expanded[q[1 + 2]]
assert ("ast.BinOp(left=ast.Num(n=1), op=ast.Add(), "
"right=ast.Num(n=2))" in result[-1])
with show_expanded:
a = 1
b = 2
with q as code:
return(a + u[b + 1])
assert result[-3] == '\na = 1'
assert result[-2] == '\nb = 2'
self.assertEqual("\ncode = [ast.Return(value=ast.BinOp("
"left=ast.Name(id='a'"", ctx=ast.Load()), "
"op=ast.Add(), right=ast_repr((b + 1))))]",
result[-1])
| 27.472727
| 96
| 0.401721
|
b6c323f2ebcd7e109348472c28ce3eceef56f318
| 5,352
|
py
|
Python
|
nni/compression/pytorch/quantization_speedup/frontend_to_onnx.py
|
dutxubo/nni
|
c16f4e1c89b54b8b80661ef0072433d255ad2d24
|
[
"MIT"
] | 9,680
|
2019-05-07T01:42:30.000Z
|
2022-03-31T16:48:33.000Z
|
nni/compression/pytorch/quantization_speedup/frontend_to_onnx.py
|
dutxubo/nni
|
c16f4e1c89b54b8b80661ef0072433d255ad2d24
|
[
"MIT"
] | 1,957
|
2019-05-06T21:44:21.000Z
|
2022-03-31T09:21:53.000Z
|
nni/compression/pytorch/quantization_speedup/frontend_to_onnx.py
|
dutxubo/nni
|
c16f4e1c89b54b8b80661ef0072433d255ad2d24
|
[
"MIT"
] | 1,571
|
2019-05-07T06:42:55.000Z
|
2022-03-31T03:19:24.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
import onnx
import onnx.numpy_helper
"""
The main purpose of this module is to convert a PyTorch model to an ONNX model.
The conversion itself is straightforward, but it has one critical problem: the layer
names of the PyTorch model are not carried over to the corresponding ONNX nodes. To
work around this, each supported layer is wrapped in a module that multiplies the input
by an identifying number before the op's computation, so the exported ONNX graph still
records which configuration entry belongs to which layer.
"""
class LayernameModuleWrapper(torch.nn.Module):
def __init__(self, module, module_bits) -> None:
"""
Parameters
----------
module : torch.nn.Module
Layer module of pytorch model
module_bits : int
Bits width setting for module
"""
super().__init__()
self.module = module
self.module_bits = module_bits
def forward(self, inputs):
inputs = inputs*self.module_bits
inputs = self.module(inputs)
return inputs
def _setattr(model, name, module):
"""
Parameters
----------
model : pytorch model
The model to speed up by quantization
name : str
name of pytorch module
module : torch.nn.Module
Layer module of pytorch model
"""
name_list = name.split(".")
for name in name_list[:-1]:
model = getattr(model, name)
setattr(model, name_list[-1], module)
def unwrapper(model_onnx, index2name, config):
"""
    Fill the onnx config and remove the wrapper nodes from the onnx graph
Parameters
----------
model_onnx : onnx model
Onnx model which is converted from pytorch model
index2name : dict
Dictionary of layer index and name
config : dict
Config recording name of layers and calibration parameters
Returns
-------
onnx model
Onnx model which is converted from pytorch model
dict
The configuration of onnx model layers and calibration parameters
"""
# Support Gemm, Conv, Relu, Clip(Relu6) and Maxpool
support_op = ['Gemm', 'Conv', 'Relu', 'Clip', 'MaxP']
idx = 0
onnx_config = {}
while idx < len(model_onnx.graph.node):
nd = model_onnx.graph.node[idx]
if nd.name[0:4] in support_op and idx > 1:
            # Grab the constant node and the multiply node inserted by the wrapper
const_nd = model_onnx.graph.node[idx-2]
mul_nd = model_onnx.graph.node[idx-1]
# Get index number which is transferred by constant node
index = int(onnx.numpy_helper.to_array(const_nd.attribute[0].t))
if index != -1:
name = index2name[index]
onnx_config[nd.name] = config[name]
nd.input[0] = mul_nd.input[0]
# Remove constant node and multiply node
model_onnx.graph.node.remove(const_nd)
model_onnx.graph.node.remove(mul_nd)
idx = idx-2
idx = idx+1
return model_onnx, onnx_config
def torch_to_onnx(model, config, input_shape, model_path, input_names, output_names):
"""
Convert torch model to onnx model and get layer bits config of onnx model.
Parameters
----------
model : pytorch model
The model to speed up by quantization
config : dict
Config recording bits number and name of layers
input_shape : tuple
The input shape of model, shall pass it to torch.onnx.export
model_path : str
The path user want to store onnx model which is converted from pytorch model
input_names : list
Input name of onnx model providing for torch.onnx.export to generate onnx model
    output_names : list
Output name of onnx model providing for torch.onnx.export to generate onnx model
Returns
-------
onnx model
Onnx model which is converted from pytorch model
dict
The configuration of onnx model layers and calibration parameters
"""
# Support Gemm, Conv, Relu, Clip(Relu6) and MaxPool
support_op = [torch.nn.Conv2d, torch.nn.Linear, torch.nn.ReLU, torch.nn.ReLU6, torch.nn.MaxPool2d]
# Transfer bits number to onnx layer by using wrapper
index2name = {}
name2index = {}
if config is not None:
for i, name in enumerate(config.keys()):
index2name[i] = name
name2index[name] = i
for name, module in model.named_modules():
if config is not None and name in config:
assert type(module) in support_op
wrapper_module = LayernameModuleWrapper(module, name2index[name])
_setattr(model, name, wrapper_module)
elif type(module) in support_op:
wrapper_module = LayernameModuleWrapper(module, -1)
_setattr(model, name, wrapper_module)
# Convert torch model to onnx model and save it in model_path
dummy_input = torch.randn(input_shape)
model.to('cpu')
torch.onnx.export(model, dummy_input, model_path, verbose=False, input_names=input_names, output_names=output_names, export_params=True)
# Load onnx model
model_onnx = onnx.load(model_path)
model_onnx, onnx_config = unwrapper(model_onnx, index2name, config)
onnx.save(model_onnx, model_path)
onnx.checker.check_model(model_onnx)
return model_onnx, onnx_config
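# Hedged usage sketch (model, shapes and file name are made up; ``config`` may be None when
# no per-layer bit-width settings are supplied):
if __name__ == '__main__':
    toy_model = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3), torch.nn.ReLU())
    onnx_model, onnx_cfg = torch_to_onnx(toy_model, None, (1, 3, 32, 32),
                                         'toy_model.onnx', ['input'], ['output'])
    print(onnx_cfg)  # stays empty because no per-layer config was given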
| 36.162162
| 140
| 0.65639
|
ab9ff66e9f493d07d211027405f1a52974c0a031
| 855
|
py
|
Python
|
src/oscutil/oscplayback.py
|
neonkingfr/VizBench
|
e41f559cb6e761d717f2f5b202482d5d8dacd2d8
|
[
"MIT"
] | 7
|
2015-01-05T06:32:49.000Z
|
2020-10-30T19:29:07.000Z
|
src/oscutil/oscplayback.py
|
neonkingfr/VizBench
|
e41f559cb6e761d717f2f5b202482d5d8dacd2d8
|
[
"MIT"
] | null | null | null |
src/oscutil/oscplayback.py
|
neonkingfr/VizBench
|
e41f559cb6e761d717f2f5b202482d5d8dacd2d8
|
[
"MIT"
] | 4
|
2016-03-09T22:29:26.000Z
|
2021-04-07T13:52:28.000Z
|
import re
from nosuch.oscutil import *
if len(sys.argv) < 3:
print "Usage: oscplayback.py {port@host} {file}"
sys.exit(1)
if __name__ == '__main__':
oscmsg = []
porthost = sys.argv[1]
print("porthost=",porthost)
port = re.compile(".*@").search(porthost).group()[:-1]
host = re.compile("@.*").search(porthost).group()[1:]
filename = sys.argv[2]
f = open(filename)
r = OscRecipient(host,port)
time0 = time.time();
while 1:
line = f.readline()
if not line:
break
if line[0] != '[':
continue
sys.stdout.write("SENDING %s"%line)
oscbundle = eval(line)
tm = oscbundle[0]
while (time.time()-time0) < tm:
time.sleep(0.001)
bundle = createBundle()
n = len(oscbundle)
for i in range(1,n):
m = oscbundle[i]
sys.stdout.write("M = "+str(m))
appendToBundle(bundle,m[0],m[2:])
r.sendbundle(bundle)
f.close()
| 20.357143
| 55
| 0.624561
|
356d3d86ffb6e8573f6cc38d445d6d52fc3075ff
| 90
|
py
|
Python
|
kitchen/check_in/apps.py
|
ZhukovGreen/kitchen-check-in
|
ce5327353cd9db32389da1678a37ac3d647b7c32
|
[
"MIT"
] | null | null | null |
kitchen/check_in/apps.py
|
ZhukovGreen/kitchen-check-in
|
ce5327353cd9db32389da1678a37ac3d647b7c32
|
[
"MIT"
] | null | null | null |
kitchen/check_in/apps.py
|
ZhukovGreen/kitchen-check-in
|
ce5327353cd9db32389da1678a37ac3d647b7c32
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class CheckInConfig(AppConfig):
name = 'check_in'
| 15
| 33
| 0.755556
|
51953eec2609308e93a36a2cdf4f2db05b2bc556
| 2,582
|
py
|
Python
|
market/utils/widgets.py
|
katomaso/django-market
|
84c4fa10aefbd792a956cef3d727623ca78cb5fd
|
[
"MIT"
] | null | null | null |
market/utils/widgets.py
|
katomaso/django-market
|
84c4fa10aefbd792a956cef3d727623ca78cb5fd
|
[
"MIT"
] | null | null | null |
market/utils/widgets.py
|
katomaso/django-market
|
84c4fa10aefbd792a956cef3d727623ca78cb5fd
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""Provide generally usable special widgets."""
from django.forms.widgets import Input, TextInput, NumberInput, ClearableFileInput
from django.utils.html import format_html, conditional_escape
from django.utils.encoding import force_text
from django.utils.translation import ugettext as _
from django.template.loader import render_to_string
from market.core.templatetags.core_tags import as_media
class ClearableImageInput(ClearableFileInput):
"""Redefine clearable template."""
template_with_initial = (
'<img src="%(media_url)s" class="clearable-image-field" data-checkbox="%(clear_checkbox_id)s"/>'
'%(clear_template)s<br /><label>%(input_text)s</label> %(input)s'
)
template_with_clear = '<span style="display:none">%(clear)s</span>'
def __init__(self, template=None, attrs=None):
self.template = template
super(ClearableImageInput, self).__init__(attrs)
def get_template_substitution_values(self, value):
"""Return value-related substitutions."""
return {
'media_url': conditional_escape(
as_media(value.thumbnail if hasattr(value, "thumbnail") else value)
)
}
def render(self, name, value, attrs=None):
"""Render image with thumbnail."""
if self.template is not None:
return render_to_string(self.template, context=dict(name=name, value=value, **attrs))
return super(ClearableImageInput, self).render(name, value, attrs)
class AppendInput(Input):
"""Specialized widget for twitter's Bootstrap with append ability."""
appended_text = "$"
def render(self, name, value, attrs=None):
"""Add bootstrap's append ability."""
append = self.attrs.pop("append", self.appended_text)
if value is None:
value = ''
final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
if value != '':
# Only add the 'value' attribute if a value is non-empty.
final_attrs['value'] = force_text(self._format_value(value))
return format_html('<div class="inputs"><input {}/><span>{}</span></div>',
" ".join("{}={}".format(*attr) for attr in final_attrs.items()),
append)
class AppendTextInput(TextInput, AppendInput):
pass
class AppendNumberInput(NumberInput, AppendInput):
pass
class CurrencyInput(AppendNumberInput):
appended_text = _("$")
class CurrencyWithoutVATInput(AppendNumberInput):
appended_text = _("$ without VAT")
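# Hedged usage sketch (form and field names are hypothetical; assumes the legacy Django
# version these widgets were written for):
#
#   from django import forms
#
#   class OfferForm(forms.Form):
#       price = forms.DecimalField(widget=CurrencyWithoutVATInput())
#       photo = forms.ImageField(widget=ClearableImageInput())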
| 33.532468
| 104
| 0.665376
|
9b564f098f3fd4ec96e776c88d49097eb758609b
| 583
|
py
|
Python
|
e2e/Classes/Consensus/Element.py
|
kayabaNerve/Currency
|
260ebc20f1704f42ad6183fee39ad58ec6d07961
|
[
"CC0-1.0"
] | 66
|
2019-01-14T08:39:52.000Z
|
2022-01-06T11:39:15.000Z
|
e2e/Classes/Consensus/Element.py
|
kayabaNerve/Currency
|
260ebc20f1704f42ad6183fee39ad58ec6d07961
|
[
"CC0-1.0"
] | 228
|
2019-01-16T15:42:44.000Z
|
2022-02-05T07:48:07.000Z
|
e2e/Classes/Consensus/Element.py
|
kayabaNerve/Currency
|
260ebc20f1704f42ad6183fee39ad58ec6d07961
|
[
"CC0-1.0"
] | 19
|
2019-01-14T08:53:04.000Z
|
2021-11-03T20:19:28.000Z
|
from typing import Dict, Any
from abc import ABC, abstractmethod
from e2e.Libs.BLS import Signature
class Element(
ABC
):
prefix: bytes
holder: int
@abstractmethod
def signatureSerialize(
self
) -> bytes:
pass
@abstractmethod
def serialize(
self
) -> bytes:
pass
@abstractmethod
def toJSON(
self
) -> Dict[str, Any]:
pass
class SignedElement(
ABC
):
signature: Signature
@abstractmethod
def signedSerialize(
self
) -> bytes:
pass
@abstractmethod
def toSignedJSON(
self
) -> Dict[str, Any]:
pass
| 12.673913
| 35
| 0.64494
|
94d57ae43dd8f71f883fea7a82e3b2c6bc475734
| 8,547
|
py
|
Python
|
test/shed_functional/functional/test_0040_repository_circular_dependencies.py
|
innovate-invent/galaxy
|
10aa953a40e171246bdd1804c74e8019da8e8200
|
[
"CC-BY-3.0"
] | 4
|
2018-10-29T18:34:38.000Z
|
2021-09-29T23:30:42.000Z
|
test/shed_functional/functional/test_0040_repository_circular_dependencies.py
|
innovate-invent/galaxy
|
10aa953a40e171246bdd1804c74e8019da8e8200
|
[
"CC-BY-3.0"
] | 30
|
2016-10-20T15:35:12.000Z
|
2018-10-02T15:59:54.000Z
|
test/shed_functional/functional/test_0040_repository_circular_dependencies.py
|
innovate-invent/galaxy
|
10aa953a40e171246bdd1804c74e8019da8e8200
|
[
"CC-BY-3.0"
] | 7
|
2016-11-03T19:11:01.000Z
|
2020-05-11T14:23:52.000Z
|
from shed_functional.base.twilltestcase import common, ShedTwillTestCase
freebayes_repository_name = 'freebayes_0040'
freebayes_repository_description = "Galaxy's freebayes tool for test 0040"
freebayes_repository_long_description = "Long description of Galaxy's freebayes tool for test 0040"
filtering_repository_name = 'filtering_0040'
filtering_repository_description = "Galaxy's filtering tool for test 0040"
filtering_repository_long_description = "Long description of Galaxy's filtering tool for test 0040"
class TestRepositoryCircularDependencies(ShedTwillTestCase):
'''Verify that the code correctly displays repositories with circular repository dependencies.'''
def test_0000_initiate_users(self):
"""Create necessary user accounts."""
self.login(email=common.test_user_1_email, username=common.test_user_1_name)
test_user_1 = self.test_db_util.get_user(common.test_user_1_email)
assert test_user_1 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_1_email
self.test_db_util.get_private_role(test_user_1)
self.login(email=common.admin_email, username=common.admin_username)
admin_user = self.test_db_util.get_user(common.admin_email)
assert admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email
self.test_db_util.get_private_role(admin_user)
def test_0005_create_category(self):
"""Create a category for this test suite"""
self.create_category(name='test_0040_repository_circular_dependencies', description='Testing handling of circular repository dependencies.')
def test_0010_create_freebayes_repository(self):
'''Create and populate freebayes_0040.'''
self.login(email=common.test_user_1_email, username=common.test_user_1_name)
repository = self.get_or_create_repository(name=freebayes_repository_name,
description=freebayes_repository_description,
long_description=freebayes_repository_long_description,
owner=common.test_user_1_name,
categories=['test_0040_repository_circular_dependencies'],
strings_displayed=[])
self.upload_file(repository,
filename='freebayes/freebayes.tar',
filepath=None,
valid_tools_only=True,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded the tool tarball.',
strings_displayed=[],
strings_not_displayed=[])
def test_0015_create_filtering_repository(self):
'''Create and populate filtering_0040.'''
self.login(email=common.test_user_1_email, username=common.test_user_1_name)
repository = self.get_or_create_repository(name=filtering_repository_name,
description=filtering_repository_description,
long_description=filtering_repository_long_description,
owner=common.test_user_1_name,
categories=['test_0040_repository_circular_dependencies'],
strings_displayed=[])
self.upload_file(repository,
filename='filtering/filtering_1.1.0.tar',
filepath=None,
valid_tools_only=True,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded the tool tarball for filtering 1.1.0.',
strings_displayed=[],
strings_not_displayed=[])
def test_0020_create_dependency_on_freebayes(self):
'''Upload a repository_dependencies.xml file that specifies the current revision of freebayes to the filtering_0040 repository.'''
# The dependency structure should look like:
# Filtering revision 0 -> freebayes revision 0.
# Freebayes revision 0 -> filtering revision 1.
# Filtering will have two revisions, one with just the filtering tool, and one with the filtering tool and a dependency on freebayes.
repository = self.test_db_util.get_repository_by_name_and_owner(freebayes_repository_name, common.test_user_1_name)
filtering_repository = self.test_db_util.get_repository_by_name_and_owner(filtering_repository_name, common.test_user_1_name)
repository_dependencies_path = self.generate_temp_path('test_0040', additional_paths=['filtering'])
repository_tuple = (self.url, repository.name, repository.user.username, self.get_repository_tip(repository))
self.create_repository_dependency(repository=filtering_repository, repository_tuples=[repository_tuple], filepath=repository_dependencies_path)
def test_0025_create_dependency_on_filtering(self):
'''Upload a repository_dependencies.xml file that specifies the current revision of filtering to the freebayes_0040 repository.'''
# The dependency structure should look like:
# Filtering revision 0 -> freebayes revision 0.
# Freebayes revision 0 -> filtering revision 1.
# Filtering will have two revisions, one with just the filtering tool, and one with the filtering tool and a dependency on freebayes.
repository = self.test_db_util.get_repository_by_name_and_owner(filtering_repository_name, common.test_user_1_name)
freebayes_repository = self.test_db_util.get_repository_by_name_and_owner(freebayes_repository_name, common.test_user_1_name)
repository_dependencies_path = self.generate_temp_path('test_0040', additional_paths=['freebayes'])
repository_tuple = (self.url, repository.name, repository.user.username, self.get_repository_tip(repository))
self.create_repository_dependency(repository=freebayes_repository, repository_tuples=[repository_tuple], filepath=repository_dependencies_path)
def test_0030_verify_repository_dependencies(self):
'''Verify that each repository can depend on the other without causing an infinite loop.'''
filtering_repository = self.test_db_util.get_repository_by_name_and_owner(filtering_repository_name, common.test_user_1_name)
freebayes_repository = self.test_db_util.get_repository_by_name_and_owner(freebayes_repository_name, common.test_user_1_name)
# The dependency structure should look like:
# Filtering revision 0 -> freebayes revision 0.
# Freebayes revision 0 -> filtering revision 1.
# Filtering will have two revisions, one with just the filtering tool, and one with the filtering tool and a dependency on freebayes.
# In this case, the displayed dependency will specify the tip revision, but this will not always be the case.
self.check_repository_dependency(filtering_repository, freebayes_repository, self.get_repository_tip(freebayes_repository))
self.check_repository_dependency(freebayes_repository, filtering_repository, self.get_repository_tip(filtering_repository))
def test_0035_verify_repository_metadata(self):
'''Verify that resetting the metadata does not change it.'''
freebayes_repository = self.test_db_util.get_repository_by_name_and_owner(freebayes_repository_name, common.test_user_1_name)
filtering_repository = self.test_db_util.get_repository_by_name_and_owner(filtering_repository_name, common.test_user_1_name)
for repository in [freebayes_repository, filtering_repository]:
self.verify_unchanged_repository_metadata(repository)
def test_0040_verify_tool_dependencies(self):
'''Verify that freebayes displays tool dependencies.'''
repository = self.test_db_util.get_repository_by_name_and_owner(freebayes_repository_name, common.test_user_1_name)
self.display_manage_repository_page(repository,
strings_displayed=['freebayes', '0.9.4_9696d0ce8a9', 'samtools', '0.1.18', 'Valid tools', 'package'],
strings_not_displayed=['Invalid tools'])
| 73.051282
| 151
| 0.701182
|
2c15318deda37d21e26505741fb4c50f1449da06
| 2,824
|
py
|
Python
|
tensorflow/lite/testing/op_tests/concat.py
|
leike666666/tensorflow
|
a3fd0ddfcb716be124e95b51e96e6c1e4507ef64
|
[
"Apache-2.0"
] | 56
|
2018-06-21T13:47:23.000Z
|
2020-05-13T09:31:47.000Z
|
tensorflow/lite/testing/op_tests/concat.py
|
sagol/tensorflow
|
04f2870814d2773e09dcfa00cbe76a66a2c4de88
|
[
"Apache-2.0"
] | 58
|
2021-11-22T05:41:28.000Z
|
2022-01-19T01:33:40.000Z
|
tensorflow/lite/testing/op_tests/concat.py
|
sagol/tensorflow
|
04f2870814d2773e09dcfa00cbe76a66a2c4de88
|
[
"Apache-2.0"
] | 15
|
2018-09-06T14:18:32.000Z
|
2020-05-14T06:35:30.000Z
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for concat."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_concat_tests(options):
"""Make a set of tests to do concatenation."""
test_parameters = [{
"base_shape": [[1, 3, 4, 3], [3, 4]],
"num_tensors": [1, 2, 3, 4, 5, 6],
"axis": [0, 1, 2, 3, -3, -2, -1],
"type": [tf.float32, tf.uint8, tf.int32, tf.int64],
"fully_quantize": [False]
}, {
"base_shape": [[1, 3, 4, 3], [3, 4], [2, 3, 4, 3]],
"num_tensors": [1, 2, 3, 4, 5, 6],
"axis": [1, 2, 3, -3, -2, -1],
"type": [tf.float32],
"fully_quantize": [True]
}]
def get_shape(parameters, delta):
"""Return a tweaked version of 'base_shape'."""
axis = parameters["axis"]
shape = parameters["base_shape"][:]
if axis < 0:
axis += len(shape)
if axis < len(shape):
shape[axis] += delta
return shape
def build_graph(parameters):
all_tensors = []
for n in range(0, parameters["num_tensors"]):
input_tensor = tf.compat.v1.placeholder(
dtype=parameters["type"],
name=("input%d" % n),
shape=get_shape(parameters, n))
all_tensors.append(input_tensor)
out = tf.concat(all_tensors, parameters["axis"])
return all_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
all_values = []
for n in range(0, parameters["num_tensors"]):
input_values = create_tensor_data(
parameters["type"],
get_shape(parameters, n),
min_value=-1,
max_value=1)
all_values.append(input_values)
return all_values, sess.run(
outputs, feed_dict=dict(zip(inputs, all_values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=75)
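# Worked example (illustration only): get_shape() above grows the concatenation axis of
# "base_shape" by "delta", so for base_shape [1, 3, 4, 3], axis=2 and tensors n = 0, 1, 2
# the generated input shapes are [1, 3, 4, 3], [1, 3, 5, 3] and [1, 3, 6, 3].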
| 34.024096
| 80
| 0.64483
|
7850385120ae059a36f8260a1b8176e00a66e300
| 3,704
|
py
|
Python
|
on_path/filesync.py
|
yuyuko-C/pyworkkit
|
7785356bcbc93f56c81f3d78362598d1a6ba10c2
|
[
"Apache-2.0"
] | null | null | null |
on_path/filesync.py
|
yuyuko-C/pyworkkit
|
7785356bcbc93f56c81f3d78362598d1a6ba10c2
|
[
"Apache-2.0"
] | null | null | null |
on_path/filesync.py
|
yuyuko-C/pyworkkit
|
7785356bcbc93f56c81f3d78362598d1a6ba10c2
|
[
"Apache-2.0"
] | null | null | null |
from .path import Path
import typing
class SyncGroup:
def __init__(self, source: str, target: str, *filters: str, filter_mode: bool = True) -> None:
source_pth = Path(source).absolute()
target_pth = Path(target).absolute()
if source_pth.is_dir() and target_pth.is_dir():
self.source = source_pth
self.target = target_pth
else:
raise ValueError("")
self.__set_filter(*filters, filter_mode=filter_mode)
def __set_filter(self, *filters: str, filter_mode: bool):
filters_path = []
for p_filter in filters:
real_f = self.source.joinpath(p_filter).relative_to(self.source)
filters_path.append(real_f)
self.__path_filters:typing.List[Path] = filters_path
self.__filters_mode:bool = filter_mode
def __filter_check(self, relative_path: Path):
for p_filter in self.__path_filters:
if relative_path.is_relative_to(p_filter):
return not self.__filters_mode
return self.__filters_mode
def __filter_parent_check(self, relative_path: Path):
for p_filter in self.__path_filters:
if p_filter.is_relative_to(relative_path):
return not self.__filters_mode
return self.__filters_mode
    def __clean_target(self):
        # Compare directories first
        for target in self.target.walk_dir():
            relpath = target.relative_to(self.target)
            # Check whether this path passes the filter
            if self.__filter_check(relpath) or self.__filter_parent_check(relpath):
                source = self.source.joinpath(relpath)
                # If the source path was deleted, remove it here as well
                if not source.exists():
                    target.rmdir(True)
            else:
                # Paths that fail the filter should not be synced; remove them directly
                target.rmdir(True)
        # Then compare files
        for target in self.target.walk_file():
            relpath = target.relative_to(self.target)
            # Check whether this path passes the filter
            if self.__filter_check(relpath):
                source = self.source.joinpath(relpath)
                # If the source path was deleted, remove it here as well
                if not source.exists():
                    target.unlink()
            else:
                # Paths that fail the filter should not be synced; remove them directly
                target.unlink()
    def __source_to_target(self):
        # Walk directories missing from the target, copy them and record them in copy_folder
        copy_folder = []
        for source in self.source.walk_dir():
            relpath = source.relative_to(self.source)
            # Check whether this path passes the filter
            if self.__filter_check(relpath):
                target = self.target.joinpath(relpath)
                if not target.exists():
                    source.copy_to(target)
                    copy_folder.append(source)
        # Walk the files, skipping directories already copied in copy_folder;
        # files whose modification times differ are treated as changed
        for source in self.source.walk_file(*copy_folder):
            relpath = source.relative_to(self.source)
            # Check whether this path passes the filter
            if self.__filter_check(relpath):
                target = self.target.joinpath(relpath)
                if not target.exists():
                    source.copy_to(target)
                else:
                    if target.stat().st_mtime != source.stat().st_mtime:
                        source.copy_to(target)
    def execute_same(self):
        # 1. Delete paths that exist in target but not in source
        # 2. Copy paths that exist in source but not in target
        # 3. For paths present in both source and target, check:
            # 1. If the file info matches, leave it unchanged
            # 2. If the file info differs, replace it
        self.__clean_target()
        self.__source_to_target()
        pass
    def execute_addition(self):
        # 1. Copy paths that exist in source but not in target
        # 2. For paths present in both source and target, check:
            # 1. If the file info matches, leave it unchanged
            # 2. If the file info differs, replace it
        self.__source_to_target()
        pass
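# Minimal usage sketch. The directory names are placeholders and must already
# exist; it assumes the custom Path class imported above provides
# walk_dir/walk_file/copy_to/rmdir as used by SyncGroup. Under the default
# filter_mode=True, paths matching "cache" are skipped rather than synced.
def _sync_example():
    group = SyncGroup("./demo_source", "./demo_backup", "cache")
    group.execute_same()      # mirror: remove extras from the target, then copy/update
    # group.execute_addition()  # additive variant: copy/update only, never delete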
avg_line_length: 33.981651 | max_line_length: 98 | alphanum_fraction: 0.580724

hexsha: 6280062c1bddc98f42f86ad2a2cc2d35c30c6efd | size: 4779 | ext: py | lang: Python
max_stars/max_issues/max_forks_repo_path: automatization_scripts/tools/glance.py | repo_name: hmunfru/fiware-paas | repo_head_hexsha: dd808e986f5463dcbb85370b295404f167838ea1 | repo_licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: 2 | forks_event datetimes: 2016-08-22T16:03:25.000Z to 2018-03-05T23:28:55.000Z
# -*- coding: utf-8 -*-
# Copyright 2014 Telefonica Investigación y Desarrollo, S.A.U
#
# This file is part of FI-WARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with opensource@tid.es
__author__ = 'henar'
from urlparse import urlparse
import httplib, json
from tools import http
###
### http://docs.openstack.org/developer/glance/glanceapi.html
class GlanceDemo:
def __init__(self, keystone_url, tenant, user, password, glance_url):
self.keystone_url = keystone_url
self.tenant = tenant
self.user = user
self.password = password
self.public_url = glance_url
self.ks_token = self.__get__token()
self.images = None
def __get__token(self):
return http.get_token(self.keystone_url + '/tokens', self.tenant, self.user, self.password)
##
    ## get_images - Gets the list of images --- detail endpoint: images/detail
##
def get_images(self):
url = "%s/%s" % (self.public_url, "images/detail")
headers = {'X-Auth-Token': self.ks_token,
'Accept': "application/json"}
response = self.__get(url, headers)
        ## If the response is OK, build the data dictionary from the JSON body.
if response.status == 200:
res_data = response.read()
self.images = json.loads(res_data)
##
    ## Gets the metadata of an image -- returned as a list of tuples... [(,),...(,)]
    ## The values come back in the response headers....
    ##
    ## Uses method -- GET http://130.206.80.63:9292/v1/images/<image_id>
def metadata(self, image_id):
url = "%s/%s/%s" % (self.public_url, "images", image_id)
headers = {'X-Auth-Token': self.ks_token}
response = self.__get(url, headers)
metadata = response.getheaders()
return metadata
##
    ## Sets/changes the value of a metadata item on the image. This information travels in the HTTP headers
    ##
    ##
    ## Uses method -- PUT http://130.206.80.63:9292/v1/images/<image_id>
def put_metadata(self, image_id, metadata):
url = "%s/%s/%s" % (self.public_url, "images", image_id)
metadata['X-Auth-Token'] = self.ks_token
response = self.__put(url, metadata)
print "PUT: ", response.status
def __do_http_req(self, method, url, headers):
parsed_url = urlparse(url)
con = httplib.HTTPConnection(parsed_url.netloc)
con.request(method, parsed_url.path, None, headers)
return con.getresponse()
##
    ## Method that performs the HTTP GET
##
def __get(self, url, headers):
return self.__do_http_req("GET", url, headers)
##
    ## Method that performs the HTTP PUT
##
def __put(self, url, headers):
return self.__do_http_req("PUT", url, headers)
##
    ## Method that prints some image data to the screen.....
    ## NOTE -- the filter!!!
##
def list_images(self):
if self.images:
for i in self.images['images']:
res = ""
                ### If it is "sdc_aware",
try:
res = "****" if i['properties']['sdc_aware'] == 'True' else ""
except:
                    # The key does not exist => an exception is raised.....
pass
                print res, i['id'], i['name'], '\t\t\t', i['size'] ## Print a few fields to the screen
####
####
#### MAIN PROGRAM....
###
if __name__ == "__main__":
config = {}
execfile("sdc.conf", config)
g = GlanceDemo(config['keystone_url'], config['tenant'], config['user'], config['password'], config['glance_url'])
    g.get_images()  ### Query the list of images
    g.list_images()  ### Print them ---
###
    ### Set this property to 'True' (just to give it a value) -- for the sdc-template-paas image
    ### It has to be x-image-meta-property-* or it will blow up!!!
g.put_metadata('44dcdba3-a75d-46a3-b209-5e9035d2435e', {'x-image-meta-property-sdc_aware': 'True'})
print "---------------------------"
    ## Fetch the metadata of an image, just to show it.
m = g.metadata('add95618-4f13-4e46-9a38-a86cf4be80dd')
print m
avg_line_length: 34.381295 | max_line_length: 118 | alphanum_fraction: 0.611006

hexsha: 572582b653925dc45db84583fb2d97fccd4bb0d1 | size: 15705 | ext: py | lang: Python
max_stars/max_issues/max_forks_repo_path: python/ccxt/__init__.py | repo_name: vankiaio/ccxt-vkt | repo_head_hexsha: cc855bbb0c8cc8d2a7449d9a08e3bd7c7304ba8b | repo_licenses: ["MIT"] | counts: null | event datetimes: null
# -*- coding: utf-8 -*-
"""CCXT: CryptoCurrency eXchange Trading Library"""
# MIT License
# Copyright (c) 2017 Igor Kroitor
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ----------------------------------------------------------------------------
__version__ = '1.18.384'
# ----------------------------------------------------------------------------
from ccxt.base.exchange import Exchange # noqa: F401
from ccxt.base.decimal_to_precision import decimal_to_precision # noqa: F401
from ccxt.base.decimal_to_precision import TRUNCATE # noqa: F401
from ccxt.base.decimal_to_precision import ROUND # noqa: F401
from ccxt.base.decimal_to_precision import DECIMAL_PLACES # noqa: F401
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS # noqa: F401
from ccxt.base.decimal_to_precision import NO_PADDING # noqa: F401
from ccxt.base.decimal_to_precision import PAD_WITH_ZERO # noqa: F401
from ccxt.base import errors # noqa: F401
from ccxt.base.errors import BaseError # noqa: F401
from ccxt.base.errors import ExchangeError # noqa: F401
from ccxt.base.errors import NotSupported # noqa: F401
from ccxt.base.errors import AuthenticationError # noqa: F401
from ccxt.base.errors import PermissionDenied # noqa: F401
from ccxt.base.errors import AccountSuspended # noqa: F401
from ccxt.base.errors import InvalidNonce # noqa: F401
from ccxt.base.errors import InsufficientFunds # noqa: F401
from ccxt.base.errors import InvalidOrder # noqa: F401
from ccxt.base.errors import OrderNotFound # noqa: F401
from ccxt.base.errors import OrderNotCached # noqa: F401
from ccxt.base.errors import DuplicateOrderId # noqa: F401
from ccxt.base.errors import CancelPending # noqa: F401
from ccxt.base.errors import NetworkError # noqa: F401
from ccxt.base.errors import DDoSProtection # noqa: F401
from ccxt.base.errors import RequestTimeout # noqa: F401
from ccxt.base.errors import ExchangeNotAvailable # noqa: F401
from ccxt.base.errors import InvalidAddress # noqa: F401
from ccxt.base.errors import AddressPending # noqa: F401
from ccxt.base.errors import ArgumentsRequired # noqa: F401
from ccxt.base.errors import BadRequest # noqa: F401
from ccxt.base.errors import BadResponse # noqa: F401
from ccxt.base.errors import NullResponse # noqa: F401
from ccxt.base.errors import OrderImmediatelyFillable # noqa: F401
from ccxt.base.errors import OrderNotFillable # noqa: F401
from ccxt._1btcxe import _1btcxe # noqa: F401
from ccxt.acx import acx # noqa: F401
from ccxt.allcoin import allcoin # noqa: F401
from ccxt.anxpro import anxpro # noqa: F401
from ccxt.anybits import anybits # noqa: F401
from ccxt.bcex import bcex # noqa: F401
from ccxt.bibox import bibox # noqa: F401
from ccxt.bigone import bigone # noqa: F401
from ccxt.binance import binance # noqa: F401
from ccxt.bit2c import bit2c # noqa: F401
from ccxt.bitbank import bitbank # noqa: F401
from ccxt.bitbay import bitbay # noqa: F401
from ccxt.bitfinex import bitfinex # noqa: F401
from ccxt.bitfinex2 import bitfinex2 # noqa: F401
from ccxt.bitflyer import bitflyer # noqa: F401
from ccxt.bitforex import bitforex # noqa: F401
from ccxt.bithumb import bithumb # noqa: F401
from ccxt.bitibu import bitibu # noqa: F401
from ccxt.bitkk import bitkk # noqa: F401
from ccxt.bitlish import bitlish # noqa: F401
from ccxt.bitmarket import bitmarket # noqa: F401
from ccxt.bitmex import bitmex # noqa: F401
from ccxt.bitsane import bitsane # noqa: F401
from ccxt.bitso import bitso # noqa: F401
from ccxt.bitstamp import bitstamp # noqa: F401
from ccxt.bitstamp1 import bitstamp1 # noqa: F401
from ccxt.bittrex import bittrex # noqa: F401
from ccxt.bitz import bitz # noqa: F401
from ccxt.bl3p import bl3p # noqa: F401
from ccxt.bleutrade import bleutrade # noqa: F401
from ccxt.braziliex import braziliex # noqa: F401
from ccxt.btcalpha import btcalpha # noqa: F401
from ccxt.btcbox import btcbox # noqa: F401
from ccxt.btcchina import btcchina # noqa: F401
from ccxt.btcexchange import btcexchange # noqa: F401
from ccxt.btcmarkets import btcmarkets # noqa: F401
from ccxt.btctradeim import btctradeim # noqa: F401
from ccxt.btctradeua import btctradeua # noqa: F401
from ccxt.btcturk import btcturk # noqa: F401
from ccxt.buda import buda # noqa: F401
from ccxt.bxinth import bxinth # noqa: F401
from ccxt.ccex import ccex # noqa: F401
from ccxt.cex import cex # noqa: F401
from ccxt.chbtc import chbtc # noqa: F401
from ccxt.chilebit import chilebit # noqa: F401
from ccxt.cobinhood import cobinhood # noqa: F401
from ccxt.coinbase import coinbase # noqa: F401
from ccxt.coinbaseprime import coinbaseprime # noqa: F401
from ccxt.coinbasepro import coinbasepro # noqa: F401
from ccxt.coincheck import coincheck # noqa: F401
from ccxt.coinegg import coinegg # noqa: F401
from ccxt.coinex import coinex # noqa: F401
from ccxt.coinexchange import coinexchange # noqa: F401
from ccxt.coinfalcon import coinfalcon # noqa: F401
from ccxt.coinfloor import coinfloor # noqa: F401
from ccxt.coingi import coingi # noqa: F401
from ccxt.coinmarketcap import coinmarketcap # noqa: F401
from ccxt.coinmate import coinmate # noqa: F401
from ccxt.coinnest import coinnest # noqa: F401
from ccxt.coinone import coinone # noqa: F401
from ccxt.coinspot import coinspot # noqa: F401
from ccxt.cointiger import cointiger # noqa: F401
from ccxt.coolcoin import coolcoin # noqa: F401
from ccxt.coss import coss # noqa: F401
from ccxt.crex24 import crex24 # noqa: F401
from ccxt.crypton import crypton # noqa: F401
from ccxt.cryptopia import cryptopia # noqa: F401
from ccxt.deribit import deribit # noqa: F401
from ccxt.dsx import dsx # noqa: F401
from ccxt.ethfinex import ethfinex # noqa: F401
from ccxt.exmo import exmo # noqa: F401
from ccxt.exx import exx # noqa: F401
from ccxt.fcoin import fcoin # noqa: F401
from ccxt.fcoinjp import fcoinjp # noqa: F401
from ccxt.flowbtc import flowbtc # noqa: F401
from ccxt.foxbit import foxbit # noqa: F401
from ccxt.fybse import fybse # noqa: F401
from ccxt.fybsg import fybsg # noqa: F401
from ccxt.gateio import gateio # noqa: F401
from ccxt.gdax import gdax # noqa: F401
from ccxt.gemini import gemini # noqa: F401
from ccxt.getbtc import getbtc # noqa: F401
from ccxt.hadax import hadax # noqa: F401
from ccxt.hitbtc import hitbtc # noqa: F401
from ccxt.hitbtc2 import hitbtc2 # noqa: F401
from ccxt.huobipro import huobipro # noqa: F401
from ccxt.huobiru import huobiru # noqa: F401
from ccxt.ice3x import ice3x # noqa: F401
from ccxt.independentreserve import independentreserve # noqa: F401
from ccxt.indodax import indodax # noqa: F401
from ccxt.itbit import itbit # noqa: F401
from ccxt.jubi import jubi # noqa: F401
from ccxt.kkex import kkex # noqa: F401
from ccxt.kraken import kraken # noqa: F401
from ccxt.kucoin import kucoin # noqa: F401
from ccxt.kucoin2 import kucoin2 # noqa: F401
from ccxt.kuna import kuna # noqa: F401
from ccxt.lakebtc import lakebtc # noqa: F401
from ccxt.lbank import lbank # noqa: F401
from ccxt.liqui import liqui # noqa: F401
from ccxt.liquid import liquid # noqa: F401
from ccxt.livecoin import livecoin # noqa: F401
from ccxt.luno import luno # noqa: F401
from ccxt.lykke import lykke # noqa: F401
from ccxt.mercado import mercado # noqa: F401
from ccxt.mixcoins import mixcoins # noqa: F401
from ccxt.negociecoins import negociecoins # noqa: F401
from ccxt.nova import nova # noqa: F401
from ccxt.okcoincny import okcoincny # noqa: F401
from ccxt.okcoinusd import okcoinusd # noqa: F401
from ccxt.okex import okex # noqa: F401
from ccxt.paymium import paymium # noqa: F401
from ccxt.poloniex import poloniex # noqa: F401
from ccxt.quadrigacx import quadrigacx # noqa: F401
from ccxt.rightbtc import rightbtc # noqa: F401
from ccxt.southxchange import southxchange # noqa: F401
from ccxt.stronghold import stronghold # noqa: F401
from ccxt.surbitcoin import surbitcoin # noqa: F401
from ccxt.theocean import theocean # noqa: F401
from ccxt.therock import therock # noqa: F401
from ccxt.tidebit import tidebit # noqa: F401
from ccxt.tidex import tidex # noqa: F401
from ccxt.uex import uex # noqa: F401
from ccxt.upbit import upbit # noqa: F401
from ccxt.urdubit import urdubit # noqa: F401
from ccxt.vaultoro import vaultoro # noqa: F401
from ccxt.vbtc import vbtc # noqa: F401
from ccxt.virwox import virwox # noqa: F401
from ccxt.xbtce import xbtce # noqa: F401
from ccxt.yobit import yobit # noqa: F401
from ccxt.yunbi import yunbi # noqa: F401
from ccxt.zaif import zaif # noqa: F401
from ccxt.zb import zb # noqa: F401
exchanges = [
'_1btcxe',
'acx',
'allcoin',
'anxpro',
'anybits',
'bcex',
'bibox',
'bigone',
'binance',
'bit2c',
'bitbank',
'bitbay',
'bitfinex',
'bitfinex2',
'bitflyer',
'bitforex',
'bithumb',
'bitibu',
'bitkk',
'bitlish',
'bitmarket',
'bitmex',
'bitsane',
'bitso',
'bitstamp',
'bitstamp1',
'bittrex',
'bitz',
'bl3p',
'bleutrade',
'braziliex',
'btcalpha',
'btcbox',
'btcchina',
'btcexchange',
'btcmarkets',
'btctradeim',
'btctradeua',
'btcturk',
'buda',
'bxinth',
'ccex',
'cex',
'chbtc',
'chilebit',
'cobinhood',
'coinbase',
'coinbaseprime',
'coinbasepro',
'coincheck',
'coinegg',
'coinex',
'coinexchange',
'coinfalcon',
'coinfloor',
'coingi',
'coinmarketcap',
'coinmate',
'coinnest',
'coinone',
'coinspot',
'cointiger',
'coolcoin',
'coss',
'crex24',
'crypton',
'cryptopia',
'deribit',
'dsx',
'ethfinex',
'exmo',
'exx',
'fcoin',
'fcoinjp',
'flowbtc',
'foxbit',
'fybse',
'fybsg',
'gateio',
'gdax',
'gemini',
'getbtc',
'hadax',
'hitbtc',
'hitbtc2',
'huobipro',
'huobiru',
'ice3x',
'independentreserve',
'indodax',
'itbit',
'jubi',
'kkex',
'kraken',
'kucoin',
'kucoin2',
'kuna',
'lakebtc',
'lbank',
'liqui',
'liquid',
'livecoin',
'luno',
'lykke',
'mercado',
'mixcoins',
'negociecoins',
'nova',
'okcoincny',
'okcoinusd',
'okex',
'paymium',
'poloniex',
'quadrigacx',
'rightbtc',
'southxchange',
'stronghold',
'surbitcoin',
'theocean',
'therock',
'tidebit',
'tidex',
'uex',
'upbit',
'urdubit',
'vaultoro',
'vbtc',
'virwox',
'xbtce',
'yobit',
'yunbi',
'zaif',
'zb',
]
base = [
'Exchange',
'exchanges',
'decimal_to_precision',
]
__all__ = base + errors.__all__ + exchanges
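# Minimal usage sketch: every id in `exchanges` corresponds to a class imported
# above, so an exchange can be instantiated dynamically by id. The id "kraken"
# and the helper name below are illustrative only.
def _load_exchange_example(exchange_id="kraken"):
    if exchange_id not in exchanges:
        raise ValueError("unknown exchange id: %s" % exchange_id)
    return globals()[exchange_id]()
# _load_exchange_example().load_markets() would hit the network, so it is not
# executed here.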
avg_line_length: 45.65407 | max_line_length: 80 | alphanum_fraction: 0.529577

hexsha: 456267a1ab9293c58f78f226dc47a545b3cb41ed | size: 15776 | ext: py | lang: Python
max_stars/max_issues/max_forks_repo_path: gunicorn/util.py | repo_name: ykusakabe/gunicorn | repo_head_hexsha: c171c15c633ec5f70fd1e6c3f11dab8aa834ef27 | repo_licenses: ["MIT"] | counts: null | event datetimes: null
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
from __future__ import print_function
import email.utils
import fcntl
import io
import os
import pkg_resources
import pwd
import random
import socket
import sys
import textwrap
import time
import traceback
import inspect
import errno
import warnings
import logging
from gunicorn import _compat
from gunicorn.errors import AppImportError
from gunicorn.six import text_type
from gunicorn.workers import SUPPORTED_WORKERS
REDIRECT_TO = getattr(os, 'devnull', '/dev/null')
# Server and Date aren't technically hop-by-hop
# headers, but they are in the purview of the
# origin server which the WSGI spec says we should
# act like. So we drop them and add our own.
#
# In the future, concatenating server header values
# might be better, but nothing else does it and
# dropping them is easier.
hop_headers = set("""
connection keep-alive proxy-authenticate proxy-authorization
te trailers transfer-encoding upgrade
server date
""".split())
try:
from setproctitle import setproctitle
def _setproctitle(title):
setproctitle("gunicorn: %s" % title)
except ImportError:
def _setproctitle(title):
return
try:
from importlib import import_module
except ImportError:
def _resolve_name(name, package, level):
"""Return the absolute name of the module to be imported."""
if not hasattr(package, 'rindex'):
raise ValueError("'package' not set to a string")
dot = len(package)
for _ in range(level, 1, -1):
try:
dot = package.rindex('.', 0, dot)
except ValueError:
msg = "attempted relative import beyond top-level package"
raise ValueError(msg)
return "%s.%s" % (package[:dot], name)
def import_module(name, package=None):
"""Import a module.
The 'package' argument is required when performing a relative import. It
specifies the package to use as the anchor point from which to resolve the
relative import to an absolute import.
"""
if name.startswith('.'):
if not package:
raise TypeError("relative imports require the 'package' argument")
level = 0
for character in name:
if character != '.':
break
level += 1
name = _resolve_name(name[level:], package, level)
__import__(name)
return sys.modules[name]
def load_class(uri, default="gunicorn.workers.sync.SyncWorker",
section="gunicorn.workers"):
if inspect.isclass(uri):
return uri
if uri.startswith("egg:"):
# uses entry points
entry_str = uri.split("egg:")[1]
try:
dist, name = entry_str.rsplit("#", 1)
except ValueError:
dist = entry_str
name = default
try:
return pkg_resources.load_entry_point(dist, section, name)
except:
exc = traceback.format_exc()
msg = "class uri %r invalid or not found: \n\n[%s]"
raise RuntimeError(msg % (uri, exc))
else:
components = uri.split('.')
if len(components) == 1:
while True:
if uri.startswith("#"):
uri = uri[1:]
if uri in SUPPORTED_WORKERS:
components = SUPPORTED_WORKERS[uri].split(".")
break
try:
return pkg_resources.load_entry_point("gunicorn",
section, uri)
except:
exc = traceback.format_exc()
msg = "class uri %r invalid or not found: \n\n[%s]"
raise RuntimeError(msg % (uri, exc))
klass = components.pop(-1)
try:
mod = import_module('.'.join(components))
except:
exc = traceback.format_exc()
msg = "class uri %r invalid or not found: \n\n[%s]"
raise RuntimeError(msg % (uri, exc))
return getattr(mod, klass)
def get_username(uid):
""" get the username for a user id"""
return pwd.getpwuid(uid).pw_name
def set_owner_process(uid, gid, initgroups=False):
""" set user and group of workers processes """
if gid:
if uid:
try:
username = get_username(uid)
except KeyError:
initgroups = False
# versions of python < 2.6.2 don't manage unsigned int for
# groups like on osx or fedora
gid = abs(gid) & 0x7FFFFFFF
if initgroups:
os.initgroups(username, gid)
else:
os.setgid(gid)
if uid:
os.setuid(uid)
def chown(path, uid, gid):
gid = abs(gid) & 0x7FFFFFFF # see note above.
os.chown(path, uid, gid)
if sys.platform.startswith("win"):
def _waitfor(func, pathname, waitall=False):
        # Perform the operation
func(pathname)
# Now setup the wait loop
if waitall:
dirname = pathname
else:
dirname, name = os.path.split(pathname)
dirname = dirname or '.'
# Check for `pathname` to be removed from the filesystem.
# The exponential backoff of the timeout amounts to a total
# of ~1 second after which the deletion is probably an error
# anyway.
# Testing on a i7@4.3GHz shows that usually only 1 iteration is
# required when contention occurs.
timeout = 0.001
while timeout < 1.0:
            # Note we are only testing for the existence of the file(s) in
# the contents of the directory regardless of any security or
# access rights. If we have made it this far, we have sufficient
# permissions to do that much using Python's equivalent of the
# Windows API FindFirstFile.
# Other Windows APIs can fail or give incorrect results when
# dealing with files that are pending deletion.
L = os.listdir(dirname)
if not (L if waitall else name in L):
return
# Increase the timeout and try again
time.sleep(timeout)
timeout *= 2
warnings.warn('tests may fail, delete still pending for ' + pathname,
RuntimeWarning, stacklevel=4)
def _unlink(filename):
_waitfor(os.unlink, filename)
else:
_unlink = os.unlink
def unlink(filename):
try:
_unlink(filename)
except OSError as error:
# The filename need not exist.
if error.errno not in (errno.ENOENT, errno.ENOTDIR):
raise
def is_ipv6(addr):
try:
socket.inet_pton(socket.AF_INET6, addr)
except socket.error: # not a valid address
return False
except ValueError: # ipv6 not supported on this platform
return False
return True
def parse_address(netloc, default_port=8000):
if netloc.startswith("unix://"):
return netloc.split("unix://")[1]
if netloc.startswith("unix:"):
return netloc.split("unix:")[1]
if netloc.startswith("tcp://"):
netloc = netloc.split("tcp://")[1]
# get host
if '[' in netloc and ']' in netloc:
host = netloc.split(']')[0][1:].lower()
elif ':' in netloc:
host = netloc.split(':')[0].lower()
elif netloc == "":
host = "0.0.0.0"
else:
host = netloc.lower()
#get port
netloc = netloc.split(']')[-1]
if ":" in netloc:
port = netloc.split(':', 1)[1]
if not port.isdigit():
raise RuntimeError("%r is not a valid port number." % port)
port = int(port)
else:
port = default_port
return (host, port)
def close_on_exec(fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
flags |= fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
def set_non_blocking(fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
def close(sock):
try:
sock.close()
except socket.error:
pass
try:
from os import closerange
except ImportError:
def closerange(fd_low, fd_high):
# Iterate through and close all file descriptors.
for fd in range(fd_low, fd_high):
try:
os.close(fd)
except OSError: # ERROR, fd wasn't open to begin with (ignored)
pass
def write_chunk(sock, data):
if isinstance(data, text_type):
data = data.encode('utf-8')
chunk_size = "%X\r\n" % len(data)
chunk = b"".join([chunk_size.encode('utf-8'), data, b"\r\n"])
sock.sendall(chunk)
def write(sock, data, chunked=False):
if chunked:
return write_chunk(sock, data)
sock.sendall(data)
def write_nonblock(sock, data, chunked=False):
timeout = sock.gettimeout()
if timeout != 0.0:
try:
sock.setblocking(0)
return write(sock, data, chunked)
finally:
sock.setblocking(1)
else:
return write(sock, data, chunked)
def write_error(sock, status_int, reason, mesg):
html = textwrap.dedent("""\
<html>
<head>
<title>%(reason)s</title>
</head>
<body>
<h1><p>%(reason)s</p></h1>
%(mesg)s
</body>
</html>
""") % {"reason": reason, "mesg": _compat.html_escape(mesg)}
http = textwrap.dedent("""\
HTTP/1.1 %s %s\r
Connection: close\r
Content-Type: text/html\r
Content-Length: %d\r
\r
%s""") % (str(status_int), reason, len(html), html)
write_nonblock(sock, http.encode('latin1'))
def import_app(module):
parts = module.split(":", 1)
if len(parts) == 1:
module, obj = module, "application"
else:
module, obj = parts[0], parts[1]
try:
__import__(module)
except ImportError:
if module.endswith(".py") and os.path.exists(module):
msg = "Failed to find application, did you mean '%s:%s'?"
raise ImportError(msg % (module.rsplit(".", 1)[0], obj))
else:
raise
mod = sys.modules[module]
is_debug = logging.root.level == logging.DEBUG
try:
app = eval(obj, vars(mod))
except NameError:
if is_debug:
traceback.print_exception(*sys.exc_info())
raise AppImportError("Failed to find application: %r" % module)
if app is None:
raise AppImportError("Failed to find application object: %r" % obj)
if not callable(app):
raise AppImportError("Application object must be callable.")
return app
def getcwd():
# get current path, try to use PWD env first
try:
a = os.stat(os.environ['PWD'])
b = os.stat(os.getcwd())
if a.st_ino == b.st_ino and a.st_dev == b.st_dev:
cwd = os.environ['PWD']
else:
cwd = os.getcwd()
except:
cwd = os.getcwd()
return cwd
def http_date(timestamp=None):
"""Return the current date and time formatted for a message header."""
if timestamp is None:
timestamp = time.time()
s = email.utils.formatdate(timestamp, localtime=False, usegmt=True)
return s
def is_hoppish(header):
return header.lower().strip() in hop_headers
def daemonize(enable_stdio_inheritance=False):
"""\
Standard daemonization of a process.
http://www.svbug.com/documentation/comp.unix.programmer-FAQ/faq_2.html#SEC16
"""
if 'GUNICORN_FD' not in os.environ:
if os.fork():
os._exit(0)
os.setsid()
if os.fork():
os._exit(0)
os.umask(0o22)
# In both the following any file descriptors above stdin
        # stdout and stderr are left untouched. The inheritance
# option simply allows one to have output go to a file
# specified by way of shell redirection when not wanting
# to use --error-log option.
if not enable_stdio_inheritance:
# Remap all of stdin, stdout and stderr on to
# /dev/null. The expectation is that users have
# specified the --error-log option.
closerange(0, 3)
fd_null = os.open(REDIRECT_TO, os.O_RDWR)
if fd_null != 0:
os.dup2(fd_null, 0)
os.dup2(fd_null, 1)
os.dup2(fd_null, 2)
else:
fd_null = os.open(REDIRECT_TO, os.O_RDWR)
# Always redirect stdin to /dev/null as we would
# never expect to need to read interactive input.
if fd_null != 0:
os.close(0)
os.dup2(fd_null, 0)
# If stdout and stderr are still connected to
# their original file descriptors we check to see
# if they are associated with terminal devices.
# When they are we map them to /dev/null so that
            # they are still detached from any controlling terminal
# properly. If not we preserve them as they are.
#
# If stdin and stdout were not hooked up to the
# original file descriptors, then all bets are
# off and all we can really do is leave them as
# they were.
#
# This will allow 'gunicorn ... > output.log 2>&1'
# to work with stdout/stderr going to the file
# as expected.
#
# Note that if using --error-log option, the log
# file specified through shell redirection will
# only be used up until the log file specified
# by the option takes over. As it replaces stdout
# and stderr at the file descriptor level, then
# anything using stdout or stderr, including having
# cached a reference to them, will still work.
def redirect(stream, fd_expect):
try:
fd = stream.fileno()
if fd == fd_expect and stream.isatty():
os.close(fd)
os.dup2(fd_null, fd)
except AttributeError:
pass
redirect(sys.stdout, 1)
redirect(sys.stderr, 2)
def seed():
try:
random.seed(os.urandom(64))
except NotImplementedError:
random.seed('%s.%s' % (time.time(), os.getpid()))
def check_is_writeable(path):
try:
f = open(path, 'a')
except IOError as e:
raise RuntimeError("Error: '%s' isn't writable [%r]" % (path, e))
f.close()
def to_bytestring(value, encoding="utf8"):
"""Converts a string argument to a byte string"""
if isinstance(value, bytes):
return value
if not isinstance(value, text_type):
raise TypeError('%r is not a string' % value)
return value.encode(encoding)
def has_fileno(obj):
if not hasattr(obj, "fileno"):
return False
# check BytesIO case and maybe others
try:
obj.fileno()
except (AttributeError, IOError, io.UnsupportedOperation):
return False
return True
def warn(msg):
print("!!!", file=sys.stderr)
lines = msg.splitlines()
for i, line in enumerate(lines):
if i == 0:
line = "WARNING: %s" % line
print("!!! %s" % line, file=sys.stderr)
print("!!!\n", file=sys.stderr)
sys.stderr.flush()
def make_fail_app(msg):
msg = to_bytestring(msg)
def app(environ, start_response):
start_response("500 Internal Server Error", [
("Content-Type", "text/plain"),
("Content-Length", str(len(msg)))
])
return [msg]
return app
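# Illustrative checks of parse_address() behaviour; the helper name and the
# example addresses are arbitrary.
def _parse_address_examples():
    assert parse_address("unix:/tmp/gunicorn.sock") == "/tmp/gunicorn.sock"
    assert parse_address("127.0.0.1:9000") == ("127.0.0.1", 9000)
    assert parse_address("[::1]:9000") == ("::1", 9000)
    assert parse_address("") == ("0.0.0.0", 8000)  # falls back to default_port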
avg_line_length: 28.788321 | max_line_length: 82 | alphanum_fraction: 0.581643

hexsha: 6e63d1fc00819cce6964c191ae1846bed7ca5bc4 | size: 1735 | ext: py | lang: Python
max_stars/max_issues/max_forks_repo_path: test/test_phones.py | repo_name: fairwind2k/python_training | repo_head_hexsha: 61e0f6f2685f61bc5b74058bae47dcb95fa3d67c | repo_licenses: ["Apache-2.0"] | counts: null | event datetimes: null
import allure
from model.contact import Contact
import re
def test_phones_on_home_page(app):
with allure.step('Given a phones list from home page'):
contact_from_home_page = app.contacts.get_contacts_list()[0]
with allure.step('Given a phones list from edit page'):
contact_from_edit_page = app.contacts.get_contact_info_from_edit_page(0)
    with allure.step('Then the phones list from home page is equal to phones list from edit page'):
assert contact_from_home_page.all_phones_from_home_page == merge_phones_like_on_home_page(contact_from_edit_page)
def test_phones_on_contact_view_page(app):
with allure.step('Given a phones list from view page'):
contact_from_view_page = app.contacts.get_contact_from_view_page(0)
with allure.step('Given a phones list from edit page'):
contact_from_edit_page = app.contacts.get_contact_info_from_edit_page(0)
    with allure.step('Then the phones list from view page is equal to phones list from edit page'):
assert contact_from_view_page.homephone == contact_from_edit_page.homephone
assert contact_from_view_page.workphone == contact_from_edit_page.workphone
assert contact_from_view_page.mobile == contact_from_edit_page.mobile
assert contact_from_view_page.secondaryphone == contact_from_edit_page.secondaryphone
def clear(s):
return re.sub("[() -]", "", s)
def merge_phones_like_on_home_page(contact):
return "\n".join(filter(lambda x: x != "",
map(lambda x: clear(x),
filter(lambda x: x is not None,
[contact.homephone, contact.mobile, contact.workphone, contact.secondaryphone]))))
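# Illustrative check of the merge helper using a stand-in object, so no
# assumption is made about Contact's constructor signature:
class _FakeContact:
    homephone = "+7 (111) 222-33-44"
    mobile = None
    workphone = "555 66 77"
    secondaryphone = ""
# merge_phones_like_on_home_page(_FakeContact()) == "+71112223344\n5556677"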
avg_line_length: 40.348837 | max_line_length: 121 | alphanum_fraction: 0.71585

hexsha: 6780cb88a001c6f67b0b3c0190a2f9e331271465 | size: 4708 | ext: py | lang: Python
max_stars/max_issues/max_forks_repo_path: src/routes/streammap.py | repo_name: Chikiyaflix/server | repo_head_hexsha: 8f8aad4ef7bdd154fea2d8dfdd6fca9f79122449 | repo_licenses: ["MIT"] | counts: null | event datetimes: null
import os
import urllib
import flask
import requests
import src.functions.config
import src.functions.credentials  # needed below for refreshCredentials
streammapBP = flask.Blueprint("streammap", __name__)
@streammapBP.route("/api/v1/streammap")
async def streammapFunction():
a = flask.request.args.get("a") # AUTH
id = flask.request.args.get("id") # ID
name = flask.request.args.get("name") # NAME
server = flask.request.args.get("server") # SERVER
config = src.functions.config.readConfig()
if config.get("kill_switch") == True:
return flask.jsonify(
{
"code": 200,
"content": [{"name": "UNAVAILABLE", "url": "", "type": "normal"}],
"message": "Stream list generated successfully.",
"success": True,
}
)
if (
any(a == account["auth"] for account in config["account_list"])
or config.get("auth") == False
):
stream_list = [
{
"name": "Original",
"url": "%s/api/v1/redirectdownload/%s?a=%s&id=%s"
% (server, urllib.parse.quote(name), a, id),
"type": "normal",
}
]
if config.get("transcoded") == True:
req = requests.get(
"https://drive.google.com/get_video_info?docid=%s" % (id),
headers={"Authorization": "Bearer %s" % (config.get("access_token"))},
)
parsed = urllib.parse.parse_qs(urllib.parse.unquote(req.text))
if parsed.get("status") == ["ok"]:
for fmt in parsed["fmt_list"][0].split(","):
fmt_data = fmt.split("/")
stream_list.append(
{
"name": fmt_data[1],
"url": "%s/api/v1/redirectdownload/%s?a=%s&id=%s&itag=%s"
% (server, urllib.parse.quote(name), a, id, fmt_data[0]),
"type": "auto",
}
)
subtitle = {"url": ""}
if config.get("subtitles") == True:
config, drive = src.functions.credentials.refreshCredentials(
src.functions.config.readConfig()
)
params = {
"supportsAllDrives": True,
"fields": "parents",
"fileId": id,
}
parent = drive.files().get(**params).execute()["parents"][0]
params = {
"pageToken": None,
"supportsAllDrives": True,
"includeItemsFromAllDrives": True,
"fields": "files(id,name,mimeType,parents), incompleteSearch, nextPageToken",
"q": "'%s' in parents and trashed = false and (name contains '.srt' or name contains '.vtt')"
% (parent),
"orderBy": "name",
}
while True:
response = drive.files().list(**params).execute()
for file in response["files"]:
name_split = os.path.splitext(name)[0]
if name_split in file["name"]:
subtitle = {
"url": "%s/api/v1/subtitledownload/%s?a=%s&id=%s"
% (server, file["name"], a, file["id"])
}
try:
params["pageToken"] = response["nextPageToken"]
except KeyError:
break
if (
config.get("prefer_mkv") == False
and config.get("prefer_mp4") == False
and len(stream_list) > 1
):
default_quality = 1
elif config.get("prefer_mp4", True) == True and name.endswith(".mp4"):
default_quality = 0
elif config.get("prefer_mkv", False) == True and name.endswith(".mkv"):
default_quality = 0
elif len(stream_list) > 1:
default_quality = 1
else:
default_quality = 0
return flask.jsonify(
{
"code": 200,
"content": {
"default_quality": default_quality,
"sources": stream_list,
"subtitle": subtitle,
},
"message": "Stream list generated successfully!",
"success": True,
}
)
else:
return (
flask.jsonify(
{
"code": 401,
"content": None,
"message": "Your credentials are invalid.",
"success": False,
}
),
401,
)
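# Minimal wiring sketch (illustrative names): the blueprint defined above only
# serves requests once it is registered on a Flask application.
def _create_app_example():
    app = flask.Flask(__name__)
    app.register_blueprint(streammapBP)
    return app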
avg_line_length: 35.666667 | max_line_length: 109 | alphanum_fraction: 0.44966

hexsha: 29424d8e460a15593e45365f9f36fbff62302b1f | size: 223 | ext: py | lang: Python
max_stars_repo_path: setup.py | max_stars_repo_name: aaronbell/Roboto | max_stars_repo_head_hexsha: 09d561e84aff97ca5ca1374b3e53b099ab84568e | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 120 | stars_event datetimes: 2018-07-25T03:05:31.000Z to 2021-12-13T08:41:42.000Z
max_issues_repo_path: setup.py | max_issues_repo_name: sannorozco/Roboto | max_issues_repo_head_hexsha: 3852c00a1d483bf044d62d0c885daf57355027a6 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 74 | issues_event datetimes: 2018-06-29T17:57:43.000Z to 2022-01-09T19:39:49.000Z
max_forks_repo_path: setup.py | max_forks_repo_name: sannorozco/Roboto | max_forks_repo_head_hexsha: 3852c00a1d483bf044d62d0c885daf57355027a6 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 14 | forks_event datetimes: 2018-07-04T14:23:05.000Z to 2022-01-27T05:55:58.000Z
from setuptools import setup
setup(
name='Roboto-build',
version='0.0.1',
author='Google',
packages=['scripts', 'tests'],
license='LICENSE.txt',
description='Build chain to make v3 Roboto fonts',
)
avg_line_length: 20.272727 | max_line_length: 54 | alphanum_fraction: 0.64574

hexsha: e66294345d2416701616a3d484b82ccc2aea86e7 | size: 442 | ext: py | lang: Python
max_stars/max_issues/max_forks_repo_path: pelican/plugins/seafoam/constants.py | repo_name: MinchinWeb/seafoam | repo_head_hexsha: cf81eda8b9d6ff0f4a2e7ceaafda47e6ba1aad45 | repo_licenses: ["MIT"]
max_stars_count: 3 | stars_event datetimes: 2018-12-05T03:45:53.000Z to 2021-05-17T00:03:05.000Z
max_issues_count: 13 | issues_event datetimes: 2017-01-10T03:00:56.000Z to 2021-07-05T17:23:04.000Z
max_forks_count: 2 | forks_event datetimes: 2017-02-22T08:39:45.000Z to 2021-05-17T08:35:58.000Z
__title__ = "seafoam"
__version__ = "2.7.1-dev"
__description__ = "Pelican theme, first used for Minchin.ca."
__author__ = "W. Minchin"
__email__ = "w_minchin@hotmail.com"
__url__ = "http://blog.minchin.ca/label/seafoam/"
__license__ = "MIT License"
LOG_PREFIX = "[Seafoam]"
PLUGIN_LIST = [
"pelican.plugins.seafoam",
"pelican.plugins.jinja_filters",
]
PRODUCTION_PLUGIN_LIST = PLUGIN_LIST + [
"pelican.plugins.image_process",
]
avg_line_length: 26 | max_line_length: 61 | alphanum_fraction: 0.723982

hexsha: cfa37e5ba6f2893489621a191ec9920c72353636 | size: 43 | ext: py | lang: Python
max_stars/max_issues/max_forks_repo_path: user/__init__.py | repo_name: TinlokLee/OA | repo_head_hexsha: ee11343117dcffb28930b6d21c99823c95b8d651 | repo_licenses: ["MIT"] | counts: null | event datetimes: null
default_app_config = "user.apps.UserConfig"
avg_line_length: 43 | max_line_length: 43 | alphanum_fraction: 0.837209

hexsha: 79bf791dcb461486c1a617c9ae5fe68f4739bcf4 | size: 21703 | ext: py | lang: Python
max_stars/max_issues/max_forks_repo_path: sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_02_01/aio/operations/_express_route_circuit_peerings_operations.py | repo_name: rsdoherty/azure-sdk-for-python | repo_head_hexsha: 6bba5326677468e6660845a703686327178bb7b1 | repo_licenses: ["MIT"]
max_stars_count: 3 | stars_event datetimes: 2020-06-23T02:25:27.000Z to 2021-09-07T18:48:11.000Z
max_issues_count: 510 | issues_event datetimes: 2019-07-17T16:11:19.000Z to 2021-08-02T08:38:32.000Z
max_forks_count: 5 | forks_event datetimes: 2019-09-04T12:51:37.000Z to 2020-09-16T07:28:40.000Z
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteCircuitPeeringsOperations:
"""ExpressRouteCircuitPeeringsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified peering from the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'} # type: ignore
async def get(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
**kwargs
) -> "_models.ExpressRouteCircuitPeering":
"""Gets the specified authorization from the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCircuitPeering, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuitPeering
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCircuitPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
peering_parameters: "_models.ExpressRouteCircuitPeering",
**kwargs
) -> "_models.ExpressRouteCircuitPeering":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(peering_parameters, 'ExpressRouteCircuitPeering')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitPeering', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteCircuitPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
peering_parameters: "_models.ExpressRouteCircuitPeering",
**kwargs
) -> AsyncLROPoller["_models.ExpressRouteCircuitPeering"]:
"""Creates or updates a peering in the specified express route circuits.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param peering_parameters: Parameters supplied to the create or update express route circuit
peering operation.
:type peering_parameters: ~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuitPeering
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ExpressRouteCircuitPeering or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuitPeering]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitPeering"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
peering_parameters=peering_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'} # type: ignore
def list(
self,
resource_group_name: str,
circuit_name: str,
**kwargs
) -> AsyncIterable["_models.ExpressRouteCircuitPeeringListResult"]:
"""Gets all peerings in a specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRouteCircuitPeeringListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuitPeeringListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitPeeringListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitPeeringListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings'} # type: ignore
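# Editor's note (hedged usage sketch): the AsyncItemPaged returned by list()
# is consumed with async iteration; the pager drives prepare_request/get_next/
# extract_data above and follows next_link transparently, e.g.
#
#     async for peering in client.express_route_circuit_peerings.list(
#             resource_group_name, circuit_name):
#         print(peering.name)
#
# `client` is assumed to be an authenticated async NetworkManagementClient.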
| avg_line_length: 50.472093 | max_line_length: 220 | alphanum_fraction: 0.674792 |
hexsha: 400cff349477d78fdbb9892669250540af3e00cf | size: 2,445 | ext: py | lang: Python
max_stars_repo_path: graph_construction/checking/sanity.py | max_stars_repo_name: EmmaRocheteau/eICU-GNN-LSTM | max_stars_repo_head_hexsha: 5167eea88bfe7a3146ccb6194f54e8e57f52128b | max_stars_repo_licenses: ["MIT"] | max_stars_count: 53 | max_stars_repo_stars_event: 2021-01-14T11:34:31.000Z to 2022-03-15T15:05:03.000Z
max_issues_repo_path: graph_construction/checking/sanity.py | max_issues_repo_name: Wizard-gq/eICU-GNN-LSTM | max_issues_repo_head_hexsha: 5167eea88bfe7a3146ccb6194f54e8e57f52128b | max_issues_repo_licenses: ["MIT"] | max_issues_count: 2 | max_issues_repo_issues_event: 2021-02-13T02:29:10.000Z to 2021-02-26T10:24:40.000Z
max_forks_repo_path: graph_construction/checking/sanity.py | max_forks_repo_name: Wizard-gq/eICU-GNN-LSTM | max_forks_repo_head_hexsha: 5167eea88bfe7a3146ccb6194f54e8e57f52128b | max_forks_repo_licenses: ["MIT"] | max_forks_count: 15 | max_forks_repo_forks_event: 2021-01-25T04:10:38.000Z to 2022-01-16T07:08:41.000Z
import pandas as pd
import json
import copy
with open('paths.json', 'r') as f:
paths = json.load(f)  # load once; a second json.load on the same handle would fail at EOF
eICU_path = paths["eICU_path"]
graph_dir = paths["graph_dir"]
u = open("{}bert_u_k=5.txt".format(graph_dir), "r")
v = open("{}bert_v_k=5.txt".format(graph_dir), "r")
u_list = u.read()
v_list = v.read()
u_list = u_list.split("\n")
v_list = v_list.split("\n")
train_labels = pd.read_csv('{}train/labels.csv'.format(eICU_path), index_col='patient')
val_labels = pd.read_csv('{}val/labels.csv'.format(eICU_path), index_col='patient')
test_labels = pd.read_csv('{}test/labels.csv'.format(eICU_path), index_col='patient')
all_labels = pd.concat([train_labels, val_labels, test_labels], sort=False)
both_died_outcome = 0
both_survived_outcome = 0
diff_outcome = 0
num_comparisons = 100000
tracker = copy.copy(num_comparisons)
for edge in zip(u_list, v_list):
if all_labels.iloc[int(edge[0])]['actualhospitalmortality'] == all_labels.iloc[int(edge[1])]['actualhospitalmortality']:
if all_labels.iloc[int(edge[0])]['actualhospitalmortality'] == 1:
both_died_outcome += 1
else:
both_survived_outcome += 1
else:
diff_outcome += 1
tracker -= 1
if tracker < 0:
break
perc_both_died = both_died_outcome/num_comparisons * 100
perc_both_survived = both_survived_outcome/num_comparisons * 100
perc_died_and_survived = diff_outcome/num_comparisons * 100
print('==> GRAPH IN QUESTION')
print(str(perc_both_died)[:4] + '% of the connections both died')
print(str(perc_both_survived)[:4] + '% of the connections both survived')
print(str(perc_died_and_survived)[:4] + '% of the connections had one death and one survival')
perc_involving_died = perc_both_died + perc_died_and_survived * 0.5
print(str(perc_involving_died)[:4] + '% of the nodes involved in edges have died')
probs = all_labels['actualhospitalmortality'].value_counts()/len(all_labels)
perc_both_died = probs[1]**2 * 100
perc_both_survived = probs[0]**2 * 100
perc_died_and_survived = probs[0] * probs[1] * 100
print('==> RANDOM GRAPH')
print(str(perc_both_died)[:4] + '% of the connections both died')
print(str(perc_both_survived)[:4] + '% of the connections both survived')
print(str(perc_died_and_survived)[:4] + '% of the connections had one death and one survival')
perc_involving_died = perc_both_died + perc_died_and_survived * 0.5
print(str(perc_involving_died)[:4] + '% of the nodes involved in edges have died')
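# Editor's note (hedged sketch): the "RANDOM GRAPH" baseline above multiplies
# the marginal outcome probabilities. The helper below restates that
# calculation for an arbitrary 0/1 label Series; it mirrors the script's own
# formula (for unordered pairs one might instead use 2*p*q for the mixed case)
# and is illustrative only -- nothing in this script calls it.
def expected_pair_percentages(labels):
    probs = labels.value_counts() / len(labels)
    p_died = probs.get(1, 0.0)
    p_survived = probs.get(0, 0.0)
    return (p_died ** 2 * 100,          # both died
            p_survived ** 2 * 100,      # both survived
            p_died * p_survived * 100)  # one died, one survived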
| avg_line_length: 42.894737 | max_line_length: 124 | alphanum_fraction: 0.724744 |
hexsha: 316cf81ffafccfedc7b3056dd16e9c1bee468d27 | size: 4,257 | ext: py | lang: Python
max_stars_repo_path: test/examples/integrated/ubus/py/ubus_slave_driver.py | max_stars_repo_name: mgielda/uvm-python | max_stars_repo_head_hexsha: 7750bc163130f59741e464bb5fcf8fb5324dbf56 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 1 | max_stars_repo_stars_event: 2020-09-14T13:21:24.000Z to 2020-09-14T13:21:24.000Z
max_issues_repo_path: test/examples/integrated/ubus/py/ubus_slave_driver.py | max_issues_repo_name: gitCyan/uvm-python | max_issues_repo_head_hexsha: 45b8075518f828e0d77980e35c68a8527713b0e1 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event: null to null
max_forks_repo_path: test/examples/integrated/ubus/py/ubus_slave_driver.py | max_forks_repo_name: gitCyan/uvm-python | max_forks_repo_head_hexsha: 45b8075518f828e0d77980e35c68a8527713b0e1 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event: null to null
#//----------------------------------------------------------------------
#// Copyright 2007-2010 Mentor Graphics Corporation
#// Copyright 2007-2011 Cadence Design Systems, Inc.
#// Copyright 2010 Synopsys, Inc.
#// Copyright 2019 Tuomas Poikela (tpoikela)
#// All Rights Reserved Worldwide
#//
#// Licensed under the Apache License, Version 2.0 (the
#// "License"); you may not use this file except in
#// compliance with the License. You may obtain a copy of
#// the License at
#//
#// http://www.apache.org/licenses/LICENSE-2.0
#//
#// Unless required by applicable law or agreed to in
#// writing, software distributed under the License is
#// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
#// CONDITIONS OF ANY KIND, either express or implied. See
#// the License for the specific language governing
#// permissions and limitations under the License.
#//----------------------------------------------------------------------
import cocotb
from cocotb.triggers import RisingEdge, Timer, FallingEdge
from uvm.base import *
from uvm.comps import UVMDriver
from uvm.macros import uvm_component_utils
from ubus_transfer import *
#//------------------------------------------------------------------------------
#//
#// CLASS: ubus_slave_driver
#//
#//------------------------------------------------------------------------------
#class ubus_slave_driver extends uvm_driver #(ubus_transfer);
class ubus_slave_driver(UVMDriver):
# // new - constructor
def __init__(self, name, parent):
UVMDriver.__init__(self, name, parent)
# The virtual interface used to drive and view HDL signals.
self.vif = None
self.tag = "UBUS_SLAVE_DRIVER_" + name
def build_phase(self, phase):
super().build_phase(phase)
uvm_info(self.tag, "build_phase started", UVM_MEDIUM)
arr = []
if UVMConfigDb.get(self, "", "vif", arr):
self.vif = arr[0]
if self.vif is None:
self.uvm_report_fatal("NOVIF", "virtual interface must be set for: " +
self.get_full_name() + ".vif")
# run phase
async def run_phase(self, phase):
# fork
fork_get = cocotb.fork(self.get_and_drive())
fork_reset = cocotb.fork(self.reset_signals())
#await [fork_get.join(), fork_reset.join()]
await sv.fork_join([fork_get, fork_reset])
# join
# endtask : run_phase
# // get_and_drive
# virtual protected task get_and_drive();
async def get_and_drive(self):
await Timer(0)
await FallingEdge(self.vif.sig_reset)
while True:
await RisingEdge(self.vif.sig_clock)
req = []
await self.seq_item_port.get_next_item(req)
await self.respond_to_transfer(req[0])
self.seq_item_port.item_done()
# endtask : get_and_drive
# // reset_signals
async def reset_signals(self):
while True:
await RisingEdge(self.vif.sig_reset)
self.vif.sig_error <= 0
self.vif.sig_wait <= 0
self.vif.slave_en <= 0
# endtask : reset_signals
# // respond_to_transfer
async def respond_to_transfer(self, resp):
if resp.read_write != NOP:
self.vif.sig_error <= 0
for i in range(resp.size):
if resp.read_write == READ:
self.vif.slave_en <= 1
self.vif.sig_data_out <= resp.data[i]
if resp.wait_state[i] > 0:
self.vif.sig_wait <= 1
for j in range(resp.wait_state[i]):
await RisingEdge(self.vif.sig_clock)
self.vif.sig_wait <= 0
await RisingEdge(self.vif.sig_clock)
resp.data[i] = int(self.vif.sig_data)
self.vif.slave_en <= 0
self.vif.sig_wait <= 0 # 1'bz
self.vif.sig_error <= 0 # 1'bz
else:
await Timer(0)
# endtask : respond_to_transfer
#
#endclass : ubus_slave_driver
# Provide implementations of virtual methods such as get_type_name and create
uvm_component_utils(ubus_slave_driver)
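# Editor's note (hedged walkthrough of respond_to_transfer above): for a READ
# response with size == 2 and wait_state == [1, 0], the driver asserts
# slave_en and drives sig_data_out for beat 0, holds sig_wait high across one
# sig_clock edge, drops it, then samples sig_data on the following edge;
# beat 1 repeats without the wait, after which slave_en, sig_wait and
# sig_error are released. Field names are taken from the code above, not from
# a ubus specification.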
| avg_line_length: 34.893443 | max_line_length: 82 | alphanum_fraction: 0.564247 |
hexsha: 372da27dca398da0ba4c6827a4666e1356a2bcdc | size: 21,433 | ext: py | lang: Python
max_stars_repo_path: Lib/test/test_concurrent_futures.py | max_stars_repo_name: certik/python-3.2 | max_stars_repo_head_hexsha: 8c024c1e08248a4640429e3761905ae308d64e44 | max_stars_repo_licenses: ["PSF-2.0"] | max_stars_count: 1 | max_stars_repo_stars_event: 2019-12-31T18:13:30.000Z to 2019-12-31T18:13:30.000Z
max_issues_repo_path: Lib/test/test_concurrent_futures.py | max_issues_repo_name: priya-sharma-prog/python-3.2 | max_issues_repo_head_hexsha: 8c024c1e08248a4640429e3761905ae308d64e44 | max_issues_repo_licenses: ["PSF-2.0"] | max_issues_count: 1 | max_issues_repo_issues_event: 2019-07-04T09:18:21.000Z to 2019-07-04T19:14:03.000Z
max_forks_repo_path: Lib/test/test_concurrent_futures.py | max_forks_repo_name: priya-sharma-prog/python-3.2 | max_forks_repo_head_hexsha: 8c024c1e08248a4640429e3761905ae308d64e44 | max_forks_repo_licenses: ["PSF-2.0"] | max_forks_count: 13 | max_forks_repo_forks_event: 2015-04-02T16:49:38.000Z to 2021-10-17T20:14:14.000Z
import test.support
# Skip tests if _multiprocessing wasn't built.
test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test.support.import_module('multiprocessing.synchronize')
# import threading after _multiprocessing to raise a more relevant error
# message: "No module named _multiprocessing". _multiprocessing is not compiled
# without thread support.
test.support.import_module('threading')
from test.script_helper import assert_python_ok
import sys
import threading
import time
import unittest
from concurrent import futures
from concurrent.futures._base import (
PENDING, RUNNING, CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED, Future)
import concurrent.futures.process
def create_future(state=PENDING, exception=None, result=None):
f = Future()
f._state = state
f._exception = exception
f._result = result
return f
PENDING_FUTURE = create_future(state=PENDING)
RUNNING_FUTURE = create_future(state=RUNNING)
CANCELLED_FUTURE = create_future(state=CANCELLED)
CANCELLED_AND_NOTIFIED_FUTURE = create_future(state=CANCELLED_AND_NOTIFIED)
EXCEPTION_FUTURE = create_future(state=FINISHED, exception=IOError())
SUCCESSFUL_FUTURE = create_future(state=FINISHED, result=42)
def mul(x, y):
return x * y
def sleep_and_raise(t):
time.sleep(t)
raise Exception('this is an exception')
def sleep_and_print(t, msg):
time.sleep(t)
print(msg)
sys.stdout.flush()
class ExecutorMixin:
worker_count = 5
def setUp(self):
self.t1 = time.time()
try:
self.executor = self.executor_type(max_workers=self.worker_count)
except NotImplementedError as e:
self.skipTest(str(e))
self._prime_executor()
def tearDown(self):
self.executor.shutdown(wait=True)
dt = time.time() - self.t1
if test.support.verbose:
print("%.2fs" % dt, end=' ')
self.assertLess(dt, 60, "synchronization issue: test lasted too long")
def _prime_executor(self):
# Make sure that the executor is ready to do work before running the
# tests. This should reduce the probability of timeouts in the tests.
futures = [self.executor.submit(time.sleep, 0.1)
for _ in range(self.worker_count)]
for f in futures:
f.result()
class ThreadPoolMixin(ExecutorMixin):
executor_type = futures.ThreadPoolExecutor
class ProcessPoolMixin(ExecutorMixin):
executor_type = futures.ProcessPoolExecutor
class ExecutorShutdownTest(unittest.TestCase):
def test_run_after_shutdown(self):
self.executor.shutdown()
self.assertRaises(RuntimeError,
self.executor.submit,
pow, 2, 5)
def test_interpreter_shutdown(self):
# Test the atexit hook for shutdown of worker threads and processes
rc, out, err = assert_python_ok('-c', """if 1:
from concurrent.futures import {executor_type}
from time import sleep
from test.test_concurrent_futures import sleep_and_print
t = {executor_type}(5)
t.submit(sleep_and_print, 1.0, "apple")
""".format(executor_type=self.executor_type.__name__))
# Errors in atexit hooks don't change the process exit code, check
# stderr manually.
self.assertFalse(err)
self.assertEqual(out.strip(), b"apple")
def test_hang_issue12364(self):
fs = [self.executor.submit(time.sleep, 0.1) for _ in range(50)]
self.executor.shutdown()
for f in fs:
f.result()
class ThreadPoolShutdownTest(ThreadPoolMixin, ExecutorShutdownTest):
def _prime_executor(self):
pass
def test_threads_terminate(self):
self.executor.submit(mul, 21, 2)
self.executor.submit(mul, 6, 7)
self.executor.submit(mul, 3, 14)
self.assertEqual(len(self.executor._threads), 3)
self.executor.shutdown()
for t in self.executor._threads:
t.join()
def test_context_manager_shutdown(self):
with futures.ThreadPoolExecutor(max_workers=5) as e:
executor = e
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
for t in executor._threads:
t.join()
def test_del_shutdown(self):
executor = futures.ThreadPoolExecutor(max_workers=5)
executor.map(abs, range(-5, 5))
threads = executor._threads
del executor
for t in threads:
t.join()
class ProcessPoolShutdownTest(ProcessPoolMixin, ExecutorShutdownTest):
def _prime_executor(self):
pass
def test_processes_terminate(self):
self.executor.submit(mul, 21, 2)
self.executor.submit(mul, 6, 7)
self.executor.submit(mul, 3, 14)
self.assertEqual(len(self.executor._processes), 5)
processes = self.executor._processes
self.executor.shutdown()
for p in processes:
p.join()
def test_context_manager_shutdown(self):
with futures.ProcessPoolExecutor(max_workers=5) as e:
processes = e._processes
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
for p in processes:
p.join()
def test_del_shutdown(self):
executor = futures.ProcessPoolExecutor(max_workers=5)
list(executor.map(abs, range(-5, 5)))
queue_management_thread = executor._queue_management_thread
processes = executor._processes
del executor
queue_management_thread.join()
for p in processes:
p.join()
class WaitTests(unittest.TestCase):
def test_first_completed(self):
future1 = self.executor.submit(mul, 21, 2)
future2 = self.executor.submit(time.sleep, 1.5)
done, not_done = futures.wait(
[CANCELLED_FUTURE, future1, future2],
return_when=futures.FIRST_COMPLETED)
self.assertEqual(set([future1]), done)
self.assertEqual(set([CANCELLED_FUTURE, future2]), not_done)
def test_first_completed_some_already_completed(self):
future1 = self.executor.submit(time.sleep, 1.5)
finished, pending = futures.wait(
[CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, future1],
return_when=futures.FIRST_COMPLETED)
self.assertEqual(
set([CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE]),
finished)
self.assertEqual(set([future1]), pending)
def test_first_exception(self):
future1 = self.executor.submit(mul, 2, 21)
future2 = self.executor.submit(sleep_and_raise, 1.5)
future3 = self.executor.submit(time.sleep, 3)
finished, pending = futures.wait(
[future1, future2, future3],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([future1, future2]), finished)
self.assertEqual(set([future3]), pending)
def test_first_exception_some_already_complete(self):
future1 = self.executor.submit(divmod, 21, 0)
future2 = self.executor.submit(time.sleep, 1.5)
finished, pending = futures.wait(
[SUCCESSFUL_FUTURE,
CANCELLED_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1, future2],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1]), finished)
self.assertEqual(set([CANCELLED_FUTURE, future2]), pending)
def test_first_exception_one_already_failed(self):
future1 = self.executor.submit(time.sleep, 2)
finished, pending = futures.wait(
[EXCEPTION_FUTURE, future1],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([EXCEPTION_FUTURE]), finished)
self.assertEqual(set([future1]), pending)
def test_all_completed(self):
future1 = self.executor.submit(divmod, 2, 0)
future2 = self.executor.submit(mul, 2, 21)
finished, pending = futures.wait(
[SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
future1,
future2],
return_when=futures.ALL_COMPLETED)
self.assertEqual(set([SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
future1,
future2]), finished)
self.assertEqual(set(), pending)
def test_timeout(self):
future1 = self.executor.submit(mul, 6, 7)
future2 = self.executor.submit(time.sleep, 3)
finished, pending = futures.wait(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2],
timeout=1.5,
return_when=futures.ALL_COMPLETED)
self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1]), finished)
self.assertEqual(set([future2]), pending)
class ThreadPoolWaitTests(ThreadPoolMixin, WaitTests):
pass
class ProcessPoolWaitTests(ProcessPoolMixin, WaitTests):
pass
class AsCompletedTests(unittest.TestCase):
# TODO(brian@sweetapp.com): Should have a test with a non-zero timeout.
def test_no_timeout(self):
future1 = self.executor.submit(mul, 2, 21)
future2 = self.executor.submit(mul, 7, 6)
completed = set(futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2]))
self.assertEqual(set(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2]),
completed)
def test_zero_timeout(self):
future1 = self.executor.submit(time.sleep, 2)
completed_futures = set()
try:
for future in futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1],
timeout=0):
completed_futures.add(future)
except futures.TimeoutError:
pass
self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE]),
completed_futures)
class ThreadPoolAsCompletedTests(ThreadPoolMixin, AsCompletedTests):
pass
class ProcessPoolAsCompletedTests(ProcessPoolMixin, AsCompletedTests):
pass
class ExecutorTest(unittest.TestCase):
# Executor.shutdown() and context manager usage is tested by
# ExecutorShutdownTest.
def test_submit(self):
future = self.executor.submit(pow, 2, 8)
self.assertEqual(256, future.result())
def test_submit_keyword(self):
future = self.executor.submit(mul, 2, y=8)
self.assertEqual(16, future.result())
def test_map(self):
self.assertEqual(
list(self.executor.map(pow, range(10), range(10))),
list(map(pow, range(10), range(10))))
def test_map_exception(self):
i = self.executor.map(divmod, [1, 1, 1, 1], [2, 3, 0, 5])
self.assertEqual(i.__next__(), (0, 1))
self.assertEqual(i.__next__(), (0, 1))
self.assertRaises(ZeroDivisionError, i.__next__)
def test_map_timeout(self):
results = []
try:
for i in self.executor.map(time.sleep,
[0, 0, 3],
timeout=1.5):
results.append(i)
except futures.TimeoutError:
pass
else:
self.fail('expected TimeoutError')
self.assertEqual([None, None], results)
class ThreadPoolExecutorTest(ThreadPoolMixin, ExecutorTest):
pass
class ProcessPoolExecutorTest(ProcessPoolMixin, ExecutorTest):
pass
class FutureTests(unittest.TestCase):
def test_done_callback_with_result(self):
callback_result = None
def fn(callback_future):
nonlocal callback_result
callback_result = callback_future.result()
f = Future()
f.add_done_callback(fn)
f.set_result(5)
self.assertEqual(5, callback_result)
def test_done_callback_with_exception(self):
callback_exception = None
def fn(callback_future):
nonlocal callback_exception
callback_exception = callback_future.exception()
f = Future()
f.add_done_callback(fn)
f.set_exception(Exception('test'))
self.assertEqual(('test',), callback_exception.args)
def test_done_callback_with_cancel(self):
was_cancelled = None
def fn(callback_future):
nonlocal was_cancelled
was_cancelled = callback_future.cancelled()
f = Future()
f.add_done_callback(fn)
self.assertTrue(f.cancel())
self.assertTrue(was_cancelled)
def test_done_callback_raises(self):
with test.support.captured_stderr() as stderr:
raising_was_called = False
fn_was_called = False
def raising_fn(callback_future):
nonlocal raising_was_called
raising_was_called = True
raise Exception('doh!')
def fn(callback_future):
nonlocal fn_was_called
fn_was_called = True
f = Future()
f.add_done_callback(raising_fn)
f.add_done_callback(fn)
f.set_result(5)
self.assertTrue(raising_was_called)
self.assertTrue(fn_was_called)
self.assertIn('Exception: doh!', stderr.getvalue())
def test_done_callback_already_successful(self):
callback_result = None
def fn(callback_future):
nonlocal callback_result
callback_result = callback_future.result()
f = Future()
f.set_result(5)
f.add_done_callback(fn)
self.assertEqual(5, callback_result)
def test_done_callback_already_failed(self):
callback_exception = None
def fn(callback_future):
nonlocal callback_exception
callback_exception = callback_future.exception()
f = Future()
f.set_exception(Exception('test'))
f.add_done_callback(fn)
self.assertEqual(('test',), callback_exception.args)
def test_done_callback_already_cancelled(self):
was_cancelled = None
def fn(callback_future):
nonlocal was_cancelled
was_cancelled = callback_future.cancelled()
f = Future()
self.assertTrue(f.cancel())
f.add_done_callback(fn)
self.assertTrue(was_cancelled)
def test_repr(self):
self.assertRegex(repr(PENDING_FUTURE),
'<Future at 0x[0-9a-f]+ state=pending>')
self.assertRegex(repr(RUNNING_FUTURE),
'<Future at 0x[0-9a-f]+ state=running>')
self.assertRegex(repr(CANCELLED_FUTURE),
'<Future at 0x[0-9a-f]+ state=cancelled>')
self.assertRegex(repr(CANCELLED_AND_NOTIFIED_FUTURE),
'<Future at 0x[0-9a-f]+ state=cancelled>')
self.assertRegex(
repr(EXCEPTION_FUTURE),
'<Future at 0x[0-9a-f]+ state=finished raised IOError>')
self.assertRegex(
repr(SUCCESSFUL_FUTURE),
'<Future at 0x[0-9a-f]+ state=finished returned int>')
def test_cancel(self):
f1 = create_future(state=PENDING)
f2 = create_future(state=RUNNING)
f3 = create_future(state=CANCELLED)
f4 = create_future(state=CANCELLED_AND_NOTIFIED)
f5 = create_future(state=FINISHED, exception=IOError())
f6 = create_future(state=FINISHED, result=5)
self.assertTrue(f1.cancel())
self.assertEqual(f1._state, CANCELLED)
self.assertFalse(f2.cancel())
self.assertEqual(f2._state, RUNNING)
self.assertTrue(f3.cancel())
self.assertEqual(f3._state, CANCELLED)
self.assertTrue(f4.cancel())
self.assertEqual(f4._state, CANCELLED_AND_NOTIFIED)
self.assertFalse(f5.cancel())
self.assertEqual(f5._state, FINISHED)
self.assertFalse(f6.cancel())
self.assertEqual(f6._state, FINISHED)
def test_cancelled(self):
self.assertFalse(PENDING_FUTURE.cancelled())
self.assertFalse(RUNNING_FUTURE.cancelled())
self.assertTrue(CANCELLED_FUTURE.cancelled())
self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.cancelled())
self.assertFalse(EXCEPTION_FUTURE.cancelled())
self.assertFalse(SUCCESSFUL_FUTURE.cancelled())
def test_done(self):
self.assertFalse(PENDING_FUTURE.done())
self.assertFalse(RUNNING_FUTURE.done())
self.assertTrue(CANCELLED_FUTURE.done())
self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.done())
self.assertTrue(EXCEPTION_FUTURE.done())
self.assertTrue(SUCCESSFUL_FUTURE.done())
def test_running(self):
self.assertFalse(PENDING_FUTURE.running())
self.assertTrue(RUNNING_FUTURE.running())
self.assertFalse(CANCELLED_FUTURE.running())
self.assertFalse(CANCELLED_AND_NOTIFIED_FUTURE.running())
self.assertFalse(EXCEPTION_FUTURE.running())
self.assertFalse(SUCCESSFUL_FUTURE.running())
def test_result_with_timeout(self):
self.assertRaises(futures.TimeoutError,
PENDING_FUTURE.result, timeout=0)
self.assertRaises(futures.TimeoutError,
RUNNING_FUTURE.result, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_FUTURE.result, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_AND_NOTIFIED_FUTURE.result, timeout=0)
self.assertRaises(IOError, EXCEPTION_FUTURE.result, timeout=0)
self.assertEqual(SUCCESSFUL_FUTURE.result(timeout=0), 42)
def test_result_with_success(self):
# TODO(brian@sweetapp.com): This test is timing dependent.
def notification():
# Wait until the main thread is waiting for the result.
time.sleep(1)
f1.set_result(42)
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertEqual(f1.result(timeout=5), 42)
def test_result_with_cancel(self):
# TODO(brian@sweetapp.com): This test is timing dependent.
def notification():
# Wait until the main thread is waiting for the result.
time.sleep(1)
f1.cancel()
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertRaises(futures.CancelledError, f1.result, timeout=5)
def test_exception_with_timeout(self):
self.assertRaises(futures.TimeoutError,
PENDING_FUTURE.exception, timeout=0)
self.assertRaises(futures.TimeoutError,
RUNNING_FUTURE.exception, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_FUTURE.exception, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_AND_NOTIFIED_FUTURE.exception, timeout=0)
self.assertTrue(isinstance(EXCEPTION_FUTURE.exception(timeout=0),
IOError))
self.assertEqual(SUCCESSFUL_FUTURE.exception(timeout=0), None)
def test_exception_with_success(self):
def notification():
# Wait until the main thread is waiting for the exception.
time.sleep(1)
with f1._condition:
f1._state = FINISHED
f1._exception = IOError()
f1._condition.notify_all()
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertTrue(isinstance(f1.exception(timeout=5), IOError))
@test.support.reap_threads
def test_main():
try:
test.support.run_unittest(ProcessPoolExecutorTest,
ThreadPoolExecutorTest,
ProcessPoolWaitTests,
ThreadPoolWaitTests,
ProcessPoolAsCompletedTests,
ThreadPoolAsCompletedTests,
FutureTests,
ProcessPoolShutdownTest,
ThreadPoolShutdownTest)
finally:
test.support.reap_children()
if __name__ == "__main__":
test_main()
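# Editor's note (hedged sketch of the API exercised by WaitTests above): a
# typical caller pairs submit() with futures.wait(), e.g.
#
#     with futures.ThreadPoolExecutor(max_workers=4) as ex:
#         fs = [ex.submit(pow, 2, n) for n in range(8)]
#         done, not_done = futures.wait(fs, timeout=5,
#                                       return_when=futures.ALL_COMPLETED)
#         results = [f.result() for f in done]
#
# which is the behaviour the tests pin down using pre-built PENDING/RUNNING/
# CANCELLED/FINISHED futures.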
| avg_line_length: 34.347756 | max_line_length: 79 | alphanum_fraction: 0.617972 |
hexsha: 81808341feab034d2ba8a68e22b06f840df76611 | size: 23,325 | ext: py | lang: Python
max_stars_repo_path: tests/test_math.py | max_stars_repo_name: DaveMcEwan/dmppl | max_stars_repo_head_hexsha: 68e8a121d4591360080cd40121add1796ae48a1b | max_stars_repo_licenses: ["MIT"] | max_stars_count: 1 | max_stars_repo_stars_event: 2020-05-05T19:46:43.000Z to 2020-05-05T19:46:43.000Z
max_issues_repo_path: tests/test_math.py | max_issues_repo_name: DaveMcEwan/dmppl | max_issues_repo_head_hexsha: 68e8a121d4591360080cd40121add1796ae48a1b | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event: null to null
max_forks_repo_path: tests/test_math.py | max_forks_repo_name: DaveMcEwan/dmppl | max_forks_repo_head_hexsha: 68e8a121d4591360080cd40121add1796ae48a1b | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event: null to null
from dmppl.math import *
import math
import os
import tempfile
import shutil
import unittest
class Test_isEven(unittest.TestCase): # {{{
def test_Pos0(self):
result = isEven(3)
self.assertFalse(result)
def test_Pos1(self):
result = isEven(4)
self.assertTrue(result)
def test_Zero(self):
result = isEven(0)
self.assertTrue(result)
def test_Neg0(self):
result = isEven(-7)
self.assertFalse(result)
def test_Neg1(self):
result = isEven(-8)
self.assertTrue(result)
def test_Float0(self):
self.assertRaises(AssertionError, isEven, 7.0)
def test_Float1(self):
self.assertRaises(AssertionError, isEven, 8.0)
# }}} class Test_isEven
class Test_isOdd(unittest.TestCase): # {{{
def test_Pos0(self):
result = isOdd(3)
self.assertTrue(result)
def test_Pos1(self):
result = isOdd(4)
self.assertFalse(result)
def test_Zero(self):
result = isOdd(0)
self.assertFalse(result)
def test_Neg0(self):
result = isOdd(-7)
self.assertTrue(result)
def test_Neg1(self):
result = isOdd(-8)
self.assertFalse(result)
def test_Float0(self):
self.assertRaises(AssertionError, isOdd, 7.0)
def test_Float1(self):
self.assertRaises(AssertionError, isOdd, 8.0)
# }}} class Test_isOdd
class Test_isPow2(unittest.TestCase): # {{{
def test_Basic0(self):
result = isPow2(3)
self.assertFalse(result)
def test_Basic1(self):
result = isPow2(4)
self.assertTrue(result)
def test_Int0(self):
result = isPow2(7)
self.assertFalse(result)
def test_Int1(self):
result = isPow2(8)
self.assertTrue(result)
def test_Neg0(self):
self.assertRaises(AssertionError, isPow2, -8)
def test_Float0(self):
self.assertRaises(AssertionError, isPow2, 7.0)
# }}} class Test_isPow2
class Test_clog2(unittest.TestCase): # {{{
def test_Basic0(self):
result = clog2(5)
self.assertEqual(result, 3)
def test_Int0(self):
result = clog2(9)
self.assertEqual(result, 4)
def test_Float0(self):
result = clog2(4.1)
self.assertEqual(result, 3)
def test_Neg0(self):
self.assertRaises(AssertionError, clog2, -8)
# }}} class Test_clog2
class Test_dotp(unittest.TestCase): # {{{
def test_Basic0(self):
result = dotp([1, 2, 5], (-10.0, 11.1, 0.5))
self.assertEqual(result, 14.7)
def test_Int0(self):
result = dotp([1, 2, 5], (-10, 11, 5))
self.assertEqual(result, 37)
def test_Float0(self):
result = dotp([1.1, 2.2, 5.5], (-10.0, 11.1, 5.5))
self.assertEqual(result, 43.67)
def test_Iters(self):
result = dotp(range(0, 5), range(5, 55, 3)) # unequal length
self.assertEqual(result, 140)
# }}} class Test_dotp
class Test_clipNorm(unittest.TestCase): # {{{
def test_Basic0(self):
result = clipNorm(0.3)
self.assertEqual(result, 0.3)
def test_Basic1(self):
result = clipNorm(3, 0, 10)
self.assertEqual(result, 0.3)
def test_Basic2(self):
result = clipNorm(0, -1, 1)
self.assertEqual(result, 0.5)
def test_Basic3(self):
result = clipNorm(-0.25, -2.5, 2.5)
self.assertEqual(result, 0.45)
def test_Ints0(self):
result = clipNorm(2, 1, 3)
self.assertAlmostEqual(result, 0.5)
def test_Floats0(self):
result = clipNorm(2.0, 1.0, 3.0)
self.assertAlmostEqual(result, 0.5)
def test_Floats1(self):
result = clipNorm(0.2, 0.1, 0.3)
self.assertAlmostEqual(result, 0.5)
def test_TypeMix0(self):
result = clipNorm(2, 1, 3.0)
self.assertAlmostEqual(result, 0.5)
def test_TypeMix1(self):
result = clipNorm(2, 1.0, 3)
self.assertAlmostEqual(result, 0.5)
def test_TypeMix2(self):
result = clipNorm(2.0, 1, 3.0)
self.assertAlmostEqual(result, 0.5)
def test_TypeMix3(self):
result = clipNorm(2.0, 1.0, 3)
self.assertAlmostEqual(result, 0.5)
def test_OutOfRangeLo0(self):
result = clipNorm(0.1, 0.2, 0.3)
self.assertEqual(result, 0.0)
def test_OutOfRangeLo1(self):
result = clipNorm(1, 2, 3)
self.assertEqual(result, 0.0)
def test_OutOfRangeLo2(self):
result = clipNorm(0.1, 0.3, 0.2)
self.assertEqual(result, 0.0)
def test_OutOfRangeLo3(self):
result = clipNorm(1, 3, 2)
self.assertEqual(result, 0.0)
def test_OutOfRangeHi0(self):
result = clipNorm(0.4, 0.2, 0.3)
self.assertEqual(result, 1.0)
def test_OutOfRangeHi1(self):
result = clipNorm(4, 2, 3)
self.assertEqual(result, 1.0)
def test_OutOfRangeHi2(self):
result = clipNorm(0.4, 0.3, 0.2)
self.assertEqual(result, 1.0)
def test_OutOfRangeHi3(self):
result = clipNorm(4, 3, 2)
self.assertEqual(result, 1.0)
# }}} class Test_clipNorm
class Test_int2base(unittest.TestCase): # {{{
def test_Basic0(self):
result = int2base(5, 2)
self.assertEqual(result, "101")
def test_Basic1(self):
result = int2base(15, 16)
self.assertEqual(result, "f")
def test_Basic2(self):
result = int2base(35**10-1, 35)
self.assertEqual(result, "y"*10)
def test_Float0(self):
self.assertRaises(AssertionError, int2base, 8.0, 10)
def test_Float1(self):
self.assertRaises(AssertionError, int2base, 8, 10.0)
def test_Neg0(self):
self.assertRaises(AssertionError, int2base, -8, 10)
def test_Neg1(self):
self.assertRaises(AssertionError, int2base, 8, -10)
# }}} class Test_int2base
class Test_powsineCoeffs(unittest.TestCase): # {{{
def test_Basic0(self):
n = 10
alpha = 0
result = powsineCoeffs(n, alpha)
self.assertEqual(len(result), n)
self.assertEqual(result.shape, (n,))
self.assertSequenceEqual(result.tolist(), np.ones(n).tolist())
def test_Basic1(self):
n = 5
alpha = 1
result = powsineCoeffs(n, alpha)
self.assertEqual(result.shape, (n,))
golden = np.array([0.0, 0.707, 1.0, 0.707, 0.0])
self.assertTupleEqual(result.shape, golden.shape)
for r,g in zip(result, golden):
self.assertAlmostEqual(r, g, places=3)
def test_Basic2(self):
n = 10
alpha = 2
result = powsineCoeffs(n, alpha)
self.assertEqual(result.shape, (n,))
golden = np.array([0.000, 0.117, 0.413, 0.750, 0.970,
0.970, 0.750, 0.413, 0.117, 0.000])
self.assertTupleEqual(result.shape, golden.shape)
for r,g in zip(result, golden):
self.assertAlmostEqual(r, g, places=3)
# }}} class Test_powsineCoeffs
class Test_saveNpy(unittest.TestCase): # {{{
def setUp(self):
self.tstDir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tstDir)
def test_Basic0(self):
arr = np.arange(5)
fname = os.path.join(self.tstDir, "Basic0")
result = saveNpy(arr, fname)
self.assertEqual(result, None)
# Load back in again with standard NumPy method.
with gzip.GzipFile(fname + ".npy.gz", 'rb') as fd:
rdbk = np.load(fd)
for r,g in zip(rdbk, arr):
self.assertEqual(r, g)
def test_Basic1(self):
arr = np.arange(6)
fname = os.path.join(self.tstDir, "Basic1.npy.gz")
result = saveNpy(arr, fname)
self.assertEqual(result, None)
# Load back in again with standard NumPy method.
with gzip.GzipFile(fname, 'rb') as fd:
rdbk = np.load(fd)
for r,g in zip(rdbk, arr):
self.assertEqual(r, g)
def test_Basic2(self):
arr = np.arange(7)
fname = os.path.join(self.tstDir, "Basic2.npy")
result = saveNpy(arr, fname)
self.assertEqual(result, None)
# Load back in again with standard NumPy method.
with open(fname, 'rb') as fd:
rdbk = np.load(fd)
for r,g in zip(rdbk, arr):
self.assertEqual(r, g)
# }}} class Test_saveNpy
class Test_loadNpy(unittest.TestCase): # {{{
def setUp(self):
self.tstDir = tempfile.mkdtemp()
self.arr0 = np.arange(5)
fname0 = os.path.join(self.tstDir, "arr0.npy.gz")
with gzip.GzipFile(fname0, 'wb') as fd:
np.save(fd, self.arr0)
self.arr1 = np.ones(6)
fname1 = os.path.join(self.tstDir, "arr1.npy")
with open(fname1, 'wb') as fd:
np.save(fd, self.arr1)
def tearDown(self):
shutil.rmtree(self.tstDir)
def test_Basic0(self):
fname = os.path.join(self.tstDir, "arr0")
result = loadNpy(fname)
for r,g in zip(result, self.arr0):
self.assertEqual(r, g)
def test_Basic1(self):
fname = os.path.join(self.tstDir, "arr1.npy")
result = loadNpy(fname)
for r,g in zip(result, self.arr1):
self.assertEqual(r, g)
def test_Basic2(self):
fname = os.path.join(self.tstDir, "arr0.npy.gz")
result = loadNpy(fname)
for r,g in zip(result, self.arr0):
self.assertEqual(r, g)
# }}} class Test_loadNpy
class Test_ptScale(unittest.TestCase): # {{{
def test_Int0(self):
pt = (1, 2)
scale = 3
result = ptScale(pt, scale)
self.assertTupleEqual(result, (3, 6))
for r in result:
self.assertIsInstance(r, int)
def test_Float0(self):
pt = (1.0, 2.0)
scale = 3.5
result = ptScale(pt, scale)
self.assertTupleEqual(result, (3.5, 7.0))
for r in result:
self.assertIsInstance(r, float)
def test_0D(self):
self.assertRaises(AssertionError, ptScale, tuple(), 2.0)
def test_1D(self):
pt = (1,)
scale = 3
result = ptScale(pt, scale)
self.assertTupleEqual(result, (3,))
for r in result:
self.assertIsInstance(r, int)
def test_5D(self):
pt = (1, 2, 3, 4, 5)
scale = 10.0
result = ptScale(pt, scale)
self.assertTupleEqual(result, (10.0, 20.0, 30.0, 40.0, 50.0))
for r in result:
self.assertIsInstance(r, float)
# }}} class Test_ptScale
class Test_ptShift(unittest.TestCase): # {{{
def test_Int0(self):
pt = (1, 2)
shift = [3, 4]
result = ptShift(pt, shift)
self.assertTupleEqual(result, (4, 6))
for r in result:
self.assertIsInstance(r, int)
def test_Float0(self):
pt = (1.0, 2.0)
shift = [3.0, 4.0]
result = ptShift(pt, shift)
self.assertTupleEqual(result, (4.0, 6.0))
for r in result:
self.assertIsInstance(r, float)
def test_NDimMismatch(self):
self.assertRaises(AssertionError, ptShift, (1, 2), (3,))
def test_0D(self):
self.assertRaises(AssertionError, ptShift, tuple(), tuple())
def test_1D(self):
pt = (1,)
shift = [3]
result = ptShift(pt, shift)
self.assertTupleEqual(result, (4,))
for r in result:
self.assertIsInstance(r, int)
def test_5D(self):
pt = (1, 2, 3, 4, 5)
shift = [3, 4, 5, 6, 7]
result = ptShift(pt, shift)
self.assertTupleEqual(result, (4, 6, 8, 10, 12))
for r in result:
self.assertIsInstance(r, int)
# }}} class Test_ptShift
class Test_ptMirror(unittest.TestCase): # {{{
def test_Int0(self):
pt = (1, 2)
mirror = [None, 3]
result = ptMirror(pt, mirror)
self.assertTupleEqual(result, (1, 4))
for r in result:
self.assertIsInstance(r, int)
def test_Float0(self):
pt = (1.0, 2.0)
mirror = (3.5, None)
result = ptMirror(pt, mirror)
self.assertTupleEqual(result, (6.0, 2.0))
for r in result:
self.assertIsInstance(r, float)
def test_0D(self):
self.assertRaises(AssertionError, ptMirror, tuple(), [2.0])
def test_NDimMismatch(self):
self.assertRaises(AssertionError, ptShift, (1, 2), (3,))
def test_1D(self):
pt = (1,)
mirror = (5.0,)
result = ptMirror(pt, mirror)
self.assertTupleEqual(result, (9.0,))
for r in result:
self.assertIsInstance(r, float)
def test_5D(self):
pt = (1, 2, 3, 4, 5)
mirror = [10.0, None, 5, None, None]
result = ptMirror(pt, mirror)
self.assertTupleEqual(result, (19.0, 2, 7, 4, 5))
for r in result:
self.assertIsInstance(r, (float, int))
# }}} class Test_ptMirror
class Test_rotMat2D(unittest.TestCase): # {{{
def test_Degrees90(self):
result = rotMat2D(math.radians(90))
self.assertAlmostEqual(result[0][0], 0)
self.assertAlmostEqual(result[0][1], -1)
self.assertAlmostEqual(result[1][0], 1)
self.assertAlmostEqual(result[1][1], 0)
def test_Degrees90CW(self):
result = rotMat2D(math.radians(90), clockwise=True)
self.assertAlmostEqual(result[0][0], 0)
self.assertAlmostEqual(result[0][1], 1)
self.assertAlmostEqual(result[1][0], -1)
self.assertAlmostEqual(result[1][1], 0)
def test_Degrees90CCW(self):
result = rotMat2D(math.radians(90), clockwise=False)
self.assertAlmostEqual(result[0][0], 0)
self.assertAlmostEqual(result[0][1], -1)
self.assertAlmostEqual(result[1][0], 1)
self.assertAlmostEqual(result[1][1], 0)
# }}} class Test_rotMat2D
class Test_ptRotate(unittest.TestCase): # {{{
def test_Basic0(self):
rotation = rotMat2D(math.radians(90))
pt = (0, 1) # Vertical
result = ptRotate(pt, rotation)
self.assertIsInstance(result, tuple)
self.assertAlmostEqual(result[0], -1)
self.assertAlmostEqual(result[1], 0)
def test_Basic1(self):
rotation = rotMat2D(math.radians(90))
pt = (1, 0) # Horizontal
result = ptRotate(pt, rotation)
self.assertIsInstance(result, tuple)
self.assertAlmostEqual(result[0], 0)
self.assertAlmostEqual(result[1], 1)
def test_Basic2(self):
rotation = rotMat2D(math.radians(180))
pt = (3, 3) # Horizontal
center = (4, 4)
result = ptRotate(pt, rotation, center)
self.assertIsInstance(result, tuple)
self.assertAlmostEqual(result[0], 5)
self.assertAlmostEqual(result[1], 5)
# }}} class Test_ptRotate
class Test_ptRotate2D(unittest.TestCase): # {{{
def test_Basic0(self):
pt = (0, 1) # Vertical
result = ptRotate2D(pt, math.radians(90))
self.assertIsInstance(result, tuple)
self.assertAlmostEqual(result[0], -1)
self.assertAlmostEqual(result[1], 0)
def test_Basic1(self):
pt = (1, 0) # Horizontal
result = ptRotate2D(pt, math.radians(90))
self.assertIsInstance(result, tuple)
self.assertAlmostEqual(result[0], 0)
self.assertAlmostEqual(result[1], 1)
def test_Basic2(self):
pt = (3, 3) # Horizontal
center = (4, 4)
result = ptRotate2D(pt, math.radians(180), center)
self.assertIsInstance(result, tuple)
self.assertAlmostEqual(result[0], 5)
self.assertAlmostEqual(result[1], 5)
# }}} class Test_ptRotate2D
class Test_ptsScale(unittest.TestCase): # {{{
def test_NDimMismatch(self):
pts = [(1, 2, 3), (4, 5)]
scale = 10.0
self.assertRaises(AssertionError, ptsScale, pts, scale)
def test_5D(self):
pts = [(1, 2, 3, 4, 5), (6, 7, 8, 9, 10)]
scale = 10.0
result = list(ptsScale(pts, scale))
self.assertListEqual(result, [(10.0, 20.0, 30.0, 40.0, 50.0),
(60.0, 70.0, 80.0, 90.0, 100.0)])
# }}} class Test_ptsScale
class Test_ptsShift(unittest.TestCase): # {{{
def test_NDimMismatch(self):
pts = [(1, 2, 3), (4, 5)]
shift = (1, 2)
self.assertRaises(AssertionError, ptsShift, pts, shift)
def test_5D(self):
pts = [(1, 2, 3, 4, 5), (6, 7, 8, 9, 10)]
shift = [3, 4, 5, 6, 7]
result = list(ptsShift(pts, shift))
self.assertListEqual(result, [(4, 6, 8, 10, 12),
(9, 11, 13, 15, 17)])
# }}} class Test_ptsShift
class Test_ptsMirror(unittest.TestCase): # {{{
def test_NDimMismatch(self):
pts = [(1, 2, 3), (4, 5)]
mirror = [10.0, None, 5]
self.assertRaises(AssertionError, ptsMirror, pts, mirror)
def test_5D(self):
pts = [(1, 2, 3, 4, 5), (6, 7, 8, 9, 10)]
mirror = [10.0, None, 5, None, None]
result = list(ptsMirror(pts, mirror))
self.assertListEqual(result, [(19.0, 2, 7, 4, 5),
(14.0, 7, 2, 9, 10)])
for pt in result:
for r in pt:
self.assertIsInstance(r, (float, int))
# }}} class Test_ptsMirror
class Test_ptsRotate2D(unittest.TestCase): # {{{
def test_Basic0(self):
pts = [(0, 1), (5, 0)]
result = list(ptsRotate2D(pts, math.radians(90)))
self.assertAlmostEqual(result[0][0], -1.0)
self.assertAlmostEqual(result[0][1], 0.0)
self.assertAlmostEqual(result[1][0], 0.0)
self.assertAlmostEqual(result[1][1], 5.0)
for pt in result:
for r in pt:
self.assertIsInstance(r, float)
# }}} class Test_ptsRotate2D
class Test_ptPairDifference(unittest.TestCase): # {{{
def test_Int0(self):
ptA = (1, 2)
ptB = [3, 4]
result = ptPairDifference(ptA, ptB)
self.assertTupleEqual(result, (2, 2))
for r in result:
self.assertIsInstance(r, int)
def test_Float0(self):
ptA = (1.0, 2.0)
ptB = [3.0, 4.0]
result = ptPairDifference(ptA, ptB)
self.assertTupleEqual(result, (2.0, 2.0))
for r in result:
self.assertIsInstance(r, float)
def test_NDimMismatch(self):
self.assertRaises(AssertionError, ptPairDifference, (1, 2), (3,))
def test_1D(self):
ptA = [3,]
ptB = (1,)
result = ptPairDifference(ptA, ptB)
self.assertTupleEqual(result, (-2,))
for r in result:
self.assertIsInstance(r, int)
def test_5D(self):
ptA = (1, 2, 3.0, 400, 5)
ptB = (3, 40, 5, 6, 7.5)
result = ptPairDifference(ptA, ptB)
self.assertTupleEqual(result, (2, 38, 2.0, -394, 2.5))
for r in result:
self.assertIsInstance(r, (int, float))
# }}} class Test_ptPairDifference
class Test_ptPairPtBetween(unittest.TestCase): # {{{
def test_Int0(self):
ptA = (1, 2)
ptB = [3, 4]
result = ptPairPtBetween(ptA, ptB)
self.assertTupleEqual(result, (2.0, 3.0))
for r in result:
self.assertIsInstance(r, float)
def test_Float0(self):
ptA = (1.0, 2.0)
ptB = [3.0, 4.0]
result = ptPairPtBetween(ptA, ptB)
self.assertTupleEqual(result, (2.0, 3.0))
for r in result:
self.assertIsInstance(r, float)
def test_Float1(self):
ptA = (1.0, 2.0)
ptB = [3.0, 4.0]
fraction = 0.75
result = ptPairPtBetween(ptA, ptB, fraction)
self.assertTupleEqual(result, (2.5, 3.5))
for r in result:
self.assertIsInstance(r, float)
def test_NDimMismatch(self):
self.assertRaises(AssertionError, ptPairPtBetween, (1, 2), (3,))
def test_NonFraction(self):
self.assertRaises(AssertionError, ptPairPtBetween, (1, 2), (3, 4), 1.5)
def test_1D(self):
ptA = [3,]
ptB = (1,)
result = ptPairPtBetween(ptA, ptB)
self.assertTupleEqual(result, (2.0,))
for r in result:
self.assertIsInstance(r, float)
def test_5D(self):
ptA = (1, 2, 3.0, 400, 5)
ptB = (3, 40, 5, 6, 7.5)
result = ptPairPtBetween(ptA, ptB)
self.assertTupleEqual(result, (2.0, 21.0, 4.0, 203.0, 6.25))
for r in result:
self.assertIsInstance(r, float)
# }}} class Test_ptPairPtBetween
class Test_ptPairDistance(unittest.TestCase): # {{{
def test_Int0(self):
ptA = (1, 2)
ptB = [3, 4]
result = ptPairDistance(ptA, ptB)
self.assertAlmostEqual(result, 2*math.sqrt(2))
def test_Float0(self):
ptA = (1.0, 2.0)
ptB = [3.0, 4.0]
result = ptPairDistance(ptA, ptB)
self.assertAlmostEqual(result, 2*math.sqrt(2))
def test_NDimMismatch(self):
self.assertRaises(AssertionError, ptPairDistance, (1, 2), (3,))
def test_1D(self):
ptA = [3,]
ptB = (1,)
result = ptPairDistance(ptA, ptB)
self.assertAlmostEqual(result, 2.0)
def test_5D(self):
ptA = (1, 2, 3.0, 400, 5)
ptB = (3, 40, 5, 6, 7.5)
result = ptPairDistance(ptA, ptB)
golden = math.sqrt(sum([2**2, 38**2, 2.0**2, (-394)**2, 2.5**2]))
self.assertAlmostEqual(result, golden)
# }}} class Test_ptPairDistance
class Test_ptPairsDifference(unittest.TestCase): # {{{
def test_Basic0(self):
ptPairs = [
[(1, 2), (3, 4)],
[(9, 8), (7, 6)],
((10, 20), (30, 40)),
((90, 80), (75, 65)),
((90, 80), (77, 67)),
]
result = list(ptPairsDifference(ptPairs))
self.assertListEqual(result, [(2, 2),
(-2, -2),
(20, 20),
(-15, -15),
(-13, -13)])
# }}} class Test_ptPairsDifference
class Test_ptPairsPtBetween(unittest.TestCase): # {{{
def test_Basic0(self):
ptPairs = [
[(1, 2), (3, 4)],
[(9, 8), (7, 6)],
]
result = list(ptPairsPtBetween(ptPairs, fraction=0.75))
self.assertListEqual(result, [(2.5, 3.5), (7.5, 6.5)])
# }}} class Test_ptPairsPtBetween
class Test_ptPairsDistance(unittest.TestCase): # {{{
def test_Basic0(self):
ptPairs = [
[(1, 2), (3, 4)],
[(9, 8), (7, 6)],
]
result = list(ptPairsDistance(ptPairs))
self.assertAlmostEqual(result[0], 2*math.sqrt(2))
self.assertAlmostEqual(result[1], 2*math.sqrt(2))
# }}} class Test_ptPairsDistance
class Test_ptsMkPolygon(unittest.TestCase): # {{{
def test_Basic0(self):
result = list(ptsMkPolygon(4))
golden = [(1,0), (0,1), (-1,0), (0,-1)]
np.testing.assert_almost_equal(result, golden)
def test_Basic1(self):
result = list(ptsMkPolygon(4, radius=[2]))
golden = [(2,0), (0,2), (-2,0), (0,-2)]
np.testing.assert_almost_equal(result, golden)
def test_Basic2(self):
result = list(ptsMkPolygon(4, radius=[2, 3]))
golden = [(2,0), (0,3), (-2,0), (0,-3)]
np.testing.assert_almost_equal(result, golden)
# }}} class Test_ptsMkPolygon
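# Editor's note (hedged sketch): Test_clipNorm above pins down clipNorm's
# contract but not its implementation. A minimal reference consistent with
# those tests -- clip x into [lo, hi] (accepting swapped bounds), then
# normalise to [0.0, 1.0] -- might look like the helper below; it is
# illustrative only and is not part of dmppl.math.
def _clipNorm_reference(x, lo=0.0, hi=1.0):
    lo, hi = min(lo, hi), max(lo, hi)
    x = min(max(x, lo), hi)           # clip into [lo, hi]
    return float(x - lo) / (hi - lo)  # normalise to [0.0, 1.0]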
| avg_line_length: 28.445122 | max_line_length: 79 | alphanum_fraction: 0.568532 |
hexsha: 46316039b3c19eb3d615f096aab98b8b154ac8f6 | size: 8,446 | ext: py | lang: Python
max_stars_repo_path: arcade/examples/sprite_rooms.py | max_stars_repo_name: Jayman2000/arcade-pull | max_stars_repo_head_hexsha: 82d8085bd446ac42c634f56a4d9b8d97ac01b417 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event: null to null
max_issues_repo_path: arcade/examples/sprite_rooms.py | max_issues_repo_name: Jayman2000/arcade-pull | max_issues_repo_head_hexsha: 82d8085bd446ac42c634f56a4d9b8d97ac01b417 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event: null to null
max_forks_repo_path: arcade/examples/sprite_rooms.py | max_forks_repo_name: Jayman2000/arcade-pull | max_forks_repo_head_hexsha: 82d8085bd446ac42c634f56a4d9b8d97ac01b417 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event: null to null
"""
Sprite move between different rooms.
Artwork from https://kenney.nl
If Python and Arcade are installed, this example can be run from the command line with:
python -m arcade.examples.sprite_rooms
"""
import arcade
import os
SPRITE_SCALING = 0.5
SPRITE_NATIVE_SIZE = 128
SPRITE_SIZE = int(SPRITE_NATIVE_SIZE * SPRITE_SCALING)
SCREEN_WIDTH = SPRITE_SIZE * 14
SCREEN_HEIGHT = SPRITE_SIZE * 10
SCREEN_TITLE = "Sprite Rooms Example"
MOVEMENT_SPEED = 5
class Room:
"""
This class holds all the information about the
different rooms.
"""
def __init__(self):
# You may want many lists. Lists for coins, monsters, etc.
self.wall_list = None
# This holds the background images. If you don't want changing
# background images, you can delete this part.
self.background = None
def setup_room_1():
"""
Create and return room 1.
If your program gets large, you may want to separate this into different
files.
"""
room = Room()
""" Set up the game and initialize the variables. """
# Sprite lists
room.wall_list = arcade.SpriteList()
# -- Set up the walls
# Create bottom and top row of boxes
# The y loop visits two rows: y = 0 (the bottom) and just under the top of the window
for y in (0, SCREEN_HEIGHT - SPRITE_SIZE):
# Loop for each box going across
for x in range(0, SCREEN_WIDTH, SPRITE_SIZE):
wall = arcade.Sprite(":resources:images/tiles/boxCrate_double.png", SPRITE_SCALING)
wall.left = x
wall.bottom = y
room.wall_list.append(wall)
# Create left and right column of boxes
for x in (0, SCREEN_WIDTH - SPRITE_SIZE):
# Loop for each box going up the column
for y in range(SPRITE_SIZE, SCREEN_HEIGHT - SPRITE_SIZE, SPRITE_SIZE):
# Skip making a block 4 and 5 blocks up on the right side
if (y != SPRITE_SIZE * 4 and y != SPRITE_SIZE * 5) or x == 0:
wall = arcade.Sprite(":resources:images/tiles/boxCrate_double.png", SPRITE_SCALING)
wall.left = x
wall.bottom = y
room.wall_list.append(wall)
wall = arcade.Sprite(":resources:images/tiles/boxCrate_double.png", SPRITE_SCALING)
wall.left = 7 * SPRITE_SIZE
wall.bottom = 5 * SPRITE_SIZE
room.wall_list.append(wall)
# If you want coins or monsters in a level, then add that code here.
# Load the background image for this level.
room.background = arcade.load_texture(":resources:images/backgrounds/abstract_1.jpg")
return room
def setup_room_2():
"""
Create and return room 2.
"""
room = Room()
""" Set up the game and initialize the variables. """
# Sprite lists
room.wall_list = arcade.SpriteList()
# -- Set up the walls
# Create bottom and top row of boxes
# The y loop visits two rows: y = 0 (the bottom) and just under the top of the window
for y in (0, SCREEN_HEIGHT - SPRITE_SIZE):
# Loop for each box going across
for x in range(0, SCREEN_WIDTH, SPRITE_SIZE):
wall = arcade.Sprite(":resources:images/tiles/boxCrate_double.png", SPRITE_SCALING)
wall.left = x
wall.bottom = y
room.wall_list.append(wall)
# Create left and right column of boxes
for x in (0, SCREEN_WIDTH - SPRITE_SIZE):
# Loop for each box going up the column
for y in range(SPRITE_SIZE, SCREEN_HEIGHT - SPRITE_SIZE, SPRITE_SIZE):
# Skip making a block 4 and 5 blocks up on the left side
if (y != SPRITE_SIZE * 4 and y != SPRITE_SIZE * 5) or x != 0:
wall = arcade.Sprite(":resources:images/tiles/boxCrate_double.png", SPRITE_SCALING)
wall.left = x
wall.bottom = y
room.wall_list.append(wall)
wall = arcade.Sprite(":resources:images/tiles/boxCrate_double.png", SPRITE_SCALING)
wall.left = 5 * SPRITE_SIZE
wall.bottom = 6 * SPRITE_SIZE
room.wall_list.append(wall)
room.background = arcade.load_texture(":resources:images/backgrounds/abstract_2.jpg")
return room
class MyGame(arcade.Window):
""" Main application class. """
def __init__(self, width, height, title):
"""
Initializer
"""
super().__init__(width, height, title)
# Set the working directory (where we expect to find files) to the same
# directory this .py file is in. You can leave this out of your own
# code, but it is needed to easily run the examples using "python -m"
# as mentioned at the top of this program.
file_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(file_path)
# Sprite lists
self.current_room = 0
# Set up the player
self.rooms = None
self.player_sprite = None
self.player_list = None
self.physics_engine = None
def setup(self):
""" Set up the game and initialize the variables. """
# Set up the player
self.player_sprite = arcade.Sprite(":resources:images/animated_characters/female_person/femalePerson_idle.png", SPRITE_SCALING)
self.player_sprite.center_x = 100
self.player_sprite.center_y = 100
self.player_list = arcade.SpriteList()
self.player_list.append(self.player_sprite)
# Our list of rooms
self.rooms = []
# Create the rooms. Extend the pattern for each room.
room = setup_room_1()
self.rooms.append(room)
room = setup_room_2()
self.rooms.append(room)
# Our starting room number
self.current_room = 0
# Create a physics engine for this room
self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite, self.rooms[self.current_room].wall_list)
def on_draw(self):
"""
Render the screen.
"""
# This command has to happen before we start drawing
arcade.start_render()
# Draw the background texture
arcade.draw_lrwh_rectangle_textured(0, 0,
SCREEN_WIDTH, SCREEN_HEIGHT,
self.rooms[self.current_room].background)
# Draw all the walls in this room
self.rooms[self.current_room].wall_list.draw()
# If you have coins or monsters, then copy and modify the line
# above for each list.
self.player_list.draw()
def on_key_press(self, key, modifiers):
"""Called whenever a key is pressed. """
if key == arcade.key.UP:
self.player_sprite.change_y = MOVEMENT_SPEED
elif key == arcade.key.DOWN:
self.player_sprite.change_y = -MOVEMENT_SPEED
elif key == arcade.key.LEFT:
self.player_sprite.change_x = -MOVEMENT_SPEED
elif key == arcade.key.RIGHT:
self.player_sprite.change_x = MOVEMENT_SPEED
def on_key_release(self, key, modifiers):
"""Called when the user releases a key. """
if key == arcade.key.UP or key == arcade.key.DOWN:
self.player_sprite.change_y = 0
elif key == arcade.key.LEFT or key == arcade.key.RIGHT:
self.player_sprite.change_x = 0
def on_update(self, delta_time):
""" Movement and game logic """
# Call update on all sprites (The sprites don't do much in this
# example though.)
self.physics_engine.update()
# Do some logic here to figure out what room we are in, and if we need to go
# to a different room.
if self.player_sprite.center_x > SCREEN_WIDTH and self.current_room == 0:
self.current_room = 1
self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite,
self.rooms[self.current_room].wall_list)
self.player_sprite.center_x = 0
elif self.player_sprite.center_x < 0 and self.current_room == 1:
self.current_room = 0
self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite,
self.rooms[self.current_room].wall_list)
self.player_sprite.center_x = SCREEN_WIDTH
def main():
""" Main function """
window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
window.setup()
arcade.run()
if __name__ == "__main__":
main()
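# Editor's note (hedged sketch): a third room follows the same pattern -- write
# a hypothetical setup_room_3() that fills room.wall_list and room.background,
# append it in MyGame.setup(), and extend on_update() with another boundary
# check, e.g.
#
#     elif self.player_sprite.center_x > SCREEN_WIDTH and self.current_room == 1:
#         self.current_room = 2
#         self.physics_engine = arcade.PhysicsEngineSimple(
#             self.player_sprite, self.rooms[self.current_room].wall_list)
#         self.player_sprite.center_x = 0
#
# setup_room_3 is not part of this example; it is named here only to show the extension.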
| avg_line_length: 34.194332 | max_line_length: 135 | alphanum_fraction: 0.625503 |
hexsha: 0a13f03a4a76aca9c1866c8f1693df56f2190ea0 | size: 17,546 | ext: py | lang: Python
max_stars_repo_path: opsdroid/connector/matrix/connector.py | max_stars_repo_name: abhiky/opsdroid | max_stars_repo_head_hexsha: ebc7fe0d00f6d0ea0dad2aff09faf2bd633758a2 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event: null to null
max_issues_repo_path: opsdroid/connector/matrix/connector.py | max_issues_repo_name: abhiky/opsdroid | max_issues_repo_head_hexsha: ebc7fe0d00f6d0ea0dad2aff09faf2bd633758a2 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event: null to null
max_forks_repo_path: opsdroid/connector/matrix/connector.py | max_forks_repo_name: abhiky/opsdroid | max_forks_repo_head_hexsha: ebc7fe0d00f6d0ea0dad2aff09faf2bd633758a2 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event: null to null
"""Connector for Matrix (https://matrix.org)."""
import re
import json
import logging
import functools
from concurrent.futures import CancelledError
from urllib.parse import urlparse
import aiohttp
from matrix_api_async.api_asyncio import AsyncHTTPAPI
from matrix_client.errors import MatrixRequestError
from voluptuous import Required
from opsdroid.connector import Connector, register_event
from opsdroid import events
from .html_cleaner import clean
from .create_events import MatrixEventCreator
from . import events as matrixevents
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = {
Required("mxid"): str,
Required("password"): str,
Required("rooms"): dict,
"homeserver": str,
"nick": str,
"room_specific_nicks": bool,
}
__all__ = ["ConnectorMatrix"]
def ensure_room_id_and_send(func):
"""
Ensure that the target for the event is a matrix room id.
Also retry the function call if the server disconnects.
"""
@functools.wraps(func)
async def ensure_room_id(self, event):
if not event.target.startswith(("!", "#")):
event.target = self.room_ids[event.target]
if not event.target.startswith("!"):
event.target = await self.connection.get_room_id(event.target)
try:
return await func(self, event)
except aiohttp.client_exceptions.ServerDisconnectedError:
_LOGGER.debug(_("Server had disconnected, retrying send."))
return await func(self, event)
return ensure_room_id
class ConnectorMatrix(Connector):
"""Connector for Matrix (https://matrix.org)."""
def __init__(self, config, opsdroid=None): # noqa: D107
"""Init the config for the connector."""
super().__init__(config, opsdroid=opsdroid)
self.name = "matrix" # The name of your connector
self.rooms = self._process_rooms_dict(config["rooms"])
self.room_ids = {}
self.default_target = self.rooms["main"]["alias"]
self.mxid = config["mxid"]
self.nick = config.get("nick", None)
self.homeserver = config.get("homeserver", "https://matrix.org")
self.password = config["password"]
self.room_specific_nicks = config.get("room_specific_nicks", False)
self.send_m_notice = config.get("send_m_notice", False)
self.session = None
self.filter_id = None
self.connection = None
self._event_creator = MatrixEventCreator(self)
def message_type(self, room):
"""Subtype to use to send into a specific room."""
if self.send_m_notice:
return "m.notice"
reverse_room_ids = {v: k for k, v in self.room_ids.items()}
room = reverse_room_ids.get(room, room)
if room in self.rooms:
if self.rooms[room].get("send_m_notice", False):
return "m.notice"
return "m.text"
def _process_rooms_dict(self, rooms):
out_rooms = {}
for name, room in rooms.items():
if isinstance(room, str):
room = {"alias": room}
out_rooms[name] = room
return out_rooms
@property
def filter_json(self):
"""Define JSON filter to apply to incoming events."""
return {
"event_format": "client",
"account_data": {"limit": 0, "types": []},
"presence": {"limit": 0, "types": []},
"room": {
"account_data": {"types": []},
"ephemeral": {"types": []},
"state": {"types": []},
},
}
async def make_filter(self, api):
"""Make a filter on the server for future syncs."""
fjson = self.filter_json
resp = await api.create_filter(user_id=self.mxid, filter_params=fjson)
return resp["filter_id"]
async def connect(self):
"""Create connection object with chat library."""
session = aiohttp.ClientSession(trust_env=True)
mapi = AsyncHTTPAPI(self.homeserver, session)
self.session = session
login_response = await mapi.login(
"m.login.password", user=self.mxid, password=self.password
)
mapi.token = login_response["access_token"]
mapi.sync_token = None
for roomname, room in self.rooms.items():
response = await mapi.join_room(room["alias"])
self.room_ids[roomname] = response["room_id"]
self.connection = mapi
# Create a filter now, saves time on each later sync
self.filter_id = await self.make_filter(mapi)
# Do initial sync so we don't get old messages later.
response = await self.connection.sync(
timeout_ms=3000,
filter='{ "room": { "timeline" : { "limit" : 1 } } }',
set_presence="online",
)
self.connection.sync_token = response["next_batch"]
if self.nick:
display_name = await self.connection.get_display_name(self.mxid)
if display_name != self.nick:
await self.connection.set_display_name(self.mxid, self.nick)
async def disconnect(self):
"""Close the matrix session."""
await self.session.close()
async def _parse_sync_response(self, response):
self.connection.sync_token = response["next_batch"]
# Emit Invite events for every room in the invite list.
for roomid, room in response["rooms"]["invite"].items():
# Process the invite list to extract the person who invited us.
invite_event = [
e
for e in room["invite_state"]["events"]
if "invite" == e.get("content", {}).get("membership")
][0]
sender = await self.get_nick(None, invite_event["sender"])
await self.opsdroid.parse(
events.UserInvite(
target=roomid,
user_id=invite_event["sender"],
user=sender,
connector=self,
raw_event=invite_event,
)
)
for roomid, room in response["rooms"]["join"].items():
if "timeline" in room:
for event in room["timeline"]["events"]:
if event["sender"] != self.mxid:
return await self._event_creator.create_event(event, roomid)
async def listen(self): # pragma: no cover
"""Listen for new messages from the chat service."""
while True: # pylint: disable=R1702
try:
response = await self.connection.sync(
self.connection.sync_token,
timeout_ms=int(60 * 1e3), # 1m in ms
filter=self.filter_id,
)
_LOGGER.debug(_("Matrix sync request returned."))
message = await self._parse_sync_response(response)
if message:
await self.opsdroid.parse(message)
except MatrixRequestError as mre:
# We can safely ignore timeout errors. The non-standard error
# codes are returned by Cloudflare.
if mre.code in [504, 522, 524]:
_LOGGER.info(_("Matrix sync timeout (code: %d)."), mre.code)
continue
_LOGGER.exception(_("Matrix sync error."))
except CancelledError:
raise
except Exception: # pylint: disable=W0703
_LOGGER.exception(_("Matrix sync error."))
async def get_nick(self, roomid, mxid):
"""
Get nickname from user ID.
Get the nickname of a sender depending on the room specific config
setting.
"""
if self.room_specific_nicks:
try:
return await self.connection.get_room_displayname(roomid, mxid)
except Exception: # pylint: disable=W0703
# Fallback to the non-room specific one
logging.exception("Failed to lookup room specific nick for %s.", mxid)
try:
return await self.connection.get_display_name(mxid)
except MatrixRequestError as mre:
# Log the error if it's not the 404 from the user not having a nick
if mre.code != 404:
logging.exception("Failed to lookup nick for %s.", mxid)
return mxid
def get_roomname(self, room):
"""Get the name of a room from alias or room ID."""
if room.startswith(("#", "!")):
for room_name, room_alias in self.rooms.items():
room_alias = room_alias["alias"]
if room in (room_alias, self.room_ids[room_name]):
return room_name
return room
@staticmethod
def _get_formatted_message_body(message, body=None, msgtype="m.text"):
"""
Get HTML from a message.
Return the json representation of the message in
"org.matrix.custom.html" format.
"""
# Markdown leaves a <p></p> around standard messages that we want to
# strip:
if message.startswith("<p>"):
message = message[3:]
if message.endswith("</p>"):
message = message[:-4]
clean_html = clean(message)
return {
# Strip out any tags from the markdown to make the body
"body": body if body else re.sub("<[^<]+?>", "", clean_html),
"msgtype": msgtype,
"format": "org.matrix.custom.html",
"formatted_body": clean_html,
}
@register_event(events.Message)
@ensure_room_id_and_send
async def _send_message(self, message):
"""Send `message.text` back to the chat service."""
return await self.connection.send_message_event(
message.target,
"m.room.message",
self._get_formatted_message_body(
message.text, msgtype=self.message_type(message.target)
),
)
@register_event(events.EditedMessage)
@ensure_room_id_and_send
async def _send_edit(self, message):
if isinstance(message.linked_event, events.EditedMessage):
# If we are attempting to edit an edit, move up the tree and then
# try again.
message.linked_event = message.linked_event.linked_event
            return await self._send_edit(message)
elif isinstance(message.linked_event, str):
edited_event_id = message.linked_event
else:
edited_event_id = message.linked_event.event_id
new_content = self._get_formatted_message_body(
message.text, msgtype=self.message_type(message.target)
)
content = {
"msgtype": self.message_type(message.target),
"m.new_content": new_content,
"body": f"* {new_content['body']}",
"m.relates_to": {"rel_type": "m.replace", "event_id": edited_event_id},
}
return (
await self.connection.send_message_event(
message.target, "m.room.message", content
),
)
@register_event(events.Reply)
@ensure_room_id_and_send
async def _send_reply(self, reply):
if isinstance(reply.linked_event, str):
reply_event_id = reply.linked_event
else:
reply_event_id = reply.linked_event.event_id
# TODO: Insert reply fallback here
content = self._get_formatted_message_body(
reply.text, msgtype=self.message_type(reply.target)
)
content["m.relates_to"] = {"m.in_reply_to": {"event_id": reply_event_id}}
return (
await self.connection.send_message_event(
reply.target, "m.room.message", content
),
)
@register_event(events.Reaction)
@ensure_room_id_and_send
async def _send_reaction(self, reaction):
content = {
"m.relates_to": {
"rel_type": "m.annotation",
"event_id": reaction.linked_event.event_id,
"key": reaction.emoji,
}
}
return await self.connection.send_message_event(
reaction.target, "m.reaction", content
)
async def _get_image_info(self, image):
width, height = await image.get_dimensions()
return {
"w": width,
"h": height,
"mimetype": await image.get_mimetype(),
"size": len(await image.get_file_bytes()),
}
async def _file_to_mxc_url(self, file_event):
"""Given a file event return the mxc url."""
uploaded = False
mxc_url = None
if file_event.url:
url = urlparse(file_event.url)
if url.scheme == "mxc":
mxc_url = file_event.url
if not mxc_url:
mxc_url = await self.connection.media_upload(
await file_event.get_file_bytes(), await file_event.get_mimetype()
)
mxc_url = mxc_url["content_uri"]
uploaded = True
return mxc_url, uploaded
@register_event(events.File)
@register_event(events.Image)
@ensure_room_id_and_send
async def _send_file(self, file_event):
mxc_url, uploaded = await self._file_to_mxc_url(file_event)
if isinstance(file_event, events.Image):
if uploaded:
extra_info = await self._get_image_info(file_event)
else:
extra_info = {}
msg_type = "m.image"
else:
extra_info = {}
msg_type = "m.file"
name = file_event.name or "opsdroid_upload"
await self.connection.send_content(
file_event.target, mxc_url, name, msg_type, extra_info
)
@register_event(events.NewRoom)
@ensure_room_id_and_send
async def _send_room_creation(self, creation_event):
params = creation_event.room_params
params = params.get("matrix", params)
response = await self.connection.create_room(**params)
room_id = response["room_id"]
if creation_event.name is not None:
await self._send_room_name_set(
events.RoomName(creation_event.name, target=room_id)
)
return room_id
@register_event(events.RoomName)
@ensure_room_id_and_send
async def _send_room_name_set(self, name_event):
return await self.connection.set_room_name(name_event.target, name_event.name)
@register_event(events.RoomAddress)
@ensure_room_id_and_send
async def _send_room_address(self, address_event):
try:
return await self.connection.set_room_alias(
address_event.target, address_event.address
)
except MatrixRequestError as err:
if err.code == 409:
_LOGGER.warning(
f"A room with the alias {address_event.address} already exists."
)
@register_event(events.JoinRoom)
@ensure_room_id_and_send
async def _send_join_room(self, join_event):
return await self.connection.join_room(join_event.target)
@register_event(events.UserInvite)
@ensure_room_id_and_send
async def _send_user_invitation(self, invite_event):
try:
return await self.connection.invite_user(
invite_event.target, invite_event.user_id
)
except MatrixRequestError as err:
content = json.loads(err.content)
if err.code == 403 and "is already in the room" in content["error"]:
_LOGGER.info(
f"{invite_event.user_id} is already in the room, ignoring."
)
@register_event(events.RoomDescription)
@ensure_room_id_and_send
    async def _send_room_description(self, desc_event):
return await self.connection.set_room_topic(
desc_event.target, desc_event.description
)
@register_event(events.RoomImage)
@ensure_room_id_and_send
async def _send_room_image(self, image_event):
mxc_url, _ = await self._file_to_mxc_url(image_event.room_image)
return await image_event.respond(matrixevents.MatrixRoomAvatar(mxc_url))
@register_event(events.UserRole)
@ensure_room_id_and_send
async def _set_user_role(self, role_event):
role = role_event.role
room_id = role_event.target
if isinstance(role, str) and role.lower() in ["mod", "moderator"]:
power_level = 50
elif isinstance(role, str) and role.lower() in ["admin", "administrator"]:
power_level = 100
else:
try:
power_level = int(role)
except ValueError:
raise ValueError("Role must be one of 'mod', 'admin', or an integer")
power_levels = await self.connection.get_power_levels(room_id)
power_levels["users"][role_event.user_id] = power_level
return await role_event.respond(matrixevents.MatrixPowerLevels(power_levels))
@register_event(matrixevents.MatrixStateEvent, include_subclasses=True)
@ensure_room_id_and_send
async def _send_state_event(self, state_event):
_LOGGER.debug(f"Sending State Event {state_event}")
return await self.connection.send_state_event(
state_event.target,
state_event.key,
state_event.content,
state_key=state_event.state_key,
)
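# --- Illustrative example (added by the editor, not part of the original connector) ---
# A minimal sketch of the configuration dict ConnectorMatrix expects, inferred only
# from the keys read in __init__ above. Every value below is a placeholder.
_EXAMPLE_MATRIX_CONFIG = {
    "mxid": "@opsdroid:matrix.org",          # account to log in as
    "password": "change-me",                 # account password
    "homeserver": "https://matrix.org",      # optional; this is the default
    "nick": "opsdroid",                      # optional display name
    "room_specific_nicks": False,            # optional; resolve per-room display names
    "rooms": {
        # a bare string is normalised to {"alias": ...} by _process_rooms_dict()
        "main": "#opsdroid-demo:matrix.org",
        # the dict form can also request m.notice messages for that room
        "logs": {"alias": "#opsdroid-logs:matrix.org", "send_m_notice": True},
    },
}
# ConnectorMatrix(_EXAMPLE_MATRIX_CONFIG) would resolve these aliases to room ids
# during connect().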
| 35.590264
| 86
| 0.600479
|
8db988903536fe9e4678efde7bf4fff369886133
| 912
|
py
|
Python
|
tests/arithmetic/test_HalfAdder.py
|
jamesjiang52/Bitwise
|
c71f151d23034b3f9e2a939f637be0eaa16c45c3
|
[
"MIT"
] | null | null | null |
tests/arithmetic/test_HalfAdder.py
|
jamesjiang52/Bitwise
|
c71f151d23034b3f9e2a939f637be0eaa16c45c3
|
[
"MIT"
] | null | null | null |
tests/arithmetic/test_HalfAdder.py
|
jamesjiang52/Bitwise
|
c71f151d23034b3f9e2a939f637be0eaa16c45c3
|
[
"MIT"
] | null | null | null |
import bitwise as bw
class TestHalfAdder:
def test_HalfAdder(self):
input_1 = bw.wire.Wire()
input_2 = bw.wire.Wire()
carry_out = bw.wire.Wire()
sum_ = bw.wire.Wire()
a = bw.arithmetic.HalfAdder(input_1, input_2, carry_out, sum_)
input_1.value = 0
input_2.value = 0
assert (carry_out.value, sum_.value) == (0, 0)
input_1.value = 0
input_2.value = 1
assert (carry_out.value, sum_.value) == (0, 1)
input_1.value = 1
input_2.value = 0
assert (carry_out.value, sum_.value) == (0, 1)
input_1.value = 1
input_2.value = 1
assert (carry_out.value, sum_.value) == (1, 0)
print(a.__doc__)
print(a)
a(
a=0,
b=0,
carry_out=None,
sum=None
)
assert (carry_out.value, sum_.value) == (0, 0)
| 23.384615
| 70
| 0.520833
|
01809a8a4178e56ced1c5f5479613d597b6a57ff
| 821
|
py
|
Python
|
tests/app_multidb.py
|
mgorny/Flask-Migrate
|
dd389ba7a45b8a673c653346a15f8d08f4f2417f
|
[
"MIT"
] | 1,992
|
2015-01-03T03:24:50.000Z
|
2022-03-27T15:28:30.000Z
|
tests/app_multidb.py
|
mgorny/Flask-Migrate
|
dd389ba7a45b8a673c653346a15f8d08f4f2417f
|
[
"MIT"
] | 408
|
2015-01-12T09:44:02.000Z
|
2022-03-25T15:06:03.000Z
|
tests/app_multidb.py
|
mgorny/Flask-Migrate
|
dd389ba7a45b8a673c653346a15f8d08f4f2417f
|
[
"MIT"
] | 248
|
2015-01-06T20:02:25.000Z
|
2022-03-11T03:03:47.000Z
|
#!/usr/bin/env python
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///app1.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_BINDS'] = {
"db1": "sqlite:///app2.db",
}
db = SQLAlchemy(app)
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(128))
class Group(db.Model):
__bind_key__ = 'db1'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(128))
migrate = Migrate(app, db)
@app.cli.command()
def add():
"""Add test users."""
db.session.add(User(name='test'))
db.session.add(Group(name='group'))
db.session.commit()
if __name__ == '__main__':
app.run()
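# --- Usage note (added by the editor, not part of the original test app) ---
# A typical Flask-Migrate workflow for this two-database app, assuming FLASK_APP
# points at this module; the commands are the standard flask-migrate CLI:
#
#   flask db init --multidb    # create a migrations/ directory with multi-db templates
#   flask db migrate           # autogenerate a revision covering both binds
#   flask db upgrade           # apply it to app1.db and app2.db
#   flask add                  # the custom command defined above; inserts test rows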
| 20.525
| 59
| 0.679659
|
c688f08823d343c4776ef2ea14f179fb4f931c3a
| 1,863
|
py
|
Python
|
coverage/IN_CTS/0487-COVERAGE-brw-fs-cmod-propagation-172-283/generate_cts_test.py
|
asuonpaa/ShaderTests
|
6a3672040dcfa0d164d313224446496d1775a15e
|
[
"Apache-2.0"
] | null | null | null |
coverage/IN_CTS/0487-COVERAGE-brw-fs-cmod-propagation-172-283/generate_cts_test.py
|
asuonpaa/ShaderTests
|
6a3672040dcfa0d164d313224446496d1775a15e
|
[
"Apache-2.0"
] | 47
|
2021-03-11T07:42:51.000Z
|
2022-03-14T06:30:14.000Z
|
coverage/IN_CTS/0487-COVERAGE-brw-fs-cmod-propagation-172-283/generate_cts_test.py
|
asuonpaa/ShaderTests
|
6a3672040dcfa0d164d313224446496d1775a15e
|
[
"Apache-2.0"
] | 4
|
2021-03-09T13:37:19.000Z
|
2022-02-25T07:32:11.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2021 The GraphicsFuzz Project Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate a CTS test.
This module/script is copied next to a specific test in your repository of bugs
to generate an Amber script test suitable for adding to the CTS.
In particular, the Amber script test is suitable for use with |add_amber_tests_to_cts.py|.
"""
import sys
from pathlib import Path
from gfauto import tool, util
def main() -> None:
# Checklist:
# - check output_amber
# - check short_description
# - check comment_text
# - check copyright_year
# - check extra_commands
bug_dir = util.norm_path(Path(__file__).absolute()).parent
tool.glsl_shader_job_crash_to_amber_script_for_google_cts(
source_dir=bug_dir / "reduced_manual",
output_amber=bug_dir / "cov-condition-loop-index-bitwise-not.amber",
work_dir=bug_dir / "work",
# One sentence, 58 characters max., no period, no line breaks.
short_description="A fragment shader that covers specific BRW code paths",
comment_text="""The test passes because the shader always writes red.""",
copyright_year="2021",
extra_commands=tool.AMBER_COMMAND_EXPECT_RED,
is_coverage_gap=True,
)
if __name__ == "__main__":
main()
sys.exit(0)
| 32.12069
| 90
| 0.71927
|
de258b6801cf4b679b86d972e6673552c9c3fef1
| 6,038
|
py
|
Python
|
btc/mining.py
|
guolinxin/bitcoin
|
f76764a4c22327538c94a442a2de5fadb229a5a6
|
[
"MIT"
] | 3
|
2019-11-14T03:45:39.000Z
|
2021-08-02T14:24:27.000Z
|
btc/mining.py
|
guolinxin/bitcoin
|
f76764a4c22327538c94a442a2de5fadb229a5a6
|
[
"MIT"
] | 1
|
2020-01-04T00:50:52.000Z
|
2020-01-13T20:08:59.000Z
|
btc/mining.py
|
guolinxin/bitcoin
|
f76764a4c22327538c94a442a2de5fadb229a5a6
|
[
"MIT"
] | 4
|
2020-05-07T17:45:22.000Z
|
2021-12-03T05:30:52.000Z
|
####################################################
#
# Utility functions supporting mining
#
# MIT license
#
# Copyright (c) 2018 christianb93
# Permission is hereby granted, free of charge, to
# any person obtaining a copy of this software and
# associated documentation files (the "Software"),
# to deal in the Software without restriction,
# including without limitation the rights to use,
# copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY
# OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
####################################################
from . import serialize
from . import script
from . import utils
from . import txn
from . import keys
from . import block
import binascii
import time
#
# Check whether a given block has a valid PoW
# Arguments:
# - the block
# - the global target
#
def checkPoW(block, bits):
#
# First determine the target in the block and verify that
# the hash does not exceed it
#
h = block.getBlockHeader().getBlockHash(byteorder="big")
h = int(h, 16)
#
# Do the same for the target
#
_bits = block.getBlockHeader().getBits()
coefficient = (float)(_bits & 0xFFFFFF)
size = _bits >> 24
target = int(coefficient * 2**(8*(size - 3)))
if h > target:
return False
#
# Next check that the block is in line with the global target
#
if bits != block.getBlockHeader().getBits():
return False
return True
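#
# Worked example of the compact target encoding above (added for illustration,
# not part of the original module). For the well-known difficulty-1 value
# bits = 0x1d00ffff:
#   coefficient = 0x1d00ffff & 0xFFFFFF = 0x00ffff = 65535
#   size        = 0x1d00ffff >> 24      = 0x1d     = 29
#   target      = 65535 * 2**(8 * (29 - 3))
#               = 0x00000000ffff0000000000000000000000000000000000000000000000000000
# A block hash, read as a big-endian integer, must not exceed this target.
#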
#
# Check a given block. Parameters:
# - block - the block to check
# - currentLastBlock - hash of current last block (as returned by the RPC call
# getblockchaininfo, i.e. in big endian)
# This will NOT check the PoW!
#
def checkBlock(block, currentLastBlock):
#
# First check that the previous block ID is equal to the current
# last block
#
if block.getBlockHeader().getPrevBlockId() != currentLastBlock:
return False
#
# Next check that the first transaction is a coinbase transaction
#
tx = block.getTx()
if 0 == len(tx):
return False
if tx[0].isCoinbase() == False:
return False
#
# Check that no other transactions are coinbase transactions
#
for i in range(len(tx)):
if i > 0:
if tx[i].isCoinbase():
return False
#
# Check Merkle root
#
merkleRoot = utils.blockMerkleRoot(block)
if merkleRoot != serialize.serializeString(block.getBlockHeader().getMerkleRoot(), 32):
return False
return True
#
# Create a coinbase transaction
# Parameters:
# - address - the address to be used for the transaction output
# - coinbase value in Satoshi, including fees
# - the current height of the chain as integer
# - an extra nonce as integer
#
def createCoinbaseTxn(address, currentHeight, coinbasevalue, extraNonce = 1):
coinbase = txn.txn()
publicKeyHash = keys.ecAddressToPKH(address)
publicKeyHash = binascii.hexlify(publicKeyHash).decode('ascii')
#
# Create locking script
#
lockingScript = script.scriptPubKey(scriptType = script.SCRIPTTYPE_P2PKH,
pubKeyHash = publicKeyHash)
#
# and output
#
txout = txn.txout(value = int(coinbasevalue),
scriptPubKey = lockingScript)
coinbase.addOutput(txout)
#
# Next we do the input. The previous transaction ID is all zeros
#
prevTxId = "".join(["0"]*64)
#
# The signature script. To be compliant with BIP34, we put the height first
#
scriptSig = script.scriptSig()
scriptSig.pushData(serialize.serializeNumber(currentHeight + 1, 4))
#
# We then add the extra nonce
#
    scriptSig.pushData(serialize.serializeNumber(extraNonce, 4))
#
# and a signature ;-)
#
# scriptSig.pushData("cc")
txin = txn.txin(prevTxid = prevTxId, vout=0xFFFFFFFF, scriptSig = scriptSig)
coinbase.addInput(txin)
return coinbase
#
# This creates a new block with a preliminary nonce, i.e. this
# block will not yet pass the PoW test
# Parameters:
# - address - the address to be used for the transaction output
# - coinbase value in Satoshi, including fees
# - the hash of the current last block
# - the current height of the chain as integer
# - bits in compact encoding
# - tx - a list of transactions to become part of the block, not including
# the coinbase transaction
# - mintime - the minimum time to use for the block as provided by getblocktemplate
def createNewBlock(address, currentLastBlockHash, currentHeight, coinbasevalue, bits, tx, mintime):
#
# First we build a block header
#
blockHeader = block.blockHeader(creationTime = mintime,
prevBlockId = currentLastBlockHash,
nonce = 1,
bits = bits)
#
# Next we do the block
#
_block = block.block(blockHeader = blockHeader)
#
# add the coinbase transaction, followed by all other
# transactions
#
_block.addTxn(createCoinbaseTxn(address, currentHeight, coinbasevalue))
for _tx in tx:
_block.addTxn(_tx)
assert(False == _tx.isCoinbase())
_block.updateMerkleRoot()
return _block
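#
# Illustrative helper (added by the editor, not part of the original module): a
# naive single-threaded mining loop built from createNewBlock and checkPoW above.
# It assumes the block header's nonce attribute can be reassigned directly; this
# is a sketch, not production mining code, and it is never called in this module.
#
def exampleMineBlock(address, currentLastBlockHash, currentHeight, coinbasevalue, bits, tx, mintime):
    candidate = createNewBlock(address, currentLastBlockHash, currentHeight,
                               coinbasevalue, bits, tx, mintime)
    nonce = 0
    while not checkPoW(candidate, bits):
        # try the next nonce until the header hash falls below the target
        nonce = nonce + 1
        candidate.getBlockHeader().nonce = nonce
    return candidate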
| 31.123711
| 99
| 0.6527
|
d85f8fab04f8af4092421ac1e2b249f8cdcfaba1
| 3,233
|
py
|
Python
|
vision/google/cloud/vision/helpers.py
|
rodrigodias27/google-cloud-python
|
7d1161f70744c0dbbe67a3f472ea95667eaafe50
|
[
"Apache-2.0"
] | 1
|
2021-01-04T11:40:17.000Z
|
2021-01-04T11:40:17.000Z
|
vision/google/cloud/vision/helpers.py
|
rodrigodias27/google-cloud-python
|
7d1161f70744c0dbbe67a3f472ea95667eaafe50
|
[
"Apache-2.0"
] | null | null | null |
vision/google/cloud/vision/helpers.py
|
rodrigodias27/google-cloud-python
|
7d1161f70744c0dbbe67a3f472ea95667eaafe50
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017, Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import io
from google.gax.utils import protobuf
class VisionHelpers(object):
"""A set of convenience methods to make the Vision GAPIC easier to use.
This class should be considered abstract; it is used as a superclass
in a multiple-inheritance construction alongside the applicable GAPIC.
See the :class:`~google.cloud.vision_v1.ImageAnnotatorClient`.
"""
def annotate_image(self, request, options=None):
"""Run image detection and annotation for an image.
Example:
>>> from google.cloud.vision_v1 import ImageAnnotatorClient
>>> client = ImageAnnotatorClient()
>>> request = {
... 'image': {
... 'source': {'image_uri': 'https://foo.com/image.jpg'},
... },
... }
>>> response = client.annotate_image(request)
Args:
request (:class:`~.vision_v1.types.AnnotateImageRequest`)
options (:class:`google.gax.CallOptions`): Overrides the default
                settings for this call, e.g., timeout, retries, etc.
Returns:
:class:`~.vision_v1.types.AnnotateImageResponse` The API response.
"""
# If the image is a file handler, set the content.
image = protobuf.get(request, 'image')
if hasattr(image, 'read'):
img_bytes = image.read()
protobuf.set(request, 'image', {})
protobuf.set(request, 'image.content', img_bytes)
image = protobuf.get(request, 'image')
# If a filename is provided, read the file.
filename = protobuf.get(image, 'source.filename', default=None)
if filename:
with io.open(filename, 'rb') as img_file:
protobuf.set(request, 'image.content', img_file.read())
protobuf.set(request, 'image.source', None)
# This method allows features not to be specified, and you get all
# of them.
protobuf.setdefault(request, 'features', self._get_all_features())
r = self.batch_annotate_images([request], options=options)
return r.responses[0]
def _get_all_features(self):
"""Return a list of all features.
Returns:
list: A list of all available features.
"""
answer = []
for key, value in self.enums.Feature.Type.__dict__.items():
if key.upper() != key:
continue
if not isinstance(value, int) or value == 0:
continue
answer.append({'type': value})
return answer
| 38.488095
| 78
| 0.622951
|
f2019625c49ac4c3ef14d273fff40748938634a6
| 2,147
|
py
|
Python
|
train_heuristic.py
|
ayusher/othello-rl
|
38fb2f015199d15dc023b123cae1b16076f14ad3
|
[
"MIT"
] | null | null | null |
train_heuristic.py
|
ayusher/othello-rl
|
38fb2f015199d15dc023b123cae1b16076f14ad3
|
[
"MIT"
] | null | null | null |
train_heuristic.py
|
ayusher/othello-rl
|
38fb2f015199d15dc023b123cae1b16076f14ad3
|
[
"MIT"
] | null | null | null |
#import tensorflow as tf
import os
import sys
import pickle
import binascii
#import neat
import numpy as np
import random
import threading
from multiprocessing import Process, Lock
import multiprocessing
import time
from agents import MCTS_agent, random_agent, deep_agent, alphabeta_agent, deep_alphabeta_agent, self_play_agent
from collections import Counter
import othello
from othello import Othello
def play_game(l, agents):
board = Othello()
#fname = "logs/"+binascii.b2a_hex(os.urandom(15)).decode('utf-8')+".txt"
fname=""
board.print()
turn = 1
while True:
agent = agents[turn-1]
#print(type(agent).__name__, set(board.legal_moves(turn)))
agent.search(board)
board = agent.move(board)
board.print()
if board.is_terminal():
break
if turn==1: turn=2
elif turn==2: turn=1
winner = board.get_winner()
if winner == None: l.append("tie")
else: l.append(type(agents[board.get_winner()-1]).__name__+str(winner))
if __name__=="__main__":
'''
jobs = []
manager = multiprocessing.Manager()
return_list = manager.list()
for _ in range(multiprocessing.cpu_count()):
jobs.append(Process(target=play_game, args=(return_list, [minimax_agent(), MCTS_agent()], )))
for j in jobs: j.start()
for j in jobs: j.join()
d = dict(Counter(return_list))
total = sum(list(d.values()))
for k in d:
d[k]=d[k]/total
print("{} win rate: {:.2f}%".format(k, 100*d[k]))
print(d)
'''
if len(sys.argv)>1: ti = float(sys.argv[1])
else: ti = 10
outcome = []
#players = [MCTS_agent, random_agent, deep_agent, alphabeta_agent, deep_alphabeta_agent, self_play_agent]
#for p in range(len(players)):
# print("[{}] {}".format(p, players[p]))
#p1 = int(input("black "))
#p2 = int(input("white "))
p1, p2 = deep_agent(ti), deep_agent(ti)
for _ in range(100):
play_game(outcome, [p1, p2])
play_game(outcome, [p2, p1])
print(outcome)
| 24.678161
| 112
| 0.605962
|
a0396ad2253edd3d4ae8f60106cba24056e33bdd
| 403
|
py
|
Python
|
preprocess.py
|
MasiCal354/fakenewsapi
|
b8eff1cd263a1595a13ad12b0173c19804890900
|
[
"Apache-2.0"
] | null | null | null |
preprocess.py
|
MasiCal354/fakenewsapi
|
b8eff1cd263a1595a13ad12b0173c19804890900
|
[
"Apache-2.0"
] | null | null | null |
preprocess.py
|
MasiCal354/fakenewsapi
|
b8eff1cd263a1595a13ad12b0173c19804890900
|
[
"Apache-2.0"
] | null | null | null |
from models import tokenizer
from utils import denoise_text
from keras.preprocessing import sequence
import numpy as np
maxlen = 300
def fake_news_preprocess(payload):
text = payload.text + ' ' + payload.title
text = denoise_text(text)
tokenized_text = tokenizer.texts_to_sequences(np.array([text]))
vector = sequence.pad_sequences(tokenized_text, maxlen=maxlen)
return vector
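# --- Illustrative usage (added by the editor, not part of the original module) ---
# fake_news_preprocess only reads `payload.text` and `payload.title`, so any object
# exposing those attributes works; SimpleNamespace below is just a stand-in for the
# real request payload.
#
# from types import SimpleNamespace
# payload = SimpleNamespace(title="Example headline", text="Body of the article ...")
# vector = fake_news_preprocess(payload)  # numpy array of shape (1, 300) after padding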
| 28.785714
| 67
| 0.759305
|
8d5cd2c6bdf879b77bd39d65959ca730ef658143
| 860
|
py
|
Python
|
src/settings.py
|
sbadecker/ask_me_anything
|
b030947bb02d80b09112ed37c5529a5b2d87227e
|
[
"MIT"
] | 2
|
2021-10-09T19:59:31.000Z
|
2022-01-18T05:46:30.000Z
|
src/settings.py
|
sbadecker/ask_me_anything
|
b030947bb02d80b09112ed37c5529a5b2d87227e
|
[
"MIT"
] | null | null | null |
src/settings.py
|
sbadecker/ask_me_anything
|
b030947bb02d80b09112ed37c5529a5b2d87227e
|
[
"MIT"
] | null | null | null |
from pydantic import Field
from src.secret_manager import SecretSettings
class ProdSettings(SecretSettings):
SECRET_HEALTH_CHECK: str = Field(..., from_secrets=True)
READER_MODEL_NAME: str = Field(..., from_secrets=True)
DOCUMENT_EMBEDDINGS_BUCKET_NAME: str = Field(..., from_secrets=True)
DOCUMENT_EMBEDDINGS_FILE_NAME: str = Field(..., from_secrets=True)
DPR_PASSAGE_ENCODER_NAME: str = Field(..., from_secrets=True)
DPR_QUERY_ENCODER_NAME: str = Field(..., from_secrets=True)
DB_NAME: str = Field(..., from_secrets=True)
DB_HOST: str = Field(..., from_secrets=True)
DB_USER: str = Field(..., from_secrets=True)
DB_PASSWORD: str = Field(..., from_secrets=True)
CLOUD_SQL_CONNECTION_NAME: str = Field(..., from_secrets=True)
USE_UNIX_SOCKET: bool = Field(..., from_secrets=False)
settings = ProdSettings()
| 39.090909
| 72
| 0.719767
|
17732649227d9ea9c74ac5309860c1bb28586e86
| 3,017
|
py
|
Python
|
pyqtgraph/exporters/CSVExporter.py
|
leo603222/fix-displace-between-selection-area-and-mouse-pos
|
1f9031884a980432795b69487bd659f5e4ef91aa
|
[
"MIT"
] | 2,762
|
2015-01-02T14:34:10.000Z
|
2022-03-30T14:06:07.000Z
|
pyqtgraph/exporters/CSVExporter.py
|
leo603222/fix-displace-between-selection-area-and-mouse-pos
|
1f9031884a980432795b69487bd659f5e4ef91aa
|
[
"MIT"
] | 1,901
|
2015-01-12T03:20:30.000Z
|
2022-03-31T16:33:36.000Z
|
pyqtgraph/exporters/CSVExporter.py
|
leo603222/fix-displace-between-selection-area-and-mouse-pos
|
1f9031884a980432795b69487bd659f5e4ef91aa
|
[
"MIT"
] | 1,038
|
2015-01-01T04:05:49.000Z
|
2022-03-31T11:57:51.000Z
|
# -*- coding: utf-8 -*-
from ..Qt import QtCore
from .Exporter import Exporter
from ..parametertree import Parameter
from .. import PlotItem
translate = QtCore.QCoreApplication.translate
__all__ = ['CSVExporter']
class CSVExporter(Exporter):
Name = "CSV from plot data"
windows = []
def __init__(self, item):
Exporter.__init__(self, item)
self.params = Parameter(name='params', type='group', children=[
{'name': 'separator', 'title': translate("Exporter", 'separator'), 'type': 'list', 'value': 'comma', 'limits': ['comma', 'tab']},
{'name': 'precision', 'title': translate("Exporter", 'precision'), 'type': 'int', 'value': 10, 'limits': [0, None]},
{'name': 'columnMode', 'title': translate("Exporter", 'columnMode'), 'type': 'list', 'limits': ['(x,y) per plot', '(x,y,y,y) for all plots']}
])
def parameters(self):
return self.params
def export(self, fileName=None):
if not isinstance(self.item, PlotItem):
raise Exception("Must have a PlotItem selected for CSV export.")
if fileName is None:
self.fileSaveDialog(filter=["*.csv", "*.tsv"])
return
data = []
header = []
appendAllX = self.params['columnMode'] == '(x,y) per plot'
for i, c in enumerate(self.item.curves):
cd = c.getData()
if cd[0] is None:
continue
data.append(cd)
if hasattr(c, 'implements') and c.implements('plotData') and c.name() is not None:
name = c.name().replace('"', '""') + '_'
xName, yName = '"'+name+'x"', '"'+name+'y"'
else:
xName = 'x%04d' % i
yName = 'y%04d' % i
if appendAllX or i == 0:
header.extend([xName, yName])
else:
header.extend([yName])
if self.params['separator'] == 'comma':
sep = ','
else:
sep = '\t'
with open(fileName, 'w') as fd:
fd.write(sep.join(map(str, header)) + '\n')
i = 0
numFormat = '%%0.%dg' % self.params['precision']
numRows = max([len(d[0]) for d in data])
for i in range(numRows):
for j, d in enumerate(data):
# write x value if this is the first column, or if we want
# x for all rows
if appendAllX or j == 0:
if d is not None and i < len(d[0]):
fd.write(numFormat % d[0][i] + sep)
else:
fd.write(' %s' % sep)
# write y value
if d is not None and i < len(d[1]):
fd.write(numFormat % d[1][i] + sep)
else:
fd.write(' %s' % sep)
fd.write('\n')
CSVExporter.register()
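# --- Illustrative usage (added by the editor, not part of the original module) ---
# Programmatic export of a PlotItem to CSV using the exporter defined above. The
# plot construction is left to the caller; only CSVExporter's own API is exercised,
# and this helper is never called here.
def _example_csv_export(plot_item, file_name='curve_data.csv'):
    exporter = CSVExporter(plot_item)     # must wrap a PlotItem, see export() above
    exporter.params['separator'] = 'tab'  # optional: switch the output to TSV
    exporter.export(file_name)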
| 35.494118
| 153
| 0.472655
|
7602e935ed1a9648be13e325b1689237e6b378d2
| 6,165
|
py
|
Python
|
previous code/shishi_shuiqing-1.py
|
hyywestwood/Spider-of-Water-data
|
f9be30ac787251d221e3e85a24b9a8a84a12e6a8
|
[
"MIT"
] | 2
|
2021-03-26T05:18:44.000Z
|
2021-03-31T14:14:11.000Z
|
previous code/shishi_shuiqing-1.py
|
hyywestwood/Spider-of-Water-data
|
f9be30ac787251d221e3e85a24b9a8a84a12e6a8
|
[
"MIT"
] | 1
|
2021-08-31T08:44:49.000Z
|
2021-08-31T08:44:49.000Z
|
previous code/shishi_shuiqing-1.py
|
hyywestwood/Spider-of-Water-data
|
f9be30ac787251d221e3e85a24b9a8a84a12e6a8
|
[
"MIT"
] | 1
|
2021-03-26T05:18:46.000Z
|
2021-03-26T05:18:46.000Z
|
# -*- coding: utf-8 -*-
# @Time : 2019/4/24 19:33
# @Author : hyy
# @Email : 1554148540@qq.com
# @File : dongtai_ceshi.py
# @Software: PyCharm
import requests
from selenium import webdriver
from bs4 import BeautifulSoup
import os
import time
import random
import smtplib
from email.mime.text import MIMEText
from email.header import Header
import datetime
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
def email_send(text, time):
sender = ' 3140105713@zju.edu.cn'
    receivers = ['1554148540@qq.com']  # recipients; can be set to your QQ mailbox or any other mailbox
    mail_host = "smtp.zju.edu.cn"  # SMTP server
    mail_user = "3140105713@zju.edu.cn"  # username
    mail_pass = "5896westwood"  # password
    # Three arguments: the first is the text content, the second ('plain') sets the
    # text format, the third ('utf-8') sets the encoding
    message = MIMEText(text, 'plain', 'utf-8')
    message['From'] = Header("River Data", 'utf-8')  # sender
    message['To'] = Header("hyy", 'utf-8')  # recipient
    subject = 'Major rivers water data retrieval status ' + time
message['Subject'] = Header(subject, 'utf-8')
try:
smtpObj = smtplib.SMTP()
smtpObj.connect(mail_host, 25) # 25 为 SMTP 端口号
smtpObj.login(mail_user, mail_pass)
smtpObj.sendmail(sender, receivers, message.as_string())
print("邮件发送成功")
except smtplib.SMTPException:
print("Error: 无法发送邮件")
def get_data(url):
retry_count = 3
proxy = get_proxy()
    # Configure the proxy server
try:
IP, PORT = proxy.decode('utf-8').split(':')
except Exception:
        print('No available proxies; waiting 5 minutes')
time.sleep(60*5)
return None
profile = webdriver.FirefoxOptions()
    # profile.add_argument('-headless')  # enable headless mode
    profile.set_preference('network.proxy.type', 1)
    profile.set_preference('network.proxy.http', IP)  # IP is the proxy server address, e.g. '127.0.0.0', as a string
    profile.set_preference('network.proxy.http_port', int(PORT))  # PORT is the proxy server port, e.g. 9999, as an integer
while retry_count > 0:
try:
driver = webdriver.Firefox(options=profile)
time.sleep(15)
            driver.set_page_load_timeout(75)  # set the wait time; 75 seconds to be safe
driver.set_script_timeout(75)
driver.get(url)
element = WebDriverWait(driver, 15).until(EC.presence_of_element_located((By.CLASS_NAME, 'row')))
            time.sleep(random.uniform(1, 20))  # stay on the current page for a random amount of time
html = driver.page_source
bf = BeautifulSoup(html, 'html.parser')
data = bf.find_all('tr')
driver.close()
return data
except Exception:
            print('Error occurred, retrying; attempts remaining: {}'.format(retry_count-1))
retry_count -= 1
driver.close()
delete_proxy(proxy)
    print('Proxy node {}:{} is unavailable and has been removed'.format(IP, PORT))
return None
def get_proxy():
return requests.get("http://127.0.0.1:5010/get/").content
def delete_proxy(proxy):
requests.get("http://127.0.0.1:5010/delete/?proxy={}".format(proxy))
def trans(a):
data = []
for i in range(1,len(a)):
d_str = a[i].text
d_str = d_str.replace('↑', '')
d_str = d_str.replace('↓', '')
d_str = d_str.replace('—', '')
d_str = d_str.replace('\xa0', '')
d_str = d_str.rstrip()[1:len(d_str)]
data.append(d_str.split('\n'))
return data
def write_data(a, flag, a_ex):
    # Process the newly crawled data a against the previous data a_ex
    data = trans(a)
    data1 = trans(a_ex)
    dizhi = [x[3] for x in data1]  # record the existing addresses and times
    shijian = [x[4] for x in data1]
    # Use a list comprehension to read a single column of a 2-D list: [x[0] for x in data]
    # Compare the newly crawled results with the old data and write updated rows to file
for hang in data:
print(hang)
        if dizhi.count(hang[3]) != 0:  # the address already exists
            index = dizhi.index(hang[3])  # record its index
if hang[4] != shijian[index]:
with open(path1 + '\\大江大河实时水情\\' + hang[0] + '\\' + '{}-{}-{}.txt'.format(hang[1], hang[2], hang[3]),
'a',encoding='utf-8') as f:
f.write('{}\t{}\t{}\t{} \n'.format(time.strftime("%Y-", time.localtime()) + hang[4],
hang[5], hang[6],hang[7]))
else:
            # When the address does not exist, create a new file
with open(path1 + '\\大江大河实时水情\\' + hang[0] + '\\' + '{}-{}-{}.txt'.format(hang[1], hang[2], hang[3]),
'w', encoding='utf-8') as f:
f.write('{}\t{}\t{}\t{} \n'.format('时间', '水位(米)', '流量(m^3/s)', '警戒水位(米)'))
a_ex = a
flag += 1
if flag % 6 == 1:
        run_stage = 'Crawl time: {}'.format(time.strftime("%Y-", time.localtime()) + data[1][4])
email_send(run_stage, data[1][4])
# pass
now = datetime.datetime.now()
sleep_time = 3600*6 + random.uniform(1, 3600*4)
# sleep_time = random.uniform(1, 8)
end = now + datetime.timedelta(days=sleep_time / 86400)
    print('Going to sleep until: {}'.format(end))
time.sleep(sleep_time)
return flag, a_ex
if __name__ == '__main__':
    path1 = os.path.abspath('.')  # path of the directory containing this script
folder = os.path.exists(path1 + '\\大江大河实时水情')
if not folder:
os.makedirs(path1 + '\\大江大河实时水情')
    print('Spider started running')
url = 'http://ditu.92cha.com/shuiqing.php?w=hd'
a_ex = None
while a_ex == None:
a_ex = get_data(url)
# a = get_data(url)
# for i in range(1,len(a)):
# d_str = a[i].text
# d_str = d_str.replace('↑', '')
# d_str = d_str.replace('↓', '')
# d_str = d_str.replace('—', '')
# d_str = d_str.replace('\xa0', '')
# d_str = d_str.rstrip()[1:len(d_str)]
# data = d_str.split('\n')
# folder = os.path.exists(path1 + '\\大江大河实时水情\\'+ data[0])
# if not folder:
# os.makedirs(path1 + '\\大江大河实时水情\\'+ data[0])
# with open(path1 + '\\大江大河实时水情\\'+ data[0] + '\\' + '{}-{}-{}.txt'.format(data[1],data[2],data[3]), 'w', encoding='utf-8') as f:
# f.write('{}\t{}\t{}\t{} \n'.format('时间', '水位(米)', '流量(m^3/s)', '警戒水位(米)'))
# f.write('{}\t{}\t{}\t{} \n'.format(data[4], data[5], data[6], data[7]))
# print('初始数据爬取完成')
flag = 0
while True:
a = None
while a == None:
a = get_data(url)
flag, a_ex = write_data(a, flag, a_ex)
| 34.441341
| 137
| 0.560584
|
806a79dbf5fea18d2e6b8f200a63b6e529691d02
| 2,606
|
py
|
Python
|
examples/inference/standard_bfgs_demo.py
|
nticea/superhawkes
|
cbaec7c4aae7ced71ec68f6d69cc516bc19ace5a
|
[
"MIT"
] | 221
|
2015-02-26T04:25:34.000Z
|
2022-03-27T13:06:10.000Z
|
examples/inference/standard_bfgs_demo.py
|
nticea/superhawkes
|
cbaec7c4aae7ced71ec68f6d69cc516bc19ace5a
|
[
"MIT"
] | 20
|
2015-08-04T01:47:19.000Z
|
2021-08-08T00:22:44.000Z
|
examples/inference/standard_bfgs_demo.py
|
nticea/superhawkes
|
cbaec7c4aae7ced71ec68f6d69cc516bc19ace5a
|
[
"MIT"
] | 86
|
2015-02-22T23:36:32.000Z
|
2021-11-13T20:56:07.000Z
|
import numpy as np
import matplotlib.pyplot as plt
from pyhawkes.models import DiscreteTimeNetworkHawkesModelSpikeAndSlab, DiscreteTimeStandardHawkesModel
from pyhawkes.utils.basis import IdentityBasis
def sample_from_network_hawkes(K, T, dt, dt_max, B):
# Create a true model
true_model = DiscreteTimeNetworkHawkesModelSpikeAndSlab(K=K, dt=dt, dt_max=dt_max, B=B,
network_hypers=dict(p=0.1))
# Plot the true network
plt.ion()
true_model.plot_network()
# Sample from the true model
S,R = true_model.generate(T=T)
# Return the spike count matrix
return S, true_model
def demo(seed=None):
"""
Create a discrete time Hawkes model and generate from it.
:return:
"""
if seed is None:
seed = np.random.randint(2**32)
print("Setting seed to ", seed)
np.random.seed(seed)
K = 5 # Number of nodes
T = 10000 # Number of time bins to simulate
dt = 1 # Time bin size
dt_max = 50 # Impulse response length
B = 1 # Number of basis functions
# Sample from a sparse network Hawkes model
S, true_model = sample_from_network_hawkes(K, T, dt, dt_max, B)
# Make a new model for inference
# test_basis = IdentityBasis(dt, dt_max, allow_instantaneous=False)
test_basis = true_model.basis
test_model = DiscreteTimeStandardHawkesModel(K=K, dt=dt, dt_max=dt_max+dt,
beta=1.0,
basis=test_basis,
allow_self_connections=True)
test_model.add_data(S)
# DEBUG: Initialize with the true parameters of the network Hawkes model
# test_model.initialize_with_gibbs_model(true_model)
test_model.fit_with_bfgs()
print("lambda0 true: ", true_model.bias_model.lambda0)
print("lambda0 test ", test_model.bias)
print("")
print("W true: ", true_model.weight_model.A * true_model.weight_model.W)
print("W test: ", test_model.W)
print("")
print("ll true: ", true_model.log_likelihood())
print("ll test: ", test_model.log_likelihood())
# test_model.plot_network()
# Plot the rates
plt.figure()
for k in range(3):
plt.subplot(3,1,k+1)
plt.plot(np.arange(T) * dt, true_model.compute_rate(proc=k), '-b')
plt.plot(np.arange(T) * dt, test_model.compute_rate(ks=k), '-r')
lim = plt.ylim()
plt.ylim(0, 1.25*lim[1])
plt.ioff()
plt.show()
demo(11223344)
| 31.780488
| 103
| 0.613584
|
006b8bf67fae2bd521f0b0c7c4eb7f2c082f384a
| 1,555
|
py
|
Python
|
wsgi_basic/service.py
|
QthCN/wsgi-basic
|
e080304aeaa9922fc9367dbb5cb57a7ab9494b38
|
[
"Apache-2.0"
] | null | null | null |
wsgi_basic/service.py
|
QthCN/wsgi-basic
|
e080304aeaa9922fc9367dbb5cb57a7ab9494b38
|
[
"Apache-2.0"
] | null | null | null |
wsgi_basic/service.py
|
QthCN/wsgi-basic
|
e080304aeaa9922fc9367dbb5cb57a7ab9494b38
|
[
"Apache-2.0"
] | null | null | null |
import functools
import sys
from oslo_config import cfg
from oslo_log import log
from paste import deploy
import routes
from wsgi_basic import controllers
from wsgi_basic import routers
from wsgi_basic.common import wsgi
from wsgi_basic.token import routers as token_routers
from wsgi_basic.user import routers as user_routers
CONF = cfg.CONF
LOG = log.getLogger(__name__)
def loadapp(conf, name):
# NOTE(blk-u): Save the application being loaded in the controllers module.
# This is similar to how public_app_factory() and v3_app_factory()
# register the version with the controllers module.
controllers.latest_app = deploy.loadapp(conf, name=name)
return controllers.latest_app
def fail_gracefully(f):
"""Logs exceptions and aborts."""
@functools.wraps(f)
def wrapper(*args, **kw):
try:
return f(*args, **kw)
except Exception as e:
LOG.debug(e, exc_info=True)
# exception message is printed to all logs
LOG.critical(e)
sys.exit(1)
return wrapper
@fail_gracefully
def public_app_factory(global_conf, **local_conf):
return wsgi.ComposingRouter(routes.Mapper(),
[token_routers.Router(),
user_routers.Router(),
routers.Versions('public')])
@fail_gracefully
def public_version_app_factory(global_conf, **local_conf):
return wsgi.ComposingRouter(routes.Mapper(),
[routers.Versions('public')])
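# --- Illustrative configuration sketch (added by the editor, not part of the original module) ---
# A minimal paste.deploy ini that loadapp() above could consume. The file path and
# section names are placeholders; only the factory references come from this module.
#
# [app:public]
# paste.app_factory = wsgi_basic.service:public_app_factory
#
# [app:public_version]
# paste.app_factory = wsgi_basic.service:public_version_app_factory
#
# loadapp('config:/etc/wsgi_basic/wsgi-basic-paste.ini', name='public')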
| 26.810345
| 79
| 0.663023
|
3697d8af12d7877985f4330d902365e7784fe721
| 1,848
|
py
|
Python
|
reports/tests/test_models.py
|
Igorishe/Report_Traker
|
886da5d5dd40247779a76611cf6b66cb95963ad7
|
[
"MIT"
] | null | null | null |
reports/tests/test_models.py
|
Igorishe/Report_Traker
|
886da5d5dd40247779a76611cf6b66cb95963ad7
|
[
"MIT"
] | null | null | null |
reports/tests/test_models.py
|
Igorishe/Report_Traker
|
886da5d5dd40247779a76611cf6b66cb95963ad7
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from reports.models import MobinetReport, MoneyBack, Report
class ReportModelTest(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
Report.objects.create(
text='Текст больше 15 знаков',
author=123144,
author_name="author_username",
)
cls.task = Report.objects.get(text='Текст больше 15 знаков')
def test_str_field(self):
"""Отображение поля __str__ объекта task"""
task = ReportModelTest.task
expected_object_name = task.text[:12]
self.assertEquals(expected_object_name, str(task))
class MobinetReportModelTest(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
MobinetReport.objects.create(
text='Текст больше 15 знаков',
author=123144,
author_name="author_username",
)
cls.task = MobinetReport.objects.get(text='Текст больше 15 знаков')
def test_str_field(self):
"""Отображение поля __str__ объекта task"""
task = MobinetReportModelTest.task
expected_object_name = task.text[:12]
self.assertEquals(expected_object_name, str(task))
class MoneyBackModelTest(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
MoneyBack.objects.create(
text='Текст больше 15 знаков',
author=123144,
author_name="author_username",
value="123.00",
link="128542",
)
cls.task = MoneyBack.objects.get(text='Текст больше 15 знаков')
def test_str_field(self):
"""Отображение поля __str__ объекта task"""
task = MoneyBackModelTest.task
expected_object_name = task.text[:12]
self.assertEquals(expected_object_name, str(task))
| 30.8
| 75
| 0.636905
|
f513507fe684849e95f9dcc82113402d9fe8db59
| 79,419
|
py
|
Python
|
Packs/Base/Scripts/CommonServerPython/CommonServerPython_test.py
|
salgattpx/content
|
d225c92df633b1cfc6abb7dbead4d39dec905f72
|
[
"MIT"
] | null | null | null |
Packs/Base/Scripts/CommonServerPython/CommonServerPython_test.py
|
salgattpx/content
|
d225c92df633b1cfc6abb7dbead4d39dec905f72
|
[
"MIT"
] | null | null | null |
Packs/Base/Scripts/CommonServerPython/CommonServerPython_test.py
|
salgattpx/content
|
d225c92df633b1cfc6abb7dbead4d39dec905f72
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import demistomock as demisto
import copy
import json
import re
import os
import sys
import requests
from pytest import raises, mark
import pytest
from CommonServerPython import xml2json, json2xml, entryTypes, formats, tableToMarkdown, underscoreToCamelCase, \
flattenCell, date_to_timestamp, datetime, camelize, pascalToSpace, argToList, \
remove_nulls_from_dictionary, is_error, get_error, hash_djb2, fileResult, is_ip_valid, get_demisto_version, \
IntegrationLogger, parse_date_string, IS_PY3, DebugLogger, b64_encode, parse_date_range, return_outputs, \
argToBoolean, ipv4Regex, ipv4cidrRegex, ipv6cidrRegex, ipv6Regex, batch, FeedIndicatorType, \
encode_string_results, safe_load_json, remove_empty_elements, aws_table_to_markdown, is_demisto_version_ge, \
appendContext, auto_detect_indicator_type, handle_proxy
try:
from StringIO import StringIO
except ImportError:
# Python 3
from io import StringIO # noqa
INFO = {'b': 1,
'a': {
'safd': 3,
'b': [
{'c': {'d': 432}, 'd': 2},
{'c': {'f': 1}},
{'b': 1234},
{'c': {'d': 4567}},
{'c': {'d': 11}},
{'c': {'d': u'asdf'}}],
'c': {'d': 10},
}
}
@pytest.fixture()
def clear_version_cache():
"""
Clear the version cache at end of the test (in case we mocked demisto.serverVersion)
"""
yield
if hasattr(get_demisto_version, '_version'):
delattr(get_demisto_version, '_version')
def test_xml():
import json
xml = b"<work><employee><id>100</id><name>foo</name></employee><employee><id>200</id><name>goo</name>" \
b"</employee></work>"
jsonExpected = '{"work": {"employee": [{"id": "100", "name": "foo"}, {"id": "200", "name": "goo"}]}}'
jsonActual = xml2json(xml)
assert jsonActual == jsonExpected, "expected\n" + jsonExpected + "\n to equal \n" + jsonActual
jsonDict = json.loads(jsonActual)
assert jsonDict['work']['employee'][0]['id'] == "100", 'id of first employee must be 100'
assert jsonDict['work']['employee'][1]['name'] == "goo", 'name of second employee must be goo'
xmlActual = json2xml(jsonActual)
assert xmlActual == xml, "expected:\n{}\nto equal:\n{}".format(xml, xmlActual)
def toEntry(table):
return {
'Type': entryTypes['note'],
'Contents': table,
'ContentsFormat': formats['table'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': table
}
DATA = [
{
'header_1': 'a1',
'header_2': 'b1',
'header_3': 'c1'
},
{
'header_1': 'a2',
'header_2': 'b2',
'header_3': 'c2'
},
{
'header_1': 'a3',
'header_2': 'b3',
'header_3': 'c3'
},
]
def test_tbl_to_md_only_data():
# sanity
table = tableToMarkdown('tableToMarkdown test', DATA)
expected_table = '''### tableToMarkdown test
|header_1|header_2|header_3|
|---|---|---|
| a1 | b1 | c1 |
| a2 | b2 | c2 |
| a3 | b3 | c3 |
'''
assert table == expected_table
def test_tbl_to_md_header_transform_underscoreToCamelCase():
# header transform
table = tableToMarkdown('tableToMarkdown test with headerTransform', DATA,
headerTransform=underscoreToCamelCase)
expected_table = '''### tableToMarkdown test with headerTransform
|Header1|Header2|Header3|
|---|---|---|
| a1 | b1 | c1 |
| a2 | b2 | c2 |
| a3 | b3 | c3 |
'''
assert table == expected_table
def test_tbl_to_md_multiline():
# escaping characters: multiline + md-chars
data = copy.deepcopy(DATA)
for i, d in enumerate(data):
d['header_2'] = 'b%d.1\nb%d.2' % (i + 1, i + 1,)
d['header_3'] = 'c%d|1' % (i + 1,)
table = tableToMarkdown('tableToMarkdown test with multiline', data)
expected_table = '''### tableToMarkdown test with multiline
|header_1|header_2|header_3|
|---|---|---|
| a1 | b1.1<br>b1.2 | c1\|1 |
| a2 | b2.1<br>b2.2 | c2\|1 |
| a3 | b3.1<br>b3.2 | c3\|1 |
'''
assert table == expected_table
def test_tbl_to_md_url():
# url + empty data
data = copy.deepcopy(DATA)
for i, d in enumerate(data):
d['header_3'] = '[url](https:\\demisto.com)'
d['header_2'] = None
table_url_missing_info = tableToMarkdown('tableToMarkdown test with url and missing info', data)
expected_table_url_missing_info = '''### tableToMarkdown test with url and missing info
|header_1|header_2|header_3|
|---|---|---|
| a1 | | [url](https:\\demisto.com) |
| a2 | | [url](https:\\demisto.com) |
| a3 | | [url](https:\\demisto.com) |
'''
assert table_url_missing_info == expected_table_url_missing_info
def test_tbl_to_md_single_column():
# single column table
table_single_column = tableToMarkdown('tableToMarkdown test with single column', DATA, ['header_1'])
expected_table_single_column = '''### tableToMarkdown test with single column
|header_1|
|---|
| a1 |
| a2 |
| a3 |
'''
assert table_single_column == expected_table_single_column
def test_is_ip_valid():
valid_ip_v6 = "FE80:0000:0000:0000:0202:B3FF:FE1E:8329"
valid_ip_v6_b = "FE80::0202:B3FF:FE1E:8329"
invalid_ip_v6 = "KKKK:0000:0000:0000:0202:B3FF:FE1E:8329"
valid_ip_v4 = "10.10.10.10"
invalid_ip_v4 = "10.10.10.9999"
invalid_not_ip_with_ip_structure = "1.1.1.1.1.1.1.1.1.1.1.1.1.1.1"
not_ip = "Demisto"
assert not is_ip_valid(valid_ip_v6)
assert is_ip_valid(valid_ip_v6, True)
assert is_ip_valid(valid_ip_v6_b, True)
assert not is_ip_valid(invalid_ip_v6, True)
assert not is_ip_valid(not_ip, True)
assert is_ip_valid(valid_ip_v4)
assert not is_ip_valid(invalid_ip_v4)
assert not is_ip_valid(invalid_not_ip_with_ip_structure)
def test_tbl_to_md_list_values():
# list values
data = copy.deepcopy(DATA)
for i, d in enumerate(data):
d['header_3'] = [i + 1, 'second item']
d['header_2'] = 'hi'
table_list_field = tableToMarkdown('tableToMarkdown test with list field', data)
expected_table_list_field = '''### tableToMarkdown test with list field
|header_1|header_2|header_3|
|---|---|---|
| a1 | hi | 1,<br>second item |
| a2 | hi | 2,<br>second item |
| a3 | hi | 3,<br>second item |
'''
assert table_list_field == expected_table_list_field
def test_tbl_to_md_empty_fields():
# all fields are empty
data = [
{
'a': None,
'b': None,
'c': None,
} for _ in range(3)
]
table_all_none = tableToMarkdown('tableToMarkdown test with all none fields', data)
expected_table_all_none = '''### tableToMarkdown test with all none fields
|a|b|c|
|---|---|---|
| | | |
| | | |
| | | |
'''
assert table_all_none == expected_table_all_none
# all fields are empty - removed
table_all_none2 = tableToMarkdown('tableToMarkdown test with all none fields2', data, removeNull=True)
expected_table_all_none2 = '''### tableToMarkdown test with all none fields2
**No entries.**
'''
assert table_all_none2 == expected_table_all_none2
def test_tbl_to_md_header_not_on_first_object():
# header not on first object
data = copy.deepcopy(DATA)
data[1]['extra_header'] = 'sample'
table_extra_header = tableToMarkdown('tableToMarkdown test with extra header', data,
headers=['header_1', 'header_2', 'extra_header'])
expected_table_extra_header = '''### tableToMarkdown test with extra header
|header_1|header_2|extra_header|
|---|---|---|
| a1 | b1 | |
| a2 | b2 | sample |
| a3 | b3 | |
'''
assert table_extra_header == expected_table_extra_header
def test_tbl_to_md_no_header():
# no header
table_no_headers = tableToMarkdown('tableToMarkdown test with no headers', DATA,
headers=['no', 'header', 'found'], removeNull=True)
expected_table_no_headers = '''### tableToMarkdown test with no headers
**No entries.**
'''
assert table_no_headers == expected_table_no_headers
def test_tbl_to_md_dict_value():
# dict value
data = copy.deepcopy(DATA)
data[1]['extra_header'] = {'sample': 'qwerty', 'sample2': 'asdf'}
table_dict_record = tableToMarkdown('tableToMarkdown test with dict record', data,
headers=['header_1', 'header_2', 'extra_header'])
expected_dict_record = '''### tableToMarkdown test with dict record
|header_1|header_2|extra_header|
|---|---|---|
| a1 | b1 | |
| a2 | b2 | sample: qwerty<br>sample2: asdf |
| a3 | b3 | |
'''
assert table_dict_record == expected_dict_record
def test_tbl_to_md_string_header():
# string header (instead of list)
table_string_header = tableToMarkdown('tableToMarkdown string header', DATA, 'header_1')
expected_string_header_tbl = '''### tableToMarkdown string header
|header_1|
|---|
| a1 |
| a2 |
| a3 |
'''
assert table_string_header == expected_string_header_tbl
def test_tbl_to_md_list_of_strings_instead_of_dict():
# list of string values instead of list of dict objects
table_string_array = tableToMarkdown('tableToMarkdown test with string array', ['foo', 'bar', 'katz'], ['header_1'])
expected_string_array_tbl = '''### tableToMarkdown test with string array
|header_1|
|---|
| foo |
| bar |
| katz |
'''
assert table_string_array == expected_string_array_tbl
def test_tbl_to_md_list_of_strings_instead_of_dict_and_string_header():
# combination: string header + string values list
table_string_array_string_header = tableToMarkdown('tableToMarkdown test with string array and string header',
['foo', 'bar', 'katz'], 'header_1')
expected_string_array_string_header_tbl = '''### tableToMarkdown test with string array and string header
|header_1|
|---|
| foo |
| bar |
| katz |
'''
assert table_string_array_string_header == expected_string_array_string_header_tbl
def test_tbl_to_md_dict_with_special_character():
data = {
'header_1': u'foo',
'header_2': [u'\xe2.rtf']
}
table_with_character = tableToMarkdown('tableToMarkdown test with special character', data)
expected_string_with_special_character = '''### tableToMarkdown test with special character
|header_1|header_2|
|---|---|
| foo | â.rtf |
'''
assert table_with_character == expected_string_with_special_character
def test_tbl_to_md_header_with_special_character():
data = {
'header_1': u'foo'
}
table_with_character = tableToMarkdown('tableToMarkdown test with special character Ù', data)
expected_string_with_special_character = '''### tableToMarkdown test with special character Ù
|header_1|
|---|
| foo |
'''
assert table_with_character == expected_string_with_special_character
def test_flatten_cell():
# sanity
utf8_to_flatten = b'abcdefghijklmnopqrstuvwxyz1234567890!'.decode('utf8')
flatten_text = flattenCell(utf8_to_flatten)
expected_string = 'abcdefghijklmnopqrstuvwxyz1234567890!'
assert flatten_text == expected_string
# list of uft8 and string to flatten
str_a = b'abcdefghijklmnopqrstuvwxyz1234567890!'
utf8_b = str_a.decode('utf8')
list_to_flatten = [str_a, utf8_b]
flatten_text2 = flattenCell(list_to_flatten)
expected_flatten_string = 'abcdefghijklmnopqrstuvwxyz1234567890!,\nabcdefghijklmnopqrstuvwxyz1234567890!'
assert flatten_text2 == expected_flatten_string
# special character test
special_char = u'会'
list_of_special = [special_char, special_char]
flattenCell(list_of_special)
flattenCell(special_char)
# dictionary test
dict_to_flatten = {'first': u'会'}
expected_flatten_dict = u'{\n "first": "\u4f1a"\n}'
assert flattenCell(dict_to_flatten) == expected_flatten_dict
def test_hash_djb2():
assert hash_djb2("test") == 2090756197, "Invalid value of hash_djb2"
def test_camelize():
non_camalized = [{'chookity_bop': 'asdasd'}, {'ab_c': 'd e', 'fgh_ijk': 'lm', 'nop': 'qr_st'}]
expected_output = [{'ChookityBop': 'asdasd'}, {'AbC': 'd e', 'Nop': 'qr_st', 'FghIjk': 'lm'}]
assert camelize(non_camalized, '_') == expected_output
non_camalized2 = {'ab_c': 'd e', 'fgh_ijk': 'lm', 'nop': 'qr_st'}
expected_output2 = {'AbC': 'd e', 'Nop': 'qr_st', 'FghIjk': 'lm'}
assert camelize(non_camalized2, '_') == expected_output2
# Note this test will fail when run locally (in pycharm/vscode) as it assumes the machine (docker image) has UTC timezone set
def test_date_to_timestamp():
assert date_to_timestamp('2018-11-06T08:56:41') == 1541494601000
assert date_to_timestamp(datetime.strptime('2018-11-06T08:56:41', "%Y-%m-%dT%H:%M:%S")) == 1541494601000
def test_pascalToSpace():
use_cases = [
('Validate', 'Validate'),
('validate', 'Validate'),
('TCP', 'TCP'),
('eventType', 'Event Type'),
('eventID', 'Event ID'),
('eventId', 'Event Id'),
('IPAddress', 'IP Address'),
]
for s, expected in use_cases:
assert pascalToSpace(s) == expected, 'Error on {} != {}'.format(pascalToSpace(s), expected)
def test_safe_load_json():
valid_json_str = '{"foo": "bar"}'
expected_valid_json_result = {u'foo': u'bar'}
assert expected_valid_json_result == safe_load_json(valid_json_str)
def test_remove_empty_elements():
test_dict = {
"foo": "bar",
"baz": {},
"empty": [],
"nested_dict": {
"empty_list": [],
"hummus": "pita"
},
"nested_list": {
"more_empty_list": []
}
}
expected_result = {
"foo": "bar",
"nested_dict": {
"hummus": "pita"
}
}
assert expected_result == remove_empty_elements(test_dict)
def test_aws_table_to_markdown():
header = "AWS DynamoDB DescribeBackup"
raw_input = {
'BackupDescription': {
"Foo": "Bar",
"Baz": "Bang",
"TestKey": "TestValue"
}
}
expected_output = '''### AWS DynamoDB DescribeBackup
|Baz|Foo|TestKey|
|---|---|---|
| Bang | Bar | TestValue |
'''
assert expected_output == aws_table_to_markdown(raw_input, header)
def test_argToList():
expected = ['a', 'b', 'c']
test1 = ['a', 'b', 'c']
test2 = 'a,b,c'
test3 = '["a","b","c"]'
test4 = 'a;b;c'
results = [argToList(test1), argToList(test2), argToList(test2, ','), argToList(test3), argToList(test4, ';')]
for result in results:
assert expected == result, 'argToList test failed, {} is not equal to {}'.format(str(result), str(expected))
def test_remove_nulls():
temp_dictionary = {"a": "b", "c": 4, "e": [], "f": {}, "g": None, "h": "", "i": [1], "k": ()}
expected_dictionary = {"a": "b", "c": 4, "i": [1]}
remove_nulls_from_dictionary(temp_dictionary)
assert expected_dictionary == temp_dictionary, \
"remove_nulls_from_dictionary test failed, {} is not equal to {}".format(str(temp_dictionary),
str(expected_dictionary))
def test_is_error_true():
execute_command_results = [
{
"Type": entryTypes["error"],
"ContentsFormat": formats["text"],
"Contents": "this is error message"
}
]
assert is_error(execute_command_results)
def test_is_error_none():
assert not is_error(None)
def test_is_error_single_entry():
execute_command_results = {
"Type": entryTypes["error"],
"ContentsFormat": formats["text"],
"Contents": "this is error message"
}
assert is_error(execute_command_results)
def test_is_error_false():
execute_command_results = [
{
"Type": entryTypes["note"],
"ContentsFormat": formats["text"],
"Contents": "this is regular note"
}
]
assert not is_error(execute_command_results)
def test_not_error_entry():
execute_command_results = "invalid command results as string"
assert not is_error(execute_command_results)
def test_get_error():
execute_command_results = [
{
"Type": entryTypes["error"],
"ContentsFormat": formats["text"],
"Contents": "this is error message"
}
]
error = get_error(execute_command_results)
assert error == "this is error message"
def test_get_error_single_entry():
execute_command_results = {
"Type": entryTypes["error"],
"ContentsFormat": formats["text"],
"Contents": "this is error message"
}
error = get_error(execute_command_results)
assert error == "this is error message"
def test_get_error_need_raise_error_on_non_error_input():
execute_command_results = [
{
"Type": entryTypes["note"],
"ContentsFormat": formats["text"],
"Contents": "this is not an error"
}
]
try:
get_error(execute_command_results)
except ValueError as exception:
assert "execute_command_result has no error entry. before using get_error use is_error" in str(exception)
return
assert False
@mark.parametrize('data,data_expected', [
("this is a test", b"this is a test"),
(u"עברית", u"עברית".encode('utf-8')),
(b"binary data\x15\x00", b"binary data\x15\x00"),
]) # noqa: E124
def test_fileResult(mocker, request, data, data_expected):
mocker.patch.object(demisto, 'uniqueFile', return_value="test_file_result")
mocker.patch.object(demisto, 'investigation', return_value={'id': '1'})
file_name = "1_test_file_result"
def cleanup():
try:
os.remove(file_name)
except OSError:
pass
request.addfinalizer(cleanup)
res = fileResult("test.txt", data)
assert res['File'] == "test.txt"
with open(file_name, 'rb') as f:
assert f.read() == data_expected
# Error whose str representation is always a unicode string
class SpecialErr(Exception):
def __str__(self):
return u"מיוחד"
def test_logger():
from CommonServerPython import LOG
LOG(u'€')
LOG(Exception(u'€'))
LOG(SpecialErr(12))
def test_logger_write(mocker):
mocker.patch.object(demisto, 'params', return_value={
'credentials': {'password': 'my_password'},
})
mocker.patch.object(demisto, 'info')
ilog = IntegrationLogger()
ilog.write("This is a test with my_password")
ilog.print_log()
# assert that the print doesn't contain my_password
# call_args is tuple (args list, kwargs). we only need the args
args = demisto.info.call_args[0]
assert 'This is a test' in args[0]
assert 'my_password' not in args[0]
assert '<XX_REPLACED>' in args[0]
def test_logger_init_key_name(mocker):
mocker.patch.object(demisto, 'params', return_value={
'key': {'password': 'my_password'},
'secret': 'my_secret'
})
mocker.patch.object(demisto, 'info')
ilog = IntegrationLogger()
ilog.write("This is a test with my_password and my_secret")
ilog.print_log()
# assert that the print doesn't contain my_password
# call_args is tuple (args list, kwargs). we only need the args
args = demisto.info.call_args[0]
assert 'This is a test' in args[0]
assert 'my_password' not in args[0]
assert 'my_secret' not in args[0]
assert '<XX_REPLACED>' in args[0]
def test_logger_replace_strs(mocker):
mocker.patch.object(demisto, 'params', return_value={
'apikey': 'my_apikey',
})
ilog = IntegrationLogger()
ilog.add_replace_strs('special_str', '') # also check that empty string is not added by mistake
ilog('my_apikey is special_str and b64: ' + b64_encode('my_apikey'))
assert ('' not in ilog.replace_strs)
assert ilog.messages[0] == '<XX_REPLACED> is <XX_REPLACED> and b64: <XX_REPLACED>'
SENSITIVE_PARAM = {
'app': None,
'authentication': {
'credential': '',
'credentials': {
'id': '',
'locked': False,
'modified': '0001-01-01T00: 00: 00Z',
'name': '',
'password': 'cred_pass',
'sortValues': None,
'sshkey': 'ssh_key_secret',
'sshkeyPass': 'ssh_key_secret_pass',
'user': '',
'vaultInstanceId': '',
'version': 0,
'workgroup': ''
},
'identifier': 'admin',
'password': 'ident_pass',
'passwordChanged': False
},
}
def test_logger_replace_strs_credentials(mocker):
mocker.patch.object(demisto, 'params', return_value=SENSITIVE_PARAM)
ilog = IntegrationLogger()
# log some secrets
ilog('my cred pass: cred_pass. my ssh key: ssh_key_secret. my ssh pass: ssh_key_secret_pass. ident: ident_pass:')
for s in ('cred_pass', 'ssh_key_secret', 'ssh_key_secret_pass', 'ident_pass'):
assert s not in ilog.messages[0]
def test_debug_logger_replace_strs(mocker):
mocker.patch.object(demisto, 'params', return_value=SENSITIVE_PARAM)
debug_logger = DebugLogger()
debug_logger.int_logger.set_buffering(True)
debug_logger.log_start_debug()
msg = debug_logger.int_logger.messages[0]
assert 'debug-mode started' in msg
assert 'Params:' in msg
for s in ('cred_pass', 'ssh_key_secret', 'ssh_key_secret_pass', 'ident_pass'):
assert s not in msg
def test_is_mac_address():
from CommonServerPython import is_mac_address
mac_address_false = 'AA:BB:CC:00:11'
mac_address_true = 'AA:BB:CC:00:11:22'
assert (is_mac_address(mac_address_false) is False)
assert (is_mac_address(mac_address_true))
def test_return_error_command(mocker):
from CommonServerPython import return_error
err_msg = "Testing unicode Ё"
outputs = {'output': 'error'}
expected_error = {
'Type': entryTypes['error'],
'ContentsFormat': formats['text'],
'Contents': err_msg,
"EntryContext": outputs
}
# Test command that is not fetch-incidents
mocker.patch.object(demisto, 'command', return_value="test-command")
mocker.patch.object(sys, 'exit')
mocker.spy(demisto, 'results')
return_error(err_msg, '', outputs)
assert str(demisto.results.call_args) == "call({})".format(expected_error)
def test_return_error_fetch_incidents(mocker):
from CommonServerPython import return_error
err_msg = "Testing unicode Ё"
# Test fetch-incidents
mocker.patch.object(demisto, 'command', return_value="fetch-incidents")
returned_error = False
try:
return_error(err_msg)
except Exception as e:
returned_error = True
assert str(e) == err_msg
assert returned_error
def test_return_error_fetch_indicators(mocker):
from CommonServerPython import return_error
err_msg = "Testing unicode Ё"
# Test fetch-indicators
mocker.patch.object(demisto, 'command', return_value="fetch-indicators")
returned_error = False
try:
return_error(err_msg)
except Exception as e:
returned_error = True
assert str(e) == err_msg
assert returned_error
def test_return_error_long_running_execution(mocker):
from CommonServerPython import return_error
err_msg = "Testing unicode Ё"
# Test long-running-execution
mocker.patch.object(demisto, 'command', return_value="long-running-execution")
returned_error = False
try:
return_error(err_msg)
except Exception as e:
returned_error = True
assert str(e) == err_msg
assert returned_error
def test_return_error_script(mocker, monkeypatch):
from CommonServerPython import return_error
mocker.patch.object(sys, 'exit')
mocker.spy(demisto, 'results')
monkeypatch.delattr(demisto, 'command')
err_msg = "Testing unicode Ё"
outputs = {'output': 'error'}
expected_error = {
'Type': entryTypes['error'],
'ContentsFormat': formats['text'],
'Contents': err_msg,
"EntryContext": outputs
}
assert not hasattr(demisto, 'command')
return_error(err_msg, '', outputs)
assert str(demisto.results.call_args) == "call({})".format(expected_error)
def test_exception_in_return_error(mocker):
from CommonServerPython import return_error, IntegrationLogger
expected = {'EntryContext': None, 'Type': 4, 'ContentsFormat': 'text', 'Contents': 'Message'}
mocker.patch.object(demisto, 'results')
mocker.patch.object(IntegrationLogger, '__call__')
with raises(SystemExit, match='0'):
return_error("Message", error=ValueError("Error!"))
results = demisto.results.call_args[0][0]
assert expected == results
    # IntegrationLogger.__call__ (i.e. LOG) is invoked twice when an exception is supplied
assert IntegrationLogger.__call__.call_count == 2
def test_get_demisto_version(mocker, clear_version_cache):
    # verify the expected server version and build number are returned when the Demisto class has the demistoVersion attribute
mocker.patch.object(
demisto,
'demistoVersion',
return_value={
'version': '5.0.0',
'buildNumber': '50000'
}
)
assert get_demisto_version() == {
'version': '5.0.0',
'buildNumber': '50000'
}
# call again to check cache
assert get_demisto_version() == {
'version': '5.0.0',
'buildNumber': '50000'
}
# call count should be 1 as we cached
assert demisto.demistoVersion.call_count == 1
# test is_demisto_version_ge
assert is_demisto_version_ge('5.0.0')
assert is_demisto_version_ge('4.5.0')
assert not is_demisto_version_ge('5.5.0')
def test_is_demisto_version_ge_4_5(mocker):
get_version_patch = mocker.patch('CommonServerPython.get_demisto_version')
get_version_patch.side_effect = AttributeError('simulate missing demistoVersion')
assert not is_demisto_version_ge('5.0.0')
assert not is_demisto_version_ge('6.0.0')
with raises(AttributeError, match='simulate missing demistoVersion'):
is_demisto_version_ge('4.5.0')
def test_assign_params():
from CommonServerPython import assign_params
res = assign_params(a='1', b=True, c=None, d='')
assert res == {'a': '1', 'b': True}
class TestBuildDBotEntry(object):
def test_build_dbot_entry(self):
from CommonServerPython import build_dbot_entry
res = build_dbot_entry('user@example.com', 'Email', 'Vendor', 1)
assert res == {'DBotScore': {'Indicator': 'user@example.com', 'Type': 'email', 'Vendor': 'Vendor', 'Score': 1}}
def test_build_dbot_entry_no_malicious(self):
from CommonServerPython import build_dbot_entry
res = build_dbot_entry('user@example.com', 'Email', 'Vendor', 3, build_malicious=False)
assert res == {'DBotScore': {'Indicator': 'user@example.com', 'Type': 'email', 'Vendor': 'Vendor', 'Score': 3}}
def test_build_dbot_entry_malicious(self):
from CommonServerPython import build_dbot_entry, outputPaths
res = build_dbot_entry('user@example.com', 'Email', 'Vendor', 3, 'Malicious email')
assert res == {
"DBotScore": {
"Vendor": "Vendor",
"Indicator": "user@example.com",
"Score": 3,
"Type": "email"
},
outputPaths['email']: {
"Malicious": {
"Vendor": "Vendor",
"Description": "Malicious email"
},
"Address": "user@example.com"
}
}
def test_build_malicious_dbot_entry_file(self):
from CommonServerPython import build_malicious_dbot_entry, outputPaths
res = build_malicious_dbot_entry('md5hash', 'MD5', 'Vendor', 'Google DNS')
assert res == {
outputPaths['file']:
{"Malicious": {"Vendor": "Vendor", "Description": "Google DNS"}, "MD5": "md5hash"}}
def test_build_malicious_dbot_entry(self):
from CommonServerPython import build_malicious_dbot_entry, outputPaths
res = build_malicious_dbot_entry('8.8.8.8', 'ip', 'Vendor', 'Google DNS')
assert res == {outputPaths['ip']: {
'Address': '8.8.8.8', 'Malicious': {'Vendor': 'Vendor', 'Description': 'Google DNS'}}}
def test_build_malicious_dbot_entry_wrong_indicator_type(self):
from CommonServerPython import build_malicious_dbot_entry, DemistoException
with raises(DemistoException, match='Wrong indicator type'):
build_malicious_dbot_entry('8.8.8.8', 'notindicator', 'Vendor', 'Google DNS')
def test_illegal_dbot_score(self):
from CommonServerPython import build_dbot_entry, DemistoException
with raises(DemistoException, match='illegal DBot score'):
build_dbot_entry('1', 'ip', 'Vendor', 8)
def test_illegal_indicator_type(self):
from CommonServerPython import build_dbot_entry, DemistoException
with raises(DemistoException, match='illegal indicator type'):
build_dbot_entry('1', 'NOTHING', 'Vendor', 2)
def test_file_indicators(self):
from CommonServerPython import build_dbot_entry, outputPaths
res = build_dbot_entry('md5hash', 'md5', 'Vendor', 3)
assert res == {
"DBotScore": {
"Indicator": "md5hash",
"Type": "file",
"Vendor": "Vendor",
"Score": 3
},
outputPaths['file']: {
"MD5": "md5hash",
"Malicious": {
"Vendor": "Vendor",
"Description": None
}
}
}
class TestCommandResults:
def test_readable_only_context(self):
"""
Given:
- Markdown entry to CommandResults
When:
- Returning results
Then:
- Validate HumanReadable exists
"""
from CommonServerPython import CommandResults
markdown = '## Something'
context = CommandResults(readable_output=markdown).to_context()
assert context.get('HumanReadable') == markdown
def test_empty_outputs(self):
"""
Given:
- Empty outputs
When:
- Returning results
Then:
- Validate EntryContext key value
"""
from CommonServerPython import CommandResults
res = CommandResults(
outputs_prefix='FoundIndicators',
outputs_key_field='value',
outputs=[]
)
context = res.to_context()
assert {'FoundIndicators(val.value == obj.value)': []} == context.get('EntryContext')
def test_return_command_results(self):
from CommonServerPython import Common, CommandResults, EntryFormat, EntryType, DBotScoreType
dbot_score = Common.DBotScore(
indicator='8.8.8.8',
integration_name='Virus Total',
indicator_type=DBotScoreType.IP,
score=Common.DBotScore.GOOD
)
ip = Common.IP(
ip='8.8.8.8',
dbot_score=dbot_score,
asn='some asn',
hostname='test.com',
geo_country=None,
geo_description=None,
geo_latitude=None,
geo_longitude=None,
positive_engines=None,
detection_engines=None
)
results = CommandResults(
outputs_key_field=None,
outputs_prefix=None,
outputs=None,
indicators=[ip]
)
assert results.to_context() == {
'Type': EntryType.NOTE,
'ContentsFormat': EntryFormat.JSON,
'Contents': None,
'HumanReadable': None,
'EntryContext': {
'IP(val.Address && val.Address == obj.Address)': [
{
'Address': '8.8.8.8',
'ASN': 'some asn',
'Hostname': 'test.com'
}
],
'DBotScore(val.Indicator && val.Indicator == obj.Indicator && '
'val.Vendor == obj.Vendor && val.Type == obj.Type)': [
{
'Indicator': '8.8.8.8',
'Vendor': 'Virus Total',
'Score': 1,
'Type': 'ip'
}
]
}
}
def test_multiple_indicators(self):
from CommonServerPython import Common, CommandResults, EntryFormat, EntryType, DBotScoreType
dbot_score1 = Common.DBotScore(
indicator='8.8.8.8',
integration_name='Virus Total',
indicator_type=DBotScoreType.IP,
score=Common.DBotScore.GOOD
)
ip1 = Common.IP(
ip='8.8.8.8',
dbot_score=dbot_score1,
asn='some asn',
hostname='test.com',
geo_country=None,
geo_description=None,
geo_latitude=None,
geo_longitude=None,
positive_engines=None,
detection_engines=None
)
dbot_score2 = Common.DBotScore(
indicator='5.5.5.5',
integration_name='Virus Total',
indicator_type=DBotScoreType.IP,
score=Common.DBotScore.GOOD
)
ip2 = Common.IP(
ip='5.5.5.5',
dbot_score=dbot_score2,
asn='some asn',
hostname='test.com',
geo_country=None,
geo_description=None,
geo_latitude=None,
geo_longitude=None,
positive_engines=None,
detection_engines=None
)
results = CommandResults(
outputs_key_field=None,
outputs_prefix=None,
outputs=None,
indicators=[ip1, ip2]
)
assert results.to_context() == {
'Type': EntryType.NOTE,
'ContentsFormat': EntryFormat.JSON,
'Contents': None,
'HumanReadable': None,
'EntryContext': {
'IP(val.Address && val.Address == obj.Address)': [
{
'Address': '8.8.8.8',
'ASN': 'some asn',
'Hostname': 'test.com'
},
{
'Address': '5.5.5.5',
'ASN': 'some asn',
'Hostname': 'test.com'
}
],
'DBotScore(val.Indicator && val.Indicator == obj.Indicator && '
'val.Vendor == obj.Vendor && val.Type == obj.Type)': [
{
'Indicator': '8.8.8.8',
'Vendor': 'Virus Total',
'Score': 1,
'Type': 'ip'
},
{
'Indicator': '5.5.5.5',
'Vendor': 'Virus Total',
'Score': 1,
'Type': 'ip'
}
]
}
}
def test_return_list_of_items(self):
from CommonServerPython import CommandResults, EntryFormat, EntryType
tickets = [
{
'ticket_id': 1,
'title': 'foo'
},
{
'ticket_id': 2,
'title': 'goo'
}
]
results = CommandResults(
outputs_prefix='Jira.Ticket',
outputs_key_field='ticket_id',
outputs=tickets
)
assert results.to_context() == {
'Type': EntryType.NOTE,
'ContentsFormat': EntryFormat.JSON,
'Contents': tickets,
'HumanReadable': tableToMarkdown('Results', tickets),
'EntryContext': {
'Jira.Ticket(val.ticket_id == obj.ticket_id)': tickets
}
}
def test_return_list_of_items_the_old_way(self):
from CommonServerPython import CommandResults, EntryFormat, EntryType
tickets = [
{
'ticket_id': 1,
'title': 'foo'
},
{
'ticket_id': 2,
'title': 'goo'
}
]
results = CommandResults(
outputs_prefix=None,
outputs_key_field=None,
outputs={
'Jira.Ticket(val.ticket_id == obj.ticket_id)': tickets
},
raw_response=tickets
)
assert sorted(results.to_context()) == sorted({
'Type': EntryType.NOTE,
'ContentsFormat': EntryFormat.JSON,
'Contents': tickets,
'HumanReadable': None,
'EntryContext': {
'Jira.Ticket(val.ticket_id == obj.ticket_id)': tickets
}
})
def test_create_dbot_score_with_invalid_score(self):
from CommonServerPython import Common, DBotScoreType
try:
Common.DBotScore(
indicator='8.8.8.8',
integration_name='Virus Total',
score=100,
indicator_type=DBotScoreType.IP
)
assert False
except TypeError:
assert True
def test_create_domain(self):
from CommonServerPython import CommandResults, Common, EntryType, EntryFormat, DBotScoreType
dbot_score = Common.DBotScore(
indicator='somedomain.com',
integration_name='Virus Total',
indicator_type=DBotScoreType.DOMAIN,
score=Common.DBotScore.GOOD
)
domain = Common.Domain(
domain='somedomain.com',
dbot_score=dbot_score,
dns='dns.somedomain',
detection_engines=10,
positive_detections=5,
organization='Some Organization',
admin_phone='18000000',
admin_email='admin@test.com',
registrant_name='Mr Registrant',
registrar_name='Mr Registrar',
registrar_abuse_email='registrar@test.com',
creation_date='2019-01-01T00:00:00',
updated_date='2019-01-02T00:00:00',
expiration_date=None,
domain_status='ACTIVE',
name_servers=[
'PNS31.CLOUDNS.NET',
'PNS32.CLOUDNS.NET'
],
sub_domains=[
'sub-domain1.somedomain.com',
'sub-domain2.somedomain.com',
'sub-domain3.somedomain.com'
]
)
results = CommandResults(
outputs_key_field=None,
outputs_prefix=None,
outputs=None,
indicators=[domain]
)
assert results.to_context() == {
'Type': EntryType.NOTE,
'ContentsFormat': EntryFormat.JSON,
'Contents': None,
'HumanReadable': None,
'EntryContext': {
'Domain(val.Name && val.Name == obj.Name)': [
{
"Name": "somedomain.com",
"DNS": "dns.somedomain",
"DetectionEngines": 10,
"PositiveDetections": 5,
"Registrar": {
"Name": "Mr Registrar",
"AbuseEmail": "registrar@test.com",
"AbusePhone": None
},
"Registrant": {
"Name": "Mr Registrant",
"Email": None,
"Phone": None,
"Country": None
},
"Admin": {
"Name": None,
"Email": "admin@test.com",
"Phone": "18000000",
"Country": None
},
"Organization": "Some Organization",
"Subdomains": [
"sub-domain1.somedomain.com",
"sub-domain2.somedomain.com",
"sub-domain3.somedomain.com"
],
"DomainStatus": "ACTIVE",
"CreationDate": "2019-01-01T00:00:00",
"UpdatedDate": "2019-01-02T00:00:00",
"NameServers": [
"PNS31.CLOUDNS.NET",
"PNS32.CLOUDNS.NET"
],
"WHOIS": {
"Registrar": {
"Name": "Mr Registrar",
"AbuseEmail": "registrar@test.com",
"AbusePhone": None
},
"Registrant": {
"Name": "Mr Registrant",
"Email": None,
"Phone": None,
"Country": None
},
"Admin": {
"Name": None,
"Email": "admin@test.com",
"Phone": "18000000",
"Country": None
},
"DomainStatus": "ACTIVE",
"CreationDate": "2019-01-01T00:00:00",
"UpdatedDate": "2019-01-02T00:00:00",
"NameServers": [
"PNS31.CLOUDNS.NET",
"PNS32.CLOUDNS.NET"
]
}
}
],
'DBotScore(val.Indicator && val.Indicator == obj.Indicator && '
'val.Vendor == obj.Vendor && val.Type == obj.Type)': [
{
'Indicator': 'somedomain.com',
'Vendor': 'Virus Total',
'Score': 1,
'Type': 'domain'
}
]
}
}
class TestBaseClient:
from CommonServerPython import BaseClient
text = {"status": "ok"}
client = BaseClient('http://example.com/api/v2/', ok_codes=(200, 201))
RETRIES_POSITIVE_TEST = [
'get',
'put',
'post'
]
@pytest.mark.skip(reason="Test - too long, only manual")
@pytest.mark.parametrize('method', RETRIES_POSITIVE_TEST)
def test_http_requests_with_retry_sanity(self, method):
"""
Given
- A base client
When
        - Making an http request call with retries configured to a number higher than 0
Then
- Ensure a successful request return response as expected
"""
url = 'http://httpbin.org/{}'.format(method)
res = self.client._http_request(method,
'',
full_url=url,
retries=1,
status_list_to_retry=[401])
assert res['url'] == url
RETRIES_NEGATIVE_TESTS_INPUT = [
('get', 400), ('get', 401), ('get', 500),
('put', 400), ('put', 401), ('put', 500),
('post', 400), ('post', 401), ('post', 500),
]
@pytest.mark.skip(reason="Test - too long, only manual")
@pytest.mark.parametrize('method, status', RETRIES_NEGATIVE_TESTS_INPUT)
def test_http_requests_with_retry_negative_sanity(self, method, status):
"""
Given
- A base client
When
        - Making an http request call with retries configured to a number higher than 0
Then
        - An unsuccessful request raises a DemistoException regardless of the bad status code.
"""
from CommonServerPython import DemistoException
with raises(DemistoException, match='{}'.format(status)):
self.client._http_request(method,
'',
full_url='http://httpbin.org/status/{}'.format(status),
retries=3,
status_list_to_retry=[400, 401, 500])
def test_http_request_json(self, requests_mock):
requests_mock.get('http://example.com/api/v2/event', text=json.dumps(self.text))
res = self.client._http_request('get', 'event')
assert res == self.text
def test_http_request_json_negative(self, requests_mock):
from CommonServerPython import DemistoException
requests_mock.get('http://example.com/api/v2/event', text='notjson')
with raises(DemistoException, match="Failed to parse json"):
self.client._http_request('get', 'event')
def test_http_request_text(self, requests_mock):
requests_mock.get('http://example.com/api/v2/event', text=json.dumps(self.text))
res = self.client._http_request('get', 'event', resp_type='text')
assert res == json.dumps(self.text)
def test_http_request_content(self, requests_mock):
requests_mock.get('http://example.com/api/v2/event', content=str.encode(json.dumps(self.text)))
res = self.client._http_request('get', 'event', resp_type='content')
assert json.loads(res) == self.text
def test_http_request_response(self, requests_mock):
requests_mock.get('http://example.com/api/v2/event')
res = self.client._http_request('get', 'event', resp_type='response')
assert isinstance(res, requests.Response)
def test_http_request_not_ok(self, requests_mock):
from CommonServerPython import DemistoException
requests_mock.get('http://example.com/api/v2/event', status_code=500)
with raises(DemistoException, match="[500]"):
self.client._http_request('get', 'event')
def test_http_request_not_ok_but_ok(self, requests_mock):
requests_mock.get('http://example.com/api/v2/event', status_code=500)
res = self.client._http_request('get', 'event', resp_type='response', ok_codes=(500,))
assert res.status_code == 500
def test_http_request_not_ok_with_json(self, requests_mock):
from CommonServerPython import DemistoException
requests_mock.get('http://example.com/api/v2/event', status_code=500, content=str.encode(json.dumps(self.text)))
with raises(DemistoException, match="Error in API call"):
self.client._http_request('get', 'event')
def test_http_request_not_ok_with_json_parsing(self, requests_mock):
from CommonServerPython import DemistoException
requests_mock.get('http://example.com/api/v2/event', status_code=500, content=str.encode(json.dumps(self.text)))
with raises(DemistoException) as exception:
self.client._http_request('get', 'event')
message = str(exception.value)
response_json_error = json.loads(message.split('\n')[1])
assert response_json_error == self.text
def test_http_request_timeout(self, requests_mock):
from CommonServerPython import DemistoException
requests_mock.get('http://example.com/api/v2/event', exc=requests.exceptions.ConnectTimeout)
with raises(DemistoException, match="Connection Timeout Error"):
self.client._http_request('get', 'event')
def test_http_request_ssl_error(self, requests_mock):
from CommonServerPython import DemistoException
requests_mock.get('http://example.com/api/v2/event', exc=requests.exceptions.SSLError)
with raises(DemistoException, match="SSL Certificate Verification Failed"):
self.client._http_request('get', 'event', resp_type='response')
def test_http_request_proxy_error(self, requests_mock):
from CommonServerPython import DemistoException
requests_mock.get('http://example.com/api/v2/event', exc=requests.exceptions.ProxyError)
with raises(DemistoException, match="Proxy Error"):
self.client._http_request('get', 'event', resp_type='response')
def test_http_request_connection_error(self, requests_mock):
from CommonServerPython import DemistoException
requests_mock.get('http://example.com/api/v2/event', exc=requests.exceptions.ConnectionError)
with raises(DemistoException, match="Verify that the server URL parameter"):
self.client._http_request('get', 'event', resp_type='response')
def test_text_exception_parsing(self, requests_mock):
from CommonServerPython import DemistoException
reason = 'Bad Request'
text = 'additional text'
requests_mock.get('http://example.com/api/v2/event',
status_code=400,
reason=reason,
text=text)
with raises(DemistoException, match='- {}\n{}'.format(reason, text)):
self.client._http_request('get', 'event', resp_type='text')
def test_json_exception_parsing(self, requests_mock):
from CommonServerPython import DemistoException
reason = 'Bad Request'
json_response = {'error': 'additional text'}
requests_mock.get('http://example.com/api/v2/event',
status_code=400,
reason=reason,
json=json_response)
with raises(DemistoException, match='- {}\n.*{}'.format(reason, json_response["error"])):
self.client._http_request('get', 'event', resp_type='text')
def test_is_valid_ok_codes_empty(self):
from requests import Response
from CommonServerPython import BaseClient
new_client = BaseClient('http://example.com/api/v2/')
response = Response()
response.status_code = 200
assert new_client._is_status_code_valid(response, None)
def test_is_valid_ok_codes_from_function(self):
from requests import Response
response = Response()
response.status_code = 200
assert self.client._is_status_code_valid(response, (200, 201))
def test_is_valid_ok_codes_from_self(self):
from requests import Response
response = Response()
response.status_code = 200
assert self.client._is_status_code_valid(response, None)
def test_is_valid_ok_codes_empty_false(self):
from requests import Response
response = Response()
response.status_code = 400
assert not self.client._is_status_code_valid(response, None)
def test_is_valid_ok_codes_from_function_false(self):
from requests import Response
response = Response()
response.status_code = 400
assert not self.client._is_status_code_valid(response, (200, 201))
def test_is_valid_ok_codes_from_self_false(self):
from requests import Response
response = Response()
response.status_code = 400
assert not self.client._is_status_code_valid(response)
def test_parse_date_string():
# test unconverted data remains: Z
assert parse_date_string('2019-09-17T06:16:39Z') == datetime(2019, 9, 17, 6, 16, 39)
# test unconverted data remains: .22Z
assert parse_date_string('2019-09-17T06:16:39.22Z') == datetime(2019, 9, 17, 6, 16, 39, 220000)
# test time data without ms does not match format with ms
assert parse_date_string('2019-09-17T06:16:39Z', '%Y-%m-%dT%H:%M:%S.%f') == datetime(2019, 9, 17, 6, 16, 39)
# test time data with timezone Z does not match format with timezone +05:00
assert parse_date_string('2019-09-17T06:16:39Z', '%Y-%m-%dT%H:%M:%S+05:00') == datetime(2019, 9, 17, 6, 16, 39)
# test time data with timezone +05:00 does not match format with timezone Z
assert parse_date_string('2019-09-17T06:16:39+05:00', '%Y-%m-%dT%H:%M:%SZ') == datetime(2019, 9, 17, 6, 16, 39)
# test time data with timezone -05:00 and with ms does not match format with timezone +02:00 without ms
assert parse_date_string(
'2019-09-17T06:16:39.4040+05:00', '%Y-%m-%dT%H:%M:%S+02:00'
) == datetime(2019, 9, 17, 6, 16, 39, 404000)
def test_override_print(mocker):
mocker.patch.object(demisto, 'info')
int_logger = IntegrationLogger()
int_logger.set_buffering(False)
int_logger.print_override("test", "this")
assert demisto.info.call_count == 1
assert demisto.info.call_args[0][0] == "test this"
demisto.info.reset_mock()
int_logger.print_override("test", "this", file=sys.stderr)
assert demisto.info.call_count == 1
assert demisto.info.call_args[0][0] == "test this"
buf = StringIO()
# test writing to custom file (not stdout/stderr)
int_logger.print_override("test", "this", file=buf)
assert buf.getvalue() == 'test this\n'
def test_http_client_debug(mocker):
if not IS_PY3:
pytest.skip("test not supported in py2")
return
mocker.patch.object(demisto, 'info')
debug_log = DebugLogger()
from http.client import HTTPConnection
HTTPConnection.debuglevel = 1
con = HTTPConnection("google.com")
con.request('GET', '/')
r = con.getresponse()
r.read()
assert demisto.info.call_count > 5
assert debug_log is not None
def test_parse_date_range():
utc_now = datetime.utcnow()
utc_start_time, utc_end_time = parse_date_range('2 days', utc=True)
# testing UTC date time and range of 2 days
assert utc_now.replace(microsecond=0) == utc_end_time.replace(microsecond=0)
assert abs(utc_start_time - utc_end_time).days == 2
local_now = datetime.now()
local_start_time, local_end_time = parse_date_range('73 minutes', utc=False)
# testing local datetime and range of 73 minutes
assert local_now.replace(microsecond=0) == local_end_time.replace(microsecond=0)
assert abs(local_start_time - local_end_time).seconds / 60 == 73
def test_encode_string_results():
s = "test"
assert s == encode_string_results(s)
s2 = u"בדיקה"
if IS_PY3:
res = str(s2)
else:
res = s2.encode("utf8")
assert encode_string_results(s2) == res
not_string = [1, 2, 3]
assert not_string == encode_string_results(not_string)
class TestReturnOutputs:
def test_return_outputs(self, mocker):
mocker.patch.object(demisto, 'results')
md = 'md'
outputs = {'Event': 1}
raw_response = {'event': 1}
return_outputs(md, outputs, raw_response)
results = demisto.results.call_args[0][0]
assert len(demisto.results.call_args[0]) == 1
assert demisto.results.call_count == 1
assert raw_response == results['Contents']
assert outputs == results['EntryContext']
assert md == results['HumanReadable']
def test_return_outputs_only_md(self, mocker):
mocker.patch.object(demisto, 'results')
md = 'md'
return_outputs(md)
results = demisto.results.call_args[0][0]
assert len(demisto.results.call_args[0]) == 1
assert demisto.results.call_count == 1
assert md == results['HumanReadable']
assert 'text' == results['ContentsFormat']
def test_return_outputs_raw_none(self, mocker):
mocker.patch.object(demisto, 'results')
md = 'md'
outputs = {'Event': 1}
return_outputs(md, outputs, None)
results = demisto.results.call_args[0][0]
assert len(demisto.results.call_args[0]) == 1
assert demisto.results.call_count == 1
assert outputs == results['Contents']
assert outputs == results['EntryContext']
assert md == results['HumanReadable']
def test_return_outputs_timeline(self, mocker):
mocker.patch.object(demisto, 'results')
md = 'md'
outputs = {'Event': 1}
raw_response = {'event': 1}
timeline = [{'Value': 'blah', 'Message': 'test', 'Category': 'test'}]
return_outputs(md, outputs, raw_response, timeline)
results = demisto.results.call_args[0][0]
assert len(demisto.results.call_args[0]) == 1
assert demisto.results.call_count == 1
assert raw_response == results['Contents']
assert outputs == results['EntryContext']
assert md == results['HumanReadable']
assert timeline == results['IndicatorTimeline']
def test_return_outputs_timeline_without_category(self, mocker):
mocker.patch.object(demisto, 'results')
md = 'md'
outputs = {'Event': 1}
raw_response = {'event': 1}
timeline = [{'Value': 'blah', 'Message': 'test'}]
return_outputs(md, outputs, raw_response, timeline)
results = demisto.results.call_args[0][0]
assert len(demisto.results.call_args[0]) == 1
assert demisto.results.call_count == 1
assert raw_response == results['Contents']
assert outputs == results['EntryContext']
assert md == results['HumanReadable']
assert 'Category' in results['IndicatorTimeline'][0].keys()
assert results['IndicatorTimeline'][0]['Category'] == 'Integration Update'
def test_return_outputs_ignore_auto_extract(self, mocker):
mocker.patch.object(demisto, 'results')
md = 'md'
outputs = {'Event': 1}
raw_response = {'event': 1}
ignore_auto_extract = True
return_outputs(md, outputs, raw_response, ignore_auto_extract=ignore_auto_extract)
results = demisto.results.call_args[0][0]
assert len(demisto.results.call_args[0]) == 1
assert demisto.results.call_count == 1
assert raw_response == results['Contents']
assert outputs == results['EntryContext']
assert md == results['HumanReadable']
assert ignore_auto_extract == results['IgnoreAutoExtract']
def test_argToBoolean():
assert argToBoolean('true') is True
assert argToBoolean('yes') is True
assert argToBoolean('TrUe') is True
assert argToBoolean(True) is True
assert argToBoolean('false') is False
assert argToBoolean('no') is False
assert argToBoolean(False) is False
batch_params = [
# full batch case
([1, 2, 3], 1, [[1], [2], [3]]),
# empty case
([], 1, []),
# out of index case
([1, 2, 3], 5, [[1, 2, 3]]),
# out of index in end with batches
([1, 2, 3, 4, 5], 2, [[1, 2], [3, 4], [5]]),
([1] * 100, 2, [[1, 1]] * 50)
]
@pytest.mark.parametrize('iterable, sz, expected', batch_params)
def test_batch(iterable, sz, expected):
for i, item in enumerate(batch(iterable, sz)):
assert expected[i] == item
regexes_test = [
(ipv4Regex, '192.168.1.1', True),
(ipv4Regex, '192.168.1.1/24', False),
(ipv4Regex, '192.168.a.1', False),
(ipv4Regex, '192.168..1.1', False),
(ipv4Regex, '192.256.1.1', False),
(ipv4Regex, '192.256.1.1.1', False),
(ipv4cidrRegex, '192.168.1.1/32', True),
(ipv4cidrRegex, '192.168.1.1.1/30', False),
(ipv4cidrRegex, '192.168.1.b/30', False),
(ipv4cidrRegex, '192.168.1.12/381', False),
(ipv6Regex, '2001:db8:a0b:12f0::1', True),
(ipv6Regex, '2001:db8:a0b:12f0::1/11', False),
(ipv6Regex, '2001:db8:a0b:12f0::1::1', False),
(ipv6Regex, '2001:db8:a0b:12f0::98aa5', False),
(ipv6cidrRegex, '2001:db8:a0b:12f0::1/64', True),
(ipv6cidrRegex, '2001:db8:a0b:12f0::1/256', False),
(ipv6cidrRegex, '2001:db8:a0b:12f0::1::1/25', False),
(ipv6cidrRegex, '2001:db8:a0b:12f0::1aaasds::1/1', False)
]
@pytest.mark.parametrize('pattern, string, expected', regexes_test)
def test_regexes(pattern, string, expected):
# (str, str, bool) -> None
# emulates re.fullmatch from py3.4
assert expected is bool(re.match("(?:" + pattern + r")\Z", string))
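    # Wrapping the pattern in a non-capturing group and anchoring it with \Z makes
    # re.match require a match over the entire string, which is what re.fullmatch
    # provides natively on Python 3.4+.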
IP_TO_INDICATOR_TYPE_PACK = [
('192.168.1.1', FeedIndicatorType.IP),
('192.168.1.1/32', FeedIndicatorType.CIDR),
('2001:db8:a0b:12f0::1', FeedIndicatorType.IPv6),
('2001:db8:a0b:12f0::1/64', FeedIndicatorType.IPv6CIDR),
]
@pytest.mark.parametrize('ip, indicator_type', IP_TO_INDICATOR_TYPE_PACK)
def test_ip_to_indicator(ip, indicator_type):
assert FeedIndicatorType.ip_to_indicator_type(ip) is indicator_type
data_test_b64_encode = [
(u'test', 'dGVzdA=='),
('test', 'dGVzdA=='),
(b'test', 'dGVzdA=='),
('', ''),
('%', 'JQ=='),
(u'§', 'wqc='),
(u'§t`e§s`t§', 'wqd0YGXCp3NgdMKn'),
]
@pytest.mark.parametrize('_input, expected_output', data_test_b64_encode)
def test_b64_encode(_input, expected_output):
output = b64_encode(_input)
assert output == expected_output, 'b64_encode({}) returns: {} instead: {}'.format(_input, output, expected_output)
def test_traceback_in_return_error_debug_mode_on(mocker):
mocker.patch.object(demisto, 'command', return_value="test-command")
mocker.spy(demisto, 'results')
mocker.patch('CommonServerPython.is_debug_mode', return_value=True)
from CommonServerPython import return_error
try:
raise Exception("This is a test string")
except Exception:
with pytest.raises(SystemExit):
return_error("some text")
assert "This is a test string" in str(demisto.results.call_args)
assert "Traceback" in str(demisto.results.call_args)
assert "some text" in str(demisto.results.call_args)
def test_traceback_in_return_error_debug_mode_off(mocker):
mocker.patch.object(demisto, 'command', return_value="test-command")
mocker.spy(demisto, 'results')
mocker.patch('CommonServerPython.is_debug_mode', return_value=False)
from CommonServerPython import return_error
try:
raise Exception("This is a test string")
except Exception:
with pytest.raises(SystemExit):
return_error("some text")
assert "This is a test string" not in str(demisto.results.call_args)
assert "Traceback" not in str(demisto.results.call_args)
assert "some text" in str(demisto.results.call_args)
# append_context unit test
CONTEXT_MOCK = {
'str_key': 'str_value',
'dict_key': {
'key1': 'val1',
'key2': 'val2'
},
'int_key': 1,
'list_key_str': ['val1', 'val2'],
'list_key_list': ['val1', 'val2'],
'list_key_dict': ['val1', 'val2']
}
UPDATED_CONTEXT = {
'str_key': 'str_data,str_value',
'dict_key': {
'key1': 'val1',
'key2': 'val2',
'data_key': 'data_val'
},
'int_key': [1, 2],
'list_key_str': ['val1', 'val2', 'str_data'],
'list_key_list': ['val1', 'val2', 'val1', 'val2'],
'list_key_dict': ['val1', 'val2', {'data_key': 'data_val'}]
}
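# UPDATED_CONTEXT above is CONTEXT_MOCK after appending the DATA_MOCK_* values below:
# strings are comma-joined, lists are extended, dicts are merged, and ints are
# collected into a list.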
DATA_MOCK_STRING = "str_data"
DATA_MOCK_LIST = ['val1', 'val2']
DATA_MOCK_DICT = {
'data_key': 'data_val'
}
DATA_MOCK_INT = 2
STR_KEY = "str_key"
DICT_KEY = "dict_key"
APPEND_CONTEXT_INPUT = [
(CONTEXT_MOCK, DATA_MOCK_STRING, STR_KEY, "key = {}, val = {}".format(STR_KEY, UPDATED_CONTEXT[STR_KEY])),
(CONTEXT_MOCK, DATA_MOCK_LIST, STR_KEY, "TypeError"),
(CONTEXT_MOCK, DATA_MOCK_DICT, STR_KEY, "TypeError"),
(CONTEXT_MOCK, DATA_MOCK_STRING, DICT_KEY, "TypeError"),
(CONTEXT_MOCK, DATA_MOCK_LIST, DICT_KEY, "TypeError"),
(CONTEXT_MOCK, DATA_MOCK_DICT, DICT_KEY, "key = {}, val = {}".format(DICT_KEY, UPDATED_CONTEXT[DICT_KEY])),
(CONTEXT_MOCK, DATA_MOCK_STRING, 'list_key_str',
"key = {}, val = {}".format('list_key_str', UPDATED_CONTEXT['list_key_str'])),
(CONTEXT_MOCK, DATA_MOCK_LIST, 'list_key_list',
"key = {}, val = {}".format('list_key_list', UPDATED_CONTEXT['list_key_list'])),
(CONTEXT_MOCK, DATA_MOCK_DICT, 'list_key_dict',
"key = {}, val = {}".format('list_key_dict', UPDATED_CONTEXT['list_key_dict'])),
(CONTEXT_MOCK, DATA_MOCK_INT, 'int_key', "key = {}, val = {}".format('int_key', UPDATED_CONTEXT['int_key'])),
]
def get_set_context(key, val):
from CommonServerPython import return_error
return_error("key = {}, val = {}".format(key, val))
@pytest.mark.parametrize('context_mock, data_mock, key, expected_answer', APPEND_CONTEXT_INPUT)
def test_append_context(mocker, context_mock, data_mock, key, expected_answer):
from CommonServerPython import demisto
mocker.patch.object(demisto, 'get', return_value=context_mock.get(key))
mocker.patch.object(demisto, 'setContext', side_effect=get_set_context)
mocker.patch.object(demisto, 'results')
if "TypeError" not in expected_answer:
with raises(SystemExit, match='0'):
appendContext(key, data_mock)
assert expected_answer in demisto.results.call_args[0][0]['Contents']
else:
with raises(TypeError) as e:
appendContext(key, data_mock)
assert expected_answer in e.value
INDICATOR_VALUE_AND_TYPE = [
('3fec1b14cea32bbcd97fad4507b06888', "File"),
('1c8893f75089a27ca6a8d49801d7aa6b64ea0c6167fe8b1becfe9bc13f47bdc1', 'File'),
('castaneda-thornton.com', 'Domain'),
('192.0.0.1', 'IP'),
('test@gmail.com', 'Email'),
('e775eb1250137c0b83d4e7c4549c71d6f10cae4e708ebf0b5c4613cbd1e91087', 'File'),
('test@yahoo.com', 'Email'),
('http://test.com', 'URL'),
('11.111.11.11/11', 'CIDR'),
('CVE-0000-0000', 'CVE'),
('dbot@demisto.works', 'Email'),
('37b6d02m-63e0-495e-kk92-7c21511adc7a@SB2APC01FT091.outlook.com', 'Email'),
('dummy@recipient.com', 'Email'),
('image003.gif@01CF4D7F.1DF62650', 'Email'),
('bruce.wayne@pharmtech.zz', 'Email'),
('joe@gmail.com', 'Email'),
('koko@demisto.com', 'Email'),
('42a5e275559a1651b3df8e15d3f5912499f0f2d3d1523959c56fc5aea6371e59', 'File'),
('10676cf66244cfa91567fbc1a937f4cb19438338b35b69d4bcc2cf0d3a44af5e', 'File'),
('52483514f07eb14570142f6927b77deb7b4da99f', 'File'),
('c8092abd8d581750c0530fa1fc8d8318', 'File'),
('fe80:0000:0000:0000:91ba:7558:26d3:acde', 'IPv6'),
('fd60:e22:f1b9::2', 'IPv6'),
('2001:db8:0000:0000:0000:0000:0000:0000', 'IPv6'),
('112.126.94.107', 'IP'),
('a', None),
('*castaneda-thornton.com', 'DomainGlob')
]
@pytest.mark.parametrize('indicator_value, indicator_type', INDICATOR_VALUE_AND_TYPE)
def test_auto_detect_indicator_type(indicator_value, indicator_type):
"""
Given
- Indicator value
- Indicator type
When
- Trying to detect the type of an indicator.
Then
- Run the auto_detect_indicator_type and validate that the indicator type the function returns is as expected.
"""
if sys.version_info.major == 3 and sys.version_info.minor == 8:
        assert auto_detect_indicator_type(indicator_value) == indicator_type
else:
try:
auto_detect_indicator_type(indicator_value)
except Exception as e:
assert str(e) == "Missing tldextract module, In order to use the auto detect function please" \
" use a docker image with it installed such as: demisto/jmespath"
def test_handle_proxy(mocker):
os.environ['REQUESTS_CA_BUNDLE'] = '/test1.pem'
mocker.patch.object(demisto, 'params', return_value={'insecure': True})
handle_proxy()
assert os.getenv('REQUESTS_CA_BUNDLE') is None
os.environ['REQUESTS_CA_BUNDLE'] = '/test2.pem'
mocker.patch.object(demisto, 'params', return_value={})
handle_proxy()
assert os.environ['REQUESTS_CA_BUNDLE'] == '/test2.pem' # make sure no change
mocker.patch.object(demisto, 'params', return_value={'unsecure': True})
handle_proxy()
assert os.getenv('REQUESTS_CA_BUNDLE') is None
@pytest.mark.parametrize(argnames="dict_obj, keys, expected, default_return_value",
argvalues=[
({'a': '1'}, ['a'], '1', None),
({'a': {'b': '2'}}, ['a', 'b'], '2', None),
({'a': {'b': '2'}}, ['a', 'c'], 'test', 'test'),
])
def test_safe_get(dict_obj, keys, expected, default_return_value):
from CommonServerPython import dict_safe_get
assert expected == dict_safe_get(dict_object=dict_obj,
keys=keys,
default_return_value=default_return_value)
MIRRORS = '''
[{
"channel_id":"GKQ86DVPH",
"channel_name": "incident-681",
"channel_topic": "incident-681",
"investigation_id":"681",
"mirror_type":"all",
"mirror_direction":"both",
"mirror_to":"group",
"auto_close":true,
"mirrored":true
},
{
"channel_id":"GKB19PA3V",
"channel_name": "group2",
"channel_topic": "cooltopic",
"investigation_id":"684",
"mirror_type":"all",
"mirror_direction":"both",
"mirror_to":"group",
"auto_close":true,
"mirrored":true
},
{
"channel_id":"GKB19PA3V",
"channel_name": "group2",
"channel_topic": "cooltopic",
"investigation_id":"692",
"mirror_type":"all",
"mirror_direction":"both",
"mirror_to":"group",
"auto_close":true,
"mirrored":true
},
{
"channel_id":"GKNEJU4P9",
"channel_name": "group3",
"channel_topic": "incident-713",
"investigation_id":"713",
"mirror_type":"all",
"mirror_direction":"both",
"mirror_to":"group",
"auto_close":true,
"mirrored":true
},
{
"channel_id":"GL8GHC0LV",
"channel_name": "group5",
"channel_topic": "incident-734",
"investigation_id":"734",
"mirror_type":"all",
"mirror_direction":"both",
"mirror_to":"group",
"auto_close":true,
"mirrored":true
}]
'''
CONVERSATIONS = '''[{
"id": "C012AB3CD",
"name": "general",
"is_channel": true,
"is_group": false,
"is_im": false,
"created": 1449252889,
"creator": "U012A3CDE",
"is_archived": false,
"is_general": true,
"unlinked": 0,
"name_normalized": "general",
"is_shared": false,
"is_ext_shared": false,
"is_org_shared": false,
"pending_shared": [],
"is_pending_ext_shared": false,
"is_member": true,
"is_private": false,
"is_mpim": false,
"topic": {
"value": "Company-wide announcements and work-based matters",
"creator": "",
"last_set": 0
},
"purpose": {
"value": "This channel is for team-wide communication and announcements. All team members are in this channel.",
"creator": "",
"last_set": 0
},
"previous_names": [],
"num_members": 4
},
{
"id": "C061EG9T2",
"name": "random",
"is_channel": true,
"is_group": false,
"is_im": false,
"created": 1449252889,
"creator": "U061F7AUR",
"is_archived": false,
"is_general": false,
"unlinked": 0,
"name_normalized": "random",
"is_shared": false,
"is_ext_shared": false,
"is_org_shared": false,
"pending_shared": [],
"is_pending_ext_shared": false,
"is_member": true,
"is_private": false,
"is_mpim": false,
"topic": {
"value": "Non-work banter and water cooler conversation",
"creator": "",
"last_set": 0
},
"purpose": {
"value": "A place for non-work-related flimflam.",
"creator": "",
"last_set": 0
},
"previous_names": [],
"num_members": 4
}]'''
OBJECTS_TO_KEYS = {
'mirrors': 'investigation_id',
'questions': 'entitlement',
'users': 'id'
}
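# The two helpers below emulate the server-side versioned integration context store
# (getIntegrationContextVersioned/setIntegrationContextVersioned) with a module-level
# global, so the tests can track both the stored context and its version number.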
def set_integration_context_versioned(integration_context, version=-1, sync=False):
global INTEGRATION_CONTEXT_VERSIONED
try:
if not INTEGRATION_CONTEXT_VERSIONED:
INTEGRATION_CONTEXT_VERSIONED = {'context': '{}', 'version': 0}
except NameError:
INTEGRATION_CONTEXT_VERSIONED = {'context': '{}', 'version': 0}
current_version = INTEGRATION_CONTEXT_VERSIONED['version']
if version != -1 and version <= current_version:
raise ValueError('DB Insert version {} does not match version {}'.format(current_version, version))
INTEGRATION_CONTEXT_VERSIONED = {'context': integration_context, 'version': current_version + 1}
def get_integration_context_versioned(refresh=False):
return INTEGRATION_CONTEXT_VERSIONED
def test_merge_lists():
from CommonServerPython import merge_lists
# Set
original = [{'id': '1', 'updated': 'n'}, {'id': '2', 'updated': 'n'}, {'id': '11', 'updated': 'n'}]
updated = [{'id': '1', 'updated': 'y'}, {'id': '3', 'updated': 'y'}, {'id': '11', 'updated': 'n', 'remove': True}]
expected = [{'id': '1', 'updated': 'y'}, {'id': '2', 'updated': 'n'}, {'id': '3', 'updated': 'y'}]
# Arrange
result = merge_lists(original, updated, 'id')
# Assert
assert len(result) == len(expected)
for obj in result:
assert obj in expected
@pytest.mark.parametrize('version, expected', [({'version': '5.5.0'}, False), ({'version': '6.0.0'}, True)])
def test_is_versioned_context_available(mocker, version, expected):
from CommonServerPython import is_versioned_context_available
# Set
mocker.patch.object(demisto, 'demistoVersion', return_value=version)
# Arrange
result = is_versioned_context_available()
get_demisto_version._version = None
# Assert
assert expected == result
def test_update_context_merge(mocker):
import CommonServerPython
# Set
set_integration_context_versioned({
'mirrors': MIRRORS,
'conversations': CONVERSATIONS
})
mocker.patch.object(demisto, 'getIntegrationContextVersioned', return_value=get_integration_context_versioned())
    mocker.patch.object(demisto, 'setIntegrationContextVersioned', side_effect=set_integration_context_versioned)
mocker.patch.object(CommonServerPython, 'is_versioned_context_available', return_value=True)
new_mirror = {
'channel_id': 'new_group',
'channel_name': 'incident-999',
'channel_topic': 'incident-999',
'investigation_id': '999',
'mirror_type': 'all',
'mirror_direction': 'both',
'mirror_to': 'group',
'auto_close': True,
'mirrored': False
}
mirrors = json.loads(MIRRORS)
mirrors.extend([new_mirror])
# Arrange
context, version = CommonServerPython.update_integration_context({'mirrors': [new_mirror]}, OBJECTS_TO_KEYS, True)
new_mirrors = json.loads(context['mirrors'])
# Assert
assert len(mirrors) == len(new_mirrors)
for mirror in mirrors:
assert mirror in new_mirrors
assert version == get_integration_context_versioned()['version']
def test_update_context_no_merge(mocker):
import CommonServerPython
# Set
set_integration_context_versioned({
'mirrors': MIRRORS,
'conversations': CONVERSATIONS
})
mocker.patch.object(demisto, 'getIntegrationContextVersioned', return_value=get_integration_context_versioned())
    mocker.patch.object(demisto, 'setIntegrationContextVersioned', side_effect=set_integration_context_versioned)
mocker.patch.object(CommonServerPython, 'is_versioned_context_available', return_value=True)
new_conversation = {
'id': 'A0123456',
'name': 'general'
}
conversations = json.loads(CONVERSATIONS)
conversations.extend([new_conversation])
# Arrange
context, version = CommonServerPython.update_integration_context({'conversations': conversations}, OBJECTS_TO_KEYS, True)
new_conversations = json.loads(context['conversations'])
# Assert
assert conversations == new_conversations
assert version == get_integration_context_versioned()['version']
@pytest.mark.parametrize('versioned_available', [True, False])
def test_get_latest_integration_context(mocker, versioned_available):
import CommonServerPython
# Set
set_integration_context_versioned({
'mirrors': MIRRORS,
'conversations': CONVERSATIONS
})
mocker.patch.object(demisto, 'getIntegrationContextVersioned', return_value=get_integration_context_versioned())
    mocker.patch.object(demisto, 'setIntegrationContextVersioned', side_effect=set_integration_context_versioned)
mocker.patch.object(CommonServerPython, 'is_versioned_context_available', return_value=versioned_available)
mocker.patch.object(demisto, 'getIntegrationContext',
return_value={'mirrors': MIRRORS, 'conversations': CONVERSATIONS})
# Arrange
context, ver = CommonServerPython.get_integration_context_with_version(True)
# Assert
assert context == get_integration_context_versioned()['context']
    assert ver == (get_integration_context_versioned()['version'] if versioned_available else -1)
def test_set_latest_integration_context(mocker):
import CommonServerPython
# Set
set_integration_context_versioned({
'mirrors': MIRRORS,
'conversations': CONVERSATIONS,
})
mocker.patch.object(demisto, 'getIntegrationContextVersioned', return_value=get_integration_context_versioned())
    mocker.patch.object(demisto, 'setIntegrationContextVersioned', side_effect=set_integration_context_versioned)
int_context = get_integration_context_versioned()
mocker.patch.object(CommonServerPython, 'update_integration_context',
side_effect=[(int_context['context'], int_context['version']),
(int_context['context'], int_context['version'] + 1)])
mocker.patch.object(CommonServerPython, 'set_integration_context', side_effect=[ValueError, int_context['context']])
# Arrange
CommonServerPython.set_to_integration_context_with_retries({}, OBJECTS_TO_KEYS)
int_context_calls = CommonServerPython.set_integration_context.call_count
int_context_args_1 = CommonServerPython.set_integration_context.call_args_list[0][0]
int_context_args_2 = CommonServerPython.set_integration_context.call_args_list[1][0]
# Assert
assert int_context_calls == 2
assert int_context_args_1 == (int_context['context'], True, int_context['version'])
assert int_context_args_2 == (int_context['context'], True, int_context['version'] + 1)
def test_set_latest_integration_context_fail(mocker):
import CommonServerPython
# Set
set_integration_context_versioned({
'mirrors': MIRRORS,
'conversations': CONVERSATIONS,
})
mocker.patch.object(demisto, 'getIntegrationContextVersioned', return_value=get_integration_context_versioned())
    mocker.patch.object(demisto, 'setIntegrationContextVersioned', side_effect=set_integration_context_versioned)
int_context = get_integration_context_versioned()
mocker.patch.object(CommonServerPython, 'update_integration_context', return_value=(int_context['context'],
int_context['version']))
mocker.patch.object(CommonServerPython, 'set_integration_context', side_effect=ValueError)
# Arrange
with pytest.raises(Exception):
CommonServerPython.set_to_integration_context_with_retries({}, OBJECTS_TO_KEYS)
int_context_calls = CommonServerPython.set_integration_context.call_count
# Assert
assert int_context_calls == CommonServerPython.CONTEXT_UPDATE_RETRY_TIMES
| 34.970938
| 125
| 0.612624
|
4ebe383a0d41b34b37627411d00a44d56a8457d1
| 3,073
|
py
|
Python
|
codes/minimal_shtns.py
|
Mr-Markovian/SphericalHarmonics_for_StokesFlow
|
f1577426bcc3805f3b3727b0cb607daa87576b03
|
[
"MIT"
] | null | null | null |
codes/minimal_shtns.py
|
Mr-Markovian/SphericalHarmonics_for_StokesFlow
|
f1577426bcc3805f3b3727b0cb607daa87576b03
|
[
"MIT"
] | null | null | null |
codes/minimal_shtns.py
|
Mr-Markovian/SphericalHarmonics_for_StokesFlow
|
f1577426bcc3805f3b3727b0cb607daa87576b03
|
[
"MIT"
] | null | null | null |
#We can compute the bulk flow, i.e. the flow field inside the sphere at a radius r,
#given the velocity field at the surface (radius R); l is a 1-D array of degrees.
#The l=0,1 modes are not possible for Vlm_y and Vlm_psi; l=0 is not possible for Vlm_phi.
def modify_(a,b,c,l):
x=np.where(l==1)
y=np.where(l==0)
for i in y:
a[i]=complex(0,0)
b[i]=complex(0,0)
c[i]=complex(0,0)
for i in x:
a[i]=complex(0,0)
b[i]=complex(0,0)
return a,b,c
#To compute the bulk flow from the analytical results
def bulkflow(r_,Alm_,Clm_,l,R):
#vslm, vtlm = sht.analys(vtheta_s, vphi_s)
    #Computing the coefficients
#Alm = vtlm/(R**l)
#Clm = (l*(l+1)* vslm)/(2*(R**(l+1)))
r_lplus1 =r_**(l+1) #calculated r^(l+1)
r_lminus1 =r_**(l-1) #Calculated r^(l-1)
Vlm_phi = Alm_*(r_**l)
Vlm_psi = (Clm_*(-(R**2)*(r_lminus1)+(r_lplus1*(l+3)/(l+1)))/l)
Vlm_y = Clm_*(r_lplus1- (R**2)*r_lminus1)
    #the disallowed modes are removed by setting those coefficients to zero.
#Vlm_y_new,Vlm_psi_new,Vlm_phi_new=modify_(Vlm_y,Vlm_psi,Vlm_phi,l)
vr,vtheta,vphi = sht.synth(Vlm_y,Vlm_psi,Vlm_phi)
return vr,vtheta,vphi
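#Minimal usage sketch (mirrors the script below; assumes an initialised shtns
#transform `sht` and surface velocity fields sampled on its grid):
#    vslm, vtlm = sht.analys(vtheta_s, vphi_s)         # spheroidal/toroidal coefficients
#    Alm = vtlm / (R**l)
#    Clm = l * (l + 1) * vslm / (2 * R**(l + 1))
#    vr, vtheta, vphi = bulkflow(0.5, Alm, Clm, l, R)  # interior flow at r = 0.5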
import numpy as np
import shtns
import cmath
from mayavi.mlab import *
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.cm import get_cmap, viridis
values = np.linspace(0., 1., 256)
lut = 255*get_cmap(viridis)(values.copy())
from mayavi import mlab
r_=np.linspace(0.2,1.,5)
lmax = 16
mmax=lmax
sht = shtns.sht(lmax, mmax)
nlats, nlons = sht.set_grid()
theta_vals = np.arccos(sht.cos_theta)
phi_vals = (2.0*np.pi/nlons)*np.arange(nlons)
phi, theta = np.meshgrid(phi_vals, theta_vals)
l = sht.l
m = sht.m
l2 = l * (l + 1)
# given surface flow
vtheta_s =np.sin(theta)#np.zeros_like(theta)#np.sin(theta)*np.sin(phi)#
vphi_s =np.zeros_like(theta)#np.sin(theta)*np.cos(phi)##np.sin(theta)*np.cos(theta)
vslm, vtlm = sht.analys(vtheta_s, vphi_s)
R=1
Alm = vtlm/(R**l)
Clm = (l*(l+1)* vslm)/(2*(R**(l+1)))
#phi, theta = np.meshgrid(
# np.linspace(0, 2 * np.pi, nlons ),
# np.linspace(0, np.pi, nlats))
#Convert the spherical-coordinate velocity field into a Cartesian velocity field
#now plot the velocity field on the sphere
for i in range(len(r_)):
ur,utheta,uphi=bulkflow(r_[i],Alm,Clm,l,R)
x =r_[i]*np.sin(theta)*np.cos(phi)
y =r_[i]*np.sin(theta)*np.sin(phi)
z =r_[i]*np.cos(theta)
ux =ur*np.sin(theta)*np.cos(phi)+utheta*np.cos(theta)*np.cos(phi)-uphi*np.sin(phi)
uy =ur*np.sin(theta)*np.sin(phi)+utheta*np.cos(theta)*np.sin(phi)+uphi*np.cos(phi)
uz =ur*np.cos(theta)-utheta*np.sin(theta)
speed=np.sqrt(ux**2+uy**2+uz**2)
#mlab.quiver3d(x, y, z,ux,uy,uz,colormap='jet')
#mlab.vectorbar(title='Vector color mapping',orientation='vertical')
#mlab.show()
#def stress()
| 31.040404
| 128
| 0.59616
|
92cd681dcda45f7aa47d5fb42821e692cb28f03d
| 527
|
py
|
Python
|
helloExcel/temp.py
|
a18792721831/StudyPython
|
2e266f49534017cc1097382b93a9f25e62a4af19
|
[
"Apache-2.0"
] | 2
|
2019-05-07T00:30:10.000Z
|
2019-05-07T00:30:13.000Z
|
helloExcel/temp.py
|
a18792721831/StudyPython
|
2e266f49534017cc1097382b93a9f25e62a4af19
|
[
"Apache-2.0"
] | null | null | null |
helloExcel/temp.py
|
a18792721831/StudyPython
|
2e266f49534017cc1097382b93a9f25e62a4af19
|
[
"Apache-2.0"
] | 1
|
2021-01-26T11:29:17.000Z
|
2021-01-26T11:29:17.000Z
|
from xlrd import open_workbook
from createSql import getSql
def excel2Sql():
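    """Read rows from the first sheet of a user-supplied .xlsx file and append one
    SQL statement per row (built by getSql with a user-supplied author) to a .sql
    file with the same base name."""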
print('Please input file path:')
filePath = input()
excelFile = open_workbook(filePath)
sheet = excelFile.sheet_by_index(0)
print('Please input author:')
author = input()
with open(filePath.replace('.xlsx', '.sql'), 'a') as file:
for r in range(1, sheet.nrows):
row = sheet.row_values(r)
file.write(getSql(int(row[0]), row[1], row[2], author))
if __name__ == "__main__":
excel2Sql()
| 27.736842
| 67
| 0.633776
|
1c29623970cc411e5ac5fc1a68a6bbaf69dc0ffa
| 8,502
|
py
|
Python
|
SBaaS_MFA/stage02_isotopomer_fittedNetFluxDifferences_execute.py
|
dmccloskey/SBaaS_MFA
|
005e1d34c2ace7e28c53dffcab3e9cb8c7e7ce18
|
[
"MIT"
] | null | null | null |
SBaaS_MFA/stage02_isotopomer_fittedNetFluxDifferences_execute.py
|
dmccloskey/SBaaS_MFA
|
005e1d34c2ace7e28c53dffcab3e9cb8c7e7ce18
|
[
"MIT"
] | null | null | null |
SBaaS_MFA/stage02_isotopomer_fittedNetFluxDifferences_execute.py
|
dmccloskey/SBaaS_MFA
|
005e1d34c2ace7e28c53dffcab3e9cb8c7e7ce18
|
[
"MIT"
] | null | null | null |
#SBaaS
from .stage02_isotopomer_fittedNetFluxDifferences_io import stage02_isotopomer_fittedNetFluxDifferences_io
from .stage02_isotopomer_fittedNetFluxes_query import stage02_isotopomer_fittedNetFluxes_query
#Resources
from genomeScale_MFA.MFA_methods import MFA_methods
class stage02_isotopomer_fittedNetFluxDifferences_execute(stage02_isotopomer_fittedNetFluxDifferences_io,
stage02_isotopomer_fittedNetFluxes_query):
def execute_findNetFluxSignificantDifferences(self,analysis_id_I, criteria_I = 'flux_lb/flux_ub',
simulation_ids_I=[],simulation_dateAndTimes_I = [],
rxn_ids_I = [],flux_units_I = [],
control_simulation_id_I=None,
control_simulation_dateAndTime_I=None,
redundancy_I=False,
observable_only_I=False):
"""Find fluxes that are significantly different
Input:
analysis_id_I = string,
criteria_I = string, flux_lb/flux_ub: use flux_lb and flux_ub to determine significance (default)
flux_mean/flux_stdev: use the flux_mean and flux_stdev to determine significance
control_simulation_id_I = string, simulation_id to compare all other simulation_ids to
        control_simulation_dateAndTime_I = string, simulation_dateAndTime to compare all other simulation_dateAndTimes to
redundancy_I = boolean, if true, all values with be compared, if false (default), only unique comparisons will be made
observable_only_I = boolean, if true, only observable fluxes will be compared, if false (default), observable and unobservable fluxes will be compared
"""
mfamethods = MFA_methods();
data_O = [];
print('executing findNetFluxSignificantDifferences...')
# get the simulation_id and simulation_id dateAndTimes
if simulation_ids_I and simulation_dateAndTimes_I:
simulation_ids = simulation_ids_I;
simulation_dateAndTimes = simulation_dateAndTimes_I;
else:
simulation_ids = [];
simulation_ids_unique = [];
simulation_dateAndTimes = [];
# get the simulation unique ids
simulation_ids_unique = self.get_simulationID_analysisID_dataStage02IsotopomerAnalysis(analysis_id_I);
for simulation_id in simulation_ids_unique:
# get the simulation dateAndTimes
simulation_dateAndTimes_tmp = []
simulation_dateAndTimes_tmp = self.get_simulationDateAndTimes_simulationID_dataStage02IsotopomerfittedNetFluxes(simulation_id);
simulation_ids_tmp = [simulation_id for x in simulation_dateAndTimes_tmp];
simulation_dateAndTimes.extend(simulation_dateAndTimes_tmp)
simulation_ids.extend(simulation_ids_tmp)
if control_simulation_id_I and control_simulation_dateAndTime_I:
index = simulation_ids.index(control_simulation_id_I);
value = simulation_ids.pop(index);
simulation_ids.insert(0, value);
control_simulation_dateAndTime_I = self.convert_string2datetime(control_simulation_dateAndTime_I);
index = simulation_dateAndTimes.index(control_simulation_dateAndTime_I);
value = simulation_dateAndTimes.pop(index)
simulation_dateAndTimes.insert(0, value);
for simulation_cnt_1, simulation_id_1 in enumerate(simulation_ids):
print("calculating netFluxDifferences for simulation_id " + simulation_id_1);
# check for control
if control_simulation_id_I and control_simulation_dateAndTime_I and simulation_cnt_1>0:
break;
            #prevents redundancy: stop once there is nothing left to compare against
if simulation_cnt_1+1 >= len(simulation_ids):
break;
# get the units
if flux_units_I:
flux_units = flux_units_I;
else:
flux_units = self.get_fluxUnits_simulationIDAndSimulationDateAndTime_dataStage02IsotopomerfittedNetFluxes(simulation_id_1,simulation_dateAndTimes[simulation_cnt_1])
for flux_unit in flux_units:
print("calculating netFluxDifferences for flux_units " + flux_unit);
# get the rxn_ids
if rxn_ids_I:
rxn_ids = rxn_ids_I;
else:
rxn_ids = [];
rxn_ids = self.get_rxnIDs_simulationIDAndSimulationDateAndTimeAndFluxUnits_dataStage02IsotopomerfittedNetFluxes(simulation_id_1,simulation_dateAndTimes[simulation_cnt_1],flux_unit);
for rxn_id in rxn_ids:
print("calculating netFluxDifferes for rxn_id " + rxn_id);
# get simulation_id_1 flux data
flux_1,flux_stdev_1,flux_lb_1,flux_ub_1,flux_units_1=None,None,None,None,None;
flux_1,flux_stdev_1,flux_lb_1,flux_ub_1,flux_units_1=self.get_flux_simulationIDAndSimulationDateAndTimeAndFluxUnitsAndRxnID_dataStage02IsotopomerfittedNetFluxes(simulation_id_1,simulation_dateAndTimes[simulation_cnt_1],flux_unit,rxn_id);
if not mfamethods.check_criteria(flux_1,flux_stdev_1,flux_lb_1,flux_ub_1, criteria_I):
continue;
if redundancy_I: list_2 = simulation_ids;
else: list_2 = simulation_ids[simulation_cnt_1+1:];
if observable_only_I:
observable_1 = mfamethods.check_observableNetFlux(flux_1,flux_lb_1,flux_ub_1)
if not observable_1: continue;
for cnt,simulation_id_2 in enumerate(list_2): #prevents redundancy
if redundancy_I: simulation_cnt_2 = cnt;
else: simulation_cnt_2 = simulation_cnt_1+cnt+1;
if simulation_cnt_2 == simulation_cnt_1:
continue;
# simulation_id_2 flux_data
flux_2,flux_stdev_2,flux_lb_2,flux_ub_2,flux_units_2=None,None,None,None,None;
flux_2,flux_stdev_2,flux_lb_2,flux_ub_2,flux_units_2=self.get_flux_simulationIDAndSimulationDateAndTimeAndFluxUnitsAndRxnID_dataStage02IsotopomerfittedNetFluxes(simulation_id_2,simulation_dateAndTimes[simulation_cnt_2],flux_unit,rxn_id);
if not mfamethods.check_criteria(flux_2,flux_stdev_2,flux_lb_2,flux_ub_2, criteria_I):
continue;
if observable_only_I:
observable_2 = mfamethods.check_observableNetFlux(flux_2,flux_lb_2,flux_ub_2);
if not observable_2: continue;
flux_diff,flux_distance,fold_change,significant = None,None,None,False;
flux_diff,flux_distance,fold_change,significant = mfamethods.calculate_fluxDifference(flux_1,flux_stdev_1,flux_lb_1,flux_ub_1,flux_units_1,
flux_2,flux_stdev_2,flux_lb_2,flux_ub_2,flux_units_2,
criteria_I = criteria_I);
# record the data
data_O.append({
'analysis_id':analysis_id_I,
'simulation_id_1':simulation_id_1,
'simulation_dateAndTime_1':simulation_dateAndTimes[simulation_cnt_1],
'simulation_id_2':simulation_id_2,
'simulation_dateAndTime_2':simulation_dateAndTimes[simulation_cnt_2],
'rxn_id':rxn_id,
'flux_difference':flux_diff,
'significant':significant,
'significant_criteria':criteria_I,
'flux_units':flux_unit,
'fold_change_geo':fold_change,
'flux_distance':flux_distance,
'used_':True,
'comment_':None});
# add data to the database
self.add_data_stage02_isotopomer_fittedNetFluxDifferences(data_O);
| 70.264463
| 261
| 0.623147
|
483fb784ace7d9a827474ce35393fa4e983b3fe2
| 1,372
|
py
|
Python
|
ramp-frontend/ramp_frontend/utils.py
|
frcaud/ramp-board
|
3df90e51a4faeb0c03bab5dc13e12311807a618e
|
[
"BSD-3-Clause"
] | 13
|
2019-02-16T22:30:11.000Z
|
2021-01-11T10:13:47.000Z
|
ramp-frontend/ramp_frontend/utils.py
|
frcaud/ramp-board
|
3df90e51a4faeb0c03bab5dc13e12311807a618e
|
[
"BSD-3-Clause"
] | 427
|
2018-11-22T22:01:47.000Z
|
2022-03-15T17:35:57.000Z
|
ramp-frontend/ramp_frontend/utils.py
|
frcaud/ramp-board
|
3df90e51a4faeb0c03bab5dc13e12311807a618e
|
[
"BSD-3-Clause"
] | 18
|
2018-11-22T16:22:18.000Z
|
2021-12-07T14:42:41.000Z
|
"""
The :mod:`ramp_frontend.utils` provides utilities to ease sending email.
"""
import logging
from flask_mail import Message
from ramp_frontend import mail
logger = logging.getLogger("RAMP-FRONTEND")
def body_formatter_user(user):
"""Create the body of an email using the user information.
Parameters
----------
user : :class:`ramp_database.model.User`
The user profile.
Returns
-------
body : str
The email body.
"""
body = """
user = {}
name = {} {}
email = {}
linkedin = {}
twitter = {}
facebook = {}
github = {}
notes = {}
bio = {}
""".format(
user.name,
user.firstname,
user.lastname,
user.email,
user.linkedin_url,
user.twitter_url,
user.facebook_url,
user.github_url,
user.hidden_notes,
user.bio,
)
return body
def send_mail(to, subject, body):
"""Send email using Flask Mail.
Parameters
----------
to : str
The email address of the recipient.
subject : str
The subject of the email.
body : str
The body of the email.
"""
try:
msg = Message(subject)
msg.body = body
msg.add_recipient(to)
mail.send(msg)
except Exception as e:
logger.error("Mailing error: {}".format(e))
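# A minimal usage sketch (illustrative only; assumes an application context with
# Flask-Mail configured and a ramp_database User instance named `user`):
#
#     send_mail("admin@example.com", "New sign-up", body_formatter_user(user))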
| 18.794521
| 72
| 0.555394
|
5f156d52cb3e886f9779606669eaca1ef1e9c0b7
| 2,730
|
py
|
Python
|
spot_finding/spot_finder.py
|
graeme-winter/sidewinder
|
c0c7f61dbf3aadad0f9dbf803051f6c81a255317
|
[
"BSD-3-Clause"
] | null | null | null |
spot_finding/spot_finder.py
|
graeme-winter/sidewinder
|
c0c7f61dbf3aadad0f9dbf803051f6c81a255317
|
[
"BSD-3-Clause"
] | null | null | null |
spot_finding/spot_finder.py
|
graeme-winter/sidewinder
|
c0c7f61dbf3aadad0f9dbf803051f6c81a255317
|
[
"BSD-3-Clause"
] | null | null | null |
# spot_finder.py
#
# openCL / GPU powered spot finding
#
import sys
import time
import numpy as np
import pyopencl as cl
from spot_finder_data import setup, mask, data, shape, rettilb, plot
from spot_finder_cl import get_devices, device_help
from spot_finder_config import get_config
def main():
if len(sys.argv) < 2:
print(f"{sys.argv[0]} /path/to/data.nxs")
sys.exit(1)
filename = sys.argv[1]
config = get_config()
gpus = tuple(map(int, config["devices"].split(",")))
devices = get_devices()
context = cl.Context(devices=[devices[gpus[0]]])
queue = cl.CommandQueue(context)
# TODO verify that there is enough local memory for this size of work group
# TODO verify that this size of work group is legal for this device
local_work = tuple(map(int, config["work"].split(",")))
# work box + 7 pixels around
LOCAL = str((local_work[0] + 7) * (local_work[1] + 7))
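    # Hedged check addressing the TODOs above: query the device limits through
    # pyopencl before building the kernel. The 4-byte element size is an
    # assumption about the kernel's local buffer type; kernel-specific limits
    # (via get_work_group_info) may be stricter than the device-wide maximum.
    dev = devices[gpus[0]]
    assert local_work[0] * local_work[1] <= dev.max_work_group_size, "work group too large for device"
    assert (local_work[0] + 7) * (local_work[1] + 7) * 4 <= dev.local_mem_size, "not enough local memory for work group"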
cl_text = open("spot_finder.cl", "r").read().replace("LOCAL_SIZE", LOCAL)
program = cl.Program(context, cl_text).build()
spot_finder = program.spot_finder
spot_finder.set_scalar_arg_dtypes(
[
None,
None,
np.int32,
np.int32,
np.int32,
np.int32,
np.float32,
np.float32,
None,
]
)
setup(filename)
m = mask()
nz, ny, nx = shape()
data_shape = (32, ny // 32, nx)
d = data(0)
_image = cl.Buffer(
context, cl.mem_flags.READ_ONLY, d.size * np.dtype(d.dtype).itemsize
)
_mask = cl.Buffer(
context, cl.mem_flags.READ_ONLY, m.size * np.dtype(m.dtype).itemsize
)
_signal = cl.Buffer(
context, cl.mem_flags.WRITE_ONLY, m.size * np.dtype(m.dtype).itemsize
)
    # mask same for all images -> only copy it once
cl.enqueue_copy(queue, _mask, m)
# likewise output buffer
signal = np.zeros(shape=m.shape, dtype=m.dtype)
group = (1, local_work[0], local_work[1])
work = tuple(int(group[d] * np.ceil(data_shape[d] / group[d])) for d in (0, 1, 2))
t0 = time.time()
n = 0
for i in range(nz):
n += 1
image = data(i)
cl.enqueue_copy(queue, _image, image)
evt = spot_finder(
queue,
work,
group,
_image,
_mask,
data_shape[0],
data_shape[1],
data_shape[2],
3,
3.0,
6.0,
_signal,
)
evt.wait()
cl.enqueue_copy(queue, signal, _signal)
print(i, np.count_nonzero(signal))
t1 = time.time()
print(f"Processing {n} images took {(t1 - t0):.1f}s")
if __name__ == "__main__":
    main()
| 23.534483
| 86
| 0.567399
|
83d8c9d295b1aaca07e8d8aaadb8b0ed6f3fb5d7
| 4,647
|
py
|
Python
|
azure-mgmt-network/azure/mgmt/network/v2017_10_01/models/application_gateway_probe_py3.py
|
Christina-Kang/azure-sdk-for-python
|
bbf982eb06aab04b8151f69f1d230b7f5fb96ebf
|
[
"MIT"
] | 1
|
2022-03-30T22:39:15.000Z
|
2022-03-30T22:39:15.000Z
|
azure-mgmt-network/azure/mgmt/network/v2017_10_01/models/application_gateway_probe_py3.py
|
Christina-Kang/azure-sdk-for-python
|
bbf982eb06aab04b8151f69f1d230b7f5fb96ebf
|
[
"MIT"
] | 54
|
2016-03-25T17:25:01.000Z
|
2018-10-22T17:27:54.000Z
|
azure-mgmt-network/azure/mgmt/network/v2017_10_01/models/application_gateway_probe_py3.py
|
Christina-Kang/azure-sdk-for-python
|
bbf982eb06aab04b8151f69f1d230b7f5fb96ebf
|
[
"MIT"
] | 2
|
2017-01-20T18:25:46.000Z
|
2017-05-12T21:31:47.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class ApplicationGatewayProbe(SubResource):
"""Probe of the application gateway.
:param id: Resource ID.
:type id: str
:param protocol: Protocol. Possible values include: 'Http', 'Https'
:type protocol: str or
~azure.mgmt.network.v2017_10_01.models.ApplicationGatewayProtocol
:param host: Host name to send the probe to.
:type host: str
:param path: Relative path of probe. Valid path starts from '/'. Probe is
sent to <Protocol>://<host>:<port><path>
:type path: str
:param interval: The probing interval in seconds. This is the time
interval between two consecutive probes. Acceptable values are from 1
second to 86400 seconds.
:type interval: int
:param timeout: the probe timeout in seconds. Probe marked as failed if
valid response is not received with this timeout period. Acceptable values
are from 1 second to 86400 seconds.
:type timeout: int
:param unhealthy_threshold: The probe retry count. Backend server is
marked down after consecutive probe failure count reaches
    UnhealthyThreshold. Acceptable values are from 1 to 20.
:type unhealthy_threshold: int
:param pick_host_name_from_backend_http_settings: Whether the host header
should be picked from the backend http settings. Default value is false.
:type pick_host_name_from_backend_http_settings: bool
:param min_servers: Minimum number of servers that are always marked
healthy. Default value is 0.
:type min_servers: int
:param match: Criterion for classifying a healthy probe response.
:type match:
~azure.mgmt.network.v2017_10_01.models.ApplicationGatewayProbeHealthResponseMatch
:param provisioning_state: Provisioning state of the backend http settings
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param name: Name of the resource that is unique within a resource group.
This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
:param type: Type of the resource.
:type type: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'protocol': {'key': 'properties.protocol', 'type': 'str'},
'host': {'key': 'properties.host', 'type': 'str'},
'path': {'key': 'properties.path', 'type': 'str'},
'interval': {'key': 'properties.interval', 'type': 'int'},
'timeout': {'key': 'properties.timeout', 'type': 'int'},
'unhealthy_threshold': {'key': 'properties.unhealthyThreshold', 'type': 'int'},
'pick_host_name_from_backend_http_settings': {'key': 'properties.pickHostNameFromBackendHttpSettings', 'type': 'bool'},
'min_servers': {'key': 'properties.minServers', 'type': 'int'},
'match': {'key': 'properties.match', 'type': 'ApplicationGatewayProbeHealthResponseMatch'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(self, *, id: str=None, protocol=None, host: str=None, path: str=None, interval: int=None, timeout: int=None, unhealthy_threshold: int=None, pick_host_name_from_backend_http_settings: bool=None, min_servers: int=None, match=None, provisioning_state: str=None, name: str=None, etag: str=None, type: str=None, **kwargs) -> None:
super(ApplicationGatewayProbe, self).__init__(id=id, **kwargs)
self.protocol = protocol
self.host = host
self.path = path
self.interval = interval
self.timeout = timeout
self.unhealthy_threshold = unhealthy_threshold
self.pick_host_name_from_backend_http_settings = pick_host_name_from_backend_http_settings
self.min_servers = min_servers
self.match = match
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
self.type = type
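# Hedged usage sketch (not part of the generated SDK file); the values below are
# illustrative placeholders only:
#
#     probe = ApplicationGatewayProbe(
#         name='health-probe', protocol='Http', host='backend.contoso.com',
#         path='/health', interval=30, timeout=30, unhealthy_threshold=3)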
| 49.43617
| 342
| 0.665806
|
82c62eacf5a07c1672d05022fc65cd7519fc34a3
| 21
|
py
|
Python
|
hello.py
|
fabionunesdeparis/Fundamentos-em-python3
|
b21d06b44d5b18e99419cd06b4e08363c1f7a7ce
|
[
"MIT"
] | null | null | null |
hello.py
|
fabionunesdeparis/Fundamentos-em-python3
|
b21d06b44d5b18e99419cd06b4e08363c1f7a7ce
|
[
"MIT"
] | null | null | null |
hello.py
|
fabionunesdeparis/Fundamentos-em-python3
|
b21d06b44d5b18e99419cd06b4e08363c1f7a7ce
|
[
"MIT"
] | null | null | null |
print('Olá Mundo!!!')
| 21
| 21
| 0.619048
|
ad4a44764332c4eae76775ded127e890bfe5aae7
| 360
|
py
|
Python
|
mlprodict/onnxrt/ops_cpu/op_sub.py
|
sdpython/mlprodic
|
9367dacc91d35ec670c8a8a76708300a75bbc993
|
[
"MIT"
] | 32
|
2018-03-04T23:33:30.000Z
|
2022-03-10T19:15:06.000Z
|
mlprodict/onnxrt/ops_cpu/op_sub.py
|
sdpython/mlprodic
|
9367dacc91d35ec670c8a8a76708300a75bbc993
|
[
"MIT"
] | 184
|
2017-11-30T14:10:35.000Z
|
2022-02-21T08:29:31.000Z
|
mlprodict/onnxrt/ops_cpu/op_sub.py
|
sdpython/mlprodic
|
9367dacc91d35ec670c8a8a76708300a75bbc993
|
[
"MIT"
] | 9
|
2019-07-24T13:18:00.000Z
|
2022-03-07T04:08:07.000Z
|
# -*- encoding: utf-8 -*-
# pylint: disable=E0203,E1101,C0111
"""
@file
@brief Runtime operator.
"""
import numpy
from ._op import OpRunBinaryNumpy
class Sub(OpRunBinaryNumpy):
def __init__(self, onnx_node, desc=None, **options):
OpRunBinaryNumpy.__init__(self, numpy.subtract, onnx_node,
desc=desc, **options)
| 22.5
| 66
| 0.644444
|
9a91c8bd811f354324cda4680fd98e466c137266
| 2,122
|
py
|
Python
|
tests/test_action_space.py
|
emadboctorx/stable-baselines
|
9bce185538e8bf69836371286e23919fd85eec64
|
[
"MIT"
] | null | null | null |
tests/test_action_space.py
|
emadboctorx/stable-baselines
|
9bce185538e8bf69836371286e23919fd85eec64
|
[
"MIT"
] | null | null | null |
tests/test_action_space.py
|
emadboctorx/stable-baselines
|
9bce185538e8bf69836371286e23919fd85eec64
|
[
"MIT"
] | null | null | null |
import numpy as np
import pytest
from stable_baselines import A2C, PPO1, PPO2, TRPO
from stable_baselines.common.evaluation import evaluate_policy
from stable_baselines.common.identity_env import (IdentityEnvMultiBinary,
IdentityEnvMultiDiscrete)
from stable_baselines.common.vec_env import DummyVecEnv
MODEL_LIST = [A2C, PPO1, PPO2, TRPO]
@pytest.mark.slow
@pytest.mark.parametrize("model_class", MODEL_LIST)
def test_identity_multidiscrete(model_class):
"""
Test if the algorithm (with a given policy)
can learn an identity transformation (i.e. return observation as an action)
with a multidiscrete action space
:param model_class: (BaseRLModel) A RL Model
"""
env = DummyVecEnv([lambda: IdentityEnvMultiDiscrete(10)])
model = model_class("MlpPolicy", env)
model.learn(total_timesteps=1000)
evaluate_policy(model, env, n_eval_episodes=5)
obs = env.reset()
assert np.array(model.action_probability(obs)).shape == (
2,
1,
10,
), "Error: action_probability not returning correct shape"
assert (
np.prod(model.action_probability(obs, actions=env.action_space.sample()).shape)
== 1
), "Error: not scalar probability"
@pytest.mark.slow
@pytest.mark.parametrize("model_class", MODEL_LIST)
def test_identity_multibinary(model_class):
"""
Test if the algorithm (with a given policy)
can learn an identity transformation (i.e. return observation as an action)
with a multibinary action space
:param model_class: (BaseRLModel) A RL Model
"""
env = DummyVecEnv([lambda: IdentityEnvMultiBinary(10)])
model = model_class("MlpPolicy", env)
model.learn(total_timesteps=1000)
evaluate_policy(model, env, n_eval_episodes=5)
obs = env.reset()
assert model.action_probability(obs).shape == (
1,
10,
), "Error: action_probability not returning correct shape"
assert (
np.prod(model.action_probability(obs, actions=env.action_space.sample()).shape)
== 1
), "Error: not scalar probability"
| 32.646154
| 87
| 0.696513
|
89d67333af0ab4a0d22e05225d78236bafa36d32
| 1,721
|
py
|
Python
|
tests/test_validators.py
|
gundotio/worf
|
45268e3d04ba5a2549d3a4f511d876622c9e0cad
|
[
"MIT"
] | null | null | null |
tests/test_validators.py
|
gundotio/worf
|
45268e3d04ba5a2549d3a4f511d876622c9e0cad
|
[
"MIT"
] | 33
|
2021-03-05T05:20:30.000Z
|
2022-03-16T02:01:45.000Z
|
tests/test_validators.py
|
gundotio/worf
|
45268e3d04ba5a2549d3a4f511d876622c9e0cad
|
[
"MIT"
] | null | null | null |
import pytest
from uuid import uuid4
from django.core.exceptions import ValidationError
uuid = uuid4()
email = "something@example.com"
phone = "(555) 555-5555"
@pytest.fixture(name="profile_view")
def profile_view_fixture(db, profile_factory):
from django.test import RequestFactory
from tests.views import ProfileDetail
profile_factory.create(email=email, phone=phone)
view = ProfileDetail()
view.bundle = {
"id": str(uuid),
"email": email,
"phone": phone,
}
view.request = RequestFactory().patch(f"/{uuid}/")
view.kwargs = dict(id=str(uuid))
view.serializer = None
return view
def test_validate_bundle(profile_view):
assert profile_view.validate_bundle("id")
assert profile_view.validate_bundle("email")
assert profile_view.validate_bundle("phone")
def test_validate_uuid_accepts_str(profile_view):
assert profile_view.validate_uuid(str(uuid)) == uuid
def test_validate_uuid_accepts_uuid(profile_view):
assert profile_view.validate_uuid(uuid) == uuid
def test_validate_uuid_raises_error(profile_view):
with pytest.raises(ValidationError):
profile_view.validate_uuid("not-a-uuid")
def test_validate_email_passes(profile_view):
assert profile_view.validate_email(email) == email
def test_validate_email_raises_error(profile_view):
with pytest.raises(ValidationError):
profile_view.validate_email("fake.example@com")
def test_validate_custom_field_passes(profile_view):
assert profile_view.validate_phone(phone) == "+5555555555"
def test_validate_custom_field_raises_error(profile_view):
with pytest.raises(ValidationError):
profile_view.validate_phone("invalid number")
| 26.075758
| 62
| 0.750145
|
b0a60491392e7cad3a202ca66ae3fd5c049d3f7e
| 12,251
|
py
|
Python
|
model/fine_tune_model.py
|
joonzzh/cdp_based_clustering
|
474c419104de7496eb8749e1049ae07326abe706
|
[
"MIT"
] | 1
|
2018-11-08T11:55:05.000Z
|
2018-11-08T11:55:05.000Z
|
model/fine_tune_model.py
|
joonzzh/cdp_based_clustering
|
474c419104de7496eb8749e1049ae07326abe706
|
[
"MIT"
] | null | null | null |
model/fine_tune_model.py
|
joonzzh/cdp_based_clustering
|
474c419104de7496eb8749e1049ae07326abe706
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
import torch
from PIL import Image
from torch import nn, optim
from torch.autograd import Variable
from torch.utils.data import DataLoader, Dataset
from torchvision import models, transforms
class_num = 38
batch_size = 8
trans = transforms.Compose([
transforms.Resize(224),
transforms.ToTensor()
])
def default_loader(path):
return Image.open(path).convert('RGB')
class MyDataset(Dataset):
def __init__(self, txt, transform=None, target_transform=None, loader=default_loader):
fh = open(txt, 'r')
imgs = []
for line in fh:
line = line.strip('\n')
line = line.rstrip()
words = line.split()
imgs.append((words[0], int(words[1])))
fh.close()
self.imgs = imgs
self.transform = transform
self.target_transform = target_transform
self.loader = loader
def __getitem__(self, index):
fn, label = self.imgs[index]
img = self.loader(fn)
if self.transform is not None:
img = self.transform(img)
return img,label
def __len__(self):
return len(self.imgs)
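# Note: the txt files read by MyDataset are expected to contain one sample per
# line in the form "<image_path> <integer_label>" (whitespace separated), as
# parsed in MyDataset.__init__ above.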
def fine_tune_resnet18():
train_data=MyDataset(txt='../data/train/train.txt', transform=trans)
train_loader = DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True)
test_data=MyDataset(txt='../data/test/test.txt', transform=trans)
test_loader = DataLoader(dataset=test_data, batch_size=batch_size, shuffle=False)
model_path = 'YaleResnet18.pth'
if os.path.exists(model_path):
resnet_model = models.resnet18()
if torch.cuda.is_available():
resnet_model.load_state_dict(torch.load(model_path))
else:
resnet_model.load_state_dict(torch.load(model_path, map_location='cpu'))
resnet_model.fc = nn.Linear(in_features=512, out_features=class_num, bias=True)
print('Load model succeed!')
else:
print('Model not exists.')
resnet_model = models.resnet18(pretrained=True)
resnet_model.fc = nn.Linear(in_features=512, out_features=class_num, bias=True)
if torch.cuda.is_available():
resnet_model = resnet_model.cuda()
print(resnet_model)
return resnet_model, train_data, train_loader, test_data, test_loader, model_path
def fine_tune_resnet34():
train_data=MyDataset(txt='../data/train/train.txt', transform=trans)
train_loader = DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True)
test_data=MyDataset(txt='../data/test/test.txt', transform=trans)
test_loader = DataLoader(dataset=test_data, batch_size=batch_size, shuffle=False)
model_path = 'YaleResnet34.pth'
if os.path.exists(model_path):
resnet_model = models.resnet34()
if torch.cuda.is_available():
resnet_model.load_state_dict(torch.load(model_path))
else:
resnet_model.load_state_dict(torch.load(model_path, map_location='cpu'))
resnet_model.fc = nn.Linear(in_features=512, out_features=class_num, bias=True)
print('Load model succeed!')
else:
resnet_model = models.resnet34(pretrained=True)
        resnet_model.fc = nn.Linear(in_features=512, out_features=class_num, bias=True)
if torch.cuda.is_available():
resnet_model = resnet_model.cuda()
print(resnet_model)
return resnet_model, train_data, train_loader, test_data, test_loader, model_path
def fine_tune_resnet50():
train_data=MyDataset(txt='../data/train/train.txt', transform=trans)
train_loader = DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True)
test_data=MyDataset(txt='../data/test/test.txt', transform=trans)
test_loader = DataLoader(dataset=test_data, batch_size=batch_size, shuffle=False)
model_path = 'YaleResnet50.pth'
if os.path.exists(model_path):
resnet_model = models.resnet50()
if torch.cuda.is_available():
resnet_model.load_state_dict(torch.load(model_path))
else:
resnet_model.load_state_dict(torch.load(model_path, map_location='cpu'))
resnet_model.fc = nn.Linear(in_features=2048, out_features=class_num, bias=True)
print('Load model succeed!')
else:
resnet_model = models.resnet50(pretrained=True)
resnet_model.fc = nn.Linear(in_features=2048, out_features=class_num, bias=True)
if torch.cuda.is_available():
resnet_model = resnet_model.cuda()
print(resnet_model)
return resnet_model, train_data, train_loader, test_data, test_loader, model_path
def fine_tune_resnet101():
train_data=MyDataset(txt='../data/train/train.txt', transform=trans)
train_loader = DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True)
test_data=MyDataset(txt='../data/test/test.txt', transform=trans)
test_loader = DataLoader(dataset=test_data, batch_size=batch_size, shuffle=False)
model_path = 'YaleResnet101.pth'
if os.path.exists(model_path):
resnet_model = models.resnet101()
if torch.cuda.is_available():
resnet_model.load_state_dict(torch.load(model_path))
else:
resnet_model.load_state_dict(torch.load(model_path, map_location='cpu'))
resnet_model.fc = nn.Linear(in_features=2048, out_features=class_num, bias=True)
print('Load model succeed!')
else:
resnet_model = models.resnet101(pretrained=True)
resnet_model.fc = nn.Linear(in_features=2048, out_features=class_num, bias=True)
if torch.cuda.is_available():
resnet_model = resnet_model.cuda()
print(resnet_model)
return resnet_model, train_data, train_loader, test_data, test_loader, model_path
def fine_tune_vgg16():
train_data=MyDataset(txt='../data/train/train.txt', transform=trans)
train_loader = DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True)
test_data=MyDataset(txt='../data/test/test.txt', transform=trans)
test_loader = DataLoader(dataset=test_data, batch_size=batch_size, shuffle=False)
model_path = 'YaleVgg16.pth'
if os.path.exists(model_path):
model = models.vgg16()
if torch.cuda.is_available():
model.load_state_dict(torch.load(model_path))
else:
model.load_state_dict(torch.load(model_path, map_location='cpu'))
model.classifier = nn.Sequential(nn.Linear(25088, 4096), #vgg16
nn.ReLU(),
nn.Dropout(p=0.5),
nn.Linear(4096, 4096),
nn.ReLU(),
nn.Dropout(p=0.5),
nn.Linear(4096, class_num))
print('Load model succeed!')
else:
model = models.vgg16(pretrained=True)
model.classifier = nn.Sequential(nn.Linear(25088, 4096), #vgg16
nn.ReLU(),
nn.Dropout(p=0.5),
nn.Linear(4096, 4096),
nn.ReLU(),
nn.Dropout(p=0.5),
nn.Linear(4096, class_num))
if torch.cuda.is_available():
model = model.cuda()
print(model)
return model, train_data, train_loader, test_data, test_loader, model_path
def fine_tune_densenet121():
train_data=MyDataset(txt='../data/train/train.txt', transform=trans)
train_loader = DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True)
test_data=MyDataset(txt='../data/test/test.txt', transform=trans)
test_loader = DataLoader(dataset=test_data, batch_size=batch_size, shuffle=False)
model_path = 'YaleDense121.pth'
if os.path.exists(model_path):
model = models.densenet121()
if torch.cuda.is_available():
model.load_state_dict(torch.load(model_path))
else:
model.load_state_dict(torch.load(model_path, map_location='cpu'))
model.classifier = nn.Linear(1024, class_num, True)
print('Load model succeed!')
else:
model = models.densenet121(pretrained=True)
model.classifier = nn.Linear(1024, class_num, True)
if torch.cuda.is_available():
model = model.cuda()
print(model)
return model, train_data, train_loader, test_data, test_loader, model_path
def fine_tune_densenet161():
train_data=MyDataset(txt='../data/train/train.txt', transform=trans)
train_loader = DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True)
test_data=MyDataset(txt='../data/test/test.txt', transform=trans)
test_loader = DataLoader(dataset=test_data, batch_size=batch_size, shuffle=False)
model_path = 'YaleDense161.pth'
if os.path.exists(model_path):
model = models.densenet161()
if torch.cuda.is_available():
model.load_state_dict(torch.load(model_path))
else:
model.load_state_dict(torch.load(model_path, map_location='cpu'))
model.classifier = nn.Linear(2208, class_num, True)
print('Load model succeed!')
else:
model = models.densenet161(pretrained=True)
model.classifier = nn.Linear(2208, class_num, True)
if torch.cuda.is_available():
model = model.cuda()
print(model)
return model, train_data, train_loader, test_data, test_loader, model_path
def fine_tune(net, train_data, train_loader, test_data, test_loader, path):
optimizer = optim.Adam(net.parameters(), 1e-4)
loss_func = nn.CrossEntropyLoss()
for epoch in range(30):
print('epoch {}'.format(epoch + 1))
# training
train_loss = 0.0
train_acc = 0.0
for batch_x, batch_y in train_loader:
if torch.cuda.is_available():
batch_x, batch_y = Variable(batch_x.cuda()), Variable(batch_y.cuda())
else:
batch_x, batch_y = Variable(batch_x), Variable(batch_y)
out = net(batch_x)
loss = loss_func(out, batch_y)
train_loss += loss.data[0]
pred = torch.max(out, 1)[1]
train_correct = (pred == batch_y).sum()
train_acc += float(train_correct.data[0])
optimizer.zero_grad()
loss.backward()
optimizer.step()
print('Epoch: {}\nTrain Loss: {:.6f}, Acc: {:.6f}'.format(epoch, train_loss / (len(train_data)), train_acc / (len(train_data))))
#evaluation
acc = 0.0
for batch_x, batch_y in test_loader:
if torch.cuda.is_available():
batch_x, batch_y = Variable(batch_x.cuda()), Variable(batch_y.cuda())
else:
batch_x, batch_y = Variable(batch_x), Variable(batch_y)
out = net(batch_x)
loss = loss_func(out, batch_y)
pred = torch.max(out, 1)[1]
train_correct = (pred == batch_y).sum()
acc += float(train_correct.data[0])
print('Test Acc: {:.6f}'.format(acc / (len(test_data))))
torch.save(net.state_dict(), path)
# resnet18
# net, train_data, train_loader, test_data, test_loader, path = fine_tune_resnet18()
# fine_tune(net, train_data, train_loader, test_data, test_loader, path)
# resnet34
#net, train_data, train_loader, test_data, test_loader, path = fine_tune_resnet34()
#fine_tune(net, train_data, train_loader, test_data, test_loader, path)
# resnet50
#net, train_data, train_loader, test_data, test_loader, path = fine_tune_resnet50()
#fine_tune(net, train_data, train_loader, test_data, test_loader, path)
# resnet101
#net, train_data, train_loader, test_data, test_loader, path = fine_tune_resnet101()
#fine_tune(net, train_data, train_loader, test_data, test_loader, path)
#vgg16
#net, train_data, train_loader, test_data, test_loader, path = fine_tune_vgg16()
#fine_tune(net, train_data, train_loader, test_data, test_loader, path)
#densenet121
#net, train_data, train_loader, test_data, test_loader, path = fine_tune_densenet121()
#fine_tune(net, train_data, train_loader, test_data, test_loader, path)
#densenet161
#net, train_data, train_loader, test_data, test_loader, path = fine_tune_densenet161()
#fine_tune(net, train_data, train_loader, test_data, test_loader, path)
| 43.443262
| 136
| 0.662885
|
497894d58df9b24d3ecf030f33a73fb9abe75f2a
| 4,173
|
py
|
Python
|
Image/where.py
|
jdgomezmo/gee
|
7016c47ee902dbf60b1aeb6319424c61c1107345
|
[
"MIT"
] | 1
|
2020-11-16T22:07:42.000Z
|
2020-11-16T22:07:42.000Z
|
Image/where.py
|
jdgomezmo/gee
|
7016c47ee902dbf60b1aeb6319424c61c1107345
|
[
"MIT"
] | null | null | null |
Image/where.py
|
jdgomezmo/gee
|
7016c47ee902dbf60b1aeb6319424c61c1107345
|
[
"MIT"
] | null | null | null |
# %%
"""
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/Image/where.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Image/where.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Image/where.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
"""
# %%
"""
## Install Earth Engine API and geemap
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.
The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet.
**Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.eefolium`](https://github.com/giswqs/geemap/blob/master/geemap/eefolium.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving).
"""
# %%
# Installs geemap package
import subprocess
try:
import geemap
except ImportError:
print('geemap package not installed. Installing ...')
subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap'])
# Checks whether this notebook is running on Google Colab
try:
import google.colab
import geemap.eefolium as geemap
except:
import geemap
# Authenticates and initializes Earth Engine
import ee
try:
ee.Initialize()
except Exception as e:
ee.Authenticate()
ee.Initialize()
# %%
"""
## Create an interactive map
The default basemap is `Google Maps`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/basemaps.py) can be added using the `Map.add_basemap()` function.
"""
# %%
Map = geemap.Map(center=[40,-100], zoom=4)
Map
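# A hedged example of the add_basemap() call mentioned above; 'HYBRID' is one of
# the basemap names bundled with geemap (uncomment to try):
# Map.add_basemap('HYBRID')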
# %%
"""
## Add Earth Engine Python script
"""
# %%
# Add Earth Engine dataset
Map.setCenter(-113.41842, 40.055489, 6)
elev = ee.Image('srtm90_v4')
cover = ee.Image('MCD12Q1/MCD12Q1_005_2001_01_01').select('Land_Cover_Type_1')
blank = ee.Image(0)
# Where (1 <= cover <= 4) and (elev > 1000), set the output to 1.
output = blank.where(
cover.lte(4).And(cover.gte(1)).And(elev.gt(1000)),
1)
# Output contains 0s and 1s. Mask it with itself to get rid of the 0s.
result = output.mask(output)
vis = {'min': 0, 'max': 3000}
Map.addLayer(elev, vis, 'SRTM')
Map.addLayer(result, {'palette': '00AA00'}, 'Land Cover')
# %%
"""
## Display Earth Engine data layers
"""
# %%
Map.addLayerControl() # This line is not needed for ipyleaflet-based Map.
Map
| 47.965517
| 1,021
| 0.73664
|
f05fab1fee963ff0abc14f37d72f99605c9595cc
| 1,234
|
py
|
Python
|
Projects/Online Workouts/w3resource/List/program-31.py
|
ivenpoker/Python-Projects
|
2975e1bd687ec8dbcc7a4842c13466cb86292679
|
[
"MIT"
] | 1
|
2019-09-23T15:51:45.000Z
|
2019-09-23T15:51:45.000Z
|
Projects/Online Workouts/w3resource/List/program-31.py
|
ivenpoker/Python-Projects
|
2975e1bd687ec8dbcc7a4842c13466cb86292679
|
[
"MIT"
] | 5
|
2021-02-08T20:47:19.000Z
|
2022-03-12T00:35:44.000Z
|
Projects/Online Workouts/w3resource/List/program-31.py
|
ivenpoker/Python-Projects
|
2975e1bd687ec8dbcc7a4842c13466cb86292679
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python 3
############################################################################################
# #
# Program purpose: Finds the number of items that falls within a certain range. #
# Program Author : Happi Yvan <ivensteinpoker@gmail.com> #
# Creation Date : November 12, 2019 #
# #
############################################################################################
import random
def random_int_list(low: int, high: int, size: int) -> list:
return [random.randint(low, high) for _ in range(size)]
def count_items_in_range(main_data: list, low: int, high: int) -> int:
cnt = int(0)
for x in main_data:
if low <= x <= high:
cnt += 1
return cnt
if __name__ == "__main__":
list_data = random_int_list(low=0, high=20, size=15)
print(f'Generated list data: {list_data}')
print(f'Number of items in range 2 and 10: {count_items_in_range(main_data=list_data, low=2, high=10)}')
| 45.703704
| 109
| 0.4141
|
279b0ae4916949fb3d9cea3d223d44ba5fa7efd6
| 601
|
py
|
Python
|
benchmark/run.py
|
ioistired/pytomlpp
|
54b7a3606fecfb1fb76469ce355aced007db0ecc
|
[
"MIT"
] | null | null | null |
benchmark/run.py
|
ioistired/pytomlpp
|
54b7a3606fecfb1fb76469ce355aced007db0ecc
|
[
"MIT"
] | null | null | null |
benchmark/run.py
|
ioistired/pytomlpp
|
54b7a3606fecfb1fb76469ce355aced007db0ecc
|
[
"MIT"
] | null | null | null |
import pytomlpp
import toml
import tomlkit
import qtoml
import timeit
def benchmark(name, func, number=5000):
print(f'{name:>10}: Running...', end='', flush=True)
res = str(timeit.timeit(func, number=number)).split('.')
print('\b'*10 + f'{res[0]:>3}.{res[1]} s')
test_data = ''
with open('data.toml', 'r', encoding='utf-8') as f:
test_data = f.read()
benchmark('pytomlpp', lambda: pytomlpp.loads(test_data))
benchmark('toml', lambda: toml.loads(test_data))
benchmark('qtoml', lambda: qtoml.loads(test_data))
benchmark('tomlkit', lambda: tomlkit.parse(test_data))
| 30.05
| 61
| 0.66223
|
dbb8acb65dc6e069fc5ea0475937df38bd18b556
| 1,497
|
py
|
Python
|
Deathly Dungeon/pathfinding/finder/best_first.py
|
iTecAI/Deathly-Dungeons
|
54d8bb9b9c6175a6f8c55858bf864f773cfe8f2c
|
[
"MIT"
] | null | null | null |
Deathly Dungeon/pathfinding/finder/best_first.py
|
iTecAI/Deathly-Dungeons
|
54d8bb9b9c6175a6f8c55858bf864f773cfe8f2c
|
[
"MIT"
] | null | null | null |
Deathly Dungeon/pathfinding/finder/best_first.py
|
iTecAI/Deathly-Dungeons
|
54d8bb9b9c6175a6f8c55858bf864f773cfe8f2c
|
[
"MIT"
] | null | null | null |
from .a_star import AStarFinder, MAX_RUNS, TIME_LIMIT
from pathfinding.core.diagonal_movement import DiagonalMovement
class BestFirst(AStarFinder):
"""
Similar to the default A* algorithm from a_star.
"""
def __init__(self, heuristic=None, weight=1,
diagonal_movement=DiagonalMovement.never,
time_limit=TIME_LIMIT,
max_runs=MAX_RUNS):
"""
find shortest path using BestFirst algorithm
:param heuristic: heuristic used to calculate distance of 2 points
            (defaults to manhattan)
:param weight: weight for the edges
:param diagonal_movement: if diagonal movement is allowed
(see enum in diagonal_movement)
:param time_limit: max. runtime in seconds
:param max_runs: max. amount of tries until we abort the search
            (optional, only if we enter huge grids and have time constraints)
            <=0 means there are no constraints and the code might run on any
large map.
"""
super(BestFirst, self).__init__(
heuristic=heuristic,
weight=weight,
diagonal_movement=diagonal_movement,
time_limit=time_limit,
max_runs=max_runs)
self.weighted = False
def apply_heuristic(self, node_a, node_b, heuristic=None):
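        # Scaling the heuristic by a large constant makes it dominate the path
        # cost accumulated so far, so the machinery inherited from AStarFinder
        # expands whichever node looks closest to the goal, i.e. greedy
        # best-first behaviour.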
return super(BestFirst, self).apply_heuristic(
node_a, node_b, heuristic) * 1000000
| 39.394737
| 77
| 0.628591
|
6079690d45fb5d46a3f4e6dfb7b05fdc446cd22c
| 3,022
|
py
|
Python
|
nova/conf/base.py
|
badock/nova-tidb
|
4c4591f2cd887fdc22828e12f0c297c051bbd912
|
[
"Apache-2.0"
] | null | null | null |
nova/conf/base.py
|
badock/nova-tidb
|
4c4591f2cd887fdc22828e12f0c297c051bbd912
|
[
"Apache-2.0"
] | null | null | null |
nova/conf/base.py
|
badock/nova-tidb
|
4c4591f2cd887fdc22828e12f0c297c051bbd912
|
[
"Apache-2.0"
] | null | null | null |
# needs:fix_opt_description
# needs:check_deprecation_status
# needs:check_opt_group_and_type
# needs:fix_opt_description_indentation
# needs:fix_opt_registration_consistency
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
notify_decorator = 'nova.notifications.notify_decorator'
base_options = [
cfg.IntOpt('password_length',
default=12,
help='Length of generated instance admin passwords'),
cfg.StrOpt('instance_usage_audit_period',
default='month',
help='Time period to generate instance usages for. '
'Time period must be hour, day, month or year'),
cfg.BoolOpt('use_rootwrap_daemon',
default=False,
help="Start and use a daemon that can run the commands that "
"need to be run with root privileges. This option is "
"usually enabled on nodes that run nova compute "
"processes"),
cfg.StrOpt('rootwrap_config',
default="/etc/nova/rootwrap.conf",
help='Path to the rootwrap configuration file to use for '
'running commands as root'),
cfg.StrOpt(
'tempdir',
help='Explicitly specify the temporary working directory'),
cfg.BoolOpt(
'monkey_patch',
default=False,
help="""Determine if monkey patching should be applied.
Possible values:
* True: Functions specified in ``monkey_patch_modules`` will be patched.
* False: No monkey patching will occur.
Services which consume this:
* All
Interdependencies to other options:
* ``monkey_patch_modules``: This must have values set for this option to have
any effect
"""),
cfg.ListOpt(
'monkey_patch_modules',
default=[
'nova.compute.api:%s' % (notify_decorator)
],
help="""List of modules/decorators to monkey patch.
This option allows you to patch a decorator for all functions in specified
modules.
Possible values:
* nova.compute.api:nova.notifications.notify_decorator
* nova.api.ec2.cloud:nova.notifications.notify_decorator
* [...]
Interdependencies to other options:
* ``monkey_patch``: This must be set to ``True`` for this option to
have any effect
"""),
]
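# A hedged example of how these options might appear in nova.conf (illustrative
# only; the module value is the one listed in the help text above):
#
# [DEFAULT]
# monkey_patch = True
# monkey_patch_modules = nova.compute.api:nova.notifications.notify_decorator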
def register_opts(conf):
conf.register_opts(base_options)
def list_opts():
return {'DEFAULT': base_options}
| 30.525253
| 78
| 0.708802
|
9af8d14180b0c19112b4ceace6bec6cbb3718b23
| 570
|
py
|
Python
|
main/twolame/template.py
|
RoastVeg/cports
|
803c7f07af341eb32f791b6ec1f237edb2764bd5
|
[
"BSD-2-Clause"
] | null | null | null |
main/twolame/template.py
|
RoastVeg/cports
|
803c7f07af341eb32f791b6ec1f237edb2764bd5
|
[
"BSD-2-Clause"
] | null | null | null |
main/twolame/template.py
|
RoastVeg/cports
|
803c7f07af341eb32f791b6ec1f237edb2764bd5
|
[
"BSD-2-Clause"
] | null | null | null |
pkgname = "twolame"
pkgver = "0.4.0"
pkgrel = 0
build_style = "gnu_configure"
hostmakedepends = ["pkgconf"]
makedepends = ["libsndfile-devel"]
checkdepends = ["perl"]
pkgdesc = "Optimized MPEG Audio Layer 2 encoder"
maintainer = "q66 <q66@chimera-linux.org>"
license = "LGPL-2.1-or-later"
url = "https://www.twolame.org"
source = f"$(SOURCEFORGE_SITE)/{pkgname}/{pkgname}-{pkgver}.tar.gz"
sha256 = "cc35424f6019a88c6f52570b63e1baf50f62963a3eac52a03a800bb070d7c87d"
@subpackage("twolame-devel")
def _devel(self):
return self.default_devel(extra = ["usr/share/doc"])
| 31.666667
| 75
| 0.736842
|
457661126b08d9c5cc50b127ba32e492cd20fd4d
| 1,652
|
py
|
Python
|
app/views.py
|
mateusvictor/Email-Sender
|
66034f315a1b0f0ee7bb07fda12d58810c465aa6
|
[
"MIT"
] | null | null | null |
app/views.py
|
mateusvictor/Email-Sender
|
66034f315a1b0f0ee7bb07fda12d58810c465aa6
|
[
"MIT"
] | null | null | null |
app/views.py
|
mateusvictor/Email-Sender
|
66034f315a1b0f0ee7bb07fda12d58810c465aa6
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render, redirect
from django.core.mail import send_mail
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from .models import Receiver
import os
@login_required
def home(request):
user = request.user
receivers = [str(receiver) for receiver in Receiver.objects.filter(user=user)]
context = {
'receivers': ' | '.join(receivers),
}
return render(request, "app/home.html", context)
@login_required
def add_receiver(request):
if request.method != 'POST':
return redirect("home")
user = request.user
receiver = request.POST['receiver']
new_receiver = Receiver.objects.create(user=user, receiver=receiver)
return redirect("receivers")
@login_required
def remove_receiver(request, pk):
try:
to_delete = Receiver.objects.get(pk=pk)
except:
return redirect("home")
if request.user == to_delete.user:
to_delete.delete()
return redirect("receivers")
@login_required
def receiver_list(request):
user = request.user
context = {
'receiver_list': Receiver.objects.filter(user=user).order_by('receiver'),
}
return render(request, "app/receiver_form.html", context=context)
@login_required
def send_email(request):
if request.method != 'POST':
return redirect("home")
content = request.POST['content']
subject = request.POST['subject']
sender = os.environ.get('EMAIL_USER')
user = request.user
receivers = [str(receiver) for receiver in Receiver.objects.filter(user=user)]
if not receivers:
return redirect("home")
send_mail(
subject,
content,
sender,
receivers,
fail_silently=False,
)
return redirect("home")
| 20.395062
| 79
| 0.740315
|
1f9a56b08c6b09de021bf700b520e9b6fc6f46fc
| 13,331
|
py
|
Python
|
connectordb/_websocket.py
|
connectordb/connectordb-python
|
2092b0cb30898139a247176bcf433d5a4abde7cb
|
[
"Apache-2.0"
] | 1
|
2017-02-26T07:40:39.000Z
|
2017-02-26T07:40:39.000Z
|
connectordb/_websocket.py
|
connectordb/connectordb-python
|
2092b0cb30898139a247176bcf433d5a4abde7cb
|
[
"Apache-2.0"
] | null | null | null |
connectordb/_websocket.py
|
connectordb/connectordb-python
|
2092b0cb30898139a247176bcf433d5a4abde7cb
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
import websocket
import threading
import logging
import json
import random
import time
class WebsocketHandler(object):
"""WebsocketHandler handles websocket connections to a ConnectorDB server. It allows
subscribing and unsubscribing from inputs/outputs. The handler also deals with dropped
connections, repeatedly attempting to reconnect to the server whenever connection is lost."""
"""The maximum time to wait between reconnection attempts"""
reconnect_time_max_seconds = 8 * 60.0
"""Multiply the wait time by this factor when a reconnect fails"""
reconnect_time_backoff_multiplier = 1.5
"""The time in seconds to wait before an initial attempt at reconnecting"""
reconnect_time_starting_seconds = 1.0
"""The time between pings that results in a connection timeout"""
connection_ping_timeout = 60 * 2
def __init__(self, server_url, basic_auth):
"""
The handler is initialized as follows::
from requests.auth import HTTPBasicAuth
req = HTTPBasicAuth(username,password)
ws = WebsocketHandler("https://connectordb.com",req)
"""
# The websocket is at /api/v1/websocket, and the server_url includes the /api/v1/
server_url += "websocket"
# First we must get the websocket URI from the server URL
self.ws_url = "wss://" + server_url[8:]
if server_url.startswith("http://"):
self.ws_url = "ws://" + server_url[7:]
self.setauth(basic_auth)
# Set up the variable which will hold all of the subscriptions
self.subscriptions = {}
self.subscription_lock = threading.Lock()
# The server periodically sends ping messages during websocket connection.
# we keep track of the pings so that we notice loss of connection
self.lastpingtime = time.time()
self.pingtimer = None
# Now set up the websocket
self.ws = None
self.ws_thread = None # The thread where the websocket runs
self.ws_openlock = threading.Lock()
self.ws_sendlock = threading.Lock()
# Set up the websocket status
self._status = "disconnected"
self._status_lock = threading.Lock()
# Set up the reconnect time
self.reconnect_time = self.reconnect_time_starting_seconds
# Set up the times that we were connected and disconnected. These allow for
# setting up reconnect delays correctly
self.connected_time = 0
self.disconnected_time = 0
def setauth(self,basic_auth):
""" setauth can be used during runtime to make sure that authentication is reset.
it can be used when changing passwords/apikeys to make sure reconnects succeed """
self.headers = []
# If we have auth
if basic_auth is not None:
# we use a cheap hack to get the basic auth header out of the auth object.
# This snippet ends up with us having an array of the necessary headers
# to perform authentication.
class auth_extractor():
def __init__(self):
self.headers = {}
extractor = auth_extractor()
basic_auth(extractor)
for header in extractor.headers:
self.headers.append("%s: %s" % (header, extractor.headers[header]))
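            # For HTTPBasicAuth this yields a single header of the form
            # "Authorization: Basic <base64(username:password)>".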
@property
def status(self):
status = ""
with self._status_lock:
status = self._status
return status
@status.setter
def status(self, newstatus):
with self._status_lock:
self._status = newstatus
logging.debug("ConnectorDB:WS:STATUS: %s", newstatus)
def send(self, cmd):
"""Send the given command thru the websocket"""
with self.ws_sendlock:
self.ws.send(json.dumps(cmd))
def insert(self, stream, data):
"""Insert the given datapoints into the stream"""
self.send({"cmd": "insert", "arg": stream, "d": data})
def subscribe(self, stream, callback, transform=""):
"""Given a stream, a callback and an optional transform, sets up the subscription"""
if self.status == "disconnected" or self.status == "disconnecting" or self.status == "connecting":
self.connect()
        if self.status != "connected":
return False
logging.debug("Subscribing to %s", stream)
self.send({"cmd": "subscribe", "arg": stream, "transform": transform})
with self.subscription_lock:
self.subscriptions[stream + ":" + transform] = callback
return True
def unsubscribe(self, stream, transform=""):
"""Unsubscribe from the given stream (with the optional transform)"""
        if self.status != "connected":
return False
logging.debug("Unsubscribing from %s", stream)
self.send(
{"cmd": "unsubscribe",
"arg": stream,
"transform": transform})
self.subscription_lock.acquire()
del self.subscriptions[stream + ":" + transform]
        if len(self.subscriptions) == 0:
self.subscription_lock.release()
self.disconnect()
else:
self.subscription_lock.release()
def connect(self):
"""Attempt to connect to the websocket - and returns either True or False depending on if
the connection was successful or not"""
# Wait for the lock to be available (ie, the websocket is not being used (yet))
self.ws_openlock.acquire()
self.ws_openlock.release()
if self.status == "connected":
return True # Already connected
if self.status == "disconnecting":
# If currently disconnecting, wait a moment, and retry connect
time.sleep(0.1)
return self.connect()
if self.status == "disconnected" or self.status == "reconnecting":
self.ws = websocket.WebSocketApp(self.ws_url,
header=self.headers,
on_message=self.__on_message,
on_ping=self.__on_ping,
on_open=self.__on_open,
on_close=self.__on_close,
on_error=self.__on_error)
self.ws_thread = threading.Thread(target=self.ws.run_forever)
self.ws_thread.daemon = True
self.status = "connecting"
self.ws_openlock.acquire()
self.ws_thread.start()
self.ws_openlock.acquire()
self.ws_openlock.release()
return self.status == "connected"
def disconnect(self):
if self.status == "connected":
self.status = "disconnecting"
with self.subscription_lock:
self.subscriptions = {}
self.ws.close()
self.__on_close(self.ws)
def __reconnect(self):
"""This is called when a connection is lost - it attempts to reconnect to the server"""
self.status = "reconnecting"
# Reset the disconnect time after 15 minutes
if self.disconnected_time - self.connected_time > 15 * 60:
self.reconnect_time = self.reconnect_time_starting_seconds
else:
self.reconnect_time *= self.reconnect_time_backoff_multiplier
if self.reconnect_time > self.reconnect_time_max_seconds:
self.reconnect_time = self.reconnect_time_max_seconds
# We want to add some randomness to the reconnect rate - necessary so that we don't pound the server
# if it goes down
self.reconnect_time *= 1 + random.uniform(-0.2, 0.2)
if self.reconnect_time < self.reconnect_time_starting_seconds:
self.reconnect_time = self.reconnect_time_starting_seconds
logging.warn("ConnectorDB:WS: Attempting to reconnect in %fs",
self.reconnect_time)
self.reconnector = threading.Timer(self.reconnect_time,
self.__reconnect_fnc)
self.reconnector.daemon = True
self.reconnector.start()
def __reconnect_fnc(self):
"""This function is called by reconnect after the time delay"""
if self.connect():
self.__resubscribe()
else:
self.__reconnect()
def __resubscribe(self):
"""Send subscribe command for all existing subscriptions. This allows to resume a connection
that was closed"""
with self.subscription_lock:
for sub in self.subscriptions:
logging.debug("Resubscribing to %s", sub)
stream_transform = sub.split(":", 1)
self.send({
"cmd": "subscribe",
"arg": stream_transform[0],
"transform": stream_transform[1]
})
def __on_open(self, ws):
"""Called when the websocket is opened"""
logging.debug("ConnectorDB: Websocket opened")
# Connection success - decrease the wait time for next connection
self.reconnect_time /= self.reconnect_time_backoff_multiplier
self.status = "connected"
self.lastpingtime = time.time()
self.__ensure_ping()
self.connected_time = time.time()
# Release the lock that connect called
self.ws_openlock.release()
def __on_close(self, ws):
"""Called when the websocket is closed"""
if self.status == "disconnected":
return # This can be double-called on disconnect
logging.debug("ConnectorDB:WS: Websocket closed")
# Turn off the ping timer
if self.pingtimer is not None:
self.pingtimer.cancel()
self.disconnected_time = time.time()
if self.status == "disconnecting":
self.status = "disconnected"
elif self.status == "connected":
self.__reconnect()
def __on_error(self, ws, err):
"""Called when there is an error in the websocket"""
logging.debug("ConnectorDB:WS: Connection Error")
if self.status == "connecting":
self.status = "errored"
self.ws_openlock.release() # Release the lock of connecting
def __on_message(self, ws, msg):
"""This function is called whenever there is a message received from the server"""
msg = json.loads(msg)
logging.debug("ConnectorDB:WS: Msg '%s'", msg["stream"])
# Build the subcription key
stream_key = msg["stream"] + ":"
if "transform" in msg:
stream_key += msg["transform"]
self.subscription_lock.acquire()
if stream_key in self.subscriptions:
subscription_function = self.subscriptions[stream_key]
self.subscription_lock.release()
fresult = subscription_function(msg["stream"], msg["data"])
if fresult is True:
# This is a special result - if the subscription function of a downlink returns True,
# then the datapoint is acknowledged automatically (ie, reinserted in non-downlink stream)
fresult = msg["data"]
if fresult is not False and fresult is not None and msg["stream"].endswith(
"/downlink") and msg["stream"].count("/") == 3:
# If the above conditions are true, it means that the datapoints were from a downlink,
# and the subscriber function chooses to acknowledge them, so we reinsert them.
self.insert(msg["stream"][:-9], fresult)
else:
self.subscription_lock.release()
logging.warn(
"ConnectorDB:WS: Msg '%s' not subscribed! Subscriptions: %s",
msg["stream"], list(self.subscriptions.keys()))
def __on_ping(self, ws, data):
"""The server periodically sends us websocket ping messages to keep the connection alive. To
ensure that the connection to the server is still active, we memorize the most recent ping's time
and we periodically ensure that a ping was received in __ensure_ping"""
logging.debug("ConnectorDB:WS: ping")
self.lastpingtime = time.time()
def __ensure_ping(self):
"""Each time the server sends a ping message, we record the timestamp. If we haven't received a ping
within the given interval, then we assume that the connection was lost, close the websocket and
attempt to reconnect"""
logging.debug("ConnectorDB:WS: pingcheck")
if (time.time() - self.lastpingtime > self.connection_ping_timeout):
logging.warn("ConnectorDB:WS: Websocket ping timed out!")
if self.ws is not None:
self.ws.close()
self.__on_close(self.ws)
else:
# reset the ping timer
self.pingtimer = threading.Timer(self.connection_ping_timeout,
self.__ensure_ping)
self.pingtimer.daemon = True
self.pingtimer.start()
def __del__(self):
"""Make sure that all threads shut down when needed"""
self.disconnect()
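# --- Illustrative sketch (not part of the original file) ---
# The reconnect logic above grows the delay exponentially, caps it at a
# maximum, and adds +/-20% jitter so that many clients do not hammer the
# server at the same instant. A minimal standalone version of that timing
# calculation; the default constants here are assumptions for the sketch.
import random


def _next_reconnect_delay(current, minimum=1.0, maximum=60.0, backoff=2.0):
    """Return the next reconnect delay in seconds given the current delay."""
    delay = min(current * backoff, maximum)    # exponential growth, capped
    delay *= 1 + random.uniform(-0.2, 0.2)     # jitter spreads clients out
    return max(delay, minimum)                 # never drop below the floor
# Example: starting from 1s, the delay roughly doubles on each failed
# attempt until it reaches the 60s cap.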
| 39.913174
| 108
| 0.610082
|
9045c77f23b285dc2514de4d8ca14608c1a3c558
| 1,957
|
py
|
Python
|
aardvark/api/resource_provider.py
|
ttsiouts/aardvark
|
cbf29f332df86814dd581152faf863c0d29ae41c
|
[
"Apache-2.0"
] | null | null | null |
aardvark/api/resource_provider.py
|
ttsiouts/aardvark
|
cbf29f332df86814dd581152faf863c0d29ae41c
|
[
"Apache-2.0"
] | null | null | null |
aardvark/api/resource_provider.py
|
ttsiouts/aardvark
|
cbf29f332df86814dd581152faf863c0d29ae41c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2018 European Organization for Nuclear Research.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from aardvark.api.rest import placement as client
from aardvark.objects import capabilities
from aardvark.objects import resource_provider as rp_obj
from aardvark.objects import resources
class ResourceProvider(object):
def __init__(self, uuid, name):
self.uuid = uuid
self.name = name
self.client = client.PlacementClient()
@property
def usages(self):
usages = self.client.usages(self.uuid)
return resources.Resources(usages)
@property
def all_usages(self):
return self.client.all_usages()
@property
def inventories(self):
inventories = self.client.inventories(self.uuid)
return resources.Resources.obj_from_inventories(inventories)
@property
def resource_classes(self):
return self.client.resource_classes()
@property
def capabilities(self):
return capabilities.Capabilities(self.usages, self.inventories)
class ResourceProviderList(object):
def __init__(self, aggregates=None):
self.client = client.PlacementClient()
self.aggregates = aggregates
@property
def resource_providers(self):
rps = self.client.resource_providers(self.aggregates)
return [rp_obj.ResourceProvider(rp['uuid'], rp['name']) for rp in rps]
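# --- Illustrative sketch (not part of the original file) ---
# Hypothetical usage of the wrapper above. It needs a reachable placement
# service configured for aardvark, so it is left commented out; only
# properties defined in this module are used, and the uuid/name values are
# placeholders.
#
# provider = ResourceProvider("rp-uuid", "compute-node-1")
# print(provider.name, provider.usages, provider.inventories)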
| 31.564516
| 78
| 0.712315
|
c5833a05c23161bf57f6bd6da45f594aacf03c84
| 7,938
|
py
|
Python
|
python/paddle/fluid/tests/unittests/test_exponential_op.py
|
shangzhizhou/Paddle
|
f1d56b7722399dce93a1fc337db8f9523754c92e
|
[
"Apache-2.0"
] | 2
|
2019-05-16T03:09:06.000Z
|
2022-01-14T07:06:37.000Z
|
python/paddle/fluid/tests/unittests/test_exponential_op.py
|
zhangliya01/Paddle
|
2e4cb27927a3ea0f58b25d534e90ac68989e8897
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/tests/unittests/test_exponential_op.py
|
zhangliya01/Paddle
|
2e4cb27927a3ea0f58b25d534e90ac68989e8897
|
[
"Apache-2.0"
] | 2
|
2021-03-10T08:05:40.000Z
|
2021-03-11T14:30:14.000Z
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import paddle
import numpy as np
from op_test import OpTest
paddle.enable_static()
class TestExponentialOp1(OpTest):
def setUp(self):
self.op_type = "exponential"
self.config()
self.attrs = {"lambda": self.lam}
self.inputs = {'X': np.empty([1024, 1024], dtype=self.dtype)}
self.outputs = {'Out': np.ones([1024, 1024], dtype=self.dtype)}
def config(self):
self.lam = 0.5
self.dtype = "float64"
def test_check_output(self):
self.check_output_customized(self.verify_output)
def verify_output(self, outs):
hist1, _ = np.histogram(outs[0], range=(0, 5))
hist1 = hist1.astype("float32")
hist1 = hist1 / float(outs[0].size)
data_np = np.random.exponential(1. / self.lam, [1024, 1024])
hist2, _ = np.histogram(data_np, range=(0, 5))
hist2 = hist2.astype("float32")
hist2 = hist2 / float(data_np.size)
self.assertTrue(
np.allclose(
hist1, hist2, rtol=0.02),
"actual: {}, expected: {}".format(hist1, hist2))
def test_check_grad_normal(self):
self.check_grad(
['X'],
'Out',
user_defined_grads=[np.zeros(
[1024, 1024], dtype=self.dtype)],
user_defined_grad_outputs=[
np.random.rand(1024, 1024).astype(self.dtype)
])
class TestExponentialOp2(TestExponentialOp1):
def config(self):
self.lam = 0.25
self.dtype = "float32"
class TestExponentialAPI(unittest.TestCase):
def test_static(self):
with paddle.static.program_guard(paddle.static.Program(),
paddle.static.Program()):
x_np = np.full([10, 10], -1.)
x = paddle.static.data(name="X", shape=[10, 10], dtype='float64')
x.exponential_(1.0)
exe = paddle.static.Executor()
out = exe.run(paddle.static.default_main_program(),
feed={"X": x_np},
fetch_list=[x])
self.assertTrue(np.min(out) >= 0)
def test_dygraph(self):
paddle.disable_static()
x = paddle.full([10, 10], -1., dtype='float32')
x.exponential_(0.5)
self.assertTrue(np.min(x.numpy()) >= 0)
paddle.enable_static()
# Test GPU Fixed random number, which is generated by 'curandStatePhilox4_32_10_t'
def test_fixed_random_number(self):
if not paddle.is_compiled_with_cuda():
return
        # Note(zhouwei): The number of threads is determined by
        # 'multiProcessorCount * maxThreadsPerMultiProcessor'. Different GPUs
        # therefore have different numbers of threads, which results in
        # different random values. Only test on V100 GPU here.
if not "V100" in paddle.device.cuda.get_device_name():
return
print("Test Fixed Random number on V100 GPU------>")
paddle.disable_static()
paddle.set_device('gpu')
paddle.seed(2021)
x = paddle.empty([64, 3, 1024, 1024], dtype="float32")
x.exponential_(1.0)
x_np = x.numpy()
expect = [
0.80073667, 0.2249291, 0.07734892, 1.25392, 0.14013891, 0.45736602,
1.9735607, 0.30490234, 0.57100505, 0.8115938
]
self.assertTrue(np.allclose(x_np[0, 0, 0, 0:10], expect))
expect = [
1.4296371e+00, 9.5411777e-01, 5.2575850e-01, 2.4805880e-01,
1.2322118e-04, 8.4604341e-01, 2.1111444e-01, 1.4143821e+00,
2.8194717e-01, 1.1360573e+00
]
self.assertTrue(np.allclose(x_np[16, 1, 300, 200:210], expect))
expect = [
1.3448033, 0.35146526, 1.7380928, 0.32012638, 0.10396296,
0.51344526, 0.15308502, 0.18712929, 0.03888268, 0.20771872
]
self.assertTrue(np.allclose(x_np[32, 1, 600, 500:510], expect))
expect = [
0.5107464, 0.20970327, 2.1986802, 1.580056, 0.31036147, 0.43966478,
0.9056133, 0.30119267, 1.4797124, 1.4319834
]
self.assertTrue(np.allclose(x_np[48, 2, 900, 800:810], expect))
expect = [
3.4640615, 1.1019983, 0.41195083, 0.22681557, 0.291846, 0.53617656,
1.5791925, 2.4645927, 0.04094889, 0.9057725
]
self.assertTrue(np.allclose(x_np[63, 2, 1023, 1000:1010], expect))
x = paddle.empty([10, 10], dtype="float32")
x.exponential_(3.0)
x_np = x.numpy()
expect = [
0.02831675, 0.1691551, 0.6798956, 0.69347525, 0.0243443, 0.22180498,
0.30574575, 0.9839696, 0.2834912, 0.59420055
]
self.assertTrue(np.allclose(x_np[5, 0:10], expect))
x = paddle.empty([16, 2, 1024, 768], dtype="float64")
x.exponential_(0.25)
x_np = x.numpy()
expect = [
10.0541229, 12.67860643, 1.09850734, 7.35289643, 2.65471225,
3.86217432, 2.97902086, 2.92744479, 2.67927152, 0.19667352
]
self.assertTrue(np.allclose(x_np[0, 0, 0, 100:110], expect))
expect = [
0.68328125, 3.1454553, 0.92158376, 1.95842188, 1.05296941,
12.93242051, 5.20255978, 3.3588624, 1.57377174, 5.73194183
]
self.assertTrue(np.allclose(x_np[4, 0, 300, 190:200], expect))
expect = [
1.37973974, 3.45036798, 7.94625406, 1.62610973, 0.31032122,
4.13596493, 1.98494535, 1.13207041, 8.30592769, 2.81460147
]
self.assertTrue(np.allclose(x_np[8, 1, 600, 300:310], expect))
expect = [
2.27710811, 12.25003028, 2.96409124, 4.72405788, 0.67917249,
4.35856718, 0.46870976, 2.31120149, 9.61595826, 4.64446271
]
self.assertTrue(np.allclose(x_np[12, 1, 900, 500:510], expect))
expect = [
0.95883744, 1.57316361, 15.22524512, 20.49559882, 13.70008548,
3.29430143, 3.90390424, 0.9146657, 0.80972249, 0.33376219
]
self.assertTrue(np.allclose(x_np[15, 1, 1023, 750:760], expect))
x = paddle.empty([512, 768], dtype="float64")
x.exponential_(0.3)
x_np = x.numpy()
expect = [
8.79266704, 4.79596009, 2.75480243, 6.04670011, 0.35379556,
0.76864868, 3.17428251, 0.26556859, 12.22485885, 10.51690383
]
self.assertTrue(np.allclose(x_np[0, 200:210], expect))
expect = [
5.6341126, 0.52243418, 5.36410796, 6.83672002, 11.9243311,
5.85985566, 5.75169548, 0.13877972, 6.1348385, 3.82436519
]
self.assertTrue(np.allclose(x_np[300, 400:410], expect))
expect = [
4.94883581, 0.56345306, 0.85841585, 1.92287801, 6.10036656,
1.19524847, 3.64735434, 5.19618716, 2.57467974, 3.49152791
]
self.assertTrue(np.allclose(x_np[500, 700:710], expect))
x = paddle.empty([10, 10], dtype="float64")
x.exponential_(4.0)
x_np = x.numpy()
expect = [
0.15713826, 0.56395964, 0.0680941, 0.00316643, 0.27046853,
0.19852724, 0.12776634, 0.09642974, 0.51977551, 1.33739699
]
self.assertTrue(np.allclose(x_np[5, 0:10], expect))
paddle.enable_static()
if __name__ == "__main__":
unittest.main()
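# --- Illustrative note (not part of the original test file) ---
# verify_output() above checks the op statistically: it histograms the op's
# samples and numpy.random.exponential samples over the same bins and asserts
# the normalized histograms are close. A simpler sanity check, shown here
# only for illustration, is that the sample mean of Exponential(lam) is
# approximately 1/lam:
#
#   lam = 0.5
#   samples = np.random.exponential(1.0 / lam, size=10**6)
#   assert abs(samples.mean() - 1.0 / lam) < 0.01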
| 37.620853
| 86
| 0.588183
|
30781edbf3e687af6945178cb3935bad480d9795
| 1,321
|
py
|
Python
|
rlkit/samplers/in_place.py
|
NagisaZj/state-marginal-matching
|
007de43eacbb1fc976e9df082d6679e52a159023
|
[
"MIT"
] | 62
|
2019-06-18T20:35:55.000Z
|
2022-03-05T12:27:06.000Z
|
rlkit/samplers/in_place.py
|
fuxianh/state-marginal-matching
|
41934dfe488be2832ee99449b6f5887e4fe0b91e
|
[
"MIT"
] | null | null | null |
rlkit/samplers/in_place.py
|
fuxianh/state-marginal-matching
|
41934dfe488be2832ee99449b6f5887e4fe0b91e
|
[
"MIT"
] | 12
|
2019-06-19T12:02:14.000Z
|
2021-11-21T18:56:07.000Z
|
from rlkit.samplers.util import rollout
class InPlacePathSampler(object):
"""
    A sampler that does not do any serialization for sampling. Instead, it just uses
the current policy and environment as-is.
WARNING: This will affect the environment! So
```
sampler = InPlacePathSampler(env, ...)
    sampler.obtain_samples()  # this has side-effects: env will change!
```
"""
def __init__(self, env, policy, max_samples, max_path_length):
self.env = env
self.policy = policy
self.max_path_length = max_path_length
self.max_samples = max_samples
assert max_samples >= max_path_length, "Need max_samples >= max_path_length"
def start_worker(self):
pass
def shutdown_worker(self):
pass
def start_new_rollout(self):
pass
def handle_rollout_ending(self):
pass
def obtain_samples(self):
paths = []
n_steps_total = 0
while n_steps_total + self.max_path_length <= self.max_samples:
self.start_new_rollout()
path = rollout(
self.env, self.policy, max_path_length=self.max_path_length
)
self.handle_rollout_ending()
paths.append(path)
n_steps_total += len(path['observations'])
return paths
| 29.355556
| 84
| 0.635125
|
27c32d981cb770f2971aaf47e18041da36da5b03
| 7,417
|
py
|
Python
|
dojo/tools/fortify/parser.py
|
tvtongerloo/django-DefectDojo
|
77f241afd7773a95a73621fcc60971263885337a
|
[
"BSD-3-Clause"
] | 1
|
2021-01-19T17:25:57.000Z
|
2021-01-19T17:25:57.000Z
|
dojo/tools/fortify/parser.py
|
tvtongerloo/django-DefectDojo
|
77f241afd7773a95a73621fcc60971263885337a
|
[
"BSD-3-Clause"
] | null | null | null |
dojo/tools/fortify/parser.py
|
tvtongerloo/django-DefectDojo
|
77f241afd7773a95a73621fcc60971263885337a
|
[
"BSD-3-Clause"
] | null | null | null |
__guide__ = 'aaronweaver'
__author__ = 'Rajarshi333'
from defusedxml import ElementTree
from dateutil import parser
import re
from dojo.models import Finding
import logging
logger = logging.getLogger(__name__)
class FortifyXMLParser(object):
language_list = []
def __init__(self, filename, test):
fortify_scan = ElementTree.parse(filename)
root = fortify_scan.getroot()
# Get Date
date_string = root.getchildren()[5].getchildren()[1].getchildren()[2].text
date_list = date_string.split()[1:4]
date_act = "".join(date_list)
find_date = parser.parse(date_act)
# Get Language
lang_string = root[8][4][2].text
lang_need_string = re.findall("^.*com.fortify.sca.Phase0HigherOrder.Languages.*$",
lang_string, re.MULTILINE)
lang_my_string = lang_need_string[0]
language = lang_my_string.split('=')[1]
if language not in self.language_list:
self.language_list.append(language)
# Get Category Information:
# Abstract, Explanation, Recommendation, Tips
cat_meta = {}
# Get all issues
issues = []
meta_pair = ({}, {})
issue_pair = ([], [])
for ReportSection in root.findall('ReportSection'):
if ReportSection.findtext('Title') in ["Results Outline", "Issue Count by Category"]:
place = 0 if ReportSection.findtext('Title') == "Results Outline" else 1
# Get information on the vulnerability like the Abstract, Explanation,
# Recommendation, and Tips
for group in ReportSection.iter("GroupingSection"):
title = group.findtext("groupTitle")
maj_attr_summary = group.find("MajorAttributeSummary")
if maj_attr_summary:
meta_info = maj_attr_summary.findall("MetaInfo")
meta_pair[place][title] = {x.findtext("Name"): x.findtext("Value")
for x in meta_info}
# Collect all issues
for issue in ReportSection.iter("Issue"):
issue_pair[place].append(issue)
if len(issue_pair[0]) > len(issue_pair[1]):
issues = issue_pair[0]
cat_meta = meta_pair[0]
else:
issues = issue_pair[1]
cat_meta = meta_pair[1]
# All issues obtained, create a map for reference
issue_map = {}
issue_id = "N/A"
try:
for issue in issues:
issue_id = issue.attrib['iid']
details = {
"Category": issue.find("Category").text,
"Folder": issue.find("Folder").text, "Kingdom": issue.find("Kingdom").text,
"Abstract": issue.find("Abstract").text,
"Friority": issue.find("Friority").text,
"FileName": issue.find("Primary").find("FileName").text,
"FilePath": issue.find("Primary").find("FilePath").text,
"LineStart": issue.find("Primary").find("LineStart").text}
if issue.find("Primary").find("Snippet"):
details["Snippet"] = issue.find("Primary").find("Snippet").text
else:
details["Snippet"] = "n/a"
if issue.find("Source"):
source = {
"FileName": issue.find("Source").find("FileName").text,
"FilePath": issue.find("Source").find("FilePath").text,
"LineStart": issue.find("Source").find("LineStart").text,
"Snippet": issue.find("Source").find("Snippet").text}
details["Source"] = source
issue_map.update({issue.attrib['iid']: details})
except AttributeError:
logger.warning("XML Parsing error on issue number: %s", issue_id)
raise
# map created
self.items = []
dupes = set()
for issue_key, issue in issue_map.items():
title = self.format_title(issue["Category"], issue["FileName"], issue["LineStart"])
if title not in dupes:
self.items.append(Finding(
title=title,
severity=issue["Friority"],
numerical_severity=Finding.get_numerical_severity(issue["Friority"]),
file_path=issue['FilePath'],
line_number=int(issue['LineStart']),
line=int(issue['LineStart']),
static_finding=True,
active=False,
verified=False,
test=test,
date=find_date,
description=self.format_description(issue, cat_meta),
mitigation=self.format_mitigation(issue, cat_meta),
unique_id_from_tool=issue_key
))
dupes.add(title)
def format_title(self, category, filename, line_no):
"""
Builds the title much like it is represented in Fortify
:param category: Basically the title of the issue in the code
:param filename: File where it is found
:param line_no: Line number of offending line
:return: str
"""
return "{} - {}: {}".format(category, filename, line_no)
def format_mitigation(self, issue, meta_info) -> str:
"""
Built from the meta_info of a category. All items of the same category will
have the same information in it
:param issue: Issue dictionary
:param meta_info: Meta_info dictionary
:return: str
"""
mitigation = ""
recommendation = meta_info[issue["Category"]].get("Recommendations")
if recommendation:
mitigation += "###Recommendation:\n {}\n".format(recommendation)
tips = meta_info[issue["Category"]].get("Tips")
if tips:
mitigation += "###Tips:\n {}".format(tips)
return mitigation
def format_description(self, issue, meta_info) -> str:
"""
Returns a formatted Description. This will contain information about the category,
snippet from the code, including the file and line number. If there is source information
it will also include that. Adds explanation of finding from the meta info
:param issue: Issue Dictionary
:param meta_info: Meta Dictionary
:return: str
"""
desc = "##Catagory: {}\n".format(issue["Category"])
desc += "###Abstract:\n{}\n###Snippet:\n**File: {}: {}**\n```\n{}\n```\n".format(
issue["Abstract"], issue["FileName"], issue["LineStart"], issue["Snippet"])
explanation = meta_info[issue["Category"]].get("Explanation")
source = issue.get("Source")
if source:
desc += "##Source:\nThis snippet provides more context on the execution path that " \
"leads to this finding. \n" \
"####Snippet:\n**File: {}: {}**\n```\n{}\n```\n".format(
source["FileName"], source["LineStart"], source["Snippet"])
if explanation:
desc += "##Explanation:\n {}".format(explanation)
return desc
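# --- Illustrative sketch (not part of the original file) ---
# Hypothetical use of the parser above inside DefectDojo: ``report.xml`` is a
# Fortify XML report path and ``test`` a dojo Test instance; both names are
# placeholders for the sketch, so it is left commented out.
#
# parser = FortifyXMLParser("report.xml", test)
# for finding in parser.items:
#     print(finding.title, finding.severity, finding.file_path)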
| 42.872832
| 97
| 0.549279
|
1e3f7e9e0281fcaf4adfb0f79d76300b40a74a1f
| 14,511
|
py
|
Python
|
simple/1.0.2/__sessions.py
|
Jesse3692/flask_note
|
419d7671677e8ea0d85ebd5da82267717e10813c
|
[
"MIT"
] | null | null | null |
simple/1.0.2/__sessions.py
|
Jesse3692/flask_note
|
419d7671677e8ea0d85ebd5da82267717e10813c
|
[
"MIT"
] | null | null | null |
simple/1.0.2/__sessions.py
|
Jesse3692/flask_note
|
419d7671677e8ea0d85ebd5da82267717e10813c
|
[
"MIT"
] | null | null | null |
"""sessions"""
import hashlib
import warnings
from collections import MutableMapping
from datetime import datetime
from werkzeug.datastructures import CallbackDict
from itsdangerous import BadSignature, URLSafeTimedSerializer
from __tag import TaggedJSONSerializer
from __helpers import is_ip, total_seconds
session_json_serializer = TaggedJSONSerializer()
class SessionMixin(MutableMapping): # HACK pylint: disable=abstract-method
"""Expands a basic dictionary with session attributes."""
@property
def permanent(self):
"""This reflects the ``'_permanent'`` key in the dict."""
return self.get('_permanent', False)
@permanent.setter
def permanent(self, value):
self['_permanent'] = bool(value)
#: Some implementations can detect whether a session is newly
#: created, but that is not guaranteed. Use with caution. The mixin
    #: default is hard-coded ``False``.
new = False
#: Some implementations can detect changes to the session and set
#: this when that happens. The mixin default is hard coded to
#: ``True``.
modified = True
#: Some implementations can detect when session data is read or
#: written and set this when that happens. The mixin default is hard
#: coded to ``True``.
accessed = True
class SecureCookieSession(CallbackDict, SessionMixin):
"""Base class for sessions based on signed cookies.
This session backend will set the :attr:`modified` and
:attr:`accessed` attributes. It cannot reliably track whether a
session is new (vs. empty), so :attr:`new` remains hard coded to
``False``.
"""
#: When data is changed, this is set to ``True``. Only the session
#: dictionary itself is tracked; if the session contains mutable
#: data (for example a nested dict) then this must be set to
#: ``True`` manually when modifying that data. The session cookie
#: will only be written to the response if this is ``True``.
modified = False
#: When data is read or written, this is set to ``True``. Used by
    #: :class:`.SecureCookieSessionInterface` to add a ``Vary: Cookie``
#: header, which allows caching proxies to cache different pages for
#: different users.
accessed = False
def __init__(self, initial=None):
def on_update(self):
self.modified = True
self.accessed = True
super(SecureCookieSession, self).__init__(initial, on_update)
def __getitem__(self, key):
self.accessed = True
return super(SecureCookieSession, self).__getitem__(key)
def get(self, key, default=None):
self.accessed = True
return super(SecureCookieSession, self).get(key, default)
def setdefault(self, key, default=None):
self.accessed = True
return super(SecureCookieSession, self).setdefault(key, default)
class NullSession(SecureCookieSession):
"""Class used to generate nicer error messages if sessions are not
available. Will still allow read-only access to the empty session
but fail on setting.
"""
def _fail(self, *args, **kwargs):
raise RuntimeError('The session is unavailable because no secret '
'key was set. Set the secret_key on the '
'application to something unique and secret.')
__setitem__ = __delitem__ = clear = pop = popitem = \
update = setdefault = _fail
del _fail
class SessionInterface(object):
"""The basic interface you have to implement in order to replace the
default session interface which uses werkzeug's securecookie
implementation. The only methods you have to implement are
:meth:`open_session` and :meth:`save_session`, the others have
useful defaults which you don't need to change.
The session object returned by the :meth:`open_session` method has to
provide a dictionary like interface plus the properties and methods
from the :class:`SessionMixin`. We recommend just subclassing a dict
and adding that mixin::
class Session(dict, SessionMixin):
pass
If :meth:`open_session` returns ``None`` Flask will call into
:meth:`make_null_session` to create a session that acts as replacement
if the session support cannot work because some requirement is not
fulfilled. The default :class:`NullSession` class that is created
will complain that the secret key was not set.
To replace the session interface on an application all you have to do
is to assign :attr:`flask.Flask.session_interface`::
app = Flask(__name__)
app.session_interface = MySessionInterface()
.. versionadded:: 0.8
"""
#: :meth:`make_null_session` will look here for the class that should
#: be created when a null session is requested. Likewise the
#: :meth:`is_null_session` method will perform a typecheck against
#: this type.
null_session_class = NullSession
#: A flag that indicates if the session interface is pickle based.
#: This can be used by Flask extensions to make a decision in regards
#: to how to deal with the session object.
#:
#: .. versionadded:: 0.10
pickle_based = False
def make_null_session(self, app): # pylint: disable=unused-argument
"""Creates a null session which acts as a replacement object if the
real session support could not be loaded due to a configuration
error. This mainly aids the user experience because the job of the
null session is to still support lookup without complaining but
modifications are answered with a helpful error message of what
failed.
This creates an instance of :attr:`null_session_class` by default.
"""
return self.null_session_class()
def is_null_session(self, obj):
"""Checks if a given object is a null session. Null sessions are
not asked to be saved.
This checks if the object is an instance of :attr:`null_session_class`
by default.
"""
return isinstance(obj, self.null_session_class)
def get_cookie_domain(self, app):
"""Returns the domain that should be set for the session cookie.
Uses ``SESSION_COOKIE_DOMAIN`` if it is configured, otherwise
falls back to detecting the domain based on ``SERVER_NAME``.
Once detected (or if not set at all), ``SESSION_COOKIE_DOMAIN`` is
updated to avoid re-running the logic.
"""
rv = app.config['SESSION_COOKIE_DOMAIN'] # pylint: disable=invalid-name
# set explicitly, or cached from SERVER_NAME detection
# if False, return None
if rv is not None:
return rv if rv else None
rv = app.config['SERVER_NAME'] # pylint: disable=invalid-name
# server name not set, cache False to return none next time
if not rv:
app.config['SESSION_COOKIE_DOMAIN'] = False
return None
# chop off the port which is usually not supported by browsers
# remove any leading '.' since we'll add that later
rv = rv.rsplit(':', 1)[0].lstrip('.') # pylint: disable=invalid-name
if '.' not in rv:
# Chrome doesn't allow names without a '.'
# this should only come up with localhost
# hack around this by not setting the name, and show a warning
warnings.warn(
'"{rv}" is not a valid cookie domain, it must contain a ".".'
' Add an entry to your hosts file, for example'
' "{rv}.localdomain", and use that instead.'.format(rv=rv)
)
app.config['SESSION_COOKIE_DOMAIN'] = False
return None
ip = is_ip(rv) # pylint: disable=invalid-name
if ip:
warnings.warn(
'The session cookie domain is an IP address. This may not work'
' as intended in some browsers. Add an entry to your hosts'
' file, for example "localhost.localdomain", and use that'
' instead.'
)
# if this is not an ip and app is mounted at the root, allow subdomain
# matching by adding a '.' prefix
if self.get_cookie_path(app) == '/' and not ip:
rv = '.' + rv # pylint: disable=invalid-name
app.config['SESSION_COOKIE_DOMAIN'] = rv
return rv
def get_cookie_path(self, app):
"""Returns the path for which the cookie should be valid. The
default implementation uses the value from the ``SESSION_COOKIE_PATH``
config var if it's set, and falls back to ``APPLICATION_ROOT`` or
uses ``/`` if it's ``None``.
"""
return app.config['SESSION_COOKIE_PATH'] \
or app.config['APPLICATION_ROOT']
def get_cookie_httponly(self, app):
"""Returns True if the session cookie should be httponly. This
currently just returns the value of the ``SESSION_COOKIE_HTTPONLY``
config var.
"""
return app.config['SESSION_COOKIE_HTTPONLY']
def get_cookie_secure(self, app):
"""Returns True if the cookie should be secure. This currently
just returns the value of the ``SESSION_COOKIE_SECURE`` setting.
"""
return app.config['SESSION_COOKIE_SECURE']
def get_cookie_samesite(self, app):
"""Return ``'Strict'`` or ``'Lax'`` if the cookie should use the
``SameSite`` attribute. This currently just returns the value of
the :data:`SESSION_COOKIE_SAMESITE` setting.
"""
return app.config['SESSION_COOKIE_SAMESITE']
def get_expiration_time(self, app, session):
"""A helper method that returns an expiration date for the session
or ``None`` if the session is linked to the browser session. The
default implementation returns now + the permanent session
lifetime configured on the application.
"""
if session.permanent:
return datetime.utcnow() + app.permanent_session_lifetime
def should_set_cookie(self, app, session):
"""Used by session backends to determine if a ``Set-Cookie`` header
should be set for this session cookie for this response. If the session
has been modified, the cookie is set. If the session is permanent and
the ``SESSION_REFRESH_EACH_REQUEST`` config is true, the cookie is
always set.
This check is usually skipped if the session was deleted.
.. versionadded:: 0.11
"""
return session.modified or (
session.permanent and app.config['SESSION_REFRESH_EACH_REQUEST']
)
def open_session(self, app, request):
"""This method has to be implemented and must either return ``None``
in case the loading failed because of a configuration error or an
instance of a session object which implements a dictionary like
interface + the methods and attributes on :class:`SessionMixin`.
"""
raise NotImplementedError()
def save_session(self, app, session, response):
"""This is called for actual sessions returned by :meth:`open_session`
at the end of the request. This is still called during a request
context so if you absolutely need access to the request you can do
that.
"""
raise NotImplementedError()
class SecureCookieSessionInterface(SessionInterface):
"""The default session interface that stores sessions in signed cookies
through the :mod:`itsdangerous` module.
"""
#: the salt that should be applied on top of the secret key for the
#: signing of cookie based sessions.
salt = 'cookie-session'
#: the hash function to use for the signature. The default is sha1
digest_method = staticmethod(hashlib.sha1)
#: the name of the itsdangerous supported key derivation. The default
#: is hmac.
key_derivation = 'hmac'
#: A python serializer for the payload. The default is a compact
#: JSON derived serializer with support for some extra Python types
#: such as datetime objects or tuples.
serializer = session_json_serializer
session_class = SecureCookieSession
def get_signing_serializer(self, app):
"""#TODO"""
if not app.secret_key:
return None
signer_kwargs = dict(
key_derivation=self.key_derivation,
digest_method=self.digest_method
)
return URLSafeTimedSerializer(app.secret_key, salt=self.salt,
serializer=self.serializer,
signer_kwargs=signer_kwargs)
def open_session(self, app, request):
s = self.get_signing_serializer(app) # pylint: disable=invalid-name
if s is None:
return None
val = request.cookies.get(app.session_cookie_name)
if not val:
return self.session_class()
max_age = total_seconds(app.permanent_session_lifetime)
try:
data = s.loads(val, max_age=max_age)
return self.session_class(data)
except BadSignature:
return self.session_class()
def save_session(self, app, session, response):
domain = self.get_cookie_domain(app)
path = self.get_cookie_path(app)
# If the session is modified to be empty, remove the cookie.
# If the session is empty, return without setting the cookie.
if not session:
if session.modified:
response.delete_cookie(
app.session_cookie_name,
domain=domain,
path=path
)
return
# Add a "Vary: Cookie" header if the session was accessed at all.
if session.accessed:
response.vary.add('Cookie')
if not self.should_set_cookie(app, session):
return
httponly = self.get_cookie_httponly(app)
secure = self.get_cookie_secure(app)
samesite = self.get_cookie_samesite(app)
expires = self.get_expiration_time(app, session)
val = self.get_signing_serializer(app).dumps(dict(session))
response.set_cookie(
app.session_cookie_name,
val,
expires=expires,
httponly=httponly,
domain=domain,
path=path,
secure=secure,
samesite=samesite
)
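# --- Illustrative sketch (not part of the original module) ---
# A standalone demonstration of the signing scheme the interface above relies
# on: session data is serialized and signed with itsdangerous'
# URLSafeTimedSerializer, then verified (and age-checked) when read back.
# The secret key, salt and payload below are placeholders for the sketch.
#
# from itsdangerous import URLSafeTimedSerializer
# s = URLSafeTimedSerializer("dev-secret-key", salt="cookie-session")
# cookie_value = s.dumps({"user_id": 42, "_permanent": True})
# data = s.loads(cookie_value, max_age=3600)  # raises BadSignature if tampered
# assert data["user_id"] == 42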
| 38.490716
| 80
| 0.650059
|
878005b42eeceabc3ba00ba10a8074a6bc3c0aef
| 503
|
py
|
Python
|
src/scenarios/50consoletemplate/test.py
|
eerhardt/performance
|
683e4aa79d18edf4c0fa1090043da4b8a88e6bcc
|
[
"MIT"
] | null | null | null |
src/scenarios/50consoletemplate/test.py
|
eerhardt/performance
|
683e4aa79d18edf4c0fa1090043da4b8a88e6bcc
|
[
"MIT"
] | null | null | null |
src/scenarios/50consoletemplate/test.py
|
eerhardt/performance
|
683e4aa79d18edf4c0fa1090043da4b8a88e6bcc
|
[
"MIT"
] | null | null | null |
'''
C# Console app
'''
import sys
import os
from shared.runner import TestTraits, Runner
SCENARIONAME = '.NET Core 5.0 Console Template'
EXENAME = '50consoletemplate'
if __name__ == "__main__":
traits = TestTraits(scenarioname=SCENARIONAME,
exename=EXENAME,
startupmetric='TimeToMain',
startup=True,
guiapp='false', # string passed through to tool
)
Runner(traits).run()
| 26.473684
| 71
| 0.562624
|
78ef2995a70798f4b44fe38845b09d20d93eb744
| 12,903
|
py
|
Python
|
plugins/translate.py
|
joshgyn/Google_Translater_V2
|
dd667bd5297718cfe98a5797d9902ac8ac28be1e
|
[
"Apache-2.0"
] | null | null | null |
plugins/translate.py
|
joshgyn/Google_Translater_V2
|
dd667bd5297718cfe98a5797d9902ac8ac28be1e
|
[
"Apache-2.0"
] | null | null | null |
plugins/translate.py
|
joshgyn/Google_Translater_V2
|
dd667bd5297718cfe98a5797d9902ac8ac28be1e
|
[
"Apache-2.0"
] | null | null | null |
from googletrans import Translator
from pyrogram import Client, filters
from pyrogram.types import (
InlineKeyboardButton,
InlineKeyboardMarkup
)
from helper.database import find , insert
from helper.list import list
@Client.on_message(filters.private & filters.command(['start']))
async def start(client, message):
insert(int(message.chat.id))
await message.reply_text(text =f"Hello **{message.from_user.first_name }** \n\n __I am simple Google Translater Bot \n I can translate any language to you selected language__",reply_to_message_id = message.message_id , parse_mode="markdown", reply_markup=InlineKeyboardMarkup( [ [ InlineKeyboardButton("Support 🇮🇳" ,url="https://t.me/lntechnical") ], [InlineKeyboardButton("Subscribe 🧐", url="https://youtube.com/c/LNtechnical"),InlineKeyboardButton("How To Use",url = "https://youtu.be/dUYvenXiYKE") ] ] ) )
@Client.on_message(filters.private & filters.text )
async def echo(client, message):
keybord1= InlineKeyboardMarkup( [
[
InlineKeyboardButton("Afrikaans", callback_data='af'),
InlineKeyboardButton("Albanian", callback_data='sq'),
InlineKeyboardButton("Amharic",callback_data ='am'),
],
[ InlineKeyboardButton("Arabic", callback_data='ar'),
InlineKeyboardButton("Armenian", callback_data='hy'),
InlineKeyboardButton("Azerbaijani",callback_data = 'az'),
],
[InlineKeyboardButton("Basque",callback_data ="eu"),
InlineKeyboardButton("Belarusian",callback_data ="be"),
InlineKeyboardButton("Bengali",callback_data="bn")],
[InlineKeyboardButton("Bosnian",callback_data = "bs"),
InlineKeyboardButton("Bulgarian",callback_data ="bg"),
InlineKeyboardButton("Catalan",callback_data = "ca")
],
[
InlineKeyboardButton("Corsican",callback_data ="co"),
InlineKeyboardButton("Croatian",callback_data = "hr"),
InlineKeyboardButton("Czech", callback_data = "cs"),
],
[ InlineKeyboardButton("Danish",callback_data = "da"),
InlineKeyboardButton("Dutch",callback_data = "nl"),
InlineKeyboardButton("Esperanto",callback_data = "eo"),
],
[InlineKeyboardButton(" Next --->",callback_data = "page2")
]
] )
try:
code =find(int(message.chat.id))
except Exception as e:
await message.reply_text(" Error : {e}\nclick /start ........")
return
if code :
try:
translator = Translator()
translation = translator.translate(message.text,dest = code)
except Exception as e:
await message.reply_text(f"Error : {e}")
return
try:
for i in list:
if list[i]==translation.src:
fromt = i
if list[i] == translation.dest:
to = i
await message.reply_text(f"Translated from **{fromt.capitalize()}** To **{to.capitalize()}**\n\n```{translation.text}```\n\n join @lntechnical")
except Exception as e:
await message.reply_text(f"Translated from **{translation.src}** To **{translation.dest}**\n\n```{translation.text}```\n\n join @lntechnical")
else:
await message.reply_text("Select language 👇",reply_to_message_id = message.message_id, reply_markup =keybord1)
@Client.on_callback_query()
async def translate_text(bot,update):
keybord1= InlineKeyboardMarkup( [
[
InlineKeyboardButton("Afrikaans", callback_data='af'),
InlineKeyboardButton("Albanian", callback_data='sq'),
InlineKeyboardButton("Amharic",callback_data ='am'),
],
[ InlineKeyboardButton("Arabic", callback_data='ar'),
InlineKeyboardButton("Armenian", callback_data='hy'),
InlineKeyboardButton("Azerbaijani",callback_data = 'az'),
],
[InlineKeyboardButton("Basque",callback_data ="eu"),
InlineKeyboardButton("Belarusian",callback_data ="be"),
InlineKeyboardButton("Bengali",callback_data="bn")],
[InlineKeyboardButton("Bosnian",callback_data = "bs"),
InlineKeyboardButton("Bulgarian",callback_data ="bg"),
InlineKeyboardButton("Catalan",callback_data = "ca")
],
[
InlineKeyboardButton("Corsican",callback_data ="co"),
InlineKeyboardButton("Croatian",callback_data = "hr"),
InlineKeyboardButton("Czech", callback_data = "cs"),
],
[ InlineKeyboardButton("Danish",callback_data = "da"),
InlineKeyboardButton("Dutch",callback_data = "nl"),
InlineKeyboardButton("Esperanto",callback_data = "eo"),
],
[InlineKeyboardButton(" Next --->",callback_data = "page2")
]
] )
keybord2= InlineKeyboardMarkup([
[InlineKeyboardButton("English",callback_data = "en"),
InlineKeyboardButton("Estonian",callback_data = "et"),
InlineKeyboardButton("Finnish",callback_data = "fi")
],
[InlineKeyboardButton("French",callback_data = "fr"),
InlineKeyboardButton("Frisian",callback_data = "fy"),
InlineKeyboardButton("Galician",callback_data = "gl")
],
[InlineKeyboardButton("Georgian",callback_data = "ka"),
InlineKeyboardButton("German",callback_data = "de"),
InlineKeyboardButton("Greek",callback_data = "el")
],
[InlineKeyboardButton("Gujarati",callback_data = "gu"),
InlineKeyboardButton("Haitian Creole",callback_data = "ht"),
InlineKeyboardButton("Hausa",callback_data ="ha")
],
[InlineKeyboardButton("Hindi",callback_data = "hi"),
InlineKeyboardButton("Hungarian",callback_data = "hu"),
InlineKeyboardButton("Icelandic",callback_data = "is")
],
[InlineKeyboardButton("Igbo",callback_data = "ig"),
InlineKeyboardButton("Indonesian",callback_data = "id"),
InlineKeyboardButton("Irish",callback_data = "ga")
],
[InlineKeyboardButton("<--- Back",callback_data = "page1"),
InlineKeyboardButton(" Next --->",callback_data = "page3"),
]
])
keybord3 = InlineKeyboardMarkup([
[ InlineKeyboardButton("Italian",callback_data = "it"),
InlineKeyboardButton("Japanese",callback_data = "ja"),
InlineKeyboardButton("Javanese",callback_data = "jv")
],
[InlineKeyboardButton("Kannada",callback_data = "kn"),
InlineKeyboardButton("Kazakh",callback_data = "kk"),
InlineKeyboardButton("Khmer",callback_data = "km")
],
[InlineKeyboardButton("Kinyarwanda",callback_data = "rw"),
InlineKeyboardButton("Korean",callback_data ="ko"),
InlineKeyboardButton("Kurdish",callback_data = "ku")
],
[ InlineKeyboardButton("Kyrgyz",callback_data ="ky"),
InlineKeyboardButton("Lao",callback_data = "lo"),
InlineKeyboardButton("Latin",callback_data = "la")
],
[InlineKeyboardButton("Latvian",callback_data = "lv"),
InlineKeyboardButton('Lithuanian',callback_data ="lt"),
InlineKeyboardButton("Luxembourgish",callback_data = "lb")
],
[InlineKeyboardButton("Macedonian",callback_data = "mk"),
InlineKeyboardButton("Malagasy",callback_data ="mg"),
InlineKeyboardButton("Malay",callback_data ="ms")
],
[InlineKeyboardButton("<--- Back",callback_data = "page2"),
InlineKeyboardButton(" Next --->",callback_data = "page4")
]
])
keybord4 = InlineKeyboardMarkup([
[InlineKeyboardButton("Malayalam",callback_data = "ml"),
InlineKeyboardButton("Maltese",callback_data = "mt"),
InlineKeyboardButton("Maori",callback_data = "mi")
],
[InlineKeyboardButton("Marathi",callback_data = "mr"),
InlineKeyboardButton("Mongolian",callback_data = "mn"),
InlineKeyboardButton("Myanmar (Burmese)",callback_data = "my")
],
[InlineKeyboardButton("Nepali",callback_data ="ne"),
InlineKeyboardButton("Norwegian",callback_data = "no"),
InlineKeyboardButton("Nyanja (Chichewa)",callback_data = "ny")
],
[InlineKeyboardButton("Odia",callback_data = "or"),
InlineKeyboardButton("Pashto",callback_data = "ps"),
InlineKeyboardButton("Persian",callback_data = "fa"),
],
[InlineKeyboardButton("Polish",callback_data = "pl"),
InlineKeyboardButton("Portuguese",callback_data = "pt"),
InlineKeyboardButton("Punjabi",callback_data = "pa"),
],
[InlineKeyboardButton("Romanian",callback_data = "ro"),
InlineKeyboardButton("Russian",callback_data = "ru"),
InlineKeyboardButton("Samoan",callback_data= "sm"),
],
[InlineKeyboardButton("<--- Back",callback_data = "page3"),
InlineKeyboardButton("Next --->",callback_data = "page5")
]
])
keybord5 = InlineKeyboardMarkup([
[InlineKeyboardButton("Scots Gaelic",callback_data = "gd"),
InlineKeyboardButton("Serbian",callback_data = "sr"),
InlineKeyboardButton("Sesotho",callback_data = "st")
],
[InlineKeyboardButton("Shona",callback_data ="sn"),
InlineKeyboardButton("Sindhi",callback_data ="sd"),
InlineKeyboardButton("Sinhala (Sinhalese)",callback_data = "si")
],
[InlineKeyboardButton("Slovak",callback_data = "sk"),
InlineKeyboardButton("Slovenian",callback_data = "sl"),
InlineKeyboardButton("Somali",callback_data = "so")
],
[InlineKeyboardButton("Spanish",callback_data = "es"),
InlineKeyboardButton("Sundanese",callback_data ="su"),
InlineKeyboardButton("Swahili",callback_data ="sw")
],
[InlineKeyboardButton("Swedish",callback_data = "sv"),
InlineKeyboardButton("Tagalog (Filipino)",callback_data ='tl'),
InlineKeyboardButton("Tajik",callback_data = "tg")
],
[InlineKeyboardButton("Tamil",callback_data = "ta"),
InlineKeyboardButton("Tatar",callback_data = "tt"),
InlineKeyboardButton("Telugu",callback_data = "te")
],
[InlineKeyboardButton("<--- Back",callback_data = "page4"),
InlineKeyboardButton("Next --->",callback_data = "page6")
] ])
keybord6 = InlineKeyboardMarkup([
[InlineKeyboardButton("Thai",callback_data = "th"),
InlineKeyboardButton("Turkish",callback_data = "tr"),
InlineKeyboardButton("!Not Valid",callback_data ="en")
],
[InlineKeyboardButton("Ukrainian",callback_data = "uk"),
InlineKeyboardButton("Urdu",callback_data = "ur"),
InlineKeyboardButton("Uyghur",callback_data ="ug")
],
[InlineKeyboardButton("Uzbek",callback_data = "uz"),
InlineKeyboardButton("Vietnamese",callback_data ="vi"),
InlineKeyboardButton("Welsh",callback_data = "cy")
],
[InlineKeyboardButton("Xhosa",callback_data = "xh"),
InlineKeyboardButton("Yiddish",callback_data = "yi"),
InlineKeyboardButton("Yoruba",callback_data = "yo")],
[InlineKeyboardButton("<--- Back",callback_data = "page5")
] ])
tr_text = update.message.reply_to_message.text
cb_data = update.data
if cb_data== "page2":
await update.message.edit("Select language 👇",reply_markup = keybord2)
elif cb_data == "page1":
await update.message.edit("Select language 👇",reply_markup =keybord1)
elif cb_data =="page3":
await update.message.edit("Select language 👇",reply_markup =keybord3)
elif cb_data == "page4":
await update.message.edit("Select language 👇",reply_markup =keybord4)
elif cb_data =="page5":
await update.message.edit("Select language 👇",reply_markup =keybord5)
elif cb_data =="page6":
await update.message.edit("Select language 👇",reply_markup =keybord6)
else :
try:
translator = Translator()
translation = translator.translate(tr_text,dest = cb_data)
except Exception as e:
await update.message.edit(f"Error : {e}")
return
try:
for i in list:
if list[i]==translation.src:
fromt = i
if list[i] == translation.dest:
to = i
await update.message.edit(f"Translated from **{fromt.capitalize()}** To **{to.capitalize()}**\n\n```{translation.text}```\n\n join @lntechnical")
except Exception as e:
await update.message.edit(f"Translated from **{translation.src}** To **{translation.dest}**\n\n```{translation.text}```\n\n join @lntechnical")
| 44.493103
| 596
| 0.622801
|
af3b2d1e5223f688c78235fd3ec326e4f16dd8c7
| 10,493
|
py
|
Python
|
venv/Lib/site-packages/pandas/core/indexes/timedeltas.py
|
itsAbdulKhadar/Machine-Learning-with-Streamlit
|
c8a0c7ca5a1bcf2730ae9587bcddfebe323965a3
|
[
"MIT"
] | 76
|
2020-07-06T14:44:05.000Z
|
2022-02-14T15:30:21.000Z
|
venv/Lib/site-packages/pandas/core/indexes/timedeltas.py
|
itsAbdulKhadar/Machine-Learning-with-Streamlit
|
c8a0c7ca5a1bcf2730ae9587bcddfebe323965a3
|
[
"MIT"
] | 37
|
2020-10-20T08:30:53.000Z
|
2020-12-22T13:15:45.000Z
|
venv/Lib/site-packages/pandas/core/indexes/timedeltas.py
|
itsAbdulKhadar/Machine-Learning-with-Streamlit
|
c8a0c7ca5a1bcf2730ae9587bcddfebe323965a3
|
[
"MIT"
] | 11
|
2020-07-12T16:18:07.000Z
|
2022-02-05T16:48:35.000Z
|
""" implement the TimedeltaIndex """
from pandas._libs import index as libindex, lib
from pandas._libs.tslibs import Timedelta, to_offset
from pandas._typing import DtypeObj, Label
from pandas.errors import InvalidIndexError
from pandas.util._decorators import doc
from pandas.core.dtypes.common import (
TD64NS_DTYPE,
is_float,
is_integer,
is_scalar,
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
pandas_dtype,
)
from pandas.core.arrays import datetimelike as dtl
from pandas.core.arrays.timedeltas import TimedeltaArray
import pandas.core.common as com
from pandas.core.indexes.base import Index, maybe_extract_name
from pandas.core.indexes.datetimelike import (
DatetimeIndexOpsMixin,
DatetimeTimedeltaMixin,
)
from pandas.core.indexes.extension import inherit_names
@inherit_names(
["__neg__", "__pos__", "__abs__", "total_seconds", "round", "floor", "ceil"]
+ TimedeltaArray._field_ops,
TimedeltaArray,
wrap=True,
)
@inherit_names(
[
"_bool_ops",
"_object_ops",
"_field_ops",
"_datetimelike_ops",
"_datetimelike_methods",
"_other_ops",
"components",
"to_pytimedelta",
"sum",
"std",
"median",
"_format_native_types",
],
TimedeltaArray,
)
class TimedeltaIndex(DatetimeTimedeltaMixin):
"""
Immutable ndarray of timedelta64 data, represented internally as int64, and
which can be boxed to timedelta objects.
Parameters
----------
data : array-like (1-dimensional), optional
Optional timedelta-like data to construct index with.
    unit : str, optional
        Unit of the data (D, h, m, s, ms, us, ns) when the data is numeric
        (integer/float).
freq : str or pandas offset object, optional
One of pandas date offset strings or corresponding objects. The string
'infer' can be passed in order to set the frequency of the index as the
inferred frequency upon creation.
copy : bool
Make a copy of input ndarray.
name : object
Name to be stored in the index.
Attributes
----------
days
seconds
microseconds
nanoseconds
components
inferred_freq
Methods
-------
to_pytimedelta
to_series
round
floor
ceil
to_frame
mean
See Also
--------
Index : The base pandas Index type.
Timedelta : Represents a duration between two dates or times.
DatetimeIndex : Index of datetime64 data.
PeriodIndex : Index of Period data.
timedelta_range : Create a fixed-frequency TimedeltaIndex.
Notes
-----
To learn more about the frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
"""
_typ = "timedeltaindex"
_engine_type = libindex.TimedeltaEngine
_comparables = ["name", "freq"]
_attributes = ["name", "freq"]
_is_numeric_dtype = True
_data: TimedeltaArray
# -------------------------------------------------------------------
# Constructors
def __new__(
cls,
data=None,
unit=None,
freq=lib.no_default,
closed=None,
dtype=TD64NS_DTYPE,
copy=False,
name=None,
):
name = maybe_extract_name(name, data, cls)
if is_scalar(data):
raise TypeError(
f"{cls.__name__}() must be called with a "
f"collection of some kind, {repr(data)} was passed"
)
if unit in {"Y", "y", "M"}:
raise ValueError(
"Units 'M', 'Y', and 'y' are no longer supported, as they do not "
"represent unambiguous timedelta values durations."
)
if isinstance(data, TimedeltaArray) and freq is lib.no_default:
if copy:
data = data.copy()
return cls._simple_new(data, name=name)
if isinstance(data, TimedeltaIndex) and freq is lib.no_default and name is None:
if copy:
return data.copy()
else:
return data._shallow_copy()
# - Cases checked above all return/raise before reaching here - #
tdarr = TimedeltaArray._from_sequence(
data, freq=freq, unit=unit, dtype=dtype, copy=copy
)
return cls._simple_new(tdarr, name=name)
@classmethod
def _simple_new(cls, values: TimedeltaArray, name: Label = None):
assert isinstance(values, TimedeltaArray)
result = object.__new__(cls)
result._data = values
result._name = name
result._cache = {}
# For groupby perf. See note in indexes/base about _index_data
result._index_data = values._data
result._reset_identity()
return result
# -------------------------------------------------------------------
# Rendering Methods
@property
def _formatter_func(self):
from pandas.io.formats.format import _get_format_timedelta64
return _get_format_timedelta64(self, box=True)
# -------------------------------------------------------------------
@doc(Index.astype)
def astype(self, dtype, copy=True):
dtype = pandas_dtype(dtype)
if is_timedelta64_dtype(dtype) and not is_timedelta64_ns_dtype(dtype):
# Have to repeat the check for 'timedelta64' (not ns) dtype
# so that we can return a numeric index, since pandas will return
# a TimedeltaIndex when dtype='timedelta'
result = self._data.astype(dtype, copy=copy)
if self.hasnans:
return Index(result, name=self.name)
return Index(result.astype("i8"), name=self.name)
return DatetimeIndexOpsMixin.astype(self, dtype, copy=copy)
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
"""
Can we compare values of the given dtype to our own?
"""
return is_timedelta64_dtype(dtype)
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label
Returns
-------
loc : int, slice, or ndarray[int]
"""
if not is_scalar(key):
raise InvalidIndexError(key)
try:
key = self._data._validate_scalar(key, cast_str=True)
except TypeError as err:
raise KeyError(key) from err
return Index.get_loc(self, key, method, tolerance)
def _maybe_cast_slice_bound(self, label, side: str, kind):
"""
If label is a string, cast it to timedelta according to resolution.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'loc', 'getitem'} or None
Returns
-------
label : object
"""
assert kind in ["loc", "getitem", None]
if isinstance(label, str):
parsed = Timedelta(label)
lbound = parsed.round(parsed.resolution_string)
if side == "left":
return lbound
else:
return lbound + to_offset(parsed.resolution_string) - Timedelta(1, "ns")
elif is_integer(label) or is_float(label):
self._invalid_indexer("slice", label)
return label
def is_type_compatible(self, typ) -> bool:
return typ == self.inferred_type or typ == "timedelta"
@property
def inferred_type(self) -> str:
return "timedelta64"
TimedeltaIndex._add_logical_methods_disabled()
def timedelta_range(
start=None, end=None, periods=None, freq=None, name=None, closed=None
) -> TimedeltaIndex:
"""
Return a fixed frequency TimedeltaIndex, with day as the default
frequency.
Parameters
----------
start : str or timedelta-like, default None
Left bound for generating timedeltas.
end : str or timedelta-like, default None
Right bound for generating timedeltas.
periods : int, default None
Number of periods to generate.
freq : str or DateOffset, default 'D'
Frequency strings can have multiples, e.g. '5H'.
name : str, default None
Name of the resulting TimedeltaIndex.
closed : str, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None).
Returns
-------
rng : TimedeltaIndex
Notes
-----
Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
exactly three must be specified. If ``freq`` is omitted, the resulting
``TimedeltaIndex`` will have ``periods`` linearly spaced elements between
``start`` and ``end`` (closed on both sides).
To learn more about the frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
Examples
--------
>>> pd.timedelta_range(start='1 day', periods=4)
TimedeltaIndex(['1 days', '2 days', '3 days', '4 days'],
dtype='timedelta64[ns]', freq='D')
The ``closed`` parameter specifies which endpoint is included. The default
behavior is to include both endpoints.
>>> pd.timedelta_range(start='1 day', periods=4, closed='right')
TimedeltaIndex(['2 days', '3 days', '4 days'],
dtype='timedelta64[ns]', freq='D')
The ``freq`` parameter specifies the frequency of the TimedeltaIndex.
Only fixed frequencies can be passed, non-fixed frequencies such as
'M' (month end) will raise.
>>> pd.timedelta_range(start='1 day', end='2 days', freq='6H')
TimedeltaIndex(['1 days 00:00:00', '1 days 06:00:00', '1 days 12:00:00',
'1 days 18:00:00', '2 days 00:00:00'],
dtype='timedelta64[ns]', freq='6H')
Specify ``start``, ``end``, and ``periods``; the frequency is generated
automatically (linearly spaced).
>>> pd.timedelta_range(start='1 day', end='5 days', periods=4)
TimedeltaIndex(['1 days 00:00:00', '2 days 08:00:00', '3 days 16:00:00',
'5 days 00:00:00'],
dtype='timedelta64[ns]', freq=None)
"""
if freq is None and com.any_none(periods, start, end):
freq = "D"
freq, _ = dtl.maybe_infer_freq(freq)
tdarr = TimedeltaArray._generate_range(start, end, periods, freq, closed=closed)
return TimedeltaIndex._simple_new(tdarr, name=name)
| 31.322388
| 96
| 0.609168
|
58278e5dda782620b1e63fe03ae40dd4e43bf210
| 226
|
py
|
Python
|
api_for_selenium/employee_app/urls.py
|
sayantansingha01/interview1
|
8ecbbcf7d293830fe4e46a98aedef9bc6d1d1272
|
[
"MIT"
] | null | null | null |
api_for_selenium/employee_app/urls.py
|
sayantansingha01/interview1
|
8ecbbcf7d293830fe4e46a98aedef9bc6d1d1272
|
[
"MIT"
] | null | null | null |
api_for_selenium/employee_app/urls.py
|
sayantansingha01/interview1
|
8ecbbcf7d293830fe4e46a98aedef9bc6d1d1272
|
[
"MIT"
] | null | null | null |
from django.urls import path
from rest_framework.routers import SimpleRouter
from . import views
router = SimpleRouter()
router.register(r'employee_api', views.EmployeeApi, basename="employee_api")
urlpatterns = router.urls
| 25.111111
| 76
| 0.814159
|
727ef39aa35e79158f6876368df407c115f9e8ce
| 58,237
|
py
|
Python
|
pandas/tests/test_strings.py
|
garaud/pandas
|
cb8c130f75e1e2e2a286144961dc71b647ce973a
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 2
|
2015-09-03T17:39:02.000Z
|
2017-12-31T15:39:02.000Z
|
pandas/tests/test_strings.py
|
garaud/pandas
|
cb8c130f75e1e2e2a286144961dc71b647ce973a
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 5
|
2021-03-19T08:36:48.000Z
|
2022-01-13T01:52:34.000Z
|
pandas/tests/test_strings.py
|
garaud/pandas
|
cb8c130f75e1e2e2a286144961dc71b647ce973a
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 1
|
2015-09-03T17:39:05.000Z
|
2015-09-03T17:39:05.000Z
|
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta, date
import os
import operator
import re
import warnings
import nose
from numpy import nan as NA
import numpy as np
from numpy.testing import assert_array_equal
from numpy.random import randint
from pandas.compat import range, lrange, u, unichr
import pandas.compat as compat
from pandas import (Index, Series, TimeSeries, DataFrame, isnull, notnull,
bdate_range, date_range, MultiIndex)
import pandas.core.common as com
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(tm.TestCase):
_multiprocess_can_split_ = True
def test_api(self):
# GH 6106, GH 9322
self.assertIs(Series.str, strings.StringMethods)
self.assertIsInstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assertRaisesRegexp(AttributeError, "only use .str accessor"):
invalid.str
self.assertFalse(hasattr(invalid, 'str'))
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
tm.assert_isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
assert_array_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
self.assertTrue(isinstance(el, compat.string_types) or isnull(el))
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
self.assertEqual(s.dropna().values.item(), 'l')
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
        # nothing to iterate over, so nothing gets defined; the values should
        # remain unchanged
self.assertEqual(i, 100)
self.assertEqual(s, 1)
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
self.assertFalse(i)
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20))
for _ in range(4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
self.assertEqual(i, 100)
self.assertEqual(s, 'h')
def test_cat(self):
one = ['a', 'a', 'b', 'b', 'c', NA]
two = ['a', NA, 'b', 'd', 'foo', NA]
# single array
result = strings.str_cat(one)
self.assertTrue(isnull(result))
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
self.assertEqual(result, exp)
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
self.assertEqual(result, exp)
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
self.assertEqual(result, exp)
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = ['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA']
self.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = ['aa', NA, 'bb', 'bd', 'cfoo', NA]
tm.assert_almost_equal(result, exp)
def test_count(self):
values = ['foo', 'foofoo', NA, 'foooofooofommmfoo']
result = strings.str_count(values, 'f[o]+')
exp = [1, 2, NA, 4]
tm.assert_almost_equal(result, exp)
result = Series(values).str.count('f[o]+')
tm.assert_isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = [1, NA, 0, NA, NA, 0, NA, NA, NA]
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.count('a')
tm.assert_isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = [1, 2, NA, 4]
tm.assert_almost_equal(result, exp)
result = Series(values).str.count('f[o]+')
tm.assert_isinstance(result, Series)
tm.assert_almost_equal(result, exp)
def test_contains(self):
values = ['foo', NA, 'fooommm__foo', 'mmm_', 'foommm[_]+bar']
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = [False, NA, True, True, False]
tm.assert_almost_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = [False, NA, False, False, True]
tm.assert_almost_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = [False, False, True, True]
self.assertEqual(result.dtype, np.bool_)
tm.assert_almost_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = [True, False, True, True]
tm.assert_almost_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = [True, False, True, False]
tm.assert_almost_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = [False, NA, False, NA, NA, True, NA, NA, NA]
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.contains('o')
tm.assert_isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = [u('foo'), NA, u('fooommm__foo'), u('mmm_')]
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = [False, np.nan, True, True]
tm.assert_almost_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = [False, False, True, True]
tm.assert_almost_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = [False, False, True, True]
self.assertEqual(result.dtype, np.bool_)
tm.assert_almost_equal(result, expected)
# na
        values = Series(['om', 'foo', np.nan])
        res = values.str.contains('foo', na="foo")
        self.assertEqual(res.ix[2], "foo")
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_startswith(mixed, 'f')
xp = [False, NA, False, NA, NA, True, NA, NA, NA]
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
tm.assert_isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = [False, NA, False, NA, NA, False, NA, NA, NA]
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
tm.assert_isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(),
"blah", None, 1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None,
1, 2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = ['a', NA, 'b', NA, NA, 'foo', NA, NA, NA]
tm.assert_isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = ['a', NA, 'b', NA, NA, 'foo', NA, NA, NA]
tm.assert_isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
        # flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace("(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
result = values.str.repeat(3)
exp = Series(['aaa', 'bbb', NA, 'ccc', NA, 'ddd'])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series(['a', 'bb', NA, 'cccc', NA, 'dddddd'])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo',
None, 1, 2.])
rs = Series(mixed).str.repeat(3)
xp = ['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA]
tm.assert_isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA,
u('d')])
result = values.str.repeat(3)
exp = Series([u('aaa'), u('bbb'), NA, u('ccc'), NA,
u('ddd')])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series([u('a'), u('bb'), NA, u('cccc'), NA,
u('dddddd')])
tm.assert_series_equal(result, exp)
def test_deprecated_match(self):
# Old match behavior, deprecated (but still default) in 0.13
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning():
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([('BAD__', 'BAD'), NA, []])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
with tm.assert_produces_warning():
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)')
xp = [('BAD_', 'BAD'), NA, ('BAD_', 'BAD'), NA, NA, [], NA, NA, NA]
tm.assert_isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
with tm.assert_produces_warning():
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([(u('BAD__'), u('BAD')), NA, []])
tm.assert_series_equal(result, exp)
def test_match(self):
# New match behavior introduced in 0.13
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning():
result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# If no groups, use new behavior even when as_indexer is False.
# (Old behavior is pretty much useless in this case.)
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=False)
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
with tm.assert_produces_warning():
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
xp = [True, NA, True, NA, NA, False, NA, NA, NA]
tm.assert_isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
with tm.assert_produces_warning():
result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# na GH #6609
res = Series(['a', 0, np.nan]).str.match('a', na=False)
exp = Series([True, False, False])
assert_series_equal(exp, res)
res = Series(['a', 0, np.nan]).str.match('a')
exp = Series([True, np.nan, np.nan])
assert_series_equal(exp, res)
def test_extract(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)')
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)')
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er,
er, er, er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)')
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# no groups
s = Series(['A1', 'B2', 'C3'])
f = lambda: s.str.extract('[ABC][123]')
self.assertRaises(ValueError, f)
# only non-capturing groups
f = lambda: s.str.extract('(?:[AB]).*')
self.assertRaises(ValueError, f)
# one group, no matches
result = s.str.extract('(_)')
exp = Series([NA, NA, NA], dtype=object)
tm.assert_series_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)')
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]')
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])')
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# named group/groups
result = s.str.extract('(?P<letter>[AB])(?P<number>[123])')
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]], columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
result = s.str.extract('(?P<letter>[AB])')
exp = Series(['A', 'B', NA], name='letter')
tm.assert_series_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])')
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]], columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])')
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract('([AB])([123])(?:[123])')
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract('(?P<letter>[AB])?(?P<number>[123])')
exp = DataFrame([['A', '1'], ['B', '2'], [NA, '3']], columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract('(?P<letter>[ABC])(?P<number>[123])?')
exp = DataFrame([['A', '1'], ['B', '2'], ['C', NA]], columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# single group renames series properly
s = Series(['A1', 'A2'])
result = s.str.extract(r'(?P<uno>A)\d')
tm.assert_equal(result.name, 'uno')
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
result = Series(data, index=index).str.extract('(\d)')
exp = Series(['1', '2', NA], index=index)
tm.assert_series_equal(result, exp)
result = Series(data, index=index).str.extract('(?P<letter>\D)(?P<number>\d)?')
exp = DataFrame([['A', '1'], ['B', '2'], ['C', NA]], columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
for index in [ tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex ]:
check_index(index())
def test_extract_single_series_name_is_preserved(self):
s = Series(['a3', 'b3', 'c2'], name='bob')
r = s.str.extract(r'(?P<sue>[a-z])')
e = Series(['a', 'b', 'c'], name='sue')
tm.assert_series_equal(r, e)
self.assertEqual(r.name, e.name)
def test_empty_str_methods(self):
empty_str = empty = Series(dtype=str)
empty_int = Series(dtype=int)
empty_bool = Series(dtype=bool)
empty_list = Series(dtype=list)
empty_bytes = Series(dtype=object)
# GH7241
# (extract) on empty series
tm.assert_series_equal(empty_str, empty.str.cat(empty))
tm.assert_equal('', empty.str.cat())
tm.assert_series_equal(empty_str, empty.str.title())
tm.assert_series_equal(empty_int, empty.str.count('a'))
tm.assert_series_equal(empty_bool, empty.str.contains('a'))
tm.assert_series_equal(empty_bool, empty.str.startswith('a'))
tm.assert_series_equal(empty_bool, empty.str.endswith('a'))
tm.assert_series_equal(empty_str, empty.str.lower())
tm.assert_series_equal(empty_str, empty.str.upper())
tm.assert_series_equal(empty_str, empty.str.replace('a','b'))
tm.assert_series_equal(empty_str, empty.str.repeat(3))
tm.assert_series_equal(empty_bool, empty.str.match('^a'))
tm.assert_series_equal(empty_str, empty.str.extract('()'))
tm.assert_frame_equal(DataFrame(columns=[0,1], dtype=str), empty.str.extract('()()'))
tm.assert_frame_equal(DataFrame(dtype=str), empty.str.get_dummies())
tm.assert_series_equal(empty_str, empty_list.str.join(''))
tm.assert_series_equal(empty_int, empty.str.len())
tm.assert_series_equal(empty_list, empty_list.str.findall('a'))
tm.assert_series_equal(empty_int, empty.str.find('a'))
tm.assert_series_equal(empty_int, empty.str.rfind('a'))
tm.assert_series_equal(empty_str, empty.str.pad(42))
tm.assert_series_equal(empty_str, empty.str.center(42))
tm.assert_series_equal(empty_list, empty.str.split('a'))
tm.assert_series_equal(empty_str, empty.str.slice(stop=1))
tm.assert_series_equal(empty_str, empty.str.slice(step=1))
tm.assert_series_equal(empty_str, empty.str.strip())
tm.assert_series_equal(empty_str, empty.str.lstrip())
tm.assert_series_equal(empty_str, empty.str.rstrip())
tm.assert_series_equal(empty_str, empty.str.rstrip())
tm.assert_series_equal(empty_str, empty.str.wrap(42))
tm.assert_series_equal(empty_str, empty.str.get(0))
tm.assert_series_equal(empty_str, empty_bytes.str.decode('ascii'))
tm.assert_series_equal(empty_bytes, empty.str.encode('ascii'))
tm.assert_series_equal(empty_str, empty.str.isalnum())
tm.assert_series_equal(empty_str, empty.str.isalpha())
tm.assert_series_equal(empty_str, empty.str.isdigit())
tm.assert_series_equal(empty_str, empty.str.isspace())
tm.assert_series_equal(empty_str, empty.str.islower())
tm.assert_series_equal(empty_str, empty.str.isupper())
tm.assert_series_equal(empty_str, empty.str.istitle())
tm.assert_series_equal(empty_str, empty.str.isnumeric())
tm.assert_series_equal(empty_str, empty.str.isdecimal())
def test_ismethods(self):
values = ['A', 'b', 'Xy', '4', '3A', '', 'TT', '55', '-', ' ']
str_s = Series(values)
alnum_e = [True, True, True, True, True, False, True, True, False, False]
alpha_e = [True, True, True, False, False, False, True, False, False, False]
digit_e = [False, False, False, True, False, False, False, True, False, False]
num_e = [False, False, False, True, False, False, False, True, False, False]
space_e = [False, False, False, False, False, False, False, False, False, True]
lower_e = [False, True, False, False, False, False, False, False, False, False]
upper_e = [True, False, False, False, True, False, True, False, False, False]
title_e = [True, False, True, False, True, False, False, False, False, False]
tm.assert_series_equal(str_s.str.isalnum(), Series(alnum_e))
tm.assert_series_equal(str_s.str.isalpha(), Series(alpha_e))
tm.assert_series_equal(str_s.str.isdigit(), Series(digit_e))
tm.assert_series_equal(str_s.str.isspace(), Series(space_e))
tm.assert_series_equal(str_s.str.islower(), Series(lower_e))
tm.assert_series_equal(str_s.str.isupper(), Series(upper_e))
tm.assert_series_equal(str_s.str.istitle(), Series(title_e))
self.assertEqual(str_s.str.isalnum().tolist(), [v.isalnum() for v in values])
self.assertEqual(str_s.str.isalpha().tolist(), [v.isalpha() for v in values])
self.assertEqual(str_s.str.isdigit().tolist(), [v.isdigit() for v in values])
self.assertEqual(str_s.str.isspace().tolist(), [v.isspace() for v in values])
self.assertEqual(str_s.str.islower().tolist(), [v.islower() for v in values])
self.assertEqual(str_s.str.isupper().tolist(), [v.isupper() for v in values])
self.assertEqual(str_s.str.istitle().tolist(), [v.istitle() for v in values])
def test_isnumeric(self):
# 0x00bc: ¼ VULGAR FRACTION ONE QUARTER
# 0x2605: ★ not number
# 0x1378: ፸ ETHIOPIC NUMBER SEVENTY
        # 0xFF13: ３ FULLWIDTH DIGIT THREE
values = ['A', '3', unichr(0x00bc), unichr(0x2605),
unichr(0x1378), unichr(0xFF13), 'four']
s = Series(values)
numeric_e = [False, True, True, False, True, True, False]
decimal_e = [False, True, False, False, False, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
unicodes = [u('A'), u('3'), unichr(0x00bc), unichr(0x2605),
unichr(0x1378), unichr(0xFF13), u('four')]
self.assertEqual(s.str.isnumeric().tolist(), [v.isnumeric() for v in unicodes])
self.assertEqual(s.str.isdecimal().tolist(), [v.isdecimal() for v in unicodes])
values = ['A', np.nan, unichr(0x00bc), unichr(0x2605),
np.nan, unichr(0xFF13), 'four']
s = Series(values)
numeric_e = [False, np.nan, True, False, np.nan, True, False]
decimal_e = [False, np.nan, False, False, np.nan, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
def test_get_dummies(self):
s = Series(['a|b', 'a|c', np.nan])
result = s.str.get_dummies('|')
expected = DataFrame([[1, 1, 0], [1, 0, 1], [0, 0, 0]],
columns=list('abc'))
tm.assert_frame_equal(result, expected)
s = Series(['a;b', 'a', 7])
result = s.str.get_dummies(';')
expected = DataFrame([[0, 1, 1], [0, 1, 0], [1, 0, 0]],
columns=list('7ab'))
tm.assert_frame_equal(result, expected)
def test_join(self):
values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.split('_').str.join('_')
xp = Series(['a_b', NA, 'asdf_cas_asdf', NA, NA, 'foo', NA, NA, NA])
tm.assert_isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), np.nan,
u('f_g_h')])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
def test_len(self):
values = Series(['foo', 'fooo', 'fooooo', np.nan, 'fooooooo'])
result = values.str.len()
exp = values.map(lambda x: len(x) if com.notnull(x) else NA)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.len()
xp = Series([3, NA, 13, NA, NA, 3, NA, NA, NA])
tm.assert_isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('foo'), u('fooo'), u('fooooo'), np.nan,
u('fooooooo')])
result = values.str.len()
exp = values.map(lambda x: len(x) if com.notnull(x) else NA)
tm.assert_series_equal(result, exp)
def test_findall(self):
values = Series(['fooBAD__barBAD', NA, 'foo', 'BAD'])
result = values.str.findall('BAD[_]*')
exp = Series([['BAD__', 'BAD'], NA, [], ['BAD']])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['fooBAD__barBAD', NA, 'foo', True, datetime.today(),
'BAD', None, 1, 2.])
rs = Series(mixed).str.findall('BAD[_]*')
xp = Series([['BAD__', 'BAD'], NA, [], NA, NA, ['BAD'], NA, NA, NA])
tm.assert_isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo'),
u('BAD')])
result = values.str.findall('BAD[_]*')
exp = Series([[u('BAD__'), u('BAD')], NA, [], [u('BAD')]])
tm.assert_almost_equal(result, exp)
def test_find(self):
values = Series(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF', 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, 3, 1, 0, -1]))
expected = np.array([v.find('EF') for v in values.values])
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF') for v in values.values])
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, 3, 7, 4, -1]))
expected = np.array([v.find('EF', 3) for v in values.values])
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF', 3) for v in values.values])
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.find('EF', 3, 6) for v in values.values])
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.rfind('EF', 3, 6) for v in values.values])
tm.assert_numpy_array_equal(result.values, expected)
with tm.assertRaisesRegexp(TypeError, "expected a string object, not int"):
result = values.str.find(0)
with tm.assertRaisesRegexp(TypeError, "expected a string object, not int"):
result = values.str.rfind(0)
def test_find_nan(self):
values = Series(['ABCDEFG', np.nan, 'DEFGHIJEF', np.nan, 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, np.nan, 1, np.nan, -1]))
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
def test_pad(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left')
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(),
'ee', None, 1, 2.])
rs = Series(mixed).str.pad(5, side='left')
xp = Series([' a', NA, ' b', NA, NA, ' ee', NA, NA, NA])
tm.assert_isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(),
'ee', None, 1, 2.])
rs = Series(mixed).str.pad(5, side='right')
xp = Series(['a ', NA, 'b ', NA, NA, 'ee ', NA, NA, NA])
tm.assert_isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(),
'ee', None, 1, 2.])
rs = Series(mixed).str.pad(5, side='both')
xp = Series([' a ', NA, ' b ', NA, NA, ' ee ', NA, NA, NA])
tm.assert_isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA,
u('eeeeee')])
result = values.str.pad(5, side='left')
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA,
u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series([u('a '), u('b '), NA, u('c '), NA,
u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA,
u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_pad_fillchar(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left', fillchar='X')
exp = Series(['XXXXa', 'XXXXb', NA, 'XXXXc', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right', fillchar='X')
exp = Series(['aXXXX', 'bXXXX', NA, 'cXXXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both', fillchar='X')
exp = Series(['XXaXX', 'XXbXX', NA, 'XXcXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
with tm.assertRaisesRegexp(TypeError, "fillchar must be a character, not str"):
result = values.str.pad(5, fillchar='XY')
with tm.assertRaisesRegexp(TypeError, "fillchar must be a character, not int"):
result = values.str.pad(5, fillchar=5)
def test_center_ljust_rjust(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.center(5)
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(),
'c', 'eee', None, 1, 2.])
rs = Series(mixed).str.center(5)
xp = Series([' a ', NA, ' b ', NA, NA, ' c ', ' eee ', NA, NA,
NA])
tm.assert_isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.ljust(5)
xp = Series(['a ', NA, 'b ', NA, NA, 'c ', 'eee ', NA, NA,
NA])
tm.assert_isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.rjust(5)
xp = Series([' a', NA, ' b', NA, NA, ' c', ' eee', NA, NA,
NA])
tm.assert_isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA,
u('eeeeee')])
result = values.str.center(5)
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA,
u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series([u('a '), u('b '), NA, u('c '), NA,
u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA,
u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_center_ljust_rjust_fillchar(self):
values = Series(['a', 'bb', 'cccc', 'ddddd', 'eeeeee'])
result = values.str.center(5, fillchar='X')
expected = Series(['XXaXX', 'XXbbX', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.center(5, 'X') for v in values.values])
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.ljust(5, fillchar='X')
expected = Series(['aXXXX', 'bbXXX', 'ccccX', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.ljust(5, 'X') for v in values.values])
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rjust(5, fillchar='X')
expected = Series(['XXXXa', 'XXXbb', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.rjust(5, 'X') for v in values.values])
tm.assert_numpy_array_equal(result.values, expected)
        # If fillchar is not a character, normal str raises TypeError
# 'aaa'.ljust(5, 'XY')
# TypeError: must be char, not str
with tm.assertRaisesRegexp(TypeError, "fillchar must be a character, not str"):
result = values.str.center(5, fillchar='XY')
with tm.assertRaisesRegexp(TypeError, "fillchar must be a character, not str"):
result = values.str.ljust(5, fillchar='XY')
with tm.assertRaisesRegexp(TypeError, "fillchar must be a character, not str"):
result = values.str.rjust(5, fillchar='XY')
with tm.assertRaisesRegexp(TypeError, "fillchar must be a character, not int"):
result = values.str.center(5, fillchar=1)
with tm.assertRaisesRegexp(TypeError, "fillchar must be a character, not int"):
result = values.str.ljust(5, fillchar=1)
with tm.assertRaisesRegexp(TypeError, "fillchar must be a character, not int"):
result = values.str.rjust(5, fillchar=1)
def test_zfill(self):
values = Series(['1', '22', 'aaa', '333', '45678'])
result = values.str.zfill(5)
expected = Series(['00001', '00022', '00aaa', '00333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(5) for v in values.values])
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.zfill(3)
expected = Series(['001', '022', 'aaa', '333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(3) for v in values.values])
tm.assert_numpy_array_equal(result.values, expected)
values = Series(['1', np.nan, 'aaa', np.nan, '45678'])
result = values.str.zfill(5)
expected = Series(['00001', np.nan, '00aaa', np.nan, '45678'])
tm.assert_series_equal(result, expected)
def test_split(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.split('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.split('__')
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(),
None, 1, 2.])
rs = Series(mixed).str.split('_')
xp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA,
NA, NA, NA])
tm.assert_isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.split('_')
exp = Series([[u('a'), u('b'), u('c')],
[u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
def test_split_noargs(self):
# #1859
s = Series(['Wes McKinney', 'Travis Oliphant'])
result = s.str.split()
self.assertEqual(result[1], ['Travis', 'Oliphant'])
def test_split_maxsplit(self):
        # for unlimited splits, re.split uses maxsplit=0 while str.split uses -1
s = Series(['bd asdf jfg', 'kjasdflqw asdfnfk'])
result = s.str.split(n=-1)
xp = s.str.split()
tm.assert_series_equal(result, xp)
result = s.str.split(n=0)
tm.assert_series_equal(result, xp)
xp = s.str.split('asdf')
result = s.str.split('asdf', n=0)
tm.assert_series_equal(result, xp)
result = s.str.split('asdf', n=-1)
tm.assert_series_equal(result, xp)
def test_split_no_pat_with_nonzero_n(self):
s = Series(['split once', 'split once too!'])
result = s.str.split(n=1)
expected = Series({0: ['split', 'once'], 1: ['split', 'once too!']})
tm.assert_series_equal(expected, result)
def test_split_to_dataframe(self):
s = Series(['nosplit', 'alsonosplit'])
result = s.str.split('_', return_type='frame')
exp = DataFrame({0: Series(['nosplit', 'alsonosplit'])})
tm.assert_frame_equal(result, exp)
s = Series(['some_equal_splits', 'with_no_nans'])
result = s.str.split('_', return_type='frame')
exp = DataFrame({0: ['some', 'with'], 1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
s = Series(['some_unequal_splits', 'one_of_these_things_is_not'])
result = s.str.split('_', return_type='frame')
exp = DataFrame({0: ['some', 'one'], 1: ['unequal', 'of'],
2: ['splits', 'these'], 3: [NA, 'things'],
4: [NA, 'is'], 5: [NA, 'not']})
tm.assert_frame_equal(result, exp)
s = Series(['some_splits', 'with_index'], index=['preserve', 'me'])
result = s.str.split('_', return_type='frame')
exp = DataFrame({0: ['some', 'with'], 1: ['splits', 'index']},
index=['preserve', 'me'])
tm.assert_frame_equal(result, exp)
with tm.assertRaisesRegexp(ValueError, "return_type must be"):
s.str.split('_', return_type="some_invalid_type")
def test_pipe_failures(self):
# #2119
s = Series(['A|B|C'])
result = s.str.split('|')
exp = Series([['A', 'B', 'C']])
tm.assert_series_equal(result, exp)
result = s.str.replace('|', ' ')
exp = Series(['A B C'])
tm.assert_series_equal(result, exp)
def test_slice(self):
values = Series(['aafootwo', 'aabartwo', NA, 'aabazqux'])
result = values.str.slice(2, 5)
exp = Series(['foo', 'bar', NA, 'baz'])
tm.assert_series_equal(result, exp)
for start, stop, step in [(0, 3, -1), (None, None, -1),
(3, 10, 2), (3, 0, -1)]:
try:
result = values.str.slice(start, stop, step)
expected = Series([s[start:stop:step] if not isnull(s) else NA for s in
values])
tm.assert_series_equal(result, expected)
except:
print('failed on %s:%s:%s' % (start, stop, step))
raise
# mixed
mixed = Series(['aafootwo', NA, 'aabartwo', True, datetime.today(),
None, 1, 2.])
rs = Series(mixed).str.slice(2, 5)
xp = Series(['foo', NA, 'bar', NA, NA,
NA, NA, NA])
tm.assert_isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.slice(2, 5, -1)
xp = Series(['oof', NA, 'rab', NA, NA,
NA, NA, NA])
# unicode
values = Series([u('aafootwo'), u('aabartwo'), NA,
u('aabazqux')])
result = values.str.slice(2, 5)
exp = Series([u('foo'), u('bar'), NA, u('baz')])
tm.assert_series_equal(result, exp)
result = values.str.slice(0, -1, 2)
exp = Series([u('afow'), u('abrw'), NA, u('abzu')])
tm.assert_series_equal(result, exp)
def test_slice_replace(self):
values = Series(['short', 'a bit longer', 'evenlongerthanthat', '', NA])
exp = Series(['shrt', 'a it longer', 'evnlongerthanthat', '', NA])
result = values.str.slice_replace(2, 3)
tm.assert_series_equal(result, exp)
exp = Series(['shzrt', 'a zit longer', 'evznlongerthanthat', 'z', NA])
result = values.str.slice_replace(2, 3, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shzort', 'a zbit longer', 'evzenlongerthanthat', 'z', NA])
result = values.str.slice_replace(2, 2, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shzort', 'a zbit longer', 'evzenlongerthanthat', 'z', NA])
result = values.str.slice_replace(2, 1, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shorz', 'a bit longez', 'evenlongerthanthaz', 'z', NA])
result = values.str.slice_replace(-1, None, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['zrt', 'zer', 'zat', 'z', NA])
result = values.str.slice_replace(None, -2, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shortz', 'a bit znger', 'evenlozerthanthat', 'z', NA])
result = values.str.slice_replace(6, 8, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['zrt', 'a zit longer', 'evenlongzerthanthat', 'z', NA])
result = values.str.slice_replace(-10, 3, 'z')
tm.assert_series_equal(result, exp)
def test_strip_lstrip_rstrip(self):
values = Series([' aa ', ' bb \n', NA, 'cc '])
result = values.str.strip()
exp = Series(['aa', 'bb', NA, 'cc'])
tm.assert_series_equal(result, exp)
result = values.str.lstrip()
exp = Series(['aa ', 'bb \n', NA, 'cc '])
tm.assert_series_equal(result, exp)
result = values.str.rstrip()
exp = Series([' aa', ' bb', NA, 'cc'])
tm.assert_series_equal(result, exp)
def test_strip_lstrip_rstrip_mixed(self):
# mixed
mixed = Series([' aa ', NA, ' bb \t\n', True, datetime.today(),
None, 1, 2.])
rs = Series(mixed).str.strip()
xp = Series(['aa', NA, 'bb', NA, NA,
NA, NA, NA])
tm.assert_isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.lstrip()
xp = Series(['aa ', NA, 'bb \t\n', NA, NA,
NA, NA, NA])
tm.assert_isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.rstrip()
xp = Series([' aa', NA, ' bb', NA, NA,
NA, NA, NA])
tm.assert_isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
def test_strip_lstrip_rstrip_unicode(self):
# unicode
values = Series([u(' aa '), u(' bb \n'), NA,
u('cc ')])
result = values.str.strip()
exp = Series([u('aa'), u('bb'), NA, u('cc')])
tm.assert_series_equal(result, exp)
result = values.str.lstrip()
exp = Series([u('aa '), u('bb \n'), NA, u('cc ')])
tm.assert_series_equal(result, exp)
result = values.str.rstrip()
exp = Series([u(' aa'), u(' bb'), NA, u('cc')])
tm.assert_series_equal(result, exp)
def test_strip_lstrip_rstrip_args(self):
values = Series(['xxABCxx', 'xx BNSD', 'LDFJH xx'])
rs = values.str.strip('x')
xp = Series(['ABC', ' BNSD', 'LDFJH '])
assert_series_equal(rs, xp)
rs = values.str.lstrip('x')
xp = Series(['ABCxx', ' BNSD', 'LDFJH xx'])
assert_series_equal(rs, xp)
rs = values.str.rstrip('x')
xp = Series(['xxABC', 'xx BNSD', 'LDFJH '])
assert_series_equal(rs, xp)
def test_strip_lstrip_rstrip_args_unicode(self):
values = Series([u('xxABCxx'), u('xx BNSD'),
u('LDFJH xx')])
rs = values.str.strip(u('x'))
xp = Series(['ABC', ' BNSD', 'LDFJH '])
assert_series_equal(rs, xp)
rs = values.str.lstrip(u('x'))
xp = Series(['ABCxx', ' BNSD', 'LDFJH xx'])
assert_series_equal(rs, xp)
rs = values.str.rstrip(u('x'))
xp = Series(['xxABC', 'xx BNSD', 'LDFJH '])
assert_series_equal(rs, xp)
def test_wrap(self):
# test values are: two words less than width, two words equal to width,
# two words greater than width, one word less than width, one word
# equal to width, one word greater than width, multiple tokens with trailing
# whitespace equal to width
values = Series([u('hello world'), u('hello world!'),
u('hello world!!'), u('abcdefabcde'),
u('abcdefabcdef'), u('abcdefabcdefa'),
u('ab ab ab ab '), u('ab ab ab ab a'),
u('\t')])
# expected values
xp = Series([u('hello world'), u('hello world!'),
u('hello\nworld!!'), u('abcdefabcde'),
u('abcdefabcdef'), u('abcdefabcdef\na'),
u('ab ab ab ab'), u('ab ab ab ab\na'),
u('')])
rs = values.str.wrap(12, break_long_words=True)
assert_series_equal(rs, xp)
# test with pre and post whitespace (non-unicode), NaN, and non-ascii Unicode
values = Series([' pre ', np.nan, u('\xac\u20ac\U00008000 abadcafe')])
xp = Series([' pre', NA, u('\xac\u20ac\U00008000 ab\nadcafe')])
rs = values.str.wrap(6)
assert_series_equal(rs, xp)
def test_get(self):
values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
result = values.str.split('_').str.get(1)
expected = Series(['b', 'd', np.nan, 'g'])
tm.assert_series_equal(result, expected)
# mixed
mixed = Series(['a_b_c', NA, 'c_d_e', True, datetime.today(),
None, 1, 2.])
rs = Series(mixed).str.split('_').str.get(1)
xp = Series(['b', NA, 'd', NA, NA,
NA, NA, NA])
tm.assert_isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), np.nan,
u('f_g_h')])
result = values.str.split('_').str.get(1)
expected = Series([u('b'), u('d'), np.nan, u('g')])
tm.assert_series_equal(result, expected)
def test_more_contains(self):
# PR #1179
import re
s = Series(['A', 'B', 'C', 'Aaba', 'Baca', '', NA,
'CABA', 'dog', 'cat'])
result = s.str.contains('a')
expected = Series([False, False, False, True, True, False, np.nan,
False, False, True])
assert_series_equal(result, expected)
result = s.str.contains('a', case=False)
expected = Series([True, False, False, True, True, False, np.nan,
True, False, True])
assert_series_equal(result, expected)
result = s.str.contains('Aa')
expected = Series([False, False, False, True, False, False, np.nan,
False, False, False])
assert_series_equal(result, expected)
result = s.str.contains('ba')
expected = Series([False, False, False, True, False, False, np.nan,
False, False, False])
assert_series_equal(result, expected)
result = s.str.contains('ba', case=False)
expected = Series([False, False, False, True, True, False, np.nan,
True, False, False])
assert_series_equal(result, expected)
def test_more_replace(self):
# PR #1179
import re
s = Series(['A', 'B', 'C', 'Aaba', 'Baca',
'', NA, 'CABA', 'dog', 'cat'])
result = s.str.replace('A', 'YYY')
expected = Series(['YYY', 'B', 'C', 'YYYaba', 'Baca', '', NA,
'CYYYBYYY', 'dog', 'cat'])
assert_series_equal(result, expected)
result = s.str.replace('A', 'YYY', case=False)
expected = Series(['YYY', 'B', 'C', 'YYYYYYbYYY', 'BYYYcYYY', '', NA,
'CYYYBYYY', 'dog', 'cYYYt'])
assert_series_equal(result, expected)
result = s.str.replace('^.a|dog', 'XX-XX ', case=False)
expected = Series(['A', 'B', 'C', 'XX-XX ba', 'XX-XX ca', '', NA,
'XX-XX BA', 'XX-XX ', 'XX-XX t'])
assert_series_equal(result, expected)
def test_string_slice_get_syntax(self):
s = Series(['YYY', 'B', 'C', 'YYYYYYbYYY', 'BYYYcYYY', NA,
'CYYYBYYY', 'dog', 'cYYYt'])
result = s.str[0]
expected = s.str.get(0)
assert_series_equal(result, expected)
result = s.str[:3]
expected = s.str.slice(stop=3)
assert_series_equal(result, expected)
result = s.str[2::-1]
expected = s.str.slice(start=2, step=-1)
assert_series_equal(result, expected)
def test_string_slice_out_of_bounds(self):
s = Series([(1, 2), (1,), (3,4,5)])
result = s.str[1]
expected = Series([2, np.nan, 4])
assert_series_equal(result, expected)
s = Series(['foo', 'b', 'ba'])
result = s.str[1]
expected = Series(['o', np.nan, 'a'])
assert_series_equal(result, expected)
def test_match_findall_flags(self):
data = {'Dave': 'dave@google.com', 'Steve': 'steve@gmail.com',
'Rob': 'rob@gmail.com', 'Wes': np.nan}
data = Series(data)
pat = pattern = r'([A-Z0-9._%+-]+)@([A-Z0-9.-]+)\.([A-Z]{2,4})'
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = data.str.match(pat, flags=re.IGNORECASE)
assert issubclass(w[-1].category, UserWarning)
self.assertEqual(result[0], ('dave', 'google', 'com'))
result = data.str.findall(pat, flags=re.IGNORECASE)
self.assertEqual(result[0][0], ('dave', 'google', 'com'))
result = data.str.count(pat, flags=re.IGNORECASE)
self.assertEqual(result[0], 1)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = data.str.contains(pat, flags=re.IGNORECASE)
assert issubclass(w[-1].category, UserWarning)
self.assertEqual(result[0], True)
def test_encode_decode(self):
base = Series([u('a'), u('b'), u('a\xe4')])
series = base.str.encode('utf-8')
f = lambda x: x.decode('utf-8')
result = series.str.decode('utf-8')
exp = series.map(f)
tm.assert_series_equal(result, exp)
def test_encode_decode_errors(self):
encodeBase = Series([u('a'), u('b'), u('a\x9d')])
self.assertRaises(UnicodeEncodeError,
encodeBase.str.encode, 'cp1252')
f = lambda x: x.encode('cp1252', 'ignore')
result = encodeBase.str.encode('cp1252', 'ignore')
exp = encodeBase.map(f)
tm.assert_series_equal(result, exp)
decodeBase = Series([b'a', b'b', b'a\x9d'])
self.assertRaises(UnicodeDecodeError,
decodeBase.str.decode, 'cp1252')
f = lambda x: x.decode('cp1252', 'ignore')
result = decodeBase.str.decode('cp1252', 'ignore')
exp = decodeBase.map(f)
tm.assert_series_equal(result, exp)
def test_cat_on_filtered_index(self):
df = DataFrame(index=MultiIndex.from_product([[2011, 2012], [1,2,3]],
names=['year', 'month']))
df = df.reset_index()
df = df[df.month > 1]
str_year = df.year.astype('str')
str_month = df.month.astype('str')
str_both = str_year.str.cat(str_month, sep=' ')
self.assertEqual(str_both.loc[1], '2011 2')
str_multiple = str_year.str.cat([str_month, str_month], sep=' ')
self.assertEqual(str_multiple.loc[1], '2011 2 2')
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
avg_line_length: 38.213255 | max_line_length: 107 | alphanum_fraction: 0.551024

hexsha: e3f9d95c9d98fd1213f15ff961c81af56ddc0ad9 | size: 2,262 | ext: py | lang: Python
max_stars_repo_path: glr/config.py | max_stars_repo_name: imlegend19/Google-Landmark-Recognition | max_stars_repo_head_hexsha: fe2f58794e4d6163c4fff6cd2913337ca44876c3
max_stars_repo_licenses: ["MIT"] | max_stars_count: 2 | max_stars_repo_stars_event_min_datetime: 2020-12-10T13:17:55.000Z | max_stars_repo_stars_event_max_datetime: 2020-12-24T06:34:07.000Z
max_issues_repo_path: glr/config.py | max_issues_repo_name: imlegend19/Google-Landmark-Recognition | max_issues_repo_head_hexsha: fe2f58794e4d6163c4fff6cd2913337ca44876c3
max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: glr/config.py | max_forks_repo_name: imlegend19/Google-Landmark-Recognition | max_forks_repo_head_hexsha: fe2f58794e4d6163c4fff6cd2913337ca44876c3
max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
import os
import tensorflow as tf
ROOT = os.path.dirname(os.path.abspath(__file__))
DATASET_DIR = os.path.join(ROOT, "paris")
TRAIN_LF = os.path.join(ROOT, "train_lf")
TEST_LF = os.path.join(ROOT, "test_lf")
TRAIN_GF = os.path.join(ROOT, "data/train_gf.pkl")
TEST_GF = os.path.join(ROOT, "data/test_gf.pkl")
CORRUPTED = [
'louvre/paris_louvre_000136.jpg',
'louvre/paris_louvre_000146.jpg',
'moulinrouge/paris_moulinrouge_000422.jpg',
'museedorsay/paris_museedorsay_001059.jpg',
'notredame/paris_notredame_000188.jpg',
'pantheon/paris_pantheon_000284.jpg',
'pantheon/paris_pantheon_000960.jpg',
'pantheon/paris_pantheon_000974.jpg',
'pompidou/paris_pompidou_000195.jpg',
'pompidou/paris_pompidou_000196.jpg',
'pompidou/paris_pompidou_000201.jpg',
'pompidou/paris_pompidou_000467.jpg',
'pompidou/paris_pompidou_000640.jpg',
'sacrecoeur/paris_sacrecoeur_000299.jpg',
'sacrecoeur/paris_sacrecoeur_000330.jpg',
'sacrecoeur/paris_sacrecoeur_000353.jpg',
'triomphe/paris_triomphe_000662.jpg',
'triomphe/paris_triomphe_000833.jpg',
'triomphe/paris_triomphe_000863.jpg',
'triomphe/paris_triomphe_000867.jpg'
]
for i in range(len(CORRUPTED)):
CORRUPTED[i] = os.path.join(DATASET_DIR, CORRUPTED[i])
# DEBUGGING PARAMS:
MAX_NUM_EMBEDDINGS = -1
# Retrieval & re-ranking parameters:
NUM_TO_RERANK = 6
TOP_K = 3
# RANSAC parameters:
MAX_INLIER_SCORE = 26
MAX_REPROJECTION_ERROR = 6.0
MAX_RANSAC_ITERATIONS = 100000
HOMOGRAPHY_CONFIDENCE = 0.95
# DELG model:
SAVED_MODEL_DIR = os.path.join(ROOT, 'delg-saved-models/local_and_global')
DELG_MODEL = tf.saved_model.load(SAVED_MODEL_DIR)
DELG_IMAGE_SCALES_TENSOR = tf.convert_to_tensor([0.70710677, 1.0, 1.4142135])
DELG_SCORE_THRESHOLD_TENSOR = tf.constant(175.)
DELG_INPUT_TENSOR_NAMES = [
'input_image:0', 'input_scales:0', 'input_abs_thres:0'
]
# Global feature extraction:
NUM_EMBEDDING_DIMENSIONS = 2048
GLOBAL_FEATURE_EXTRACTION_FN = DELG_MODEL.prune(
DELG_INPUT_TENSOR_NAMES,
['global_descriptors:0']
)
# Local feature extraction:
LOCAL_FEATURE_NUM_TENSOR = tf.constant(1000)
LOCAL_FEATURE_EXTRACTION_FN = DELG_MODEL.prune(
DELG_INPUT_TENSOR_NAMES + ['input_max_feature_num:0'],
['boxes:0', 'features:0']
)
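# Hedged usage sketch (editor's assumption based on the pruned input tensor
# order above; not part of this config). A caller would presumably invoke the
# pruned functions positionally, e.g.:
#
#   global_descs = GLOBAL_FEATURE_EXTRACTION_FN(
#       image_tensor, DELG_IMAGE_SCALES_TENSOR, DELG_SCORE_THRESHOLD_TENSOR)
#   boxes, local_descs = LOCAL_FEATURE_EXTRACTION_FN(
#       image_tensor, DELG_IMAGE_SCALES_TENSOR, DELG_SCORE_THRESHOLD_TENSOR,
#       LOCAL_FEATURE_NUM_TENSOR)
#
# where image_tensor would be a uint8 HxWx3 tensor of the query image and the
# returned values are lists of output tensors (one per name in the prune call).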
avg_line_length: 29.763158 | max_line_length: 77 | alphanum_fraction: 0.761715

hexsha: 6deda222f66ba6ac8cde07213a7a94d78581eca8 | size: 20,157 | ext: py | lang: Python
max_stars_repo_path: gpt2_trainings_scripts/train.py | max_stars_repo_name: AbrahamSanders/gutenberg-dialog | max_stars_repo_head_hexsha: 9b5bb2ebb208c2cc34ec6ed38477fc67445c5cc7
max_stars_repo_licenses: ["MIT"] | max_stars_count: 50 | max_stars_repo_stars_event_min_datetime: 2020-04-28T03:20:46.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-07T09:17:09.000Z
max_issues_repo_path: gpt2_trainings_scripts/train.py | max_issues_repo_name: AbrahamSanders/gutenberg-dialog | max_issues_repo_head_hexsha: 9b5bb2ebb208c2cc34ec6ed38477fc67445c5cc7
max_issues_repo_licenses: ["MIT"] | max_issues_count: 1 | max_issues_repo_issues_event_min_datetime: 2021-01-31T21:50:57.000Z | max_issues_repo_issues_event_max_datetime: 2021-02-02T21:06:33.000Z
max_forks_repo_path: gpt2_trainings_scripts/train.py | max_forks_repo_name: ricsinaruto/gutenberg-dialog | max_forks_repo_head_hexsha: 30bbf1b055fed961b09af9c6ea045cc5ef98bf47
max_forks_repo_licenses: ["MIT"] | max_forks_count: 4 | max_forks_repo_forks_event_min_datetime: 2020-04-23T09:07:54.000Z | max_forks_repo_forks_event_max_datetime: 2021-07-22T16:58:35.000Z
# Copyright (c) 2019-present, HuggingFace Inc.
# All rights reserved. This source code is licensed under the BSD-style license found in the LICENSE file in the root directory of this source tree.
import os
import math
import logging
from pprint import pformat
from argparse import ArgumentParser
from collections import defaultdict
from itertools import chain
import sys
import torch
from torch.nn.parallel import DistributedDataParallel, DataParallel
from torch.utils.data import DataLoader, Dataset
from ignite.engine import Engine, Events
from ignite.handlers import ModelCheckpoint
from ignite.metrics import Accuracy, Loss, MetricsLambda, RunningAverage
from ignite.contrib.handlers import ProgressBar, PiecewiseLinear
from ignite.contrib.handlers.tensorboard_logger import TensorboardLogger, OutputHandler, OptimizerParamsHandler
from transformers import (AdamW, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer,
GPT2DoubleHeadsModel, GPT2Tokenizer, WEIGHTS_NAME, CONFIG_NAME)
from utils import get_dataset, make_logdir
SPECIAL_TOKENS = ["<bos>", "<eos>", "<speaker1>", "<speaker2>", "<pad>"]
ATTR_TO_SPECIAL_TOKEN = {'bos_token': '<bos>', 'eos_token': '<eos>', 'pad_token': '<pad>',
'additional_special_tokens': ['<speaker1>', '<speaker2>']}
MODEL_INPUTS = ["input_ids", "mc_token_ids", "lm_labels", "mc_labels", "token_type_ids"]
PADDED_INPUTS = ["input_ids", "lm_labels", "token_type_ids"]
logger = logging.getLogger(__file__)
def average_distributed_scalar(scalar, args):
""" Average a scalar over the nodes if we are in distributed training. We use this for distributed evaluation. """
if args.local_rank == -1:
return scalar
scalar_t = torch.tensor(scalar, dtype=torch.float, device=args.device) / torch.distributed.get_world_size()
torch.distributed.all_reduce(scalar_t, op=torch.distributed.ReduceOp.SUM)
return scalar_t.item()
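# Illustrative sketch (editor's example, not part of the original script):
# with 4 distributed processes whose local eval losses are 1.0, 2.0, 3.0 and
# 4.0, each process passes its own value through average_distributed_scalar;
# the division by world_size followed by the all-reduce SUM leaves every
# process holding the global mean, 2.5.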
def add_special_tokens_(model, tokenizer):
""" Add special tokens to the tokenizer and the model if they have not already been added. """
orig_num_tokens = len(tokenizer.encoder)
num_added_tokens = tokenizer.add_special_tokens(ATTR_TO_SPECIAL_TOKEN) # doesn't add if they are already there
if num_added_tokens > 0:
model.resize_token_embeddings(new_num_tokens=orig_num_tokens + num_added_tokens)
def build_input_from_segments(history, reply, tokenizer, lm_labels=False, with_eos=True):
""" Build a sequence of input from 3 segments: persona, history and last reply. """
bos, eos, speaker1, speaker2 = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[:-1])
sequence = [[bos]] + history + [reply + ([eos] if with_eos else [])]
sequence = [sequence[0]] + [[speaker2 if (len(sequence)-i) % 2 else speaker1] + s for i, s in enumerate(sequence[1:])]
input_seq = list(chain(*sequence))
if len(input_seq) > 500:
if len(sequence) == 3:
sequence = [sequence[0], sequence[1][:-(len(input_seq) - 500)], sequence[2]]
elif len(sequence) == 5:
sequence = [sequence[0], sequence[3], sequence[4]]
else:
print(sequence)
instance = {}
instance["input_ids"] = list(chain(*sequence))
instance["token_type_ids"] = [speaker2 if i % 2 else speaker1 for i, s in enumerate(sequence) for _ in s]
instance["mc_token_ids"] = len(instance["input_ids"]) - 1
instance["lm_labels"] = [-100] * len(instance["input_ids"])
if lm_labels:
instance["lm_labels"] = ([-100] * sum(len(s) for s in sequence[:-1])) + [-100] + sequence[-1][1:]
return instance
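# Illustrative sketch (editor's example with hypothetical token ids, not part
# of the original script): for history = [[10, 11]] and reply = [12, 13] with
# lm_labels=True and with_eos=True, build_input_from_segments yields roughly
#   input_ids      = [<bos>, <speaker2>, 10, 11, <speaker1>, 12, 13, <eos>]
#   token_type_ids = [<speaker1>, <speaker2>, <speaker2>, <speaker2>,
#                     <speaker1>, <speaker1>, <speaker1>, <speaker1>]
#   mc_token_ids   = 7                              # index of the last token
#   lm_labels      = [-100] * 5 + [12, 13, <eos>]   # loss only on the reply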
def interm_gc(args, tokenizer):
personachat = get_dataset(tokenizer, args.dataset_path, args.dataset_cache)
num_candidates = 2
logger.info("Build inputs and labels")
datasets = {"train": defaultdict(list), "valid": defaultdict(list)}
for dataset_name, dataset in personachat.items():
for i, dialog in enumerate(dataset):
            # keep all training dialogs; cap the number of validation dialogs
if dataset_name == 'train' or i < 100000000:
for utterance in dialog["utterances"]:
for j, candidate in enumerate(utterance["candidates"][-num_candidates:]):
lm_labels = bool(j == num_candidates - 1)
instance = build_input_from_segments(utterance["history"][-3:].copy(), candidate, tokenizer, lm_labels)
for input_name, input_array in instance.items():
datasets[dataset_name][input_name].append(input_array)
datasets[dataset_name]["mc_labels"].append(num_candidates - 1)
datasets[dataset_name]["n_candidates"] = num_candidates
if not i % 1000000:
print(i)
return datasets
class PaddedDataset(Dataset):
def __init__(self, args, name, tokenizer):
self.path = args.data_nuggets
params = torch.load(os.path.join(self.path, 'params_' + name))
self.padding = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[-1])
self.length = params['length']
self.name = name
def __getitem__(self, index):
try:
nugget = torch.load(os.path.join(self.path, 'nuggets_' + self.name + '/' + str(index)))
        except Exception:
            # fall back to a known-good nugget if this index fails to load
            print(index)
            nugget = torch.load(os.path.join(self.path, 'nuggets_' + self.name + '/1'))
t1 = nugget['input_ids']
t3 = nugget['lm_labels']
t5 = nugget['token_type_ids']
t2 = nugget['mc_token_ids']
t4 = nugget['mc_labels']
return (t1, t2, t3, t4, t5, self.padding)
def __len__(self):
if self.length == 10000:
return 100000
else:
return self.length
def save_data(args, data, tokenizer, split):
for i, t in enumerate(data['mc_token_ids']):
data['mc_token_ids'][i] = torch.tensor(t)
for i, t in enumerate(data['mc_labels']):
data['mc_labels'][i] = torch.tensor(t)
for name in PADDED_INPUTS:
for i, t in enumerate(data[name]):
data[name][i] = [torch.tensor(t[0]), torch.tensor(t[1])]
# save data to disk
print(len(data['mc_labels']))
for i in range(len(data['mc_labels'])):
nug_name = os.path.join(args.data_nuggets, 'nuggets_' + split + '/' + str(i))
if not os.path.isfile(nug_name):
nugget = dict([(input_name, data[input_name][i]) for input_name in MODEL_INPUTS])
torch.save(nugget, nug_name)
if not i % 10000:
print(i)
# save params to disk
torch.save({'length': len(data['input_ids'])}, os.path.join(args.data_nuggets, 'params_' + split))
def interm_gc2(args, tokenizer):
if args.run_data:
datasets = interm_gc(args, tokenizer)
logger.info("Convert to tensor")
for dataset_name in datasets:
for input_name in MODEL_INPUTS:
if input_name != "mc_labels":
new_list = []
for i, x in enumerate(datasets[dataset_name][input_name]):
if i % 2:
new_list.append([datasets[dataset_name][input_name][i - 1], x])
datasets[dataset_name][input_name] = new_list
save_data(args, datasets['valid'], tokenizer, 'valid')
save_data(args, datasets['train'], tokenizer, 'train')
sys.exit()
return PaddedDataset(args, 'train', tokenizer), PaddedDataset(args, 'valid', tokenizer)
def collate_fn(batch):
def build_batch(ex):
tensors = [0, 1, 2, 3, 4]
for i, t in enumerate(ex[:5]):
if i == 0 or i == 4:
# change this back
#t[0] = torch.tensor(t[0])
#t[1] = torch.tensor(t[1])
c1 = torch.cat((t[0], torch.tensor([ex[5]] * (max_l - len(t[0])))))
c2 = torch.cat((t[1], torch.tensor([ex[5]] * (max_l - len(t[1])))))
tensors[i] = torch.cat((c1.unsqueeze(0), c2.unsqueeze(0)))
elif i == 2:
#t[0] = torch.tensor(t[0])
#t[1] = torch.tensor(t[1])
c1 = torch.cat((t[0], torch.tensor([-100] * (max_l - len(t[0])))))
c2 = torch.cat((t[1], torch.tensor([-100] * (max_l - len(t[1])))))
tensors[i] = torch.cat((c1.unsqueeze(0), c2.unsqueeze(0)))
else:
tensors[i] = t
return tuple(tensors)
elem = batch[0]
if isinstance(elem, torch.Tensor):
out = None
if torch.utils.data.get_worker_info() is not None:
# If we're in a background process, concatenate directly into a
# shared memory tensor to avoid an extra copy
numel = sum([x.numel() for x in batch])
storage = elem.storage()._new_shared(numel)
out = elem.new(storage)
return torch.stack(batch, 0, out=out)
else:
max_l = max(max([len(x[0][0]), len(x[0][1])]) for x in batch) + 1
batch = list(map(build_batch, batch))
transposed = zip(*batch)
return [collate_fn(samples) for samples in transposed]
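# Illustrative sketch (editor's note, not part of the original script): for a
# batch whose two candidate sequences have lengths 7 and 9, max_l becomes 10;
# input_ids and token_type_ids are right-padded with the <pad> id carried in
# each example's last slot, while lm_labels are padded with -100 so the padded
# positions are ignored by the language-modelling loss.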
def get_data_loaders(args, tokenizer):
""" Prepare the dataset for training and evaluation """
train_dataset, valid_dataset = interm_gc2(args, tokenizer)
logger.info("Build train and validation dataloaders")
train_loader = DataLoader(train_dataset, sampler=None, collate_fn=collate_fn, batch_size=args.train_batch_size, shuffle=(not args.distributed))
valid_loader = DataLoader(valid_dataset, sampler=None, collate_fn=collate_fn, batch_size=args.valid_batch_size, shuffle=False)
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) if args.distributed else None
valid_sampler = torch.utils.data.distributed.DistributedSampler(valid_dataset) if args.distributed else None
return train_loader, valid_loader, train_sampler, valid_sampler
my_dataset = 'data/opensubtitles_es/'
def train():
parser = ArgumentParser()
parser.add_argument("--run_data", type=bool, default=False)
parser.add_argument("--eval_freq", type=int, default=200000)
parser.add_argument("--save_freq", type=int, default=2000)
parser.add_argument("--data_nuggets", type=str, default=my_dataset)
parser.add_argument("--dataset_path", type=str, default=my_dataset + 'json.txt', help="Path or url of the dataset. If empty download from S3.")
parser.add_argument("--dataset_cache", type=str, default=my_dataset + 'cache', help="Path or url of the dataset cache")
parser.add_argument("--model_checkpoint", type=str, default="gpt2", help="Path, url or short name of the model")
parser.add_argument("--model", type=str, default="gpt2")
parser.add_argument("--eval_before_start", type=bool, default=False, help="If true start with a first evaluation before training")
parser.add_argument("--num_candidates", type=int, default=2, help="Number of candidates for training")
parser.add_argument("--max_history", type=int, default=1, help="Number of previous exchanges to keep in history")
parser.add_argument("--train_batch_size", type=int, default=2, help="Batch size for training")
parser.add_argument("--valid_batch_size", type=int, default=2, help="Batch size for validation")
parser.add_argument("--gradient_accumulation_steps", type=int, default=8, help="Accumulate gradients on several steps")
parser.add_argument("--lr", type=float, default=6.25e-5, help="Learning rate")
parser.add_argument("--lm_coef", type=float, default=2.0, help="LM loss coefficient")
parser.add_argument("--mc_coef", type=float, default=1.0, help="Multiple-choice loss coefficient")
parser.add_argument("--max_norm", type=float, default=1.0, help="Clipping gradient norm")
parser.add_argument("--n_epochs", type=int, default=50, help="Number of training epochs")
parser.add_argument("--personality_permutations", type=int, default=1, help="Number of permutations of personality sentences")
parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu", help="Device (cuda or cpu)")
parser.add_argument("--fp16", type=str, default="", help="Set to O0, O1, O2 or O3 for fp16 training (see apex documentation)")
parser.add_argument("--local_rank", type=int, default=-1, help="Local rank for distributed training (-1: not distributed)")
args = parser.parse_args()
# logging is set to INFO (resp. WARN) for main (resp. auxiliary) process. logger.info => log main process only, logger.warning => log all processes
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning("Running process %d", args.local_rank) # This is a logger.warning: it will be printed by all distributed processes
logger.info("Arguments: %s", pformat(args))
# Initialize distributed training if needed
args.distributed = (args.local_rank != -1)
if args.distributed:
torch.cuda.set_device(args.local_rank)
args.device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
logger.info("Prepare tokenizer, pretrained model and optimizer.")
tokenizer_class = GPT2Tokenizer if "gpt2" in args.model else OpenAIGPTTokenizer # can't use AutoTokenizer because the checkpoint could be a Path
tokenizer = tokenizer_class.from_pretrained(args.model_checkpoint)
model_class = GPT2DoubleHeadsModel if "gpt2" in args.model else OpenAIGPTDoubleHeadsModel
model = model_class.from_pretrained(args.model_checkpoint)
model.to(args.device)
# Add special tokens if they are not already added
add_special_tokens_(model, tokenizer)
optimizer = AdamW(model.parameters(), lr=args.lr, correct_bias=True)
# Prepare model for FP16 and distributed training if needed (order is important, distributed should be the last)
if args.fp16:
from apex import amp # Apex is only required if we use fp16 training
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16)
#if args.distributed:
#model = DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank)
#model = DataParallel(model)
logger.info("Prepare datasets")
train_loader, val_loader, train_sampler, valid_sampler = get_data_loaders(args, tokenizer)
# Training function and trainer
def update(engine, batch):
model.train()
#print(batch)
batch = tuple(input_tensor.to(args.device) for input_tensor in batch)
input_ids, mc_token_ids, lm_labels, mc_labels, token_type_ids = batch
(lm_loss), (mc_loss), *_ = model(
input_ids, token_type_ids=token_type_ids, mc_token_ids=mc_token_ids,
mc_labels=mc_labels, lm_labels=lm_labels
)
loss = (lm_loss * args.lm_coef + mc_loss * args.mc_coef) / args.gradient_accumulation_steps
# DATAPARALLEL
#loss = loss.sum()
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_norm)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_norm)
if engine.state.iteration % args.gradient_accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
return loss.item()
trainer = Engine(update)
# Evaluation function and evaluator (evaluator output is the input of the metrics)
def inference(engine, batch):
model.eval()
with torch.no_grad():
batch = tuple(input_tensor.to(args.device) for input_tensor in batch)
input_ids, mc_token_ids, lm_labels, mc_labels, token_type_ids = batch
logger.info(tokenizer.decode(input_ids[0, -1, :].tolist()))
# if we don't send labels to the model, it doesn't return losses
lm_logits, mc_logits, *_ = model(
input_ids, token_type_ids=token_type_ids, mc_token_ids=mc_token_ids,
)
lm_logits_flat_shifted = lm_logits[..., :-1, :].contiguous().view(-1, lm_logits.size(-1))
lm_labels_flat_shifted = lm_labels[..., 1:].contiguous().view(-1)
return (lm_logits_flat_shifted, mc_logits), (lm_labels_flat_shifted, mc_labels)
evaluator = Engine(inference)
# Attach evaluation to trainer: we evaluate when we start the training and at the end of each epoch
trainer.add_event_handler(Events.EPOCH_COMPLETED, lambda _: evaluator.run(val_loader))
if args.n_epochs < 1:
trainer.add_event_handler(Events.COMPLETED, lambda _: evaluator.run(val_loader))
if args.eval_before_start:
trainer.add_event_handler(Events.STARTED, lambda _: evaluator.run(val_loader))
# Make sure distributed data samplers split the dataset nicely between the distributed processes
if args.distributed:
trainer.add_event_handler(Events.EPOCH_STARTED, lambda engine: train_sampler.set_epoch(engine.state.epoch))
evaluator.add_event_handler(Events.EPOCH_STARTED, lambda engine: valid_sampler.set_epoch(engine.state.epoch))
# Linearly decrease the learning rate from lr to zero
scheduler = PiecewiseLinear(optimizer, "lr", [(0, args.lr), (args.n_epochs * len(train_loader), 0.0)])
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
# Prepare metrics - note how we compute distributed metrics
RunningAverage(output_transform=lambda x: x).attach(trainer, "loss")
metrics = {"nll": Loss(torch.nn.CrossEntropyLoss(ignore_index=-100), output_transform=lambda x: (x[0][0], x[1][0])),
"accuracy": Accuracy(output_transform=lambda x: (x[0][1], x[1][1]))}
metrics.update({"average_nll": MetricsLambda(average_distributed_scalar, metrics["nll"], args),
"average_accuracy": MetricsLambda(average_distributed_scalar, metrics["accuracy"], args)})
metrics["average_ppl"] = MetricsLambda(math.exp, metrics["average_nll"])
for name, metric in metrics.items():
metric.attach(evaluator, name)
# On the main process: add progress bar, tensorboard, checkpoints and save model, configuration and tokenizer before we start to train
if args.local_rank in [-1, 0]:
pbar = ProgressBar(persist=True)
pbar.attach(trainer, metric_names=["loss"])
evaluator.add_event_handler(Events.COMPLETED, lambda _: pbar.log_message("Validation: %s" % pformat(evaluator.state.metrics)))
log_dir = make_logdir(args.model_checkpoint)
tb_logger = TensorboardLogger(log_dir)
tb_logger.attach(trainer, log_handler=OutputHandler(tag="training", metric_names=["loss"]), event_name=Events.ITERATION_COMPLETED)
tb_logger.attach(trainer, log_handler=OptimizerParamsHandler(optimizer), event_name=Events.ITERATION_STARTED)
tb_logger.attach(evaluator, log_handler=OutputHandler(tag="validation", metric_names=list(metrics.keys()), another_engine=trainer), event_name=Events.EPOCH_COMPLETED)
checkpoint_handler = ModelCheckpoint(log_dir, 'checkpoint', save_interval=1, n_saved=100)
trainer.add_event_handler(Events.EPOCH_COMPLETED, checkpoint_handler, {'mymodel': getattr(model, 'module', model)}) # "getattr" takes care of distributed encapsulation
torch.save(args, log_dir + '/model_training_args.bin')
getattr(model, 'module', model).config.to_json_file(os.path.join(log_dir, CONFIG_NAME))
tokenizer.save_pretrained(log_dir)
# Run the training
trainer.run(train_loader, max_epochs=args.n_epochs)
# On the main process: close tensorboard logger and rename the last checkpoint (for easy re-loading with OpenAIGPTModel.from_pretrained method)
if args.local_rank in [-1, 0] and args.n_epochs > 0:
os.rename(os.path.join(log_dir, checkpoint_handler._saved[-1][1]), os.path.join(log_dir, WEIGHTS_NAME)) # TODO: PR in ignite to have better access to saved file paths (cleaner)
tb_logger.close()
if __name__ == "__main__":
train()
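# Hedged illustration (not part of the original script): why update() above divides the
# loss by gradient_accumulation_steps -- accumulating the scaled backward passes and
# stepping once matches a single optimizer step on the mean loss over the accumulated batches.
def _accumulated_grad_demo(loss_values, accumulation_steps):
    w = torch.ones(1, requires_grad=True)
    for value in loss_values:
        (w * value / accumulation_steps).backward()
    return w.grad.item()
# _accumulated_grad_demo([1.0, 3.0], 2) -> 2.0, the gradient of the mean loss (w * 2.0).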
| 50.3925
| 185
| 0.675696
|
0db1ad8137fd5633a8594fed336cbd56b122fcbb
| 1,543
|
py
|
Python
|
custom_components/pr_custom_component/const.py
|
alandtse/auto_custom_component
|
572b3587621869f48e8cd75fadcee2a568a09afa
|
[
"Apache-2.0"
] | 5
|
2021-04-18T07:21:21.000Z
|
2021-09-11T03:41:54.000Z
|
custom_components/pr_custom_component/const.py
|
alandtse/auto_custom_component
|
572b3587621869f48e8cd75fadcee2a568a09afa
|
[
"Apache-2.0"
] | 12
|
2021-03-21T06:27:34.000Z
|
2021-04-29T02:55:59.000Z
|
custom_components/pr_custom_component/const.py
|
alandtse/pr_custom_component
|
572b3587621869f48e8cd75fadcee2a568a09afa
|
[
"Apache-2.0"
] | null | null | null |
"""
PRCustomComponent for Home Assistant.
SPDX-License-Identifier: Apache-2.0
Constants Platform
For more details about this integration, please refer to
https://github.com/alandtse/pr_custom_component
"""
# Base component constants
NAME = "pr_custom_component"
DOMAIN = "pr_custom_component"
HACS_DOMAIN = "hacs"
DOMAIN_DATA = f"{DOMAIN}_data"
VERSION = "0.2.0"
ISSUE_URL = "https://github.com/alandtse/pr_custom_component/issues"
# GitHub constants
PATCH_DOMAIN = "patch-diff.githubusercontent.com"
PATCH_PATH_PREFIX = "raw"
PATCH_PATH_SUFFIX = ".patch"
API_DOMAIN = "api.github.com"
API_PATH_PREFIX = "repos"
# HA Constants
COMPONENT_PATH = "homeassistant/components/"
CUSTOM_COMPONENT_PATH = "custom_components/"
TRANSLATIONS_PATH = "translations/"
STRING_FILE = "strings.json"
ENGLISH_JSON = "en.json"
# Icons
ICON = "mdi:update"
# Device classes
BINARY_SENSOR_DEVICE_CLASS = "power"
SENSOR_DEVICE_CLASS = "timestamp"
# Platforms
BINARY_SENSOR = "binary_sensor"
SENSOR = "sensor"
SWITCH = "switch"
PLATFORMS = [BINARY_SENSOR, SENSOR, SWITCH]
# Configuration and options
CONF_ENABLED = "enabled"
CONF_PR_URL = "pr_url"
# Defaults
DEFAULT_NAME = DOMAIN
STARTUP_MESSAGE = f"""
-------------------------------------------------------------------
{NAME}
Version: {VERSION}
This is a custom integration!
If you have any issues with this you need to open an issue here:
{ISSUE_URL}
-------------------------------------------------------------------
"""
EXCEPTION_TEMPLATE = "An exception of type {0} occurred. Arguments:\n{1!r}"
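# Hedged usage sketch (not part of the original const.py): how a caller might fill in
# EXCEPTION_TEMPLATE; the integration's real call sites live in other modules.
def _format_exception(ex: Exception) -> str:
    return EXCEPTION_TEMPLATE.format(type(ex).__name__, ex.args)
# _format_exception(ValueError("bad pr url"))
# -> "An exception of type ValueError occurred. Arguments:\n('bad pr url',)"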
| 23.029851
| 75
| 0.697991
|
a744202d662bc5ed27e1561a19a80dbea7277c1b
| 4,852
|
py
|
Python
|
.kodi/addons/plugin.video.salts/scrapers/wso_scraper.py
|
C6SUMMER/allinclusive-kodi-pi
|
8baf247c79526849c640c6e56ca57a708a65bd11
|
[
"Apache-2.0"
] | null | null | null |
.kodi/addons/plugin.video.salts/scrapers/wso_scraper.py
|
C6SUMMER/allinclusive-kodi-pi
|
8baf247c79526849c640c6e56ca57a708a65bd11
|
[
"Apache-2.0"
] | null | null | null |
.kodi/addons/plugin.video.salts/scrapers/wso_scraper.py
|
C6SUMMER/allinclusive-kodi-pi
|
8baf247c79526849c640c6e56ca57a708a65bd11
|
[
"Apache-2.0"
] | 2
|
2018-04-17T17:34:39.000Z
|
2020-07-26T03:43:33.000Z
|
"""
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import scraper
import re
import urlparse
import xbmcaddon
import time
from salts_lib.trans_utils import i18n
from salts_lib.constants import VIDEO_TYPES
from salts_lib.constants import QUALITIES
BASE_URL = 'http://watchseries-online.ch'
class WSO_Scraper(scraper.Scraper):
base_url = BASE_URL
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
self.base_url = xbmcaddon.Addon().getSetting('%s-base_url' % (self.get_name()))
self.max_pages = int(xbmcaddon.Addon().getSetting('%s-max_pages' % (self.get_name())))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.TVSHOW, VIDEO_TYPES.SEASON, VIDEO_TYPES.EPISODE])
@classmethod
def get_name(cls):
return 'wso.ch'
def resolve_link(self, link):
url = urlparse.urljoin(self.base_url, link)
html = self._http_get(url, cache_limit=.5)
match = re.search('href=(?:\'|")([^"\']+)(?:"|\')>Click Here to Play', html)
if match:
return match.group(1)
else:
return link
def format_source_label(self, item):
label = '[%s] %s' % (item['quality'], item['host'])
return label
def get_sources(self, video):
source_url = self.get_url(video)
hosters = []
if source_url:
url = urlparse.urljoin(self.base_url, source_url)
html = self._http_get(url, cache_limit=.5)
pattern = 'class="[^"]*tdhost".*?href="([^"]+)">([^<]+)'
for match in re.finditer(pattern, html, re.DOTALL):
stream_url, host = match.groups()
hoster = {'multi-part': False, 'host': host.lower(), 'class': self, 'url': stream_url, 'quality': self._get_quality(video, host, QUALITIES.HIGH), 'views': None, 'rating': None, 'direct': False}
hosters.append(hoster)
return hosters
def get_url(self, video):
return super(WSO_Scraper, self)._default_get_url(video)
@classmethod
def get_settings(cls):
settings = super(WSO_Scraper, cls).get_settings()
name = cls.get_name()
settings.append(' <setting id="%s-max_pages" type="slider" range="1,50" option="int" label=" %s" default="1" visible="eq(-6,true)"/>' % (name, i18n('max_pages')))
return settings
def search(self, video_type, title, year):
url = urlparse.urljoin(self.base_url, '/index')
html = self._http_get(url, cache_limit=24)
results = []
for list_match in re.finditer('class="ddmcc"(.*?)</div>', html, re.DOTALL):
list_frag = list_match.group(1)
norm_title = self._normalize_title(title)
pattern = 'href="([^"]+)">([^<]+)'
for match in re.finditer(pattern, list_frag):
url, match_title = match.groups('')
if norm_title in self._normalize_title(match_title):
result = {'url': url.replace(self.base_url, ''), 'title': match_title, 'year': ''}
results.append(result)
return results
def _get_episode_url(self, show_url, video):
episode_pattern = '<h2>\s*<a\s+href="([^"]+)[^>]+title="[^"]+[Ss]%02d[Ee]%02d[ "]' % (int(video.season), int(video.episode))
title_pattern = ''
airdate_pattern = '<h2>\s*<a\s+href="([^"]+)[^>]+title="[^"]+{year} {p_month} {p_day}[ \)"]'
for page in xrange(1, self.max_pages + 1):
url = show_url
if page > 1: url = '%s/page/%s' % (show_url, page)
# if page is blank, don't continue getting pages
url = urlparse.urljoin(self.base_url, url)
html = self._http_get(url, cache_limit=2)
if not html:
return
ep_url = super(WSO_Scraper, self)._default_get_episode_url(url, video, episode_pattern, title_pattern, airdate_pattern)
if ep_url is not None:
return ep_url
def _http_get(self, url, data=None, cache_limit=8):
return super(WSO_Scraper, self)._cached_http_get(url, self.base_url, self.timeout, data=data, cache_limit=cache_limit)
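# Hedged illustration (not part of the original scraper): the kind of anchor tag the
# resolve_link() regex above is written to match; the HTML snippet here is made up.
def _demo_resolve_pattern():
    sample = '<a href="http://example.invalid/stream">Click Here to Play</a>'
    match = re.search('href=(?:\'|")([^"\']+)(?:"|\')>Click Here to Play', sample)
    return match.group(1)  # -> 'http://example.invalid/stream'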
| 40.773109
| 209
| 0.617477
|
e4c7a83b0d60ca784cf14ea99282dee784cd7fb5
| 177
|
py
|
Python
|
DLplatform/learning/__init__.py
|
chelseajohn/dlplatform
|
429e42c598039d1e9fd1df3da4247f391915a31b
|
[
"Apache-2.0"
] | 5
|
2020-05-05T08:54:26.000Z
|
2021-02-20T07:36:28.000Z
|
DLplatform/learning/__init__.py
|
zagazao/dlplatform
|
ab32af8f89cfec4b478203bd5d13ce2d30e89ba7
|
[
"Apache-2.0"
] | 1
|
2020-11-16T14:15:53.000Z
|
2020-11-16T14:15:53.000Z
|
DLplatform/learning/__init__.py
|
zagazao/dlplatform
|
ab32af8f89cfec4b478203bd5d13ce2d30e89ba7
|
[
"Apache-2.0"
] | 4
|
2020-05-05T08:56:57.000Z
|
2020-07-22T11:28:52.000Z
|
from DLplatform.learning.learner import IncrementalLearner
from DLplatform.learning.factories import LearnerFactory
from DLplatform.learning.deeplearning.kerasNN import KerasNN
| 44.25
| 60
| 0.892655
|
421e15267bb2414fc10153d98a7f921d3d8bac79
| 1,142
|
py
|
Python
|
bauh/gems/arch/cpu_manager.py
|
alesmuc/bauh
|
5f9fbd7f38eea6f54b5d6a97848c1ee8b6a43ecb
|
[
"Zlib"
] | null | null | null |
bauh/gems/arch/cpu_manager.py
|
alesmuc/bauh
|
5f9fbd7f38eea6f54b5d6a97848c1ee8b6a43ecb
|
[
"Zlib"
] | null | null | null |
bauh/gems/arch/cpu_manager.py
|
alesmuc/bauh
|
5f9fbd7f38eea6f54b5d6a97848c1ee8b6a43ecb
|
[
"Zlib"
] | null | null | null |
import multiprocessing
import os
import traceback
from bauh.commons.system import new_root_subprocess
def supports_performance_mode():
return os.path.exists('/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor')
def all_in_performance() -> bool:
for i in range(multiprocessing.cpu_count()):
with open('/sys/devices/system/cpu/cpu{}/cpufreq/scaling_governor'.format(i)) as f:
if f.read().strip() != 'performance':
return False
return True
def set_mode(mode: str, root_password: str):
new_gov_file = '/tmp/bauh_scaling_governor'
with open(new_gov_file, 'w+') as f:
f.write(mode)
for i in range(multiprocessing.cpu_count()):
try:
gov_file = '/sys/devices/system/cpu/cpu{}/cpufreq/scaling_governor'.format(i)
replace = new_root_subprocess(['cp', new_gov_file, gov_file], root_password=root_password)
replace.wait()
except:
traceback.print_exc()
if os.path.exists(new_gov_file):
try:
os.remove(new_gov_file)
except:
traceback.print_exc()
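# Hedged usage sketch (not part of the original module): a caller would typically check
# support first and only then switch governors; 'performance' is the governor name the
# helpers above already assume, and root_password is whatever bauh collected from the user.
def ensure_performance(root_password: str):
    if supports_performance_mode() and not all_in_performance():
        set_mode('performance', root_password)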
| 29.282051
| 102
| 0.642732
|
c3728c5debf368235ae0382d859e73ca1866f816
| 7,139
|
py
|
Python
|
tensorflow/python/kernel_tests/benchmark_test.py
|
ln0119/tensorflow-fast-rcnn
|
e937e6394818c9a320754237651d7fe083b1020d
|
[
"Apache-2.0"
] | 73
|
2017-01-05T09:06:08.000Z
|
2021-11-06T14:00:50.000Z
|
tensorflow/python/kernel_tests/benchmark_test.py
|
minhhoai2/tensorflow
|
da88903d5e29230d68d861053aa1dea1432c0696
|
[
"Apache-2.0"
] | 8
|
2017-04-10T10:36:20.000Z
|
2021-02-07T01:02:32.000Z
|
tensorflow/python/kernel_tests/benchmark_test.py
|
minhhoai2/tensorflow
|
da88903d5e29230d68d861053aa1dea1432c0696
|
[
"Apache-2.0"
] | 151
|
2016-11-10T09:01:15.000Z
|
2022-01-18T08:13:49.000Z
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.importer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import random
import tensorflow as tf
from tensorflow.core.util import test_log_pb2
from tensorflow.python.platform import benchmark
# Used by SomeRandomBenchmark class below.
_ran_somebenchmark_1 = [False]
_ran_somebenchmark_2 = [False]
_ran_somebenchmark_but_shouldnt = [False]
class SomeRandomBenchmark(tf.test.Benchmark):
"""This Benchmark should automatically be registered in the registry."""
def _dontRunThisBenchmark(self):
_ran_somebenchmark_but_shouldnt[0] = True
def notBenchmarkMethod(self):
_ran_somebenchmark_but_shouldnt[0] = True
def benchmark1(self):
_ran_somebenchmark_1[0] = True
def benchmark2(self):
_ran_somebenchmark_2[0] = True
class TestReportingBenchmark(tf.test.Benchmark):
"""This benchmark (maybe) reports some stuff."""
def benchmarkReport1(self):
self.report_benchmark(iters=1)
def benchmarkReport2(self):
self.report_benchmark(
iters=2, name="custom_benchmark_name",
extras={"number_key": 3, "other_key": "string"})
def benchmark_times_an_op(self):
with tf.Session() as sess:
a = tf.constant(0.0)
a_plus_a = a + a
self.run_op_benchmark(
sess, a_plus_a, min_iters=1000, store_trace=True,
name="op_benchmark")
class BenchmarkTest(tf.test.TestCase):
def testGlobalBenchmarkRegistry(self):
registry = list(benchmark.GLOBAL_BENCHMARK_REGISTRY)
self.assertEqual(len(registry), 2)
self.assertTrue(SomeRandomBenchmark in registry)
self.assertTrue(TestReportingBenchmark in registry)
def testRunSomeRandomBenchmark(self):
# Validate that SomeBenchmark has not run yet
self.assertFalse(_ran_somebenchmark_1[0])
self.assertFalse(_ran_somebenchmark_2[0])
self.assertFalse(_ran_somebenchmark_but_shouldnt[0])
# Run other benchmarks, but this won't run the one we care about
benchmark._run_benchmarks("unrelated")
# Validate that SomeBenchmark has not run yet
self.assertFalse(_ran_somebenchmark_1[0])
self.assertFalse(_ran_somebenchmark_2[0])
self.assertFalse(_ran_somebenchmark_but_shouldnt[0])
# Run all the benchmarks, avoid generating any reports
if benchmark.TEST_REPORTER_TEST_ENV in os.environ:
del os.environ[benchmark.TEST_REPORTER_TEST_ENV]
benchmark._run_benchmarks("SomeRandom")
# Validate that SomeRandomBenchmark ran correctly
self.assertTrue(_ran_somebenchmark_1[0])
self.assertTrue(_ran_somebenchmark_2[0])
self.assertFalse(_ran_somebenchmark_but_shouldnt[0])
_ran_somebenchmark_1[0] = False
_ran_somebenchmark_2[0] = False
_ran_somebenchmark_but_shouldnt[0] = False
# Test running a specific method of SomeRandomBenchmark
if benchmark.TEST_REPORTER_TEST_ENV in os.environ:
del os.environ[benchmark.TEST_REPORTER_TEST_ENV]
benchmark._run_benchmarks("SomeRandom.*1$")
self.assertTrue(_ran_somebenchmark_1[0])
self.assertFalse(_ran_somebenchmark_2[0])
self.assertFalse(_ran_somebenchmark_but_shouldnt[0])
def testReportingBenchmark(self):
tempdir = tf.test.get_temp_dir()
try:
tf.gfile.MakeDirs(tempdir)
except OSError as e:
# It's OK if the directory already exists.
if " exists:" not in str(e):
raise e
prefix = os.path.join(
tempdir, "reporting_bench_%016x_" % random.getrandbits(64))
expected_output_file = "%s%s" % (
prefix, "TestReportingBenchmark.benchmarkReport1")
expected_output_file_2 = "%s%s" % (
prefix, "TestReportingBenchmark.custom_benchmark_name")
expected_output_file_3 = "%s%s" % (
prefix, "TestReportingBenchmark.op_benchmark")
try:
self.assertFalse(tf.gfile.Exists(expected_output_file))
# Run benchmark but without env, shouldn't write anything
if benchmark.TEST_REPORTER_TEST_ENV in os.environ:
del os.environ[benchmark.TEST_REPORTER_TEST_ENV]
reporting = TestReportingBenchmark()
reporting.benchmarkReport1() # This should run without writing anything
self.assertFalse(tf.gfile.Exists(expected_output_file))
# Run benchmark with env; this should write
os.environ[benchmark.TEST_REPORTER_TEST_ENV] = prefix
reporting = TestReportingBenchmark()
reporting.benchmarkReport1() # This should write
reporting.benchmarkReport2() # This should write
reporting.benchmark_times_an_op() # This should write
# Check the files were written
self.assertTrue(tf.gfile.Exists(expected_output_file))
self.assertTrue(tf.gfile.Exists(expected_output_file_2))
self.assertTrue(tf.gfile.Exists(expected_output_file_3))
# Check the contents are correct
expected_1 = test_log_pb2.BenchmarkEntry()
expected_1.name = "TestReportingBenchmark.benchmarkReport1"
expected_1.iters = 1
expected_2 = test_log_pb2.BenchmarkEntry()
expected_2.name = "TestReportingBenchmark.custom_benchmark_name"
expected_2.iters = 2
expected_2.extras["number_key"].double_value = 3
expected_2.extras["other_key"].string_value = "string"
expected_3 = test_log_pb2.BenchmarkEntry()
expected_3.name = "TestReportingBenchmark.op_benchmark"
expected_3.iters = 1000
def read_benchmark_entry(f):
s = tf.gfile.GFile(f, "rb").read()
entries = test_log_pb2.BenchmarkEntries.FromString(s)
self.assertEquals(1, len(entries.entry))
return entries.entry[0]
read_benchmark_1 = read_benchmark_entry(expected_output_file)
self.assertProtoEquals(expected_1, read_benchmark_1)
read_benchmark_2 = read_benchmark_entry(expected_output_file_2)
self.assertProtoEquals(expected_2, read_benchmark_2)
read_benchmark_3 = read_benchmark_entry(expected_output_file_3)
self.assertEquals(expected_3.name, read_benchmark_3.name)
self.assertEquals(expected_3.iters, read_benchmark_3.iters)
self.assertGreater(read_benchmark_3.wall_time, 0)
full_trace = read_benchmark_3.extras["full_trace_chrome_format"]
json_trace = json.loads(full_trace.string_value)
self.assertTrue(isinstance(json_trace, dict))
self.assertTrue("traceEvents" in json_trace.keys())
finally:
tf.gfile.DeleteRecursively(tempdir)
if __name__ == "__main__":
tf.test.main()
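# Hedged sketch (not part of the original test, and kept as a comment so it does not
# register itself and change the registry-size assertion in testGlobalBenchmarkRegistry):
# the minimal shape of a benchmark the registry/reporting machinery above expects.
#
#   class MinimalExampleBenchmark(tf.test.Benchmark):
#
#     def benchmarkNothing(self):
#       self.report_benchmark(iters=1, name="minimal_example")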
| 35.874372
| 80
| 0.734417
|
5cbf03cf0ad219c7cb7672cbc76a022d83dbaa4f
| 1,527
|
py
|
Python
|
examples/template_security.py
|
babak1369/TemplateSecurity
|
3b4aa70d51e02f3fa0d99e2e068cadd7a865baf0
|
[
"MIT"
] | 1
|
2020-03-20T05:21:39.000Z
|
2020-03-20T05:21:39.000Z
|
examples/template_security.py
|
babak1369/TemplateSecurity
|
3b4aa70d51e02f3fa0d99e2e068cadd7a865baf0
|
[
"MIT"
] | null | null | null |
examples/template_security.py
|
babak1369/TemplateSecurity
|
3b4aa70d51e02f3fa0d99e2e068cadd7a865baf0
|
[
"MIT"
] | null | null | null |
import numpy as np
import garbledcircuit as gc
import time
"""
in this example we check whether the query np.array([0,25,23,4]) is close enough (threshold = 4)
to the template A = np.array([0,25,24,3])
"""
A = np.array([0,25,24,3])
dimension = 4
precision = 10
security = 100
threshold = 4
ts = gc.TemplateSecurity(A,precision,threshold)
wires_mult,wires_euc,et_list,et_euc,gc_euc,list_of_gc, keys_euc,keys_list_mult,square_sum_query,current,m = ts.parallel_euc_setup()
available_keys_euc = keys_euc[square_sum_query[1,0]:square_sum_query[1,0]+2*precision]
mult_keys_np = np.array(keys_list_mult)
available_keys_mult = mult_keys_np[:,precision:2*precision,:]
query = np.array([0,25,23,1114])
wires_euc,wires_mult = ts.parallel_prepare_query(query,square_sum_query,wires_euc,wires_mult,available_keys_euc,available_keys_mult)
t = time.time()
wi = gc.group_degarbling_(np.array(et_list),wires_mult,list_of_gc[0].circuit,list_of_gc[0].security)
wires_euc[0:2*precision*dimension,:] = wi[:,m.matrix[1,:]].reshape((2*precision*dimension,security+1))
wires = gc.degarbling_(et_euc,wires_euc,gc_euc.circuit,security)
print("computation time: ", time.time()-t)
result_wire_number = current.matrix[1,2*precision-1]
print("the last bit of the result is in wire number = ", current.matrix[1,2*precision-1])
print (" the output is : ", wires[result_wire_number])
print("it is authenticated if output = " , keys_euc[result_wire_number][9])
print("it is not authenticated if output = " , keys_euc[result_wire_number][0])
| 47.71875
| 132
| 0.769483
|
0a19c6e79fb377825e3f9ce2b3bb1db464cadaa9
| 3,070
|
py
|
Python
|
safe_transaction_service/notifications/views.py
|
byteflyfunny/safe-transaction-service
|
2a1a855d9881181a57692057aeb91c9fd8ae3de5
|
[
"MIT"
] | 67
|
2019-08-16T16:26:42.000Z
|
2022-03-21T20:32:43.000Z
|
safe_transaction_service/notifications/views.py
|
byteflyfunny/safe-transaction-service
|
2a1a855d9881181a57692057aeb91c9fd8ae3de5
|
[
"MIT"
] | 550
|
2019-07-11T12:09:06.000Z
|
2022-03-31T16:32:00.000Z
|
safe_transaction_service/notifications/views.py
|
byteflyfunny/safe-transaction-service
|
2a1a855d9881181a57692057aeb91c9fd8ae3de5
|
[
"MIT"
] | 83
|
2019-12-06T11:22:32.000Z
|
2022-03-30T10:09:22.000Z
|
import logging
from drf_yasg.utils import swagger_auto_schema
from rest_framework import status
from rest_framework.generics import CreateAPIView, DestroyAPIView
from rest_framework.response import Response
from safe_transaction_service.history.models import SafeContract
from . import serializers
from .models import FirebaseDevice
from .utils import get_safe_owners
logger = logging.getLogger(__name__)
class FirebaseDeviceCreateView(CreateAPIView):
"""
Creates a new FirebaseDevice. If uuid is not provided a new device will be created.
If a uuid for an existing Safe is provided the FirebaseDevice will be updated with all the new data provided.
Safes provided on the request are always added and never removed/replaced
Signature must sign `keccak256('gnosis-safe{timestamp-epoch}{uuid}{cloud_messaging_token}{safes_sorted}')`:
- `{timestamp-epoch}` must be an integer (no milliseconds)
- `{safes_sorted}` must be checksummed safe addresses sorted and joined with no spaces
"""
serializer_class = serializers.FirebaseDeviceSerializer
response_serializer_class = (
serializers.FirebaseDeviceSerializerWithOwnersResponseSerializer
)
@swagger_auto_schema(
responses={200: response_serializer_class(), 400: "Invalid data"}
)
def post(self, request, *args, **kwargs):
return super().post(request, *args, **kwargs)
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
response_serializer = self.response_serializer_class(
data=serializer.validated_data
)
response_serializer.is_valid(raise_exception=True)
headers = self.get_success_headers(response_serializer.data)
return Response(
response_serializer.data, status=status.HTTP_201_CREATED, headers=headers
)
class FirebaseDeviceDeleteView(DestroyAPIView):
"""
Remove a FirebaseDevice
"""
queryset = FirebaseDevice.objects.all()
class FirebaseDeviceSafeDeleteView(DestroyAPIView):
"""
Remove a Safe for a FirebaseDevice
"""
queryset = FirebaseDevice.objects.all()
def perform_destroy(self, firebase_device: FirebaseDevice):
safe_address = self.kwargs["address"]
try:
safe_contract = SafeContract.objects.get(address=safe_address)
firebase_device.safes.remove(safe_contract)
current_owners = {
owner
for safe in firebase_device.safes.values_list("address", flat=True)
for owner in get_safe_owners(safe)
}
# Remove owners not linked to any Safe
firebase_device.owners.exclude(owner__in=current_owners).delete()
except SafeContract.DoesNotExist:
logger.info(
"Cannot remove safe=%s for firebase_device with uuid=%s",
safe_address,
self.kwargs["pk"],
)
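# Hedged client-side sketch (not part of the original views): how a caller might assemble
# the message described in FirebaseDeviceCreateView's docstring before hashing and signing
# it. The sorting/joining of checksummed Safe addresses follows that docstring; the
# keccak256 hashing and the signature itself are done by the client's own tooling and are
# omitted here.
def build_notification_auth_message(timestamp_epoch: int, uuid: str, cloud_messaging_token: str, safes: list) -> str:
    safes_sorted = "".join(sorted(safes))
    return f"gnosis-safe{timestamp_epoch}{uuid}{cloud_messaging_token}{safes_sorted}"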
| 36.117647
| 113
| 0.701303
|
1207537653559b153e8a8bce049dd7571ab7b4ea
| 6,010
|
py
|
Python
|
ScanQLi-master/scanqli.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | 2
|
2021-11-17T03:35:03.000Z
|
2021-12-08T06:00:31.000Z
|
ScanQLi-master/scanqli.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | null | null | null |
ScanQLi-master/scanqli.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | 2
|
2021-11-05T18:07:48.000Z
|
2022-02-24T21:25:07.000Z
|
#!/usr/bin/python
import function
import requests
import time
import config
from termcolor import colored
import optparse_mooi
import optparse
import validators
import progressbar
import json
from operator import is_not
from functools import partial
import logo
import numpy
import os
try:
import urlparse # Python2
except ImportError:
import urllib.parse as urlparse # Python3
# Define parser
examples_message = """\nExamples:
python scanqli.py -u 'http://127.0.0.1/test/?p=news' -o output.log\n python scanqli.py -u 'https://127.0.0.1/test/' -r -c '{"PHPSESSID":"4bn7uro8qq62ol4o667bejbqo3" , "Session":"Mzo6YWMwZGRmOWU2NWQ1N2I2YTU2YjI0NTMzODZjZDVkYjU="}'\n"""
logo_message = logo.chooselogo()
parser = optparse.OptionParser(description=logo_message, usage = "python scanqli.py -u [url] [options]", epilog = examples_message, formatter=optparse_mooi.CompactHelpFormatter(align_long_opts=True, metavar_column=20))
groupscan = optparse.OptionGroup(parser, "Scanning")
groupoutput = optparse.OptionGroup(parser, "Output")
groupscan.add_option('-u', "--url", action="store", dest="url", help="URL to scan", default=None)
groupscan.add_option('-U', "--urllist", action="store", metavar="file", dest="urllist", help="URL list to scan (one line by url)", default=None)
groupscan.add_option('-i', "--ignore", action="append", metavar="url", dest="iurl", help="Ignore given URLs during scan", default=None)
groupscan.add_option('-I', "--ignorelist", action="store", metavar="file", dest="iurllist", help="Ignore given URLs list (one line by url)", default=None)
groupscan.add_option('-c', "--cookies", action="store", metavar="cookies", dest="cookies", help="Scan with given cookies", default=None, type=str)
groupscan.add_option('-s', "--nosslcheck", action="store_true", dest="nosslcheck", help="Don't verify SSL certs")
groupscan.add_option('-q', "--quick", action="store_true", dest="quick", help="Check only very basic vulns", default=None)
groupscan.add_option('-r', "--recursive", action="store_true", dest="recursive", help="Recursive URL scan (will follow each href)", default=False)
groupscan.add_option('-w', "--wait", action="store", metavar="seconds", dest="waittime", help="Wait time between each request", default=None, type=str)
groupoutput.add_option('-v', "--verbose", action="store_true", dest="verbose", help="Display all tested URLs", default=False)
groupoutput.add_option('-o', "--output", action="store", metavar="file", dest="output", help="Write outputs in file", default=None)
parser.add_option_group(groupscan)
parser.add_option_group(groupoutput)
options, args = parser.parse_args()
# Check requiered arg
if not options.url and not options.urllist:
parser.print_help()
exit(0)
elif options.url and validators.url(options.url):
url = [options.url]
elif options.urllist:
text_file = open(options.urllist, "r")
url = text_file.read().split('\n')
url = filter(partial(is_not, ""), url)
for infile in url:
if not validators.url(infile):
function.PrintError("-u " + infile, "Malformed URL. Please given a valid URL")
exit(0)
else:
function.PrintError("-u " + options.url, "Malformed URL. Please given a valid URL")
exit(0)
# Check verbose args
function.verbose = options.verbose
# Check log file write perm
if options.output:
if function.CheckFilePerm(options.output):
progressbar.logfile = options.output
else:
function.PrintError("-o " + options.output, "No write permission for output file")
exit(0)
# Check Banned URLs
if options.iurl:
for bannedurl in options.iurl:
if validators.url(bannedurl):
config.BannedURLs.append(bannedurl)
else:
function.PrintError("-i " + bannedurl, "Malformed URL. Please given a valid URL")
exit(0)
if options.iurllist:
try:
filelist = open(options.iurllist, "r")
for iurl in filelist:
if validators.url(iurl):
config.BannedURLs.append(iurl.replace("\n", ""))
else:
function.PrintError("-I " + options.iurllist + " : " + iurl, "Malformed URL. Please given a valid URL")
exit(0)
except IOError:
function.PrintError("-I " + options.iurllist, "Unable to read the given file")
exit(0)
# Cookies
if options.cookies:
function.cookies = json.loads(options.cookies)
# NoSSLCheck
if options.nosslcheck:
function.verifyssl = False
# Wait time
if options.waittime:
function.waittime = float(options.waittime)
# Quick scan
if options.quick:
config.scantype = "quick"
# init config
config.init()
# Start
starttime = time.time()
print(logo.chooselogo() + "\n")
try:
if options.recursive:
baseurl = []
for uniturl in url:
if uniturl[-1:] != "/" and os.path.splitext(urlparse.urlparse(uniturl).path)[1] == "":
uniturl = uniturl + "/"
baseurl.append(uniturl)
print("Base URL = " + uniturl)
pageset = function.GetAllPages(baseurl)
print(str(len(pageset)) + " URLs founds")
else:
pageset = {None:None}
for uniturl in url:
print("URL = " + uniturl)
pageset.update({uniturl:function.GetHTML(uniturl)})
pageset.pop(None)
print("----------------------------")
function.vulnscanstrated = True
result = function.CheckPageListAllVulns(pageset)
except KeyboardInterrupt:
print("\nStopped after " + str(round(time.time() - starttime, 2)) + " seconds")
exit(0)
print("----------------------------")
try:
resultlen = numpy.shape(result)[0] * numpy.shape(result)[1]
except IndexError:
resultlen = 0
if resultlen <= 1:
print(colored(str(resultlen) + " vulnerability ", attrs=["bold"]) + "found in " + str(round(time.time() - starttime, 2)) + " seconds!")
else:
print(colored(str(resultlen) + " vulnerabilities ", attrs=["bold"]) + "founds in " + str(round(time.time() - starttime, 2)) + " seconds!")
| 38.525641
| 237
| 0.669717
|
877a91f7e8e73abd38ce94542dc017ac6f7fbf37
| 24,703
|
py
|
Python
|
google/ads/googleads/v7/services/services/keyword_plan_campaign_keyword_service/client.py
|
wxxlouisa/google-ads-python
|
f24137966f6bfcb765a9b1fae79f2d23041825fe
|
[
"Apache-2.0"
] | 285
|
2018-10-05T16:47:58.000Z
|
2022-03-31T00:58:39.000Z
|
google/ads/googleads/v7/services/services/keyword_plan_campaign_keyword_service/client.py
|
wxxlouisa/google-ads-python
|
f24137966f6bfcb765a9b1fae79f2d23041825fe
|
[
"Apache-2.0"
] | 425
|
2018-09-10T13:32:41.000Z
|
2022-03-31T14:50:05.000Z
|
google/ads/googleads/v7/services/services/keyword_plan_campaign_keyword_service/client.py
|
wxxlouisa/google-ads-python
|
f24137966f6bfcb765a9b1fae79f2d23041825fe
|
[
"Apache-2.0"
] | 369
|
2018-11-28T07:01:00.000Z
|
2022-03-28T09:53:22.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v7.resources.types import (
keyword_plan_campaign_keyword,
)
from google.ads.googleads.v7.services.types import (
keyword_plan_campaign_keyword_service,
)
from google.rpc import status_pb2 as status # type: ignore
from .transports.base import (
KeywordPlanCampaignKeywordServiceTransport,
DEFAULT_CLIENT_INFO,
)
from .transports.grpc import KeywordPlanCampaignKeywordServiceGrpcTransport
class KeywordPlanCampaignKeywordServiceClientMeta(type):
"""Metaclass for the KeywordPlanCampaignKeywordService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[KeywordPlanCampaignKeywordServiceTransport]]
_transport_registry["grpc"] = KeywordPlanCampaignKeywordServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[KeywordPlanCampaignKeywordServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class KeywordPlanCampaignKeywordServiceClient(
metaclass=KeywordPlanCampaignKeywordServiceClientMeta
):
"""Service to manage Keyword Plan campaign keywords.
KeywordPlanCampaign is required to add the campaign keywords.
Only negative keywords are supported. A maximum of 1000 negative
keywords are allowed per plan. This includes both campaign
negative keywords and ad group negative keywords.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
KeywordPlanCampaignKeywordServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
KeywordPlanCampaignKeywordServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> KeywordPlanCampaignKeywordServiceTransport:
"""Return the transport used by the client instance.
Returns:
KeywordPlanCampaignKeywordServiceTransport: The transport used by the client instance.
"""
return self._transport
@staticmethod
def keyword_plan_campaign_path(
customer_id: str, keyword_plan_campaign_id: str,
) -> str:
"""Return a fully-qualified keyword_plan_campaign string."""
return "customers/{customer_id}/keywordPlanCampaigns/{keyword_plan_campaign_id}".format(
customer_id=customer_id,
keyword_plan_campaign_id=keyword_plan_campaign_id,
)
@staticmethod
def parse_keyword_plan_campaign_path(path: str) -> Dict[str, str]:
"""Parse a keyword_plan_campaign path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/keywordPlanCampaigns/(?P<keyword_plan_campaign_id>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def keyword_plan_campaign_keyword_path(
customer_id: str, keyword_plan_campaign_keyword_id: str,
) -> str:
"""Return a fully-qualified keyword_plan_campaign_keyword string."""
return "customers/{customer_id}/keywordPlanCampaignKeywords/{keyword_plan_campaign_keyword_id}".format(
customer_id=customer_id,
keyword_plan_campaign_keyword_id=keyword_plan_campaign_keyword_id,
)
@staticmethod
def parse_keyword_plan_campaign_keyword_path(path: str) -> Dict[str, str]:
"""Parse a keyword_plan_campaign_keyword path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/keywordPlanCampaignKeywords/(?P<keyword_plan_campaign_keyword_id>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[credentials.Credentials] = None,
transport: Union[
str, KeywordPlanCampaignKeywordServiceTransport, None
] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the keyword plan campaign keyword service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.KeywordPlanCampaignKeywordServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
util.strtobool(
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
)
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, KeywordPlanCampaignKeywordServiceTransport):
# transport is a KeywordPlanCampaignKeywordServiceTransport instance.
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = KeywordPlanCampaignKeywordServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def get_keyword_plan_campaign_keyword(
self,
request: keyword_plan_campaign_keyword_service.GetKeywordPlanCampaignKeywordRequest = None,
*,
resource_name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> keyword_plan_campaign_keyword.KeywordPlanCampaignKeyword:
r"""Returns the requested plan in full detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (:class:`google.ads.googleads.v7.services.types.GetKeywordPlanCampaignKeywordRequest`):
The request object.
Request message for
[KeywordPlanCampaignKeywordService.GetKeywordPlanCampaignKeyword][google.ads.googleads.v7.services.KeywordPlanCampaignKeywordService.GetKeywordPlanCampaignKeyword].
resource_name (:class:`str`):
Required. The resource name of the
plan to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v7.resources.types.KeywordPlanCampaignKeyword:
A Keyword Plan Campaign keyword.
Only negative keywords are supported for
Campaign Keyword.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a keyword_plan_campaign_keyword_service.GetKeywordPlanCampaignKeywordRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request,
keyword_plan_campaign_keyword_service.GetKeywordPlanCampaignKeywordRequest,
):
request = keyword_plan_campaign_keyword_service.GetKeywordPlanCampaignKeywordRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_keyword_plan_campaign_keyword
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("resource_name", request.resource_name),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
def mutate_keyword_plan_campaign_keywords(
self,
request: keyword_plan_campaign_keyword_service.MutateKeywordPlanCampaignKeywordsRequest = None,
*,
customer_id: str = None,
operations: Sequence[
keyword_plan_campaign_keyword_service.KeywordPlanCampaignKeywordOperation
] = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> keyword_plan_campaign_keyword_service.MutateKeywordPlanCampaignKeywordsResponse:
r"""Creates, updates, or removes Keyword Plan campaign keywords.
Operation statuses are returned.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `DatabaseError <>`__ `FieldError <>`__
`HeaderError <>`__ `InternalError <>`__
`KeywordPlanAdGroupKeywordError <>`__
`KeywordPlanCampaignKeywordError <>`__ `QuotaError <>`__
`RequestError <>`__ `ResourceCountLimitExceededError <>`__
Args:
request (:class:`google.ads.googleads.v7.services.types.MutateKeywordPlanCampaignKeywordsRequest`):
The request object.
Request message for
[KeywordPlanCampaignKeywordService.MutateKeywordPlanCampaignKeywords][google.ads.googleads.v7.services.KeywordPlanCampaignKeywordService.MutateKeywordPlanCampaignKeywords].
customer_id (:class:`str`):
Required. The ID of the customer
whose campaign keywords are being
modified.
This corresponds to the ``customer_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
operations (:class:`Sequence[google.ads.googleads.v7.services.types.KeywordPlanCampaignKeywordOperation]`):
Required. The list of operations to
perform on individual Keyword Plan
campaign keywords.
This corresponds to the ``operations`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v7.services.types.MutateKeywordPlanCampaignKeywordsResponse:
Response message for a Keyword Plan
campaign keyword mutate.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([customer_id, operations]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a keyword_plan_campaign_keyword_service.MutateKeywordPlanCampaignKeywordsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request,
keyword_plan_campaign_keyword_service.MutateKeywordPlanCampaignKeywordsRequest,
):
request = keyword_plan_campaign_keyword_service.MutateKeywordPlanCampaignKeywordsRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if customer_id is not None:
request.customer_id = customer_id
if operations is not None:
request.operations = operations
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.mutate_keyword_plan_campaign_keywords
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("customer_id", request.customer_id),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
__all__ = ("KeywordPlanCampaignKeywordServiceClient",)
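# Hedged usage sketch (not part of the generated client): the resource-name helpers above
# are pure string formatting/parsing and can be exercised without any credentials.
def _demo_resource_name_helpers():
    path = KeywordPlanCampaignKeywordServiceClient.keyword_plan_campaign_keyword_path(
        "123", "456"
    )
    assert path == "customers/123/keywordPlanCampaignKeywords/456"
    parsed = KeywordPlanCampaignKeywordServiceClient.parse_keyword_plan_campaign_keyword_path(
        path
    )
    assert parsed == {
        "customer_id": "123",
        "keyword_plan_campaign_keyword_id": "456",
    }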
| 42.299658
| 188
| 0.646075
|
aee5f99e43f63caad7362676a57a0648734cfd0d
| 2,960
|
py
|
Python
|
terra/feeds.py
|
ClaudioMeinberg/tuneeco_gold
|
4eb5ab64e1ab4c7dcd7a0e34299bbbf6e7813dc0
|
[
"MIT"
] | null | null | null |
terra/feeds.py
|
ClaudioMeinberg/tuneeco_gold
|
4eb5ab64e1ab4c7dcd7a0e34299bbbf6e7813dc0
|
[
"MIT"
] | null | null | null |
terra/feeds.py
|
ClaudioMeinberg/tuneeco_gold
|
4eb5ab64e1ab4c7dcd7a0e34299bbbf6e7813dc0
|
[
"MIT"
] | null | null | null |
from django.contrib.syndication import views
from terra.models import Video, Feed
from django.utils.feedgenerator import Rss201rev2Feed
from django.utils import timezone
from django.utils.safestring import mark_safe
from html.parser import HTMLParser
unescape = HTMLParser().unescape
class TerraVideoFeed(Rss201rev2Feed):
def rss_attributes(self):
attrs = super().rss_attributes()
attrs['xmlns:media'] = 'http://search.yahoo.com/mrss/'
attrs['xmlns:content'] = 'http://purl.org/rss/1.0/modules/content/'
return attrs
def add_item_elements(self, handler, item):
super().add_item_elements(handler, item)
content = item['content']
handler.startElement('content:encoded', {})
handler._write(f'<![CDATA[ {content} ]]>')
handler.endElement('content:encoded')
video = dict(url=item['url_feed_video'])
video['type'] = item['video_file_type']
video['duration'] = item['video_duration']
video['medium'] = 'video'
handler.startElement("media:content", video)
thumbnail = dict(url=item['url_feed_thumb'])
handler.addQuickElement('media:thumbnail', '', thumbnail)
handler.endElement("media:content")
def root_attributes(self):
attrs = super().root_attributes()
return attrs
def add_root_elements(self, handler):
super().add_root_elements(handler)
class LatestVideosFeed(views.Feed):
feed_type = TerraVideoFeed
def get_object(self, request, slug, item_id=None):
feed = Feed.objects.get(slug=slug)
self.title = feed.title
self.link = feed.link
self.description = feed.description
return feed
def items(self, feed):
return Video.objects.filter(
processed=True,
published__lt=timezone.now(),
feed=feed,
status=Video.Status.APROVADO
).order_by('-published')[:feed.size]
def item_title(self, item):
return item.title
def item_link(self, item):
return item.article_link
def item_guid(self, item):
return item.id
    # GUIDs are plain video IDs, not permalinks.
    item_guid_is_permalink = False
def item_description(self, item):
return item.description
def item_categories(self, item):
return item.categories.all()
def item_pubdate(self, item):
return item.published
def item_extra_kwargs(self, item):
thumb_url = f'{item.feed.thumbnail_uri}{item.url_feed_thumb}'
thumb_str = f'<img src="{thumb_url}" style="display:none; ">'
content = f'{item.content}{thumb_str}'
return {
'url_feed_video': f'{item.feed.video_uri}{item.url_feed_video}',
'url_feed_thumb': thumb_url,
'video_file_type': item.video_file_type,
'video_duration': str(item.video_duration),
'content': content,
}
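A wiring sketch for the feed class above; the URL prefix and pattern name are assumptions, and the captured slug is what LatestVideosFeed.get_object() receives:

# urls.py (sketch)
from django.urls import path
from terra.feeds import LatestVideosFeed

urlpatterns = [
    path('feeds/<slug:slug>/', LatestVideosFeed(), name='video-feed'),
]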
| 28.737864
| 76
| 0.643581
|
ba1cef98f03ef5e2573d6c73a385ae1ae59b4ba7
| 50,390
|
py
|
Python
|
decoding/GAD/fairseq/models/transformer.py
|
maxpark/unilm
|
cd0cc7e7207dd029db9c8f11e3568fb385be6a29
|
[
"MIT"
] | 1
|
2021-11-07T00:30:05.000Z
|
2021-11-07T00:30:05.000Z
|
decoding/GAD/fairseq/models/transformer.py
|
maxpark/unilm
|
cd0cc7e7207dd029db9c8f11e3568fb385be6a29
|
[
"MIT"
] | null | null | null |
decoding/GAD/fairseq/models/transformer.py
|
maxpark/unilm
|
cd0cc7e7207dd029db9c8f11e3568fb385be6a29
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Any, Dict, List, Optional, Tuple
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
from fairseq.modules import (
AdaptiveSoftmax,
FairseqDropout,
LayerDropModuleList,
LayerNorm,
PositionalEmbedding,
SinusoidalPositionalEmbedding,
TransformerDecoderLayer,
TransformerEncoderLayer,
)
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
from torch import Tensor
DEFAULT_MAX_SOURCE_POSITIONS = 1024
DEFAULT_MAX_TARGET_POSITIONS = 1024
@register_model("transformer")
class TransformerModel(FairseqEncoderDecoderModel):
"""
Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017)
<https://arxiv.org/abs/1706.03762>`_.
Args:
encoder (TransformerEncoder): the encoder
decoder (TransformerDecoder): the decoder
The Transformer model provides the following named architectures and
command-line arguments:
.. argparse::
:ref: fairseq.models.transformer_parser
:prog:
"""
@classmethod
def hub_models(cls):
# fmt: off
def moses_subword(path):
return {
'path': path,
'tokenizer': 'moses',
'bpe': 'subword_nmt',
}
def moses_fastbpe(path):
return {
'path': path,
'tokenizer': 'moses',
'bpe': 'fastbpe',
}
def spm(path):
return {
'path': path,
'bpe': 'sentencepiece',
'tokenizer': 'space',
}
return {
'transformer.wmt14.en-fr': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/wmt14.en-fr.joined-dict.transformer.tar.bz2'),
'transformer.wmt16.en-de': 'https://dl.fbaipublicfiles.com/fairseq/models/wmt16.en-de.joined-dict.transformer.tar.bz2',
'transformer.wmt18.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/wmt18.en-de.ensemble.tar.gz'),
'transformer.wmt19.en-de': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.ensemble.tar.gz'),
'transformer.wmt19.en-ru': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.ensemble.tar.gz'),
'transformer.wmt19.de-en': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.ensemble.tar.gz'),
'transformer.wmt19.ru-en': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.ensemble.tar.gz'),
'transformer.wmt19.en-de.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.single_model.tar.gz'),
'transformer.wmt19.en-ru.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.single_model.tar.gz'),
'transformer.wmt19.de-en.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.single_model.tar.gz'),
'transformer.wmt19.ru-en.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.single_model.tar.gz'),
'transformer.wmt20.en-ta': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-ta.single.tar.gz'),
'transformer.wmt20.en-iu.news': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-iu.news.single.tar.gz'),
'transformer.wmt20.en-iu.nh': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-iu.nh.single.tar.gz'),
'transformer.wmt20.ta-en': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.ta-en.single.tar.gz'),
'transformer.wmt20.iu-en.news': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.iu-en.news.single.tar.gz'),
'transformer.wmt20.iu-en.nh': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.iu-en.nh.single.tar.gz'),
}
# fmt: on
def __init__(self, args, encoder, decoder):
super().__init__(encoder, decoder)
self.args = args
self.supports_align_args = True
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--activation-fn',
choices=utils.get_available_activation_fns(),
help='activation function to use')
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
parser.add_argument('--attention-dropout', type=float, metavar='D',
help='dropout probability for attention weights')
parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D',
help='dropout probability after activation in FFN.')
parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
help='path to pre-trained encoder embedding')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
help='encoder embedding dimension')
parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',
help='encoder embedding dimension for FFN')
parser.add_argument('--encoder-layers', type=int, metavar='N',
help='num encoder layers')
parser.add_argument('--encoder-attention-heads', type=int, metavar='N',
help='num encoder attention heads')
parser.add_argument('--encoder-normalize-before', action='store_true',
help='apply layernorm before each encoder block')
parser.add_argument('--encoder-learned-pos', action='store_true',
help='use learned positional embeddings in the encoder')
parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
help='path to pre-trained decoder embedding')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
help='decoder embedding dimension for FFN')
parser.add_argument('--decoder-layers', type=int, metavar='N',
help='num decoder layers')
parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
help='num decoder attention heads')
parser.add_argument('--decoder-learned-pos', action='store_true',
help='use learned positional embeddings in the decoder')
parser.add_argument('--decoder-normalize-before', action='store_true',
help='apply layernorm before each decoder block')
parser.add_argument('--decoder-output-dim', type=int, metavar='N',
help='decoder output dimension (extra linear layer '
                                 'if different from decoder embed dim)')
parser.add_argument('--share-decoder-input-output-embed', action='store_true',
help='share decoder input and output embeddings')
parser.add_argument('--share-all-embeddings', action='store_true',
help='share encoder, decoder and output embeddings'
' (requires shared dictionary and embed dim)')
parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',
help='if set, disables positional embeddings (outside self attention)')
parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
help='comma separated list of adaptive softmax cutoff points. '
'Must be used with adaptive_loss criterion'),
parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
help='sets adaptive softmax dropout for the tail projections')
parser.add_argument('--layernorm-embedding', action='store_true',
help='add layernorm to embedding')
parser.add_argument('--no-scale-embedding', action='store_true',
help='if True, dont scale embeddings')
parser.add_argument('--checkpoint-activations', action='store_true',
help='checkpoint activations at each layer, which saves GPU '
'memory usage at the cost of some additional compute')
parser.add_argument('--offload-activations', action='store_true',
help='checkpoint activations at each layer, then save to gpu. Sets --checkpoint-activations.')
# args for "Cross+Self-Attention for Transformer Models" (Peitz et al., 2019)
parser.add_argument('--no-cross-attention', default=False, action='store_true',
help='do not perform cross-attention')
parser.add_argument('--cross-self-attention', default=False, action='store_true',
help='perform cross+self-attention')
# args for "Reducing Transformer Depth on Demand with Structured Dropout" (Fan et al., 2019)
parser.add_argument('--encoder-layerdrop', type=float, metavar='D', default=0,
help='LayerDrop probability for encoder')
parser.add_argument('--decoder-layerdrop', type=float, metavar='D', default=0,
help='LayerDrop probability for decoder')
parser.add_argument('--encoder-layers-to-keep', default=None,
help='which layers to *keep* when pruning as a comma-separated list')
parser.add_argument('--decoder-layers-to-keep', default=None,
help='which layers to *keep* when pruning as a comma-separated list')
# args for Training with Quantization Noise for Extreme Model Compression ({Fan*, Stock*} et al., 2020)
parser.add_argument('--quant-noise-pq', type=float, metavar='D', default=0,
help='iterative PQ quantization noise at training time')
parser.add_argument('--quant-noise-pq-block-size', type=int, metavar='D', default=8,
help='block size of quantization noise at training time')
parser.add_argument('--quant-noise-scalar', type=float, metavar='D', default=0,
help='scalar quantization noise and scalar quantization at training time')
# fmt: on
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_architecture(args)
if args.encoder_layers_to_keep:
args.encoder_layers = len(args.encoder_layers_to_keep.split(","))
if args.decoder_layers_to_keep:
args.decoder_layers = len(args.decoder_layers_to_keep.split(","))
if getattr(args, "max_source_positions", None) is None:
args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
if getattr(args, "max_target_positions", None) is None:
args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
if args.share_all_embeddings:
if src_dict != tgt_dict:
raise ValueError("--share-all-embeddings requires a joined dictionary")
if args.encoder_embed_dim != args.decoder_embed_dim:
raise ValueError(
"--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim"
)
if args.decoder_embed_path and (
args.decoder_embed_path != args.encoder_embed_path
):
raise ValueError(
"--share-all-embeddings not compatible with --decoder-embed-path"
)
encoder_embed_tokens = cls.build_embedding(
args, src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = encoder_embed_tokens
args.share_decoder_input_output_embed = True
else:
encoder_embed_tokens = cls.build_embedding(
args, src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = cls.build_embedding(
args, tgt_dict, args.decoder_embed_dim, args.decoder_embed_path
)
if getattr(args, "offload_activations", False):
args.checkpoint_activations = True # offloading implies checkpointing
encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)
decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)
return cls(args, encoder, decoder)
@classmethod
def build_embedding(cls, args, dictionary, embed_dim, path=None):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
# if provided, load from preloaded dictionaries
if path:
embed_dict = utils.parse_embedding(path)
utils.load_embedding(embed_dict, dictionary, emb)
return emb
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
return TransformerEncoder(args, src_dict, embed_tokens)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
return TransformerDecoder(
args,
tgt_dict,
embed_tokens,
no_encoder_attn=getattr(args, "no_cross_attention", False),
)
# TorchScript doesn't support optional arguments with variable length (**kwargs).
# Current workaround is to add union of all arguments in child classes.
def forward(
self,
src_tokens,
src_lengths,
prev_output_tokens,
return_all_hiddens: bool = True,
features_only: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
"""
Run the forward pass for an encoder-decoder model.
Copied from the base class, but without ``**kwargs``,
which are not supported by TorchScript.
"""
encoder_out = self.encoder(
src_tokens, src_lengths=src_lengths, return_all_hiddens=return_all_hiddens
)
decoder_out = self.decoder(
prev_output_tokens,
encoder_out=encoder_out,
features_only=features_only,
alignment_layer=alignment_layer,
alignment_heads=alignment_heads,
src_lengths=src_lengths,
return_all_hiddens=return_all_hiddens,
)
return decoder_out
# Since get_normalized_probs is in the Fairseq Model which is not scriptable,
# I rewrite the get_normalized_probs from Base Class to call the
# helper function in the Base Class.
@torch.jit.export
def get_normalized_probs(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
"""Get normalized probabilities (or log probs) from a net's output."""
return self.get_normalized_probs_scriptable(net_output, log_probs, sample)
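The hub_models() mapping above is what makes these pretrained checkpoints loadable through torch.hub; a hedged usage sketch (the model name and tokenizer settings come from that table, the translation call and its output are illustrative):

import torch

# Downloads the checkpoint listed under 'transformer.wmt19.en-de.single_model'.
en2de = torch.hub.load(
    'pytorch/fairseq', 'transformer.wmt19.en-de.single_model',
    tokenizer='moses', bpe='fastbpe'
)
en2de.eval()
print(en2de.translate('Machine learning is great!'))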
class TransformerEncoder(FairseqEncoder):
"""
Transformer encoder consisting of *args.encoder_layers* layers. Each layer
is a :class:`TransformerEncoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): encoding dictionary
embed_tokens (torch.nn.Embedding): input embedding
"""
def __init__(self, args, dictionary, embed_tokens):
self.args = args
super().__init__(dictionary)
self.register_buffer("version", torch.Tensor([3]))
self.dropout_module = FairseqDropout(
args.dropout, module_name=self.__class__.__name__
)
self.encoder_layerdrop = args.encoder_layerdrop
embed_dim = embed_tokens.embedding_dim
self.padding_idx = embed_tokens.padding_idx
self.max_source_positions = args.max_source_positions
self.embed_tokens = embed_tokens
self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim)
self.embed_positions = (
PositionalEmbedding(
args.max_source_positions,
embed_dim,
self.padding_idx,
learned=args.encoder_learned_pos,
)
if not args.no_token_positional_embeddings
else None
)
if getattr(args, "layernorm_embedding", False):
self.layernorm_embedding = LayerNorm(embed_dim)
else:
self.layernorm_embedding = None
if not args.adaptive_input and args.quant_noise_pq > 0:
self.quant_noise = apply_quant_noise_(
nn.Linear(embed_dim, embed_dim, bias=False),
args.quant_noise_pq,
args.quant_noise_pq_block_size,
)
else:
self.quant_noise = None
if self.encoder_layerdrop > 0.0:
self.layers = LayerDropModuleList(p=self.encoder_layerdrop)
else:
self.layers = nn.ModuleList([])
self.layers.extend(
[self.build_encoder_layer(args) for i in range(args.encoder_layers)]
)
self.num_layers = len(self.layers)
if args.encoder_normalize_before:
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
def build_encoder_layer(self, args):
layer = TransformerEncoderLayer(args)
if getattr(args, "checkpoint_activations", False):
offload_to_cpu = getattr(args, "offload_activations", False)
layer = checkpoint_wrapper(layer, offload_to_cpu=offload_to_cpu)
return layer
def forward_embedding(
self, src_tokens, token_embedding: Optional[torch.Tensor] = None
):
# embed tokens and positions
if token_embedding is None:
token_embedding = self.embed_tokens(src_tokens)
x = embed = self.embed_scale * token_embedding
if self.embed_positions is not None:
x = embed + self.embed_positions(src_tokens)
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
if self.quant_noise is not None:
x = self.quant_noise(x)
return x, embed
def forward(
self,
src_tokens,
src_lengths: Optional[torch.Tensor] = None,
return_all_hiddens: bool = False,
token_embeddings: Optional[torch.Tensor] = None,
):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (torch.LongTensor): lengths of each source sentence of
shape `(batch)`
return_all_hiddens (bool, optional): also return all of the
intermediate hidden states (default: False).
token_embeddings (torch.Tensor, optional): precomputed embeddings
default `None` will recompute embeddings
Returns:
dict:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
- **encoder_embedding** (Tensor): the (scaled) embedding lookup
of shape `(batch, src_len, embed_dim)`
- **encoder_states** (List[Tensor]): all intermediate
hidden states of shape `(src_len, batch, embed_dim)`.
Only populated if *return_all_hiddens* is True.
"""
return self.forward_scriptable(src_tokens,
src_lengths,
return_all_hiddens,
token_embeddings)
# TorchScript doesn't support super() method so that the scriptable Subclass
# can't access the base class model in Torchscript.
# Current workaround is to add a helper function with different name and
# call the helper function from scriptable Subclass.
def forward_scriptable(
self,
src_tokens,
src_lengths: Optional[torch.Tensor] = None,
return_all_hiddens: bool = False,
token_embeddings: Optional[torch.Tensor] = None,
):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (torch.LongTensor): lengths of each source sentence of
shape `(batch)`
return_all_hiddens (bool, optional): also return all of the
intermediate hidden states (default: False).
token_embeddings (torch.Tensor, optional): precomputed embeddings
default `None` will recompute embeddings
Returns:
dict:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
- **encoder_embedding** (Tensor): the (scaled) embedding lookup
of shape `(batch, src_len, embed_dim)`
- **encoder_states** (List[Tensor]): all intermediate
hidden states of shape `(src_len, batch, embed_dim)`.
Only populated if *return_all_hiddens* is True.
"""
# compute padding mask
encoder_padding_mask = src_tokens.eq(self.padding_idx)
has_pads = (src_tokens.device.type == "xla" or encoder_padding_mask.any())
x, encoder_embedding = self.forward_embedding(src_tokens, token_embeddings)
# account for padding while computing the representation
if encoder_padding_mask is not None:
x = x * (1 - encoder_padding_mask.unsqueeze(-1).type_as(x))
# B x T x C -> T x B x C
x = x.transpose(0, 1)
encoder_states = []
if return_all_hiddens:
encoder_states.append(x)
# encoder layers
for layer in self.layers:
x = layer(
x, encoder_padding_mask=encoder_padding_mask if has_pads else None
)
if return_all_hiddens:
assert encoder_states is not None
encoder_states.append(x)
if self.layer_norm is not None:
x = self.layer_norm(x)
# The Pytorch Mobile lite interpreter does not supports returning NamedTuple in
# `forward` so we use a dictionary instead.
# TorchScript does not support mixed values so the values are all lists.
# The empty list is equivalent to None.
return {
"encoder_out": [x], # T x B x C
"encoder_padding_mask": [encoder_padding_mask], # B x T
"encoder_embedding": [encoder_embedding], # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": [],
"src_lengths": [],
}
@torch.jit.export
def reorder_encoder_out(self, encoder_out: Dict[str, List[Tensor]], new_order):
"""
Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*
"""
if len(encoder_out["encoder_out"]) == 0:
new_encoder_out = []
else:
new_encoder_out = [encoder_out["encoder_out"][0].index_select(1, new_order)]
if len(encoder_out["encoder_padding_mask"]) == 0:
new_encoder_padding_mask = []
else:
new_encoder_padding_mask = [
encoder_out["encoder_padding_mask"][0].index_select(0, new_order)
]
if len(encoder_out["encoder_embedding"]) == 0:
new_encoder_embedding = []
else:
new_encoder_embedding = [
encoder_out["encoder_embedding"][0].index_select(0, new_order)
]
if len(encoder_out["src_tokens"]) == 0:
src_tokens = []
else:
src_tokens = [(encoder_out["src_tokens"][0]).index_select(0, new_order)]
if len(encoder_out["src_lengths"]) == 0:
src_lengths = []
else:
src_lengths = [(encoder_out["src_lengths"][0]).index_select(0, new_order)]
encoder_states = encoder_out["encoder_states"]
if len(encoder_states) > 0:
for idx, state in enumerate(encoder_states):
encoder_states[idx] = state.index_select(1, new_order)
return {
"encoder_out": new_encoder_out, # T x B x C
"encoder_padding_mask": new_encoder_padding_mask, # B x T
"encoder_embedding": new_encoder_embedding, # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": src_tokens, # B x T
"src_lengths": src_lengths, # B x 1
}
def max_positions(self):
"""Maximum input length supported by the encoder."""
if self.embed_positions is None:
return self.max_source_positions
return min(self.max_source_positions, self.embed_positions.max_positions)
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
weights_key = "{}.embed_positions.weights".format(name)
if weights_key in state_dict:
print("deleting {0}".format(weights_key))
del state_dict[weights_key]
state_dict[
"{}.embed_positions._float_tensor".format(name)
] = torch.FloatTensor(1)
for i in range(self.num_layers):
# update layer norms
self.layers[i].upgrade_state_dict_named(
state_dict, "{}.layers.{}".format(name, i)
)
version_key = "{}.version".format(name)
if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:
# earlier checkpoints did not normalize after the stack of layers
self.layer_norm = None
self.normalize = False
state_dict[version_key] = torch.Tensor([1])
return state_dict
class TransformerDecoder(FairseqIncrementalDecoder):
"""
Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
self.args = args
super().__init__(dictionary)
self.register_buffer("version", torch.Tensor([3]))
self._future_mask = torch.empty(0)
self.dropout_module = FairseqDropout(
args.dropout, module_name=self.__class__.__name__
)
self.decoder_layerdrop = args.decoder_layerdrop
self.share_input_output_embed = args.share_decoder_input_output_embed
input_embed_dim = embed_tokens.embedding_dim
embed_dim = args.decoder_embed_dim
self.embed_dim = embed_dim
self.output_embed_dim = args.decoder_output_dim
self.padding_idx = embed_tokens.padding_idx
self.max_target_positions = args.max_target_positions
self.embed_tokens = embed_tokens
self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim)
if not args.adaptive_input and args.quant_noise_pq > 0:
self.quant_noise = apply_quant_noise_(
nn.Linear(embed_dim, embed_dim, bias=False),
args.quant_noise_pq,
args.quant_noise_pq_block_size,
)
else:
self.quant_noise = None
self.project_in_dim = (
Linear(input_embed_dim, embed_dim, bias=False)
if embed_dim != input_embed_dim
else None
)
self.embed_positions = (
PositionalEmbedding(
self.max_target_positions,
embed_dim,
self.padding_idx,
learned=args.decoder_learned_pos,
)
if not args.no_token_positional_embeddings
else None
)
if getattr(args, "layernorm_embedding", False):
self.layernorm_embedding = LayerNorm(embed_dim)
else:
self.layernorm_embedding = None
self.cross_self_attention = getattr(args, "cross_self_attention", False)
if self.decoder_layerdrop > 0.0:
self.layers = LayerDropModuleList(p=self.decoder_layerdrop)
else:
self.layers = nn.ModuleList([])
self.layers.extend(
[
self.build_decoder_layer(args, no_encoder_attn)
for _ in range(args.decoder_layers)
]
)
self.num_layers = len(self.layers)
if args.decoder_normalize_before and not getattr(
args, "no_decoder_final_norm", False
):
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
self.project_out_dim = (
Linear(embed_dim, self.output_embed_dim, bias=False)
if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights
else None
)
self.adaptive_softmax = None
self.output_projection = None
if args.adaptive_softmax_cutoff is not None:
self.adaptive_softmax = AdaptiveSoftmax(
len(dictionary),
self.output_embed_dim,
utils.eval_str_list(args.adaptive_softmax_cutoff, type=int),
dropout=args.adaptive_softmax_dropout,
adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None,
factor=args.adaptive_softmax_factor,
tie_proj=args.tie_adaptive_proj,
)
elif self.share_input_output_embed:
self.output_projection = nn.Linear(
self.embed_tokens.weight.shape[1],
self.embed_tokens.weight.shape[0],
bias=False,
)
self.output_projection.weight = self.embed_tokens.weight
else:
self.output_projection = nn.Linear(
self.output_embed_dim, len(dictionary), bias=False
)
nn.init.normal_(
self.output_projection.weight, mean=0, std=self.output_embed_dim ** -0.5
)
def build_decoder_layer(self, args, no_encoder_attn=False):
layer = TransformerDecoderLayer(args, no_encoder_attn)
if getattr(args, "checkpoint_activations", False):
offload_to_cpu = getattr(args, "offload_activations", False)
layer = checkpoint_wrapper(layer, offload_to_cpu=offload_to_cpu)
return layer
def forward(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
features_only: bool = False,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
src_lengths: Optional[Any] = None,
return_all_hiddens: bool = False,
parallel_forward_start_pos: Optional[int] = None
):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (optional): output from the encoder, used for
encoder-side attention
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
features_only (bool, optional): only return features without
applying output layer (default: False).
full_context_alignment (bool, optional): don't apply
auto-regressive mask to self-attention (default: False).
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
x, extra = self.extract_features(
prev_output_tokens,
encoder_out=encoder_out,
incremental_state=incremental_state,
full_context_alignment=full_context_alignment,
alignment_layer=alignment_layer,
alignment_heads=alignment_heads,
parallel_forward_start_pos=parallel_forward_start_pos
)
if not features_only:
x = self.output_layer(x)
return x, extra
def extract_features(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]],
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
parallel_forward_start_pos: Optional[int] = None
):
return self.extract_features_scriptable(
prev_output_tokens,
encoder_out,
incremental_state,
full_context_alignment,
alignment_layer,
alignment_heads,
parallel_forward_start_pos
)
"""
A scriptable subclass of this class has an extract_features method and calls
super().extract_features, but super() is not supported in torchscript. A copy of
this function is made to be used in the subclass instead.
"""
def extract_features_scriptable(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]],
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
parallel_forward_start_pos: Optional[int] = None
):
"""
Similar to *forward* but only return features.
Includes several features from "Jointly Learning to Align and
Translate with Transformer Models" (Garg et al., EMNLP 2019).
Args:
full_context_alignment (bool, optional): don't apply
auto-regressive mask to self-attention (default: False).
alignment_layer (int, optional): return mean alignment over
heads at this layer (default: last layer).
alignment_heads (int, optional): only average alignment over
this many heads (default: all heads).
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
if alignment_layer is None:
alignment_layer = self.num_layers - 1
# embed positions
positions = (
self.embed_positions(
prev_output_tokens,
incremental_state=incremental_state if parallel_forward_start_pos is None else None
)
if self.embed_positions is not None
else None
)
original_len = None
if incremental_state is not None: # inference
if parallel_forward_start_pos is None: # one-by-one
prev_output_tokens = prev_output_tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
else: # aggressive
original_len = prev_output_tokens.size(1)
prev_output_tokens = prev_output_tokens[:, parallel_forward_start_pos:]
if positions is not None:
positions = positions[:, parallel_forward_start_pos:]
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if self.quant_noise is not None:
x = self.quant_noise(x)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
self_attn_padding_mask: Optional[Tensor] = None
if self.cross_self_attention or prev_output_tokens.eq(self.padding_idx).any():
self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx)
# decoder layers
attn: Optional[Tensor] = None
inner_states: List[Optional[Tensor]] = [x]
for idx, layer in enumerate(self.layers):
# train | aggressive inference
if (incremental_state is None or parallel_forward_start_pos is not None) and not full_context_alignment:
self_attn_mask = self.buffered_future_mask(x, dim=original_len)
if parallel_forward_start_pos is not None:
self_attn_mask = self_attn_mask[parallel_forward_start_pos:]
else: # one-by-one inference
self_attn_mask = None
x, layer_attn, _ = layer(
x,
encoder_out["encoder_out"][0]
if (encoder_out is not None and len(encoder_out["encoder_out"]) > 0)
else None,
encoder_out["encoder_padding_mask"][0]
if (
encoder_out is not None
and len(encoder_out["encoder_padding_mask"]) > 0
)
else None,
incremental_state,
self_attn_mask=self_attn_mask,
self_attn_padding_mask=self_attn_padding_mask,
need_attn=bool((idx == alignment_layer)),
need_head_weights=bool((idx == alignment_layer)),
)
inner_states.append(x)
if layer_attn is not None and idx == alignment_layer:
attn = layer_attn.float().to(x)
if attn is not None:
if alignment_heads is not None:
attn = attn[:alignment_heads]
# average probabilities over heads
attn = attn.mean(dim=0)
if self.layer_norm is not None:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
if self.project_out_dim is not None:
x = self.project_out_dim(x)
return x, {"attn": [attn], "inner_states": inner_states}
def output_layer(self, features):
"""Project features to the vocabulary size."""
if self.adaptive_softmax is None:
# project back to size of vocabulary
return self.output_projection(features)
else:
return features
def max_positions(self):
"""Maximum output length supported by the decoder."""
if self.embed_positions is None:
return self.max_target_positions
return min(self.max_target_positions, self.embed_positions.max_positions)
def buffered_future_mask(self, tensor, dim=None):
# tensor: t, b, h
if dim is None:
dim = tensor.size(0)
# self._future_mask.device != tensor.device is not working in TorchScript. This is a workaround.
if (
self._future_mask.size(0) == 0
or (not self._future_mask.device == tensor.device)
or self._future_mask.size(0) < dim
):
self._future_mask = torch.triu(
utils.fill_with_neg_inf(torch.zeros([dim, dim])), 1
)
self._future_mask = self._future_mask.to(tensor)
return self._future_mask[:dim, :dim]
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
weights_key = "{}.embed_positions.weights".format(name)
if weights_key in state_dict:
del state_dict[weights_key]
state_dict[
"{}.embed_positions._float_tensor".format(name)
] = torch.FloatTensor(1)
if f"{name}.output_projection.weight" not in state_dict:
if self.share_input_output_embed:
embed_out_key = f"{name}.embed_tokens.weight"
else:
embed_out_key = f"{name}.embed_out"
if embed_out_key in state_dict:
state_dict[f"{name}.output_projection.weight"] = state_dict[
embed_out_key
]
if not self.share_input_output_embed:
del state_dict[embed_out_key]
for i in range(self.num_layers):
# update layer norms
layer_norm_map = {
"0": "self_attn_layer_norm",
"1": "encoder_attn_layer_norm",
"2": "final_layer_norm",
}
for old, new in layer_norm_map.items():
for m in ("weight", "bias"):
k = "{}.layers.{}.layer_norms.{}.{}".format(name, i, old, m)
if k in state_dict:
state_dict[
"{}.layers.{}.{}.{}".format(name, i, new, m)
] = state_dict[k]
del state_dict[k]
version_key = "{}.version".format(name)
if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) <= 2:
# earlier checkpoints did not normalize after the stack of layers
self.layer_norm = None
self.normalize = False
state_dict[version_key] = torch.Tensor([1])
return state_dict
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
nn.init.constant_(m.weight[padding_idx], 0)
return m
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.0)
return m
@register_model_architecture("transformer", "transformer_tiny")
def tiny_architecture(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 64)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 64)
args.encoder_layers = getattr(args, "encoder_layers", 2)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 2)
args.decoder_layers = getattr(args, "decoder_layers", 2)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 2)
return base_architecture(args)
@register_model_architecture("transformer", "transformer")
def base_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.no_cross_attention = getattr(args, "no_cross_attention", False)
args.cross_self_attention = getattr(args, "cross_self_attention", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False)
args.checkpoint_activations = getattr(args, "checkpoint_activations", False)
args.offload_activations = getattr(args, "offload_activations", False)
if args.offload_activations:
args.checkpoint_activations = True
args.encoder_layers_to_keep = getattr(args, "encoder_layers_to_keep", None)
args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None)
args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8)
args.quant_noise_scalar = getattr(args, "quant_noise_scalar", 0)
@register_model_architecture("transformer", "transformer_iwslt_de_en")
def transformer_iwslt_de_en(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
args.decoder_layers = getattr(args, "decoder_layers", 6)
base_architecture(args)
@register_model_architecture("transformer", "transformer_wmt_en_de")
def transformer_wmt_en_de(args):
base_architecture(args)
# parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017)
@register_model_architecture("transformer", "transformer_vaswani_wmt_en_de_big")
def transformer_vaswani_wmt_en_de_big(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.dropout = getattr(args, "dropout", 0.3)
base_architecture(args)
@register_model_architecture("transformer", "transformer_vaswani_wmt_en_fr_big")
def transformer_vaswani_wmt_en_fr_big(args):
args.dropout = getattr(args, "dropout", 0.1)
transformer_vaswani_wmt_en_de_big(args)
@register_model_architecture("transformer", "transformer_wmt_en_de_big")
def transformer_wmt_en_de_big(args):
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
transformer_vaswani_wmt_en_de_big(args)
# default parameters used in tensor2tensor implementation
@register_model_architecture("transformer", "transformer_wmt_en_de_big_t2t")
def transformer_wmt_en_de_big_t2t(args):
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.activation_dropout = getattr(args, "activation_dropout", 0.1)
transformer_vaswani_wmt_en_de_big(args)
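A small sketch of how the registered architecture functions above behave: each fills in missing hyperparameters on an argparse namespace in place, and values the user already set are kept:

from argparse import Namespace

args = Namespace(dropout=0.2)        # user-supplied value wins
transformer_iwslt_de_en(args)        # fills the remaining fields with defaults
print(args.dropout)                  # 0.2 (kept)
print(args.encoder_ffn_embed_dim)    # 1024 (IWSLT default)
print(args.encoder_layers)           # 6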
| 44.124343
| 159
| 0.630899
|
ad533c77942205b46993c2daca5bd7d635258a8f
| 2,739
|
py
|
Python
|
trashtalk/urls.py
|
hcote/TrashTalk
|
eb60cff7451f8d26bf141123d6a3580167583827
|
[
"MIT"
] | 8
|
2017-10-04T02:29:13.000Z
|
2019-10-09T03:38:35.000Z
|
trashtalk/urls.py
|
hcote/TrashTalk
|
eb60cff7451f8d26bf141123d6a3580167583827
|
[
"MIT"
] | 108
|
2017-09-15T23:13:12.000Z
|
2018-05-21T18:26:15.000Z
|
trashtalk/urls.py
|
hcote/TrashTalk
|
eb60cff7451f8d26bf141123d6a3580167583827
|
[
"MIT"
] | 10
|
2017-09-06T02:36:01.000Z
|
2020-09-15T20:13:33.000Z
|
"""trashtalk URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include, static
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.conf import settings
from accounts.views import (LoginView, UserDashboardView, user_signup_view, user_signup_create)
from cleanups.views.template_views import (cleanup_new, cleanup_edit, cleanup_list,
cleanup_show, cleanup_create, cleanup_update,
cleanup_join_view, cleanup_delete)
urlpatterns = [
# Homepage
url(r'^$', LoginView.as_view(), name='home'),
# Admin Pages
url(r'^admin/', admin.site.urls),
# API
url(r'^api/v1/', include('api_urls', namespace='api')),
# Auth
url(r'^login/', LoginView.as_view(), name='login'),
url(r'^logout/', auth_views.logout, {'next_page': '/'}, name='logout'),
url(r'^signup/', user_signup_view, name='register'),
url(r'^user/create', user_signup_create, name='create-user'),
# User
# TODO: Issue #83 - Move to accounts/urls.py
url(r'^dashboard/', UserDashboardView.as_view(), name='dashboard'),
# Cleanups
# TODO: Issue #83 - Move to cleanups/urls.py
url(r'^cleanups/$', cleanup_list, name='cleanups-list'),
url(r'^cleanups/new/$', cleanup_new, name='cleanup-new'),
url(r'^cleanups/create/$', cleanup_create, name='cleanup-create'),
url(r'^cleanups/(?P<pk>[0-9]+)/edit/$', cleanup_edit, name='cleanup-edit'),
url(r'^cleanups/(?P<pk>[0-9]+)/update/$', cleanup_update, name='cleanup-update'),
url(r'^cleanups/(?P<pk>[0-9]+)/join/$', cleanup_join_view, name='join-cleanup'),
url(r'^cleanups/(?P<pk>[0-9]+)/delete$', cleanup_delete, name='cleanup-delete'),
url(r'^cleanups/(?P<pk>[0-9]+)/$', cleanup_show, name='cleanup-detail'),
# Development Only
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
] + static.static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
if settings.DEBUG:
import debug_toolbar
urlpatterns += [url(r'^__debug__/', include(debug_toolbar.urls))]
| 42.796875
| 95
| 0.667762
|
411a15eb974de3ccd78275b5992c7cd4abfda23e
| 4,351
|
py
|
Python
|
test/functional/test_framework/muhash.py
|
phlsolo316/vidcoin
|
d6eec232378c329ebc2a31e7d21acf58cf62368d
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/muhash.py
|
phlsolo316/vidcoin
|
d6eec232378c329ebc2a31e7d21acf58cf62368d
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/muhash.py
|
phlsolo316/vidcoin
|
d6eec232378c329ebc2a31e7d21acf58cf62368d
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2020 Pieter Wuille
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Native Python MuHash3072 implementation."""
import hashlib
import unittest
from .util import modinv
def rot32(v, bits):
"""Rotate the 32-bit value v left by bits bits."""
bits %= 32 # Make sure the term below does not throw an exception
return ((v << bits) & 0xffffffff) | (v >> (32 - bits))
def chacha20_doubleround(s):
"""Apply a ChaCha20 double round to 16-element state array s.
See https://cr.yp.to/chacha/chacha-20080128.pdf and https://tools.ietf.org/html/rfc8439
"""
QUARTER_ROUNDS = [(0, 4, 8, 12),
(1, 5, 9, 13),
(2, 6, 10, 14),
(3, 7, 11, 15),
(0, 5, 10, 15),
(1, 6, 11, 12),
(2, 7, 8, 13),
(3, 4, 9, 14)]
for a, b, c, d in QUARTER_ROUNDS:
s[a] = (s[a] + s[b]) & 0xffffffff
s[d] = rot32(s[d] ^ s[a], 16)
s[c] = (s[c] + s[d]) & 0xffffffff
s[b] = rot32(s[b] ^ s[c], 12)
s[a] = (s[a] + s[b]) & 0xffffffff
s[d] = rot32(s[d] ^ s[a], 8)
s[c] = (s[c] + s[d]) & 0xffffffff
s[b] = rot32(s[b] ^ s[c], 7)
def chacha20_32_to_384(key32):
"""Specialized ChaCha20 implementation with 32-byte key, 0 IV, 384-byte output."""
# See RFC 8439 section 2.3 for chacha20 parameters
CONSTANTS = [0x61707865, 0x3320646e, 0x79622d32, 0x6b206574]
key_bytes = [0]*8
for i in range(8):
key_bytes[i] = int.from_bytes(key32[(4 * i):(4 * (i+1))], 'little')
INITIALIZATION_VECTOR = [0] * 4
init = CONSTANTS + key_bytes + INITIALIZATION_VECTOR
out = bytearray()
for counter in range(6):
init[12] = counter
s = init.copy()
for _ in range(10):
chacha20_doubleround(s)
for i in range(16):
out.extend(((s[i] + init[i]) & 0xffffffff).to_bytes(4, 'little'))
return bytes(out)
def data_to_num3072(data):
"""Hash a 32-byte array data to a 3072-bit number using 6 Chacha20 operations."""
bytes384 = chacha20_32_to_384(data)
return int.from_bytes(bytes384, 'little')
class MuHash3072:
"""Class representing the MuHash3072 computation of a set.
See https://cseweb.ucsd.edu/~mihir/papers/inchash.pdf and https://lists.linuxfoundation.org/pipermail/vidcoin-dev/2017-May/014337.html
"""
MODULUS = 2**3072 - 1103717
def __init__(self):
"""Initialize for an empty set."""
self.numerator = 1
self.denominator = 1
def insert(self, data):
"""Insert a byte array data in the set."""
self.numerator = (self.numerator * data_to_num3072(data)) % self.MODULUS
def remove(self, data):
"""Remove a byte array from the set."""
self.denominator = (self.denominator * data_to_num3072(data)) % self.MODULUS
def digest(self):
"""Extract the final hash. Does not modify this object."""
val = (self.numerator * modinv(self.denominator, self.MODULUS)) % self.MODULUS
bytes384 = val.to_bytes(384, 'little')
return hashlib.sha256(bytes384).digest()
class TestFrameworkMuhash(unittest.TestCase):
def test_muhash(self):
muhash = MuHash3072()
muhash.insert([0]*32)
muhash.insert([1] + [0]*31)
muhash.remove([2] + [0]*31)
finalized = muhash.digest()
# This mirrors the result in the C++ MuHash3072 unit test
self.assertEqual(finalized[::-1].hex(), "a44e16d5e34d259b349af21c06e65d653915d2e208e4e03f389af750dc0bfdc3")
def test_chacha20(self):
def chacha_check(key, result):
self.assertEqual(chacha20_32_to_384(key)[:64].hex(), result)
# Test vectors from https://tools.ietf.org/html/draft-agl-tls-chacha20poly1305-04#section-7
# Since the nonce is hardcoded to 0 in our function we only use those vectors.
chacha_check([0]*32, "76b8e0ada0f13d90405d6ae55386bd28bdd219b8a08ded1aa836efcc8b770dc7da41597c5157488d7724e03fb8d84a376a43b8f41518a11cc387b669b2ee6586")
chacha_check([0]*31 + [1], "4540f05a9f1fb296d7736e7b208e3c96eb4fe1834688d2604f450952ed432d41bbe2a0b6ea7566d2a5d1e7e20d42af2c53d792b1c43fea817e9ad275ae546963")
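A short sketch of the property the class above provides: the digest depends only on the multiset of inserted elements, so insertion order is irrelevant and remove() cancels a matching insert (the 32-byte values below are arbitrary):

a = MuHash3072()
a.insert(b'\x01' * 32)
a.insert(b'\x02' * 32)

b = MuHash3072()
b.insert(b'\x02' * 32)
b.insert(b'\x03' * 32)
b.insert(b'\x01' * 32)
b.remove(b'\x03' * 32)

assert a.digest() == b.digest()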
| 39.198198
| 166
| 0.622156
|
8ae667b6f6dbf5c337a16a02c3b3436d95f6f3fd
| 2,552
|
py
|
Python
|
train.py
|
MaggieChenchen/SingleGan_new
|
06180be2479ce931af464b2dfdd091a646cc9b61
|
[
"MIT"
] | null | null | null |
train.py
|
MaggieChenchen/SingleGan_new
|
06180be2479ce931af464b2dfdd091a646cc9b61
|
[
"MIT"
] | null | null | null |
train.py
|
MaggieChenchen/SingleGan_new
|
06180be2479ce931af464b2dfdd091a646cc9b61
|
[
"MIT"
] | null | null | null |
import time
from options.train_options import TrainOptions
from data.dataloader import CreateDataLoader
#from util.visualizer import Visualizer
from models.single_gan import SingleGAN
import torch.nn as nn  # only needed if the DataParallel lines below are enabled
def main():
opt = TrainOptions().parse()
data_loader = CreateDataLoader(opt)
dataset_size = len(data_loader) * opt.batchSize
#visualizer = Visualizer(opt)
model = SingleGAN()
model.initialize(opt)
    # Optional multi-GPU setup:
    # model = nn.DataParallel(model, device_ids=[0, 1, 2])
    # model = model.cuda()
total_steps = 0
lr = opt.lr
for epoch in range(1, opt.niter + opt.niter_decay + 1):
epoch_start_time = time.time()
save_result = True
for i, data in enumerate(data_loader):
iter_start_time = time.time()
total_steps += opt.batchSize
epoch_iter = total_steps - dataset_size * (epoch - 1)
            errD, errG, loss_q = model.update_model(data)  # D, G and Q losses
if save_result or total_steps % opt.display_freq == 0:
save_result = save_result or total_steps % opt.update_html_freq == 0
print('mode:{} dataset:{}'.format(opt.mode,opt.name))
#visualizer.display_current_results(model.get_current_visuals(), epoch, ncols=1, save_result=save_result)
save_result = False
if total_steps % opt.print_freq == 0:
errors = model.get_current_errors()
t = (time.time() - iter_start_time) / opt.batchSize
#visualizer.print_current_errors(epoch, epoch_iter, errors, t)
#if opt.display_id > 0:
# visualizer.plot_current_errors(epoch, float(epoch_iter)/dataset_size, opt, errors)
if total_steps % opt.save_latest_freq == 0:
print('saving the latest model (epoch %d, total_steps %d)' %(epoch, total_steps))
model.save('latest')
print('errD:{} , errG:{},loss_q:{}'.format(errD,errG,loss_q)) #print loss
if epoch % opt.save_epoch_freq == 0:
print('saving the model at the end of epoch %d, iters %d' %(epoch, total_steps))
model.save('latest')
model.save(epoch)
if epoch > opt.niter:
lr -= opt.lr / opt.niter_decay
model.update_lr(lr)
if __name__ == '__main__':
main()
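The epoch loop above keeps the learning rate constant for the first opt.niter epochs and then decays it linearly to zero over opt.niter_decay epochs; a standalone sketch of that schedule with illustrative values:

def lr_at_epoch(epoch, base_lr=2e-4, niter=100, niter_decay=100):
    # Equivalent to subtracting base_lr / niter_decay once per epoch past `niter`.
    if epoch <= niter:
        return base_lr
    return max(0.0, base_lr * (1.0 - (epoch - niter) / niter_decay))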
| 40.507937
| 122
| 0.575235
|
7297633243bca82146d3fd17dd4d122b75fa8baa
| 4,284
|
py
|
Python
|
src/Consult.py
|
henrique2m/PisCofins
|
faee04aa3c3d57404092a2656a76e28aa9acc569
|
[
"MIT"
] | null | null | null |
src/Consult.py
|
henrique2m/PisCofins
|
faee04aa3c3d57404092a2656a76e28aa9acc569
|
[
"MIT"
] | null | null | null |
src/Consult.py
|
henrique2m/PisCofins
|
faee04aa3c3d57404092a2656a76e28aa9acc569
|
[
"MIT"
] | null | null | null |
import os
import PySimpleGUI as view
import Popup
import List
import json
import Global
class WindowCreditConsult:
def select(self, listCompany=[]):
countList = len(listCompany)
popup = Popup.Popup()
if countList == 0:
popup.alert('INFORME', 'NÃO EXISTE NENHUMA EMPRESA CADASTRADA.')
return False
file = Global.DATABASE()
        def index(name='', tax=''):
with open(file, 'r') as json_file:
datas = json.load(json_file)
count = len(datas)
listSelect = []
for index in range(count):
if 'competence' in datas[index]:
if datas[index]['competence']['foreignKey'] == name:
if tax == 'pis':
if datas[index]['competence']['pis'] == True:
listSelect.append(datas[index]['competence'])
elif tax == 'cofins':
if datas[index]['competence']['cofins'] == True:
listSelect.append(datas[index]['competence'])
if listSelect == []:
return False
return listSelect
colOne = [
[view.Text('EMPRESAS:', background_color='#6272a4')],
[view.Listbox(
values=listCompany,
size=(15,7),
key='company'
)]
]
colTwo = [
[view.Frame('IMPOSTO',
[
[
view.Radio('PIS', 'tax', key='pis', background_color='#6272a4'),
view.Radio('COFINS', 'tax', key='cofins', background_color='#6272a4')
]
], background_color='#6272a4'
)],
[view.Button('CONSULTAR',
key='submit',
button_color=['#ffffff','#3CB371'],
size=(17,1)
)
],
[view.Button('CANCELAR',
key='cancel',
button_color=['#ffffff','#ff5555'],
size=(17,1)
)
]
]
layout = [
[
view.Column(colOne),
view.Column(colTwo, element_justification='center')
]
]
window = view.Window('CONSULTAR CRÉDITO',
layout,
location=(830, 220),
icon=Global.ICON()
)
while True:
event, values = window.read()
if event in (view.WIN_CLOSED, 'cancel'):
break
elif event == 'submit':
popup = Popup.Popup()
companySelect = '' if values['company'] == [] else values['company'][0]
if companySelect == '':
popup.alert('INFORME', 'POR FAVOR, SELECIONE UMA EMPRESA.')
elif not values['pis'] and not values['cofins']:
popup.alert('INFORME', 'POR FAVOR, SELECIONE UM IMPOSTO.')
else:
tax = 'pis' if values['pis'] == True else 'cofins'
listCosultCompany = index(companySelect, tax)
if listCosultCompany:
list_ = List.windowListCredit()
list_.index(listCosultCompany)
else:
popup.alert('INFORME', 'NÃO EXISTE COMPETÊNCIA RELACIONADA A ESSE IMPOSTO.')
window.close()
| 35.7
| 104
| 0.36788
|
44a3ef03f04532548a1ccd3a3952257d120e9707
| 3,184
|
py
|
Python
|
api/settings.py
|
AppointmentGuru/ClassGuru
|
4c171c8af92268d548aebf8dbbddc70b0040f7d6
|
[
"MIT"
] | null | null | null |
api/settings.py
|
AppointmentGuru/ClassGuru
|
4c171c8af92268d548aebf8dbbddc70b0040f7d6
|
[
"MIT"
] | null | null | null |
api/settings.py
|
AppointmentGuru/ClassGuru
|
4c171c8af92268d548aebf8dbbddc70b0040f7d6
|
[
"MIT"
] | null | null | null |
"""
Django settings for api project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 's(7u&oq9n6l1_b)kt=!!tjh_namd8i$vp$g3w!c2=jjh-=sbkj'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# 3rd party
'rest_framework',
# custom
'api',
'classbooker',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'api.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'api.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
# {
# 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
# },
# {
# 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
# },
# {
# 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
# },
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| 25.269841
| 93
| 0.6875
|