hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acb0189693eb876f8e38efe75a69fbde86a03b56 | 1,868 | py | Python | infra/cifuzz/filestore_utils.py | ittiam-systems/oss-fuzz | e2e731bc448214f14ba73ded2094da591b79db26 | [
"Apache-2.0"
] | 1 | 2022-01-21T13:58:32.000Z | 2022-01-21T13:58:32.000Z | infra/cifuzz/filestore_utils.py | ittiam-systems/oss-fuzz | e2e731bc448214f14ba73ded2094da591b79db26 | [
"Apache-2.0"
] | null | null | null | infra/cifuzz/filestore_utils.py | ittiam-systems/oss-fuzz | e2e731bc448214f14ba73ded2094da591b79db26 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""External filestore interface. Cannot be depended on by filestore code."""
import filestore
import filestore.filesystem
import filestore.git
import filestore.github_actions
import filestore.gsutil
import filestore.no_filestore
import filestore.gitlab
FILESTORE_MAPPING = {
'filesystem': filestore.filesystem.FilesystemFilestore,
'gsutil': filestore.gsutil.GSUtilFilestore,
'github-actions': filestore.github_actions.GithubActionsFilestore,
'git': filestore.git.GitFilestore,
# TODO(metzman): Change to "no-filestore"
'no_filestore': filestore.no_filestore.NoFilestore,
'gitlab': filestore.gitlab.GitlabFilestore,
}
def get_filestore(config):
"""Returns the correct filestore object based on the platform in |config|.
Raises an exception if there is no correct filestore for the platform."""
if config.platform == config.Platform.EXTERNAL_GITHUB:
ci_filestore = filestore.github_actions.GithubActionsFilestore(config)
if not config.git_store_repo:
return ci_filestore
return filestore.git.GitFilestore(config, ci_filestore)
filestore_cls = FILESTORE_MAPPING.get(config.filestore)
if filestore_cls is None:
raise filestore.FilestoreError(
f'Filestore: {config.filestore} doesn\'t exist.')
return filestore_cls(config)
| 38.122449 | 76 | 0.773019 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""External filestore interface. Cannot be depended on by filestore code."""
import filestore
import filestore.filesystem
import filestore.git
import filestore.github_actions
import filestore.gsutil
import filestore.no_filestore
import filestore.gitlab
FILESTORE_MAPPING = {
'filesystem': filestore.filesystem.FilesystemFilestore,
'gsutil': filestore.gsutil.GSUtilFilestore,
'github-actions': filestore.github_actions.GithubActionsFilestore,
'git': filestore.git.GitFilestore,
# TODO(metzman): Change to "no-filestore"
'no_filestore': filestore.no_filestore.NoFilestore,
'gitlab': filestore.gitlab.GitlabFilestore,
}
def get_filestore(config):
"""Returns the correct filestore object based on the platform in |config|.
Raises an exception if there is no correct filestore for the platform."""
if config.platform == config.Platform.EXTERNAL_GITHUB:
ci_filestore = filestore.github_actions.GithubActionsFilestore(config)
if not config.git_store_repo:
return ci_filestore
return filestore.git.GitFilestore(config, ci_filestore)
filestore_cls = FILESTORE_MAPPING.get(config.filestore)
if filestore_cls is None:
raise filestore.FilestoreError(
f'Filestore: {config.filestore} doesn\'t exist.')
return filestore_cls(config)
| 0 | 0 | 0 |
70d11de1855a1874186450cf24993de95679b8b1 | 512 | py | Python | pypeline/core/__init__.py | shelper/pypeline | 70e800a8b40592e9cde9491c77860143a732e6d3 | [
"0BSD"
] | null | null | null | pypeline/core/__init__.py | shelper/pypeline | 70e800a8b40592e9cde9491c77860143a732e6d3 | [
"0BSD"
] | 1 | 2016-03-29T15:16:57.000Z | 2016-03-30T02:06:03.000Z | pypeline/core/__init__.py | shelper/pyoct | 70e800a8b40592e9cde9491c77860143a732e6d3 | [
"0BSD"
] | null | null | null | # -*- coding: utf-8 -*-
"""
pypeline.core
~~~~~~~~~~~~~
this module contains all the processing method, and advanced algorithms for OCT signal processing
calibrate:
disp_comp:
sp2struct:
@phase, @intensity, @complex
despeckle: (2D, 3D, etc.)
angiograph: (2D, 3D, etc.)
@speckle_var, @
"""
# register/load all the functions in the settings folder, so can be found and called to form pipeline
# use watchdog to monitor the content change in the settings folder
__all__ = ['pipeline', 'funcwrap']
| 18.962963 | 101 | 0.695313 | # -*- coding: utf-8 -*-
"""
pypeline.core
~~~~~~~~~~~~~
this module contains all the processing method, and advanced algorithms for OCT signal processing
calibrate:
disp_comp:
sp2struct:
@phase, @intensity, @complex
despeckle: (2D, 3D, etc.)
angiograph: (2D, 3D, etc.)
@speckle_var, @
"""
# register/load all the functions in the settings folder, so can be found and called to form pipeline
# use watchdog to monitor the content change in the settings folder
__all__ = ['pipeline', 'funcwrap']
| 0 | 0 | 0 |
81eb8d6fae7be1535358c719b212f7d885fa1871 | 28 | py | Python | recipe/run_test.py | ocefpaf/homura-feedstock | 5aefc20349c36307c9022989d2e34c9b03687b26 | [
"BSD-3-Clause"
] | null | null | null | recipe/run_test.py | ocefpaf/homura-feedstock | 5aefc20349c36307c9022989d2e34c9b03687b26 | [
"BSD-3-Clause"
] | 3 | 2017-03-23T01:40:37.000Z | 2019-11-24T11:45:09.000Z | recipe/run_test.py | ocefpaf/homura-feedstock | 5aefc20349c36307c9022989d2e34c9b03687b26 | [
"BSD-3-Clause"
] | 3 | 2017-03-17T02:26:39.000Z | 2019-11-24T11:42:31.000Z | from homura import download
| 14 | 27 | 0.857143 | from homura import download
| 0 | 0 | 0 |
867fe71bcdfc46805d08b3f1884a4d83ccb24f9a | 2,987 | py | Python | api/middlewares/application.py | lumi-io/whyphi-flask | 6effc05257e63c415ac28909a5b3f5905b719373 | [
"MIT"
] | null | null | null | api/middlewares/application.py | lumi-io/whyphi-flask | 6effc05257e63c415ac28909a5b3f5905b719373 | [
"MIT"
] | null | null | null | api/middlewares/application.py | lumi-io/whyphi-flask | 6effc05257e63c415ac28909a5b3f5905b719373 | [
"MIT"
] | null | null | null | import base64
import six
import uuid
from mimetypes import guess_extension, guess_type
import io
from api import app
import boto3
import logging
import botocore.exceptions
BUCKET = app.config["S3_BUCKET_NAME"]
def parse_graduation_date(date):
""" Parses graduation date string generated by frontend into suitable format
Parameters
----------
date : str
string of user inputted graduation date.
Returns
-------
string
string of graduation date only with Month and Year.
"""
date_splitted = date.split("-")
year = date_splitted[0]
month = date_splitted[1]
months = {
"01": "January",
"02": "February",
"03": "March",
"04": "April",
"05": "May",
"06": "June",
"07": "July",
"08": "August",
"09": "September",
"10": "October",
"11": "November",
"12": "December"
}
return months[month] + " " + year
def get_file_extension(data):
""" Helper function to get file extension of base64 file
Parameters
----------
date : str
base64 representation of a file/data.
Returns
-------
string
extension of the base64 file/data.
"""
extension = (data.split(";")[0]).split(":")[-1]
return extension
def decode_and_upload_base64_file(data, file_name):
""" Function to decode base64 files
Parameters
----------
date : str
string of user inputted graduation date.
Returns
-------
string
string of graduation date only with Month and Year.
"""
url_data = upload_base64_file(data, BUCKET, file_name)
return url_data
def upload_base64_file(data, bucket_name, file_name):
""" Helper function to decode and upload base64 files
Parameters
----------
data : str
base64 data of file
bucket_name : str
name of S3 bucket.
file_name : str
name of file + path to be saved within s3.
Returns
-------
string
URL of file within s3.
"""
logging.info("Retrieving base64 data of file.")
file_extension = get_file_extension(data)
base64_data = data.split(",")[-1]
try:
logging.info("Decoding base64 data into binary data")
decoded_file = base64.b64decode(base64_data)
except Exception as e:
logging.error(e)
try:
logging.info("Uploading file into s3 bucket.")
client = boto3.client('s3')
client.upload_fileobj(
io.BytesIO(decoded_file),
bucket_name,
file_name,
ExtraArgs={
'ACL': 'public-read',
"ContentType": file_extension
}
)
except Exception as e:
raise e
return f"https://{bucket_name}.s3.amazonaws.com/{file_name}"
| 23.335938 | 80 | 0.555072 | import base64
import six
import uuid
from mimetypes import guess_extension, guess_type
import io
from api import app
import boto3
import logging
import botocore.exceptions
BUCKET = app.config["S3_BUCKET_NAME"]
def parse_graduation_date(date):
""" Parses graduation date string generated by frontend into suitable format
Parameters
----------
date : str
string of user inputted graduation date.
Returns
-------
string
string of graduation date only with Month and Year.
"""
date_splitted = date.split("-")
year = date_splitted[0]
month = date_splitted[1]
months = {
"01": "January",
"02": "February",
"03": "March",
"04": "April",
"05": "May",
"06": "June",
"07": "July",
"08": "August",
"09": "September",
"10": "October",
"11": "November",
"12": "December"
}
return months[month] + " " + year
def get_file_extension(data):
""" Helper function to get file extension of base64 file
Parameters
----------
date : str
base64 representation of a file/data.
Returns
-------
string
extension of the base64 file/data.
"""
extension = (data.split(";")[0]).split(":")[-1]
return extension
def decode_and_upload_base64_file(data, file_name):
""" Function to decode base64 files
Parameters
----------
date : str
string of user inputted graduation date.
Returns
-------
string
string of graduation date only with Month and Year.
"""
url_data = upload_base64_file(data, BUCKET, file_name)
return url_data
def upload_base64_file(data, bucket_name, file_name):
""" Helper function to decode and upload base64 files
Parameters
----------
data : str
base64 data of file
bucket_name : str
name of S3 bucket.
file_name : str
name of file + path to be saved within s3.
Returns
-------
string
URL of file within s3.
"""
logging.info("Retrieving base64 data of file.")
file_extension = get_file_extension(data)
base64_data = data.split(",")[-1]
try:
logging.info("Decoding base64 data into binary data")
decoded_file = base64.b64decode(base64_data)
except Exception as e:
logging.error(e)
try:
logging.info("Uploading file into s3 bucket.")
client = boto3.client('s3')
client.upload_fileobj(
io.BytesIO(decoded_file),
bucket_name,
file_name,
ExtraArgs={
'ACL': 'public-read',
"ContentType": file_extension
}
)
except Exception as e:
raise e
return f"https://{bucket_name}.s3.amazonaws.com/{file_name}"
| 0 | 0 | 0 |
0d2eef58c467878b848c335d4af69d61aaac5e5a | 646 | py | Python | ubiops_cli/constants.py | UbiOps/command-line-interface | 598cd78de1984bd7721101e9842f5ba7d16ead30 | [
"Apache-2.0"
] | 1 | 2021-09-12T17:28:46.000Z | 2021-09-12T17:28:46.000Z | ubiops_cli/constants.py | UbiOps/command-line-interface | 598cd78de1984bd7721101e9842f5ba7d16ead30 | [
"Apache-2.0"
] | null | null | null | ubiops_cli/constants.py | UbiOps/command-line-interface | 598cd78de1984bd7721101e9842f5ba7d16ead30 | [
"Apache-2.0"
] | 1 | 2021-09-12T17:28:48.000Z | 2021-09-12T17:28:48.000Z | STRUCTURED_TYPE = 'structured'
ML_MODEL_FILE_NAME_KEY = "ML_MODEL_FILE_NAME"
ML_MODEL_FILE_NAME_VALUE = "model"
SYS_DEPLOYMENT_FILE_NAME_KEY = "SYS_DEPLOYMENT_FILE_NAME"
SYS_DEPLOYMENT_FILE_NAME_VALUE = "deployment"
STATUS_UNAVAILABLE = 'unavailable'
SUCCESS_STATUSES = ['completed', 'available', 'success']
WARNING_STATUSES = ['queued', 'pending', 'processing', 'building', 'validating', 'deploying', 'running',
'confirmation', 'confirmation_pending']
ERROR_STATUSES = ['failed', 'cancelled_pending', 'cancelled']
DEFAULT_IGNORE_FILE = '.ubiops-ignore'
UPDATE_TIME = 30 # seconds to wait between update and new zip upload
| 43.066667 | 104 | 0.76161 | STRUCTURED_TYPE = 'structured'
ML_MODEL_FILE_NAME_KEY = "ML_MODEL_FILE_NAME"
ML_MODEL_FILE_NAME_VALUE = "model"
SYS_DEPLOYMENT_FILE_NAME_KEY = "SYS_DEPLOYMENT_FILE_NAME"
SYS_DEPLOYMENT_FILE_NAME_VALUE = "deployment"
STATUS_UNAVAILABLE = 'unavailable'
SUCCESS_STATUSES = ['completed', 'available', 'success']
WARNING_STATUSES = ['queued', 'pending', 'processing', 'building', 'validating', 'deploying', 'running',
'confirmation', 'confirmation_pending']
ERROR_STATUSES = ['failed', 'cancelled_pending', 'cancelled']
DEFAULT_IGNORE_FILE = '.ubiops-ignore'
UPDATE_TIME = 30 # seconds to wait between update and new zip upload
| 0 | 0 | 0 |
99ac055c3fc124fc648750cc08faa1cfc1aaaa64 | 3,676 | py | Python | divulga/migrations/0001_initial.py | SaviorsServices/CommunityService | 2fc6a838b469856f8e8e6315be70a083690d1c60 | [
"MIT"
] | null | null | null | divulga/migrations/0001_initial.py | SaviorsServices/CommunityService | 2fc6a838b469856f8e8e6315be70a083690d1c60 | [
"MIT"
] | 22 | 2018-04-03T23:24:48.000Z | 2018-06-27T13:46:34.000Z | divulga/migrations/0001_initial.py | SaviorsServices/ComunityService | 2fc6a838b469856f8e8e6315be70a083690d1c60 | [
"MIT"
] | 3 | 2018-04-04T12:54:37.000Z | 2018-09-13T00:40:13.000Z | # Generated by Django 2.0.3 on 2018-06-14 17:03
from django.db import migrations, models
import django.db.models.deletion
| 43.247059 | 221 | 0.564472 | # Generated by Django 2.0.3 on 2018-06-14 17:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='CommunityAction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('start_date', models.DateField()),
('close_date', models.DateField()),
('description', models.CharField(max_length=500)),
('category', models.CharField(choices=[('SAUDE', 'Saude'), ('EDUCACIONAL', 'Educacional'), ('CORTE DE CABELO', 'Corte de cabelo'), ('OUTRA CATEGORIA', 'Outra Categoria')], default='SAUDE', max_length=40)),
],
options={
'abstract': False,
'base_manager_name': 'objects',
},
),
migrations.CreateModel(
name='Establishment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('openHour', models.TimeField()),
('closeHour', models.TimeField()),
('telefone', models.CharField(max_length=15)),
('cidade', models.CharField(max_length=30)),
('bairro', models.CharField(max_length=50)),
('endereco', models.CharField(max_length=140)),
('cep', models.CharField(max_length=10)),
('email', models.CharField(max_length=140)),
],
),
migrations.CreateModel(
name='Donation',
fields=[
('communityaction_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='divulga.CommunityAction')),
('item_name', models.CharField(max_length=50)),
('amount', models.IntegerField()),
('email', models.CharField(max_length=140)),
],
options={
'abstract': False,
'base_manager_name': 'objects',
},
bases=('divulga.communityaction',),
),
migrations.CreateModel(
name='HealthService',
fields=[
('communityaction_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='divulga.CommunityAction')),
('start_hour', models.TimeField()),
('close_hour', models.TimeField()),
('email', models.CharField(max_length=140)),
],
options={
'abstract': False,
'base_manager_name': 'objects',
},
bases=('divulga.communityaction',),
),
migrations.AddField(
model_name='communityaction',
name='establishment',
field=models.ManyToManyField(to='divulga.Establishment'),
),
migrations.AddField(
model_name='communityaction',
name='polymorphic_ctype',
field=models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='polymorphic_divulga.communityaction_set+', to='contenttypes.ContentType'),
),
]
| 0 | 3,529 | 23 |
d9f1b2b979157bf825fddc752ed273ab8c4cc826 | 1,782 | py | Python | scripts/cscap/syncauth_sites2drive.py | isudatateam/datateam | eb8e1dad6c05cb1b236689862fe87c56b25ea6fc | [
"MIT"
] | 5 | 2017-05-20T04:51:55.000Z | 2022-03-07T18:55:27.000Z | scripts/cscap/syncauth_sites2drive.py | isudatateam/datateam | eb8e1dad6c05cb1b236689862fe87c56b25ea6fc | [
"MIT"
] | 275 | 2017-03-09T20:31:30.000Z | 2022-03-30T22:43:47.000Z | scripts/cscap/syncauth_sites2drive.py | isudatateam/datateam | eb8e1dad6c05cb1b236689862fe87c56b25ea6fc | [
"MIT"
] | 3 | 2020-06-01T15:03:06.000Z | 2021-02-01T13:46:58.000Z | """
Sync authorized users on Google Sites to Google Drive
"""
import pyiem.cscap_utils as util
import gdata.gauth
import gdata.sites.client as sclient
config = util.get_config()
def get_sites_client(config, site="sustainablecorn"):
"""Return an authorized sites client"""
token = gdata.gauth.OAuth2Token(
client_id=config["appauth"]["client_id"],
client_secret=config["appauth"]["app_secret"],
user_agent="daryl.testing",
scope=config["googleauth"]["scopes"],
refresh_token=config["googleauth"]["refresh_token"],
)
sites_client = sclient.SitesClient(site=site)
token.authorize(sites_client)
return sites_client
spr_client = get_sites_client(config)
service = util.get_driveclient(config)
site_users = []
for acl in spr_client.get_acl_feed().entry:
userid = acl.scope.value
if userid not in site_users:
site_users.append(acl.scope.value)
# Get a listing of current permissions
perms = (
service.permissions().list(fileId=config["cscap"]["folderkey"]).execute()
)
for item in perms.get("items", []):
email = item["emailAddress"]
if email in site_users:
site_users.remove(email)
continue
print("Email: %s can access Drive, not sites" % (email,))
for loser in site_users:
print(loser)
# continue
id_resp = service.permissions().getIdForEmail(email=loser).execute()
id2 = id_resp["id"]
print(
("Adding %s[%s] as writer to CSCAP Internal Documents Collection")
% (loser, id2)
)
newperm = dict(
id=id2, type="user", role="writer", sendNotificationEmails=False
)
res = (
service.permissions()
.insert(fileId=config["cscap"]["folderkey"], body=newperm)
.execute()
)
print(res)
| 27.415385 | 77 | 0.666105 | """
Sync authorized users on Google Sites to Google Drive
"""
import pyiem.cscap_utils as util
import gdata.gauth
import gdata.sites.client as sclient
config = util.get_config()
def get_sites_client(config, site="sustainablecorn"):
"""Return an authorized sites client"""
token = gdata.gauth.OAuth2Token(
client_id=config["appauth"]["client_id"],
client_secret=config["appauth"]["app_secret"],
user_agent="daryl.testing",
scope=config["googleauth"]["scopes"],
refresh_token=config["googleauth"]["refresh_token"],
)
sites_client = sclient.SitesClient(site=site)
token.authorize(sites_client)
return sites_client
spr_client = get_sites_client(config)
service = util.get_driveclient(config)
site_users = []
for acl in spr_client.get_acl_feed().entry:
userid = acl.scope.value
if userid not in site_users:
site_users.append(acl.scope.value)
# Get a listing of current permissions
perms = (
service.permissions().list(fileId=config["cscap"]["folderkey"]).execute()
)
for item in perms.get("items", []):
email = item["emailAddress"]
if email in site_users:
site_users.remove(email)
continue
print("Email: %s can access Drive, not sites" % (email,))
for loser in site_users:
print(loser)
# continue
id_resp = service.permissions().getIdForEmail(email=loser).execute()
id2 = id_resp["id"]
print(
("Adding %s[%s] as writer to CSCAP Internal Documents Collection")
% (loser, id2)
)
newperm = dict(
id=id2, type="user", role="writer", sendNotificationEmails=False
)
res = (
service.permissions()
.insert(fileId=config["cscap"]["folderkey"], body=newperm)
.execute()
)
print(res)
| 0 | 0 | 0 |
a2f8bf7a0d8cafbf30e38045388953f85c506d7a | 518 | py | Python | src/level2/압축.py | iml1111/programmers_coding_study | 07e89220c59c3b40dd92edc39d1b573d018efae4 | [
"MIT"
] | 1 | 2021-01-03T13:01:33.000Z | 2021-01-03T13:01:33.000Z | src/level2/압축.py | iml1111/programmers_coding_study | 07e89220c59c3b40dd92edc39d1b573d018efae4 | [
"MIT"
] | null | null | null | src/level2/압축.py | iml1111/programmers_coding_study | 07e89220c59c3b40dd92edc39d1b573d018efae4 | [
"MIT"
] | null | null | null | from string import ascii_uppercase as au
if __name__ == '__main__':
print(solution("KAKAO")) | 27.263158 | 62 | 0.486486 | from string import ascii_uppercase as au
def solution(msg):
vocab = {word:idx for idx, word in enumerate(au, start=1)}
answer, i = [], 0
while i < len(msg):
for j in range(len(msg), i, -1):
if msg[i:j] in vocab:
if j < len(msg):
vocab[msg[i:j+1]] = len(vocab) + 1
answer.append(vocab[msg[i:j]])
i = j - 1
break
i += 1
return answer
if __name__ == '__main__':
print(solution("KAKAO")) | 398 | 0 | 23 |
01cdb696da3f8e18e71e8702db8a80f9fa01696b | 2,515 | py | Python | dapricot/auth/models.py | softapr/django_apricot | 911b6627a5ffaf3f7b13a099ca129f3a2ffda558 | [
"BSD-3-Clause"
] | null | null | null | dapricot/auth/models.py | softapr/django_apricot | 911b6627a5ffaf3f7b13a099ca129f3a2ffda558 | [
"BSD-3-Clause"
] | null | null | null | dapricot/auth/models.py | softapr/django_apricot | 911b6627a5ffaf3f7b13a099ca129f3a2ffda558 | [
"BSD-3-Clause"
] | null | null | null | from django.db import models
from django.core.mail import send_mail
from django.contrib.auth import models as auth_models
from django.contrib.auth.base_user import AbstractBaseUser
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from .managers import UserManager | 35.422535 | 79 | 0.630616 | from django.db import models
from django.core.mail import send_mail
from django.contrib.auth import models as auth_models
from django.contrib.auth.base_user import AbstractBaseUser
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from .managers import UserManager
class Group(auth_models.Group):
class Meta:
db_table = 'dapricot_auth_group'
class PermissionsMixin(auth_models.PermissionsMixin):
groups = models.ManyToManyField(
Group,
verbose_name=_('groups'),
blank=True,
help_text=_(
'The groups this user belongs to. A user will get all permissions '
'granted to each of their groups.'
),
related_name="user_set",
related_query_name="user",
)
class Meta:
db_table = 'dapricot_auth_permissionsmixin'
class User(AbstractBaseUser, PermissionsMixin):
email = models.EmailField(_('email address'), unique=True)
username = models.CharField(_('username'), max_length=30, unique=True)
first_name = models.CharField(_('first name'), max_length=30, blank=True)
last_name = models.CharField(_('last name'), max_length=30, blank=True)
date_joined = models.DateTimeField(_('date joined'), auto_now_add=True)
is_active = models.BooleanField(_('active'), default=True)
is_staff = models.BooleanField(_('is staff'), default=False)
avatar = models.ImageField(upload_to='dapricot/avatars/',
null=True,
blank=True)
objects = UserManager()
USERNAME_FIELD = settings.USERNAME_FIELD
if settings.USERNAME_FIELD=='email':
REQUIRED_FIELDS = ['username']
elif settings.USERNAME_FIELD=='username':
REQUIRED_FIELDS = ['email']
class Meta:
db_table = 'dapricot_auth_user'
def get_full_name(self):
'''
Returns the first_name plus the last_name, with a space in between.
'''
full_name = '%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def get_short_name(self):
'''
Returns the short name for the user.
'''
return self.first_name
def email_user(self, subject, message, from_email=None, **kwargs):
'''
Sends an email to this User.
'''
send_mail(subject, message, from_email, [self.email], **kwargs) | 0 | 2,068 | 77 |
6efcb966022fbb32b64b3c3f2d0f3ec814d5eda6 | 9,686 | py | Python | test.py | snowymo/PerceptualSimilarity | d1fe323db9a3b33c1d6749f3bbc1365aebe5ae70 | [
"BSD-2-Clause"
] | null | null | null | test.py | snowymo/PerceptualSimilarity | d1fe323db9a3b33c1d6749f3bbc1365aebe5ae70 | [
"BSD-2-Clause"
] | null | null | null | test.py | snowymo/PerceptualSimilarity | d1fe323db9a3b33c1d6749f3bbc1365aebe5ae70 | [
"BSD-2-Clause"
] | null | null | null | import torch
import lpips
# from IPython import embed
import cv2
import numpy as np
import os
from os import listdir
from os.path import isfile, join
import re
use_gpu = False # Whether to use GPU
spatial = True # Return a spatial map of perceptual distance.
# Linearly calibrated models (LPIPS)
loss_fn = lpips.LPIPS(net='alex', spatial=spatial) # Can also set net = 'squeeze' or 'vgg'
# loss_fn = lpips.LPIPS(net='alex', spatial=spatial, lpips=False) # Can also set net = 'squeeze' or 'vgg'
if (use_gpu):
loss_fn.cuda()
## Example usage with dummy tensors
dummy_im0 = torch.zeros(1, 3, 64, 64) # image should be RGB, normalized to [-1,1]
dummy_im1 = torch.zeros(1, 3, 64, 64)
if (use_gpu):
dummy_im0 = dummy_im0.cuda()
dummy_im1 = dummy_im1.cuda()
dist = loss_fn.forward(dummy_im0, dummy_im1)
## Example usage with images
# replace with ours
# read image and load them
scenes=["lobby", "stones", "barbershop", "classroom"]
scenes=["stones"]
# scenes=["mc", "gallery"]
# scenes=["lobby"]
imgCount = 25
step = 5
fovCount = int((110-5)/step)
result = np.empty(shape=(fovCount * len(scenes) + 2, 2 + imgCount * 5))
result[0, 0] = 0 #scene id
result[0, 1] = 368 #fov
# result[0, 2] = 687 #our
# result[0, 3] = 6373 #nerf
# result[0, 4] = 36832 #fovea
result4curve = np.empty(shape = (imgCount*fovCount*len(scenes), 6))
# f=open('lpips_result_fova_anova.csv','a')
anova = np.empty(shape=(len(scenes)*imgCount+1, 2 + 3 * fovCount)) # 4*fovCount previously
# anova[0,0] = "scene"
# anova[0,1] = "imgid"
# for i in range(5,110,5):
# anova[0, 2 + 3 * (int(i / 5) - 1)] = "our-" + str(i)
# anova[0, 2 + 3 * (int(i / 5) - 1) + 1] = "nerf-" + str(i)
# anova[0, 2 + 3 * (int(i / 5) - 1) + 2] = "fovea-" + str(i)
for sceneID, scene in enumerate(scenes):
# if scene == "lobby":
# continue
# # if scene == "mc":
# # continue
# if sceneID < 4:
# continue
folder = './figs/' + scene
imgs_gt = [os.path.join(folder + '_gt', f) for f in listdir(folder + '_gt')
if re.match(r'view_[0-9]+.png', f)]
imgs_our = [os.path.join(folder + '_our', f) for f in listdir(folder + '_our') if isfile(join(folder + '_our', f))]
imgs_nerf = [os.path.join(folder + '_nerf', f) for f in listdir(folder + '_nerf') if isfile(join(folder + '_nerf', f))]
imgs_fgt = [os.path.join(folder + '_gt', f) for f in listdir(folder + '_gt') if re.match(r'view_[0-9]+_RT_k3.0.png', f)]
print(len(imgs_our),len(imgs_nerf))
for imgID in range(imgCount):
anova[sceneID * imgCount + imgID + 1, 0] = sceneID
anova[sceneID * imgCount + imgID + 1, 1] = imgID
# img_gt = cv2.imread(folder + '_gt' + '/view_' + f'{imgID:04d}' + '.png')
# G:\My Drive\eval_mono\mono\ref_as_left_eye\
img_gt = cv2.imread(imgs_gt[imgID])
img_our = cv2.imread(imgs_our[imgID])
img_nerf = cv2.imread(imgs_nerf[imgID])
# fgtpath = folder + '_gt' + '/view_' + f'{imgID:04d}' + '_RT_k3.0.png'
# img_fgt = cv2.imread(folder + '_gt' + '/view_' + f'{imgID:04d}' + '_RT_k3.0.png')
img_fgt = cv2.imread(imgs_fgt[imgID])
print(imgs_fgt[imgID],imgs_our[imgID],imgs_nerf[imgID])
# img_our_left = cv2.imread('./imgs/eval_mono/ref_as_left_eye/'+scene+'/view' + f'{imgID:04d}' + '_blended.png')
# img_our_right = cv2.imread(
# './imgs/eval_mono/ref_as_right_eye/' + scene + '/view' + f'{imgID:04d}' + '_blended.png')
# img_nerf = cv2.imread('./imgs/NeRF_'+scene+'/' + f'{imgID:03d}' + '.png')
# img_gtfova = cv2.imread('./imgs/gt_'+scene+'/view_' + f'{imgID:04d}' + '_RT_k3.0.png')
# print('./imgs/gt_' + scene + '/view_' + f'{imgID:04d}' + '.png')
print(img_gt.shape)
# print(img_gtfova.shape)
height, width = img_gt.shape[:2]
for fov in range(5,110,step):
rect_top = height / 2 - float(fov) / 110.0 * height / 2
rect_top = int(rect_top)
rect_btm = height / 2 + float(fov) / 110.0 * height / 2
rect_btm = int(rect_btm)
rect_left = width / 2 - float(fov) / 110.0 * width / 2
rect_left = int(rect_left)
rect_right = width / 2 + float(fov) / 110.0 * width / 2
rect_right = int(rect_right)
# print(rect_top,rect_btm,rect_left,rect_right)
crop_img_gt = img_gt[rect_top:rect_btm, rect_left:rect_right]
ex_ref = lpips.im2tensor(crop_img_gt[:,:,::-1])
crop_img_our = img_our[rect_top:rect_btm, rect_left:rect_right]
ex_p0 = lpips.im2tensor(crop_img_our[:, :, ::-1])
# crop_img_our_left = img_our_left[rect_top:rect_btm, rect_left:rect_right]
# ex_p0l = lpips.im2tensor(crop_img_our_left[:,:,::-1])
# crop_img_our_right = img_our_right[rect_top:rect_btm, rect_left:rect_right]
# ex_p0r = lpips.im2tensor(crop_img_our_right[:, :, ::-1])
crop_img_nerf = img_nerf[rect_top:rect_btm, rect_left:rect_right]
ex_p1 = lpips.im2tensor(crop_img_nerf[:,:,::-1])
# crop_img_gt_fova = img_gtfova[rect_top:rect_btm, rect_left:rect_right]
crop_img_fgt = img_fgt[rect_top:rect_btm, rect_left:rect_right]
ex_p2 = lpips.im2tensor(crop_img_fgt[:, :, ::-1])
if (use_gpu):
ex_ref = ex_ref.cuda()
ex_p0 = ex_p0.cuda()
# ex_p0l = ex_p0l.cuda()
# ex_p0r = ex_p0r.cuda()
ex_p1 = ex_p1.cuda()
ex_p2 = ex_p2.cuda()
ex_d0 = loss_fn.forward(ex_ref, ex_p0)
# ex_d0l = loss_fn.forward(ex_ref, ex_p0l)
# ex_d0r = loss_fn.forward(ex_ref, ex_p0r)
ex_d1 = loss_fn.forward(ex_ref, ex_p1)
ex_d2 = loss_fn.forward(ex_ref, ex_p2)
if not spatial:
# print('SPATIAL fov %d Distances: OUR %.3f, NERF %.3f' % (fov, ex_d0, ex_d1))
print('fov %d Distances: OUR %.3f, NeRF %.3f, FOVA %.3f' % (fov, ex_d0, ex_d1, ex_d2))
result[sceneID*fovCount + int(fov / 5 - 1) + 1, 0] = sceneID # scene id
result[sceneID*fovCount + int(fov / 5 - 1) + 1, 1] = fov
result[sceneID*fovCount + int(fov / 5 - 1) + 1, 0 * imgCount + 2+imgID] = ex_d0
result[sceneID*fovCount + imgID * 21 + int(fov / 5 - 1) + 1, 1 * imgCount + 2+imgID] = ex_d1
result[sceneID * fovCount + int(fov / 5 - 1) + 1, 2 * imgCount + 2+imgID] = ex_d2
anova[sceneID * imgCount + imgID + 1, 2 + 3 * int(fov / 5 - 1)] = ex_d0
anova[sceneID * imgCount + imgID + 1, 2 + 3 * int(fov / 5 - 1)+1] = ex_d1
anova[sceneID * imgCount + imgID + 1, 2 + 3 * int(fov / 5 - 1)+2] = ex_d2
result4curve[sceneID * fovCount * imgCount + imgID * fovCount + int(fov / step - 1)] = [sceneID, fov, ex_d0, ex_d1, ex_d2, imgID]
else:
print('fov %d Distances: OUR %.3f, NeRF %.3f, FOVA %.3f' % (
fov, ex_d0.mean(), ex_d1.mean(), ex_d2.mean())) # The mean distance is approximately the same as the non-spatial distance
exd0mean = ex_d0.mean()
exd1mean = ex_d1.mean()
exd2mean = ex_d2.mean()
# print('fov %d Distances: OUR %.3f,, NeRF %.3f' % (
# fov, ex_d0.mean(), ex_d1.mean(),
# )) # The mean distance is approximately the same as the non-spatial distance
result[sceneID * fovCount + int((fov-5) / step) + 1, 0] = sceneID # scene id
result[sceneID * fovCount + int((fov-5) / step) + 1, 1] = fov
result[sceneID * fovCount + int((fov-5) / step) + 1, 0 * imgCount + 2 + imgID] = ex_d0.mean()
# result[sceneID * fovCount + int((fov - 5) / step) + 1, 1 * imgCount + 2 + imgID] = ex_d0r.mean()
result[sceneID * fovCount + int((fov-5) / step) + 1, 1 * imgCount + 2 + imgID] = ex_d1.mean()
result[sceneID * fovCount + int((fov-5) / step) + 1, 2 * imgCount + 2+imgID] = ex_d2.mean()
fovidx = int((fov-5) / step)
anova[sceneID * imgCount + imgID + 1, 2 +fovidx ] = ex_d0.mean()
# anova[sceneID * imgCount + imgID + 1, 2 + fovCount + fovidx] = ex_d0r.mean()
anova[sceneID * imgCount + imgID + 1, 2 + fovCount * 1 + fovidx] = ex_d1.mean()
anova[sceneID * imgCount + imgID + 1, 2 + fovCount * 2 + fovidx] = ex_d2.mean()
# DEBUG
idx = sceneID * fovCount * imgCount + imgID * fovCount + int((fov-5) / step)
# print("idx",idx,"result4curve.shape",result4curve.shape)
# print(sceneID, fov, exd0mean, exd0mean.detach().numpy(), imgID)
result4curve[idx] = [sceneID, fov, exd0mean.detach().numpy(), exd1mean.detach().numpy(), exd2mean.detach().numpy(), imgID]
# Visualize a spatially-varying distance map between ex_p0 and ex_ref
# import pylab
# pylab.imshow(ex_d0[0, 0, ...].data.cpu().numpy())
# pylab.show()
# np.savetxt(f, anova[(sceneID) * 8+1:sceneID * 8+9], delimiter=',')
np.savetxt('lpips_curve_125_' + scene + '.csv', result4curve, delimiter=',')
np.savetxt('lpips_fov_125_' + scene+'.csv', result, delimiter=',')
np.savetxt('lpips_anova_125_' + scene+'.csv', anova, delimiter=',')
# crop_img = img[y:y+h, x:x+w]
# ex_ref = lpips.im2tensor(lpips.load_image('./imgs/ex_ref.png'))
# ex_p0 = lpips.im2tensor(lpips.load_image('./imgs/ex_p0.png'))
# ex_p1 = lpips.im2tensor(lpips.load_image('./imgs/ex_p1.png'))
| 51.248677 | 145 | 0.571753 | import torch
import lpips
# from IPython import embed
import cv2
import numpy as np
import os
from os import listdir
from os.path import isfile, join
import re
use_gpu = False # Whether to use GPU
spatial = True # Return a spatial map of perceptual distance.
# Linearly calibrated models (LPIPS)
loss_fn = lpips.LPIPS(net='alex', spatial=spatial) # Can also set net = 'squeeze' or 'vgg'
# loss_fn = lpips.LPIPS(net='alex', spatial=spatial, lpips=False) # Can also set net = 'squeeze' or 'vgg'
if (use_gpu):
loss_fn.cuda()
## Example usage with dummy tensors
dummy_im0 = torch.zeros(1, 3, 64, 64) # image should be RGB, normalized to [-1,1]
dummy_im1 = torch.zeros(1, 3, 64, 64)
if (use_gpu):
dummy_im0 = dummy_im0.cuda()
dummy_im1 = dummy_im1.cuda()
dist = loss_fn.forward(dummy_im0, dummy_im1)
## Example usage with images
# replace with ours
# read image and load them
scenes=["lobby", "stones", "barbershop", "classroom"]
scenes=["stones"]
# scenes=["mc", "gallery"]
# scenes=["lobby"]
imgCount = 25
step = 5
fovCount = int((110-5)/step)
result = np.empty(shape=(fovCount * len(scenes) + 2, 2 + imgCount * 5))
result[0, 0] = 0 #scene id
result[0, 1] = 368 #fov
# result[0, 2] = 687 #our
# result[0, 3] = 6373 #nerf
# result[0, 4] = 36832 #fovea
result4curve = np.empty(shape = (imgCount*fovCount*len(scenes), 6))
# f=open('lpips_result_fova_anova.csv','a')
anova = np.empty(shape=(len(scenes)*imgCount+1, 2 + 3 * fovCount)) # 4*fovCount previously
# anova[0,0] = "scene"
# anova[0,1] = "imgid"
# for i in range(5,110,5):
# anova[0, 2 + 3 * (int(i / 5) - 1)] = "our-" + str(i)
# anova[0, 2 + 3 * (int(i / 5) - 1) + 1] = "nerf-" + str(i)
# anova[0, 2 + 3 * (int(i / 5) - 1) + 2] = "fovea-" + str(i)
for sceneID, scene in enumerate(scenes):
# if scene == "lobby":
# continue
# # if scene == "mc":
# # continue
# if sceneID < 4:
# continue
folder = './figs/' + scene
imgs_gt = [os.path.join(folder + '_gt', f) for f in listdir(folder + '_gt')
if re.match(r'view_[0-9]+.png', f)]
imgs_our = [os.path.join(folder + '_our', f) for f in listdir(folder + '_our') if isfile(join(folder + '_our', f))]
imgs_nerf = [os.path.join(folder + '_nerf', f) for f in listdir(folder + '_nerf') if isfile(join(folder + '_nerf', f))]
imgs_fgt = [os.path.join(folder + '_gt', f) for f in listdir(folder + '_gt') if re.match(r'view_[0-9]+_RT_k3.0.png', f)]
print(len(imgs_our),len(imgs_nerf))
for imgID in range(imgCount):
anova[sceneID * imgCount + imgID + 1, 0] = sceneID
anova[sceneID * imgCount + imgID + 1, 1] = imgID
# img_gt = cv2.imread(folder + '_gt' + '/view_' + f'{imgID:04d}' + '.png')
# G:\My Drive\eval_mono\mono\ref_as_left_eye\
img_gt = cv2.imread(imgs_gt[imgID])
img_our = cv2.imread(imgs_our[imgID])
img_nerf = cv2.imread(imgs_nerf[imgID])
# fgtpath = folder + '_gt' + '/view_' + f'{imgID:04d}' + '_RT_k3.0.png'
# img_fgt = cv2.imread(folder + '_gt' + '/view_' + f'{imgID:04d}' + '_RT_k3.0.png')
img_fgt = cv2.imread(imgs_fgt[imgID])
print(imgs_fgt[imgID],imgs_our[imgID],imgs_nerf[imgID])
# img_our_left = cv2.imread('./imgs/eval_mono/ref_as_left_eye/'+scene+'/view' + f'{imgID:04d}' + '_blended.png')
# img_our_right = cv2.imread(
# './imgs/eval_mono/ref_as_right_eye/' + scene + '/view' + f'{imgID:04d}' + '_blended.png')
# img_nerf = cv2.imread('./imgs/NeRF_'+scene+'/' + f'{imgID:03d}' + '.png')
# img_gtfova = cv2.imread('./imgs/gt_'+scene+'/view_' + f'{imgID:04d}' + '_RT_k3.0.png')
# print('./imgs/gt_' + scene + '/view_' + f'{imgID:04d}' + '.png')
print(img_gt.shape)
# print(img_gtfova.shape)
height, width = img_gt.shape[:2]
for fov in range(5,110,step):
rect_top = height / 2 - float(fov) / 110.0 * height / 2
rect_top = int(rect_top)
rect_btm = height / 2 + float(fov) / 110.0 * height / 2
rect_btm = int(rect_btm)
rect_left = width / 2 - float(fov) / 110.0 * width / 2
rect_left = int(rect_left)
rect_right = width / 2 + float(fov) / 110.0 * width / 2
rect_right = int(rect_right)
# print(rect_top,rect_btm,rect_left,rect_right)
crop_img_gt = img_gt[rect_top:rect_btm, rect_left:rect_right]
ex_ref = lpips.im2tensor(crop_img_gt[:,:,::-1])
crop_img_our = img_our[rect_top:rect_btm, rect_left:rect_right]
ex_p0 = lpips.im2tensor(crop_img_our[:, :, ::-1])
# crop_img_our_left = img_our_left[rect_top:rect_btm, rect_left:rect_right]
# ex_p0l = lpips.im2tensor(crop_img_our_left[:,:,::-1])
# crop_img_our_right = img_our_right[rect_top:rect_btm, rect_left:rect_right]
# ex_p0r = lpips.im2tensor(crop_img_our_right[:, :, ::-1])
crop_img_nerf = img_nerf[rect_top:rect_btm, rect_left:rect_right]
ex_p1 = lpips.im2tensor(crop_img_nerf[:,:,::-1])
# crop_img_gt_fova = img_gtfova[rect_top:rect_btm, rect_left:rect_right]
crop_img_fgt = img_fgt[rect_top:rect_btm, rect_left:rect_right]
ex_p2 = lpips.im2tensor(crop_img_fgt[:, :, ::-1])
if (use_gpu):
ex_ref = ex_ref.cuda()
ex_p0 = ex_p0.cuda()
# ex_p0l = ex_p0l.cuda()
# ex_p0r = ex_p0r.cuda()
ex_p1 = ex_p1.cuda()
ex_p2 = ex_p2.cuda()
ex_d0 = loss_fn.forward(ex_ref, ex_p0)
# ex_d0l = loss_fn.forward(ex_ref, ex_p0l)
# ex_d0r = loss_fn.forward(ex_ref, ex_p0r)
ex_d1 = loss_fn.forward(ex_ref, ex_p1)
ex_d2 = loss_fn.forward(ex_ref, ex_p2)
if not spatial:
# print('SPATIAL fov %d Distances: OUR %.3f, NERF %.3f' % (fov, ex_d0, ex_d1))
print('fov %d Distances: OUR %.3f, NeRF %.3f, FOVA %.3f' % (fov, ex_d0, ex_d1, ex_d2))
result[sceneID*fovCount + int(fov / 5 - 1) + 1, 0] = sceneID # scene id
result[sceneID*fovCount + int(fov / 5 - 1) + 1, 1] = fov
result[sceneID*fovCount + int(fov / 5 - 1) + 1, 0 * imgCount + 2+imgID] = ex_d0
result[sceneID*fovCount + imgID * 21 + int(fov / 5 - 1) + 1, 1 * imgCount + 2+imgID] = ex_d1
result[sceneID * fovCount + int(fov / 5 - 1) + 1, 2 * imgCount + 2+imgID] = ex_d2
anova[sceneID * imgCount + imgID + 1, 2 + 3 * int(fov / 5 - 1)] = ex_d0
anova[sceneID * imgCount + imgID + 1, 2 + 3 * int(fov / 5 - 1)+1] = ex_d1
anova[sceneID * imgCount + imgID + 1, 2 + 3 * int(fov / 5 - 1)+2] = ex_d2
result4curve[sceneID * fovCount * imgCount + imgID * fovCount + int(fov / step - 1)] = [sceneID, fov, ex_d0, ex_d1, ex_d2, imgID]
else:
print('fov %d Distances: OUR %.3f, NeRF %.3f, FOVA %.3f' % (
fov, ex_d0.mean(), ex_d1.mean(), ex_d2.mean())) # The mean distance is approximately the same as the non-spatial distance
exd0mean = ex_d0.mean()
exd1mean = ex_d1.mean()
exd2mean = ex_d2.mean()
# print('fov %d Distances: OUR %.3f,, NeRF %.3f' % (
# fov, ex_d0.mean(), ex_d1.mean(),
# )) # The mean distance is approximately the same as the non-spatial distance
result[sceneID * fovCount + int((fov-5) / step) + 1, 0] = sceneID # scene id
result[sceneID * fovCount + int((fov-5) / step) + 1, 1] = fov
result[sceneID * fovCount + int((fov-5) / step) + 1, 0 * imgCount + 2 + imgID] = ex_d0.mean()
# result[sceneID * fovCount + int((fov - 5) / step) + 1, 1 * imgCount + 2 + imgID] = ex_d0r.mean()
result[sceneID * fovCount + int((fov-5) / step) + 1, 1 * imgCount + 2 + imgID] = ex_d1.mean()
result[sceneID * fovCount + int((fov-5) / step) + 1, 2 * imgCount + 2+imgID] = ex_d2.mean()
fovidx = int((fov-5) / step)
anova[sceneID * imgCount + imgID + 1, 2 +fovidx ] = ex_d0.mean()
# anova[sceneID * imgCount + imgID + 1, 2 + fovCount + fovidx] = ex_d0r.mean()
anova[sceneID * imgCount + imgID + 1, 2 + fovCount * 1 + fovidx] = ex_d1.mean()
anova[sceneID * imgCount + imgID + 1, 2 + fovCount * 2 + fovidx] = ex_d2.mean()
# DEBUG
idx = sceneID * fovCount * imgCount + imgID * fovCount + int((fov-5) / step)
# print("idx",idx,"result4curve.shape",result4curve.shape)
# print(sceneID, fov, exd0mean, exd0mean.detach().numpy(), imgID)
result4curve[idx] = [sceneID, fov, exd0mean.detach().numpy(), exd1mean.detach().numpy(), exd2mean.detach().numpy(), imgID]
# Visualize a spatially-varying distance map between ex_p0 and ex_ref
# import pylab
# pylab.imshow(ex_d0[0, 0, ...].data.cpu().numpy())
# pylab.show()
# np.savetxt(f, anova[(sceneID) * 8+1:sceneID * 8+9], delimiter=',')
np.savetxt('lpips_curve_125_' + scene + '.csv', result4curve, delimiter=',')
np.savetxt('lpips_fov_125_' + scene+'.csv', result, delimiter=',')
np.savetxt('lpips_anova_125_' + scene+'.csv', anova, delimiter=',')
# crop_img = img[y:y+h, x:x+w]
# ex_ref = lpips.im2tensor(lpips.load_image('./imgs/ex_ref.png'))
# ex_p0 = lpips.im2tensor(lpips.load_image('./imgs/ex_p0.png'))
# ex_p1 = lpips.im2tensor(lpips.load_image('./imgs/ex_p1.png'))
| 0 | 0 | 0 |
19d8276c955ff42989715272f497d31975832189 | 3,434 | py | Python | scrape.py | metakgp/twerp | 57cc4b87bff2ce52f5b67765d9b961b9e94b3023 | [
"MIT"
] | 5 | 2018-11-18T10:31:59.000Z | 2020-09-02T04:55:36.000Z | scrape.py | metakgp/twerp | 57cc4b87bff2ce52f5b67765d9b961b9e94b3023 | [
"MIT"
] | 9 | 2018-11-16T17:11:33.000Z | 2021-04-30T21:01:42.000Z | scrape.py | metakgp/twerp | 57cc4b87bff2ce52f5b67765d9b961b9e94b3023 | [
"MIT"
] | 7 | 2018-11-07T06:57:50.000Z | 2020-09-11T22:09:10.000Z | from __future__ import print_function
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
import json
# from google.auth.transport.requests import Request
# Better to use read only scope for not modifying the contents accidentally
SCOPES = ['https://www.googleapis.com/auth/spreadsheets.readonly']
# The ID and range of a sample spreadsheet.
RANGE_NAME = "A:Z"
# If run this function directly then it will generate two filders in the PWD
# one containing the data (modified, see below) from sheets API and the other
# containing the required dict
def convert_form_to_dict(SPREADSHEET_ID):
"""Uses sheets API to obtain result
Returns the required formatted list containing
nested dicts of responses obtained from the google sheet
"""
creds = None
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server()
service = build('sheets', 'v4', credentials=creds)
# Call the Sheets API
sheet = service.spreadsheets()
result = sheet.values().get(spreadsheetId=SPREADSHEET_ID,
range=RANGE_NAME,
majorDimension='ROWS').execute()
values = result.get('values', [])
# Sheets api removing trailing empty spaces in the result
# If somebody has not filled some columns at the end
# then it will not be there in the json data
# example only till column[2] is filled then the rest will not
# be there in the json data of the API
# We will add a "" instead
# This is not the case when some data between is missing inbetween
# for example column[2] is missing but onwards are filled
# sheet automatically adds "" in this case
for item in result['values']:
length = len(item)
if (length < 7):
while(length != 7):
item.append("")
length = length + 1
print('{} values received'.format(len(values)))
all_responses = []
# Obtaining all course codes and making primary keys in Dict, appending
# this into the list
# Also renaming headings as they are in the wiki
values[0][2] = 'Concepts taught in class'
values[0][3] = 'Student Opinion'
values[0][4] = 'How to Crack the Paper'
values[0][5] = 'Classroom resources'
for item in values[1:]:
dict_format = {}
dict_format['Course Code'] = item[1]
dict_format['Timestamp'] = []
for element in values[0][2:]:
dict_format[element] = []
all_responses.append(dict_format)
# filling all the data into the required course code
for item in values[1:]:
for course_data in all_responses:
if(course_data['Course Code'] == item[1]):
course_data['Timestamp'].append(item[0])
index = 2
# ignoring the empty entries
for element in values[0][2:]:
if(item[index] != ""):
course_data[element].append(item[index])
index = index + 1
break
total = [all_responses, result]
return total[0]
if __name__ == '__main__':
answer = convert_form_to_dict()
with open('result.json', 'w') as f:
json.dump(answer[1], f, indent=2)
with open('required_dict.json', 'w') as f:
json.dump(answer[0], f, indent=2)
| 37.326087 | 77 | 0.63279 | from __future__ import print_function
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
import json
# from google.auth.transport.requests import Request
# Better to use read only scope for not modifying the contents accidentally
SCOPES = ['https://www.googleapis.com/auth/spreadsheets.readonly']
# The ID and range of a sample spreadsheet.
RANGE_NAME = "A:Z"
# If run this function directly then it will generate two filders in the PWD
# one containing the data (modified, see below) from sheets API and the other
# containing the required dict
def convert_form_to_dict(SPREADSHEET_ID):
"""Uses sheets API to obtain result
Returns the required formatted list containing
nested dicts of responses obtained from the google sheet
"""
creds = None
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server()
service = build('sheets', 'v4', credentials=creds)
# Call the Sheets API
sheet = service.spreadsheets()
result = sheet.values().get(spreadsheetId=SPREADSHEET_ID,
range=RANGE_NAME,
majorDimension='ROWS').execute()
values = result.get('values', [])
# Sheets api removing trailing empty spaces in the result
# If somebody has not filled some columns at the end
# then it will not be there in the json data
# example only till column[2] is filled then the rest will not
# be there in the json data of the API
# We will add a "" instead
# This is not the case when some data between is missing inbetween
# for example column[2] is missing but onwards are filled
# sheet automatically adds "" in this case
for item in result['values']:
length = len(item)
if (length < 7):
while(length != 7):
item.append("")
length = length + 1
print('{} values received'.format(len(values)))
all_responses = []
# Obtaining all course codes and making primary keys in Dict, appending
# this into the list
# Also renaming headings as they are in the wiki
values[0][2] = 'Concepts taught in class'
values[0][3] = 'Student Opinion'
values[0][4] = 'How to Crack the Paper'
values[0][5] = 'Classroom resources'
for item in values[1:]:
dict_format = {}
dict_format['Course Code'] = item[1]
dict_format['Timestamp'] = []
for element in values[0][2:]:
dict_format[element] = []
all_responses.append(dict_format)
# filling all the data into the required course code
for item in values[1:]:
for course_data in all_responses:
if(course_data['Course Code'] == item[1]):
course_data['Timestamp'].append(item[0])
index = 2
# ignoring the empty entries
for element in values[0][2:]:
if(item[index] != ""):
course_data[element].append(item[index])
index = index + 1
break
total = [all_responses, result]
return total[0]
if __name__ == '__main__':
answer = convert_form_to_dict()
with open('result.json', 'w') as f:
json.dump(answer[1], f, indent=2)
with open('required_dict.json', 'w') as f:
json.dump(answer[0], f, indent=2)
| 0 | 0 | 0 |
9575da2ebbfd269ab72b0f3a139e15828d50a8f7 | 1,359 | py | Python | pypreproc/customer.py | flyandlure/pypreproc | 3ec6d03fc54b453878a39059c9afaeae296e726d | [
"MIT"
] | 1 | 2020-06-12T06:04:33.000Z | 2020-06-12T06:04:33.000Z | pypreproc/customer.py | flyandlure/pypreproc | 3ec6d03fc54b453878a39059c9afaeae296e726d | [
"MIT"
] | null | null | null | pypreproc/customer.py | flyandlure/pypreproc | 3ec6d03fc54b453878a39059c9afaeae296e726d | [
"MIT"
] | null | null | null | """
Name: Functions for customer data
Developer: Matt Clarke
Date: Jan 1, 2020
Description: Specific functions for generating customer data including RFM scores.
"""
from lifetimes.utils import summary_data_from_transaction_data
def rfm_model(df, customer_column, date_column, monetary_column):
"""Return an RFM score for each customer using the Lifetimes RFM model.
This score is calculated across the whole DataFrame, so if you have a
customer with numerous orders, it will calculate one value and apply
it across all orders and won't calculate the figure historically.
Args:
:param df: Pandas DataFrame
:param monetary_column: Column containing monetary value of order
:param date_column: Column containing date
:param customer_column: Column containing customer
Returns:
New DataFrame containing RFM data by customer.
T is equal to days since first order and end of period.
Customers with 1 order will be assigned 0 for RFM scores.
"""
# Ensure that inf and NaN values are filled
rfm_df = summary_data_from_transaction_data(df,
customer_column,
date_column,
monetary_value_col=monetary_column)
return rfm_df
| 37.75 | 83 | 0.66961 | """
Name: Functions for customer data
Developer: Matt Clarke
Date: Jan 1, 2020
Description: Specific functions for generating customer data including RFM scores.
"""
from lifetimes.utils import summary_data_from_transaction_data
def rfm_model(df, customer_column, date_column, monetary_column):
"""Return an RFM score for each customer using the Lifetimes RFM model.
This score is calculated across the whole DataFrame, so if you have a
customer with numerous orders, it will calculate one value and apply
it across all orders and won't calculate the figure historically.
Args:
:param df: Pandas DataFrame
:param monetary_column: Column containing monetary value of order
:param date_column: Column containing date
:param customer_column: Column containing customer
Returns:
New DataFrame containing RFM data by customer.
T is equal to days since first order and end of period.
Customers with 1 order will be assigned 0 for RFM scores.
"""
# Ensure that inf and NaN values are filled
rfm_df = summary_data_from_transaction_data(df,
customer_column,
date_column,
monetary_value_col=monetary_column)
return rfm_df
| 0 | 0 | 0 |
d1c17995a7d9bd61b403c13951df1c862e2c5991 | 45 | py | Python | Python/flask_web/structure/app/api_1_0/users.py | castial/CodeSnippets | dba619949976261610869c6d928062196a50929e | [
"MIT"
] | 1 | 2020-06-03T20:50:02.000Z | 2020-06-03T20:50:02.000Z | Python/flask_web/structure/app/api_1_0/users.py | castial/CodeSnippets | dba619949976261610869c6d928062196a50929e | [
"MIT"
] | 5 | 2021-03-09T20:24:45.000Z | 2022-02-26T19:01:13.000Z | Python/flask_web/structure/app/api_1_0/users.py | castial/CodeSnippets | dba619949976261610869c6d928062196a50929e | [
"MIT"
] | null | null | null | from . import api
from flask import jsonify
| 11.25 | 25 | 0.777778 | from . import api
from flask import jsonify
| 0 | 0 | 0 |
d45387c491588d58756a3e28abdc034e0a6cce40 | 446 | py | Python | WhileLoop/Coins.py | Rohitm619/Softuni-Python-Basic | 03c9d0b44f5652c99db3b0e42014dd5af50205a2 | [
"MIT"
] | 1 | 2020-09-22T13:25:34.000Z | 2020-09-22T13:25:34.000Z | WhileLoop/Coins.py | Rohitm619/Softuni-Python-Basic | 03c9d0b44f5652c99db3b0e42014dd5af50205a2 | [
"MIT"
] | null | null | null | WhileLoop/Coins.py | Rohitm619/Softuni-Python-Basic | 03c9d0b44f5652c99db3b0e42014dd5af50205a2 | [
"MIT"
] | 1 | 2020-10-17T09:27:46.000Z | 2020-10-17T09:27:46.000Z | sum = float(input())
counter_of_coins = 0
sum = int(sum*100)
counter_of_coins += sum // 200
sum = sum % 200
counter_of_coins += sum // 100
sum = sum % 100
counter_of_coins += sum // 50
sum = sum % 50
counter_of_coins += sum // 20
sum = sum % 20
counter_of_coins += sum // 10
sum = sum % 10
counter_of_coins += sum // 5
sum = sum % 5
counter_of_coins += sum // 2
sum = sum % 2
if sum == 1:
counter_of_coins += 1
print(int(counter_of_coins)) | 19.391304 | 30 | 0.654709 | sum = float(input())
counter_of_coins = 0
sum = int(sum*100)
counter_of_coins += sum // 200
sum = sum % 200
counter_of_coins += sum // 100
sum = sum % 100
counter_of_coins += sum // 50
sum = sum % 50
counter_of_coins += sum // 20
sum = sum % 20
counter_of_coins += sum // 10
sum = sum % 10
counter_of_coins += sum // 5
sum = sum % 5
counter_of_coins += sum // 2
sum = sum % 2
if sum == 1:
counter_of_coins += 1
print(int(counter_of_coins)) | 0 | 0 | 0 |
e6f28eb0470ba8fc41a20b0cff6ecee64fd44761 | 3,535 | py | Python | demo/live.py | zhenjiaa/ssd.pytorch | 4d72ae69edb4737dfa3eec4526f27967b1603fa5 | [
"MIT"
] | 4 | 2019-11-19T01:04:19.000Z | 2020-10-27T01:44:45.000Z | demo/live.py | zhenjiaa/ssd.pytorch | 4d72ae69edb4737dfa3eec4526f27967b1603fa5 | [
"MIT"
] | 1 | 2020-11-13T11:35:33.000Z | 2020-11-13T11:35:33.000Z | demo/live.py | zhenjiaa/ssd.pytorch | 4d72ae69edb4737dfa3eec4526f27967b1603fa5 | [
"MIT"
] | 1 | 2019-11-28T06:53:32.000Z | 2019-11-28T06:53:32.000Z | from __future__ import print_function
import torch
from torch.autograd import Variable
import cv2
import time
from imutils.video import FPS, WebcamVideoStream
import argparse
parser = argparse.ArgumentParser(description='Single Shot MultiBox Detection')
parser.add_argument('--weights', default='weights/ssd_300_VOC0712.pth',
type=str, help='Trained state_dict file path')
parser.add_argument('--cuda', default=False, type=bool,
help='Use cuda in live demo')
args = parser.parse_args()
COLORS = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]
FONT = cv2.FONT_HERSHEY_SIMPLEX
if __name__ == '__main__':
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from data import BaseTransform, VOC_CLASSES as labelmap
from ssd import build_ssd
if torch.cuda.is_available():
if args.cuda:
torch.set_default_tensor_type('torch.cuda.FloatTensor')
if not args.cuda:
print("WARNING: It looks like you have a CUDA device, but aren't using \ CUDA. Run with --cuda for optimal eval speed.")
torch.set_default_tensor_type('torch.FloatTensor')
else:
torch.set_default_tensor_type('torch.FloatTensor')
net = build_ssd('test', 300, 21) # initialize SSD
net.load_state_dict(torch.load(args.weights))
transform = BaseTransform(net.size, (104/256.0, 117/256.0, 123/256.0))
fps = FPS().start()
cv2_demo(net.eval(), transform)
# stop the timer and display FPS information
fps.stop()
print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
# cleanup
cv2.destroyAllWindows()
stream.stop()
| 34.320388 | 132 | 0.579632 | from __future__ import print_function
import torch
from torch.autograd import Variable
import cv2
import time
from imutils.video import FPS, WebcamVideoStream
import argparse
parser = argparse.ArgumentParser(description='Single Shot MultiBox Detection')
parser.add_argument('--weights', default='weights/ssd_300_VOC0712.pth',
type=str, help='Trained state_dict file path')
parser.add_argument('--cuda', default=False, type=bool,
help='Use cuda in live demo')
args = parser.parse_args()
COLORS = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]
FONT = cv2.FONT_HERSHEY_SIMPLEX
def cv2_demo(net, transform):
def predict(frame):
height, width = frame.shape[:2]
x = torch.from_numpy(transform(frame)[0]).permute(2, 0, 1)
if args.cuda:
x = x.cuda()
x = Variable(x.unsqueeze(0))
y = net(x) # forward pass
detections = y.data
# scale each detection back up to the image
scale = torch.Tensor([width, height, width, height])
for i in range(detections.size(1)):
j = 0
while detections[0, i, j, 0] >= 0.6:
pt = (detections[0, i, j, 1:] * scale).cpu().numpy()
cv2.rectangle(frame,
(int(pt[0]), int(pt[1])),
(int(pt[2]), int(pt[3])),
COLORS[i % 3], 2)
cv2.putText(frame, labelmap[i - 1], (int(pt[0]), int(pt[1])),
FONT, 2, (255, 255, 255), 2, cv2.LINE_AA)
j += 1
return frame
# start video stream thread, allow buffer to fill
print("[INFO] starting threaded video stream...")
stream = WebcamVideoStream(src=0).start() # default camera
time.sleep(1.0)
# start fps timer
# loop over frames from the video file stream
while True:
# grab next frame
frame = stream.read()
key = cv2.waitKey(1) & 0xFF
# update FPS counter
fps.update()
frame = predict(frame)
# keybindings for display
if key == ord('p'): # pause
while True:
key2 = cv2.waitKey(1) or 0xff
cv2.imshow('frame', frame)
if key2 == ord('p'): # resume
break
cv2.imshow('frame', frame)
if key == 27: # exit
break
if __name__ == '__main__':
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from data import BaseTransform, VOC_CLASSES as labelmap
from ssd import build_ssd
if torch.cuda.is_available():
if args.cuda:
torch.set_default_tensor_type('torch.cuda.FloatTensor')
if not args.cuda:
print("WARNING: It looks like you have a CUDA device, but aren't using \ CUDA. Run with --cuda for optimal eval speed.")
torch.set_default_tensor_type('torch.FloatTensor')
else:
torch.set_default_tensor_type('torch.FloatTensor')
net = build_ssd('test', 300, 21) # initialize SSD
net.load_state_dict(torch.load(args.weights))
transform = BaseTransform(net.size, (104/256.0, 117/256.0, 123/256.0))
fps = FPS().start()
cv2_demo(net.eval(), transform)
# stop the timer and display FPS information
fps.stop()
print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
# cleanup
cv2.destroyAllWindows()
stream.stop()
| 1,764 | 0 | 23 |
b3c5943e19fc570e4004d70db8a9256846b5bacd | 330 | py | Python | day5.py | jlucangelio/adventofcode-2017 | 339cd4488dd6b398a664845199e19ba38f173afa | [
"MIT"
] | null | null | null | day5.py | jlucangelio/adventofcode-2017 | 339cd4488dd6b398a664845199e19ba38f173afa | [
"MIT"
] | null | null | null | day5.py | jlucangelio/adventofcode-2017 | 339cd4488dd6b398a664845199e19ba38f173afa | [
"MIT"
] | null | null | null | INSTRS = []
with open("day5.input") as f:
INSTRS = [int(l.strip()) for l in f.readlines()]
# INSTRS = [0, 3, 0, 1, -3]
pc = 0
steps = 0
while pc >= 0 and pc < len(INSTRS):
offset = INSTRS[pc]
if offset >= 3:
INSTRS[pc] -= 1
else:
INSTRS[pc] += 1
pc += offset
steps += 1
print steps
| 15.714286 | 52 | 0.509091 | INSTRS = []
with open("day5.input") as f:
INSTRS = [int(l.strip()) for l in f.readlines()]
# INSTRS = [0, 3, 0, 1, -3]
pc = 0
steps = 0
while pc >= 0 and pc < len(INSTRS):
offset = INSTRS[pc]
if offset >= 3:
INSTRS[pc] -= 1
else:
INSTRS[pc] += 1
pc += offset
steps += 1
print steps
| 0 | 0 | 0 |
910c8547ee8fba31c4bfb590a2cd02fc18004210 | 12,827 | py | Python | env/lib/python2.7/site-packages/stripe/test/resources/test_customers.py | imran1234567/plutus | c964f18beb139de2645e052eb4c75a6bc0677029 | [
"MIT"
] | null | null | null | env/lib/python2.7/site-packages/stripe/test/resources/test_customers.py | imran1234567/plutus | c964f18beb139de2645e052eb4c75a6bc0677029 | [
"MIT"
] | 8 | 2019-06-10T19:43:54.000Z | 2021-11-15T17:48:16.000Z | Lib/site-packages/stripe/test/resources/test_customers.py | JulioCantu/IndiStore | 723c4ced800d43ffbfd34dc0ff7649b628008416 | [
"bzip2-1.0.6"
] | null | null | null | import datetime
import time
import warnings
import stripe
from stripe.test.helper import (StripeResourceTest, DUMMY_PLAN)
| 29.761021 | 78 | 0.536135 | import datetime
import time
import warnings
import stripe
from stripe.test.helper import (StripeResourceTest, DUMMY_PLAN)
class CustomerTest(StripeResourceTest):
def test_list_customers(self):
stripe.Customer.list()
self.requestor_mock.request.assert_called_with(
'get',
'/v1/customers',
{},
)
def test_create_customer(self):
stripe.Customer.create(description="foo bar", source='tok_visa',
coupon='cu_discount', idempotency_key='foo')
self.requestor_mock.request.assert_called_with(
'post',
'/v1/customers',
{
'coupon': 'cu_discount',
'description': 'foo bar',
'source': 'tok_visa'
},
{'Idempotency-Key': 'foo'}
)
def test_unset_description(self):
customer = stripe.Customer(id="cus_unset_desc")
customer.description = "Hey"
customer.save(idempotency_key='foo')
self.requestor_mock.request.assert_called_with(
'post',
'/v1/customers/cus_unset_desc',
{
'description': 'Hey',
},
{'Idempotency-Key': 'foo'}
)
def test_del_coupon(self):
customer = stripe.Customer(id="cus_unset_desc")
customer.description = "bar"
customer.coupon = "foo"
del customer.coupon
customer.save()
self.requestor_mock.request.assert_called_with(
'post',
'/v1/customers/cus_unset_desc',
{
'description': 'bar'
},
None
)
def test_cannot_set_empty_string(self):
customer = stripe.Customer()
self.assertRaises(ValueError, setattr, customer, "description", "")
def test_customer_add_card(self):
customer = stripe.Customer.construct_from({
'id': 'cus_add_card',
'sources': {
'object': 'list',
'url': '/v1/customers/cus_add_card/sources',
},
}, 'api_key')
customer.sources.create(source='tok_visa', idempotency_key='foo')
self.requestor_mock.request.assert_called_with(
'post',
'/v1/customers/cus_add_card/sources',
{
'source': 'tok_visa',
},
{'Idempotency-Key': 'foo'}
)
def test_customer_add_source(self):
customer = stripe.Customer.construct_from({
'id': 'cus_add_source',
'sources': {
'object': 'list',
'url': '/v1/customers/cus_add_source/sources',
},
}, 'api_key')
customer.sources.create(source='tok_visa', idempotency_key='foo')
self.requestor_mock.request.assert_called_with(
'post',
'/v1/customers/cus_add_source/sources',
{
'source': 'tok_visa',
},
{'Idempotency-Key': 'foo'}
)
def test_customer_update_card(self):
card = stripe.Card.construct_from({
'customer': 'cus_update_card',
'id': 'ca_update_card',
}, 'api_key')
card.name = 'The Best'
card.save(idempotency_key='foo')
self.requestor_mock.request.assert_called_with(
'post',
'/v1/customers/cus_update_card/sources/ca_update_card',
{
'name': 'The Best',
},
{'Idempotency-Key': 'foo'}
)
def test_customer_update_source(self):
source = stripe.BitcoinReceiver.construct_from({
'customer': 'cus_update_source',
'id': 'btcrcv_update_source',
}, 'api_key')
source.name = 'The Best'
source.save(idempotency_key='foo')
self.requestor_mock.request.assert_called_with(
'post',
'/v1/customers/cus_update_source/sources/btcrcv_update_source',
{
'name': 'The Best',
},
{'Idempotency-Key': 'foo'}
)
def test_customer_update_alipay_account(self):
aa = stripe.AlipayAccount.construct_from({
'customer': 'cus_update_alipay',
'id': 'ali_update',
}, 'api_key')
aa.metadata = {'name': 'The Best'}
aa.save(idempotency_key='foo')
self.requestor_mock.request.assert_called_with(
'post',
'/v1/customers/cus_update_alipay/sources/ali_update',
{
'metadata': {'name': 'The Best'},
},
{'Idempotency-Key': 'foo'}
)
def test_customer_delete_card(self):
card = stripe.Card.construct_from({
'customer': 'cus_delete_card',
'id': 'ca_delete_card',
}, 'api_key')
card.delete()
self.requestor_mock.request.assert_called_with(
'delete',
'/v1/customers/cus_delete_card/sources/ca_delete_card',
{},
None
)
def test_customer_delete_source(self):
source = stripe.BitcoinReceiver.construct_from({
'customer': 'cus_delete_source',
'id': 'btcrcv_delete_source',
}, 'api_key')
source.delete()
self.requestor_mock.request.assert_called_with(
'delete',
'/v1/customers/cus_delete_source/sources/btcrcv_delete_source',
{},
None
)
def test_customer_delete_alipay_account(self):
aa = stripe.AlipayAccount.construct_from({
'customer': 'cus_delete_alipay',
'id': 'ali_delete',
}, 'api_key')
aa.delete()
self.requestor_mock.request.assert_called_with(
'delete',
'/v1/customers/cus_delete_alipay/sources/ali_delete',
{},
None
)
def test_customer_delete_bank_account(self):
source = stripe.BankAccount.construct_from({
'customer': 'cus_delete_source',
'id': 'ba_delete_source',
}, 'api_key')
source.delete()
self.requestor_mock.request.assert_called_with(
'delete',
'/v1/customers/cus_delete_source/sources/ba_delete_source',
{},
None
)
def test_customer_verify_bank_account(self):
source = stripe.BankAccount.construct_from({
'customer': 'cus_verify_source',
'id': 'ba_verify_source',
}, 'api_key')
source.verify()
self.requestor_mock.request.assert_called_with(
'post',
'/v1/customers/cus_verify_source/sources/ba_verify_source/verify',
{},
None
)
class CustomerPlanTest(StripeResourceTest):
    """Tests for plan/subscription operations hanging off a Customer.

    Each test drives a resource method and asserts the exact HTTP verb,
    URL, params dict, and headers handed to the mocked API requestor.
    """

    def test_create_customer(self):
        """Customer.create with plan + source POSTs both fields to /v1/customers."""
        stripe.Customer.create(plan=DUMMY_PLAN['id'], source='tok_visa')
        self.requestor_mock.request.assert_called_with(
            'post',
            '/v1/customers',
            {
                'source': 'tok_visa',
                'plan': DUMMY_PLAN['id'],
            },
            None
        )

    def test_legacy_update_subscription(self):
        """Deprecated update_subscription POSTs to the singleton subscription URL.

        Also checks that exactly one DeprecationWarning is emitted and that
        the idempotency key is forwarded as an Idempotency-Key header.
        """
        with warnings.catch_warnings(record=True) as w:
            customer = stripe.Customer(id="cus_legacy_sub_update")
            customer.update_subscription(idempotency_key='foo',
                                         plan=DUMMY_PLAN['id'])
            self.requestor_mock.request.assert_called_with(
                'post',
                '/v1/customers/cus_legacy_sub_update/subscription',
                {
                    'plan': DUMMY_PLAN['id'],
                },
                {'Idempotency-Key': 'foo'}
            )
            self.assertEqual(len(w), 1)
            self.assertTrue(issubclass(w[-1].category, DeprecationWarning))

    def test_legacy_delete_subscription(self):
        """Deprecated cancel_subscription DELETEs the singleton subscription URL.

        Also asserts the single DeprecationWarning.
        """
        with warnings.catch_warnings(record=True) as w:
            customer = stripe.Customer(id="cus_legacy_sub_delete")
            customer.cancel_subscription()
            self.requestor_mock.request.assert_called_with(
                'delete',
                '/v1/customers/cus_legacy_sub_delete/subscription',
                {},
                None
            )
            self.assertEqual(len(w), 1)
            self.assertTrue(issubclass(w[-1].category, DeprecationWarning))

    def test_list_customer_subscriptions(self):
        """Listing via the nested collection GETs the list's own url.

        Note the url here deliberately has no leading slash; the assertion
        pins that it is passed through verbatim.
        """
        customer = stripe.Customer.construct_from({
            'id': 'cus_foo',
            'subscriptions': {
                'object': 'list',
                'url': 'v1/customers/cus_foo/subscriptions',
            }
        }, 'api_key')
        customer.subscriptions.all()
        self.requestor_mock.request.assert_called_with(
            'get',
            'v1/customers/cus_foo/subscriptions',
            {},
            None
        )

    def test_create_customer_subscription(self):
        """Creating via the nested collection POSTs plan and coupon to its url."""
        customer = stripe.Customer.construct_from({
            'id': 'cus_sub_create',
            'subscriptions': {
                'object': 'list',
                'url': '/v1/customers/cus_sub_create/subscriptions',
            }
        }, 'api_key')
        customer.subscriptions.create(plan=DUMMY_PLAN['id'], coupon='foo')
        self.requestor_mock.request.assert_called_with(
            'post',
            '/v1/customers/cus_sub_create/subscriptions',
            {
                'plan': DUMMY_PLAN['id'],
                'coupon': 'foo',
            },
            None
        )

    def test_retrieve_customer_subscription(self):
        """Retrieving via the nested collection GETs <list url>/<id>."""
        customer = stripe.Customer.construct_from({
            'id': 'cus_foo',
            'subscriptions': {
                'object': 'list',
                'url': '/v1/customers/cus_foo/subscriptions',
            }
        }, 'api_key')
        customer.subscriptions.retrieve('sub_cus')
        self.requestor_mock.request.assert_called_with(
            'get',
            '/v1/customers/cus_foo/subscriptions/sub_cus',
            {},
            None
        )

    def test_update_customer_subscription(self):
        """Saving a mutated Subscription POSTs only the changed fields."""
        subscription = stripe.Subscription.construct_from({
            'id': "sub_update",
            'customer': "cus_foo",
        }, 'api_key')
        # trial_end is sent as a unix timestamp 15 days in the future.
        trial_end_dttm = datetime.datetime.now() + datetime.timedelta(days=15)
        trial_end_int = int(time.mktime(trial_end_dttm.timetuple()))
        subscription.trial_end = trial_end_int
        subscription.plan = DUMMY_PLAN['id']
        subscription.save()
        self.requestor_mock.request.assert_called_with(
            'post',
            '/v1/subscriptions/sub_update',
            {
                'plan': DUMMY_PLAN['id'],
                'trial_end': trial_end_int,
            },
            None
        )

    def test_delete_customer_subscription(self):
        """Deleting a Subscription DELETEs the top-level /v1/subscriptions URL."""
        subscription = stripe.Subscription.construct_from({
            'id': "sub_delete",
            'customer': "cus_foo",
        }, 'api_key')
        subscription.delete()
        self.requestor_mock.request.assert_called_with(
            'delete',
            '/v1/subscriptions/sub_delete',
            {},
            None
        )
class CustomerSourcesTests(StripeResourceTest):
    """Tests for the class-level Customer.*_source helpers.

    Every helper must translate into exactly one HTTP call against the
    customer's nested /sources collection.
    """

    # Shared base URL for the fixture customer used by all tests below.
    SOURCES_URL = '/v1/customers/cus_123/sources'

    def test_create_source(self):
        stripe.Customer.create_source('cus_123', source='tok_123')
        self.requestor_mock.request.assert_called_with(
            'post', self.SOURCES_URL, {'source': 'tok_123'}, None)

    def test_retrieve_source(self):
        stripe.Customer.retrieve_source('cus_123', 'ba_123')
        self.requestor_mock.request.assert_called_with(
            'get', self.SOURCES_URL + '/ba_123', {}, None)

    def test_modify_source(self):
        metadata = {'foo': 'bar'}
        stripe.Customer.modify_source('cus_123', 'ba_123', metadata=metadata)
        self.requestor_mock.request.assert_called_with(
            'post', self.SOURCES_URL + '/ba_123', {'metadata': metadata}, None)

    def test_delete_source(self):
        stripe.Customer.delete_source('cus_123', 'ba_123')
        self.requestor_mock.request.assert_called_with(
            'delete', self.SOURCES_URL + '/ba_123', {}, None)

    def test_list_sources(self):
        stripe.Customer.list_sources('cus_123')
        self.requestor_mock.request.assert_called_with(
            'get', self.SOURCES_URL, {}, None)
| 11,811 | 66 | 824 |
776659a3fa672781691b51d2b992f9651ef39dae | 16,293 | py | Python | tools/convert_maven_project.py | piotr-kalanski/Resource-Orchestration-Service-Cloud-Development-Kit | 2a12deea757ac69e69708dd9fd159fba12cfba0e | [
"Apache-2.0"
] | 15 | 2020-11-10T02:00:28.000Z | 2022-02-07T19:28:10.000Z | tools/convert_maven_project.py | piotr-kalanski/Resource-Orchestration-Service-Cloud-Development-Kit | 2a12deea757ac69e69708dd9fd159fba12cfba0e | [
"Apache-2.0"
] | 23 | 2021-02-02T04:37:02.000Z | 2022-03-31T06:41:06.000Z | tools/convert_maven_project.py | piotr-kalanski/Resource-Orchestration-Service-Cloud-Development-Kit | 2a12deea757ac69e69708dd9fd159fba12cfba0e | [
"Apache-2.0"
] | 4 | 2021-01-13T05:48:43.000Z | 2022-03-15T11:26:48.000Z | import argparse, os
from xml.dom.minidom import parse
parser = argparse.ArgumentParser()
parser.add_argument('--pom_file_path', type=str, default=None)
args = parser.parse_args()
pom_file_path = args.pom_file_path
"""
此脚本应用于修改maven pom.xml文件,可以将jsii生成的pom.xml文件转化成可进行maven发布的pom.xml文件
输入: pom_file_path
"""
if __name__ == '__main__':
write_xml()
| 56.770035 | 110 | 0.805622 | import argparse, os
from xml.dom.minidom import parse
parser = argparse.ArgumentParser()
parser.add_argument('--pom_file_path', type=str, default=None)
args = parser.parse_args()
pom_file_path = args.pom_file_path
"""
此脚本应用于修改maven pom.xml文件,可以将jsii生成的pom.xml文件转化成可进行maven发布的pom.xml文件
输入: pom_file_path
"""
def get_xml_root_node():
    """Parse the pom file at the module-level ``pom_file_path``.

    Returns a ``(dom_tree, root_node)`` pair. Prints a message and exits the
    process when the path is missing or does not exist (script-style behavior
    kept from the original).
    """
    if not pom_file_path:
        print('File path is not specified.')
        exit()
    if not os.path.exists(pom_file_path):
        print('File path is not exist.')
        exit()
    document = parse(pom_file_path)
    return document, document.documentElement
def _leaf(dom, tag, text):
    """Return a new <tag> element wrapping a single text node."""
    node = dom.createElement(tag)
    node.appendChild(dom.createTextNode(text))
    return node


def _branch(dom, tag, children):
    """Return a new <tag> element with *children* appended in order."""
    node = dom.createElement(tag)
    for child in children:
        node.appendChild(child)
    return node


def write_xml():
    """Rewrite the pom.xml in place so the project can be released via Maven.

    Adds <repositories>, <distributionManagement>, a resources section and the
    deploy/gpg/resources/nexus-staging/cobertura plugins, patches the existing
    compiler/javadoc plugin configurations and the <properties> section, then
    writes the document back to ``pom_file_path``.
    """
    dom_tree, root_node = get_xml_root_node()

    # <repositories>: point builds at Maven central.
    root_node.appendChild(_branch(dom_tree, "repositories", [
        _branch(dom_tree, "repository", [
            _leaf(dom_tree, "id", "publicCentral"),
            _leaf(dom_tree, "url", "https://repo1.maven.org/maven2/"),
            _leaf(dom_tree, "name", "repo central"),
        ]),
    ]))

    # <distributionManagement>: sonatype staging + snapshot targets.
    # The release <repository> is appended before <snapshotRepository>,
    # matching the original output order.
    root_node.appendChild(_branch(dom_tree, "distributionManagement", [
        _branch(dom_tree, "repository", [
            _leaf(dom_tree, "id", "sonatype-nexus-staging"),
            _leaf(dom_tree, "url",
                  "https://oss.sonatype.org/service/local/staging/deploy/maven2/"),
        ]),
        _branch(dom_tree, "snapshotRepository", [
            _leaf(dom_tree, "id", "sonatype-nexus-snapshots"),
            _leaf(dom_tree, "url",
                  "https://oss.sonatype.org/content/repositories/snapshots"),
        ]),
    ]))

    # <build><resources>: ship .tgz/.txt payloads from src/main/java unfiltered.
    build_node = root_node.getElementsByTagName("build")[0]
    build_node.appendChild(_branch(dom_tree, "resources", [
        _branch(dom_tree, "resource", [
            _leaf(dom_tree, "directory", "src/main/java"),
            _branch(dom_tree, "includes", [
                _leaf(dom_tree, "include", "**/*.tgz"),
                _leaf(dom_tree, "include", "**/*.txt"),
            ]),
            _leaf(dom_tree, "filtering", "false"),
        ]),
    ]))

    # <plugins>: deploy, gpg signing, resources, nexus staging, cobertura.
    plugins_node = root_node.getElementsByTagName("plugins")[0]
    plugins_node.appendChild(_branch(dom_tree, "plugin", [
        _leaf(dom_tree, "artifactId", "maven-deploy-plugin"),
        _leaf(dom_tree, "version", "2.8.1"),
    ]))
    plugins_node.appendChild(_branch(dom_tree, "plugin", [
        _leaf(dom_tree, "groupId", "org.apache.maven.plugins"),
        _leaf(dom_tree, "artifactId", "maven-gpg-plugin"),
        _leaf(dom_tree, "version", "1.6"),
        _branch(dom_tree, "executions", [
            _branch(dom_tree, "execution", [
                _leaf(dom_tree, "id", "sign-artifacts"),
                _leaf(dom_tree, "phase", "verify"),
                _branch(dom_tree, "goals", [
                    _leaf(dom_tree, "goal", "sign"),
                ]),
            ]),
        ]),
    ]))
    plugins_node.appendChild(_branch(dom_tree, "plugin", [
        _leaf(dom_tree, "groupId", "org.apache.maven.plugins"),
        _leaf(dom_tree, "artifactId", "maven-resources-plugin"),
        _leaf(dom_tree, "version", "3.1.0"),
        _branch(dom_tree, "configuration", [
            _leaf(dom_tree, "encoding", "UTF-8"),
        ]),
    ]))
    plugins_node.appendChild(_branch(dom_tree, "plugin", [
        _leaf(dom_tree, "groupId", "org.sonatype.plugins"),
        _leaf(dom_tree, "artifactId", "nexus-staging-maven-plugin"),
        _leaf(dom_tree, "version", "1.6.3"),
        _leaf(dom_tree, "extensions", "true"),
        _branch(dom_tree, "configuration", [
            _leaf(dom_tree, "serverId", "sonatype-nexus-staging"),
            _leaf(dom_tree, "nexusUrl", "https://oss.sonatype.org/"),
            _leaf(dom_tree, "autoReleaseAfterClose", "true"),
        ]),
    ]))
    plugins_node.appendChild(_branch(dom_tree, "plugin", [
        _leaf(dom_tree, "groupId", "org.codehaus.mojo"),
        _leaf(dom_tree, "artifactId", "cobertura-maven-plugin"),
        _leaf(dom_tree, "version", "2.7"),
        _branch(dom_tree, "configuration", [
            _branch(dom_tree, "formats", [
                _leaf(dom_tree, "format", "html"),
                _leaf(dom_tree, "format", "xml"),
            ]),
        ]),
    ]))

    # Patch existing compiler/javadoc plugin configurations.
    for artifact_id_node in root_node.getElementsByTagName("artifactId"):
        plugin_node = artifact_id_node.parentNode
        artifact_id = artifact_id_node.childNodes[0].data
        if artifact_id == "maven-compiler-plugin":
            for child in plugin_node.childNodes:
                if child.nodeName == "configuration":
                    child.appendChild(_leaf(dom_tree, "encoding", "UTF-8"))
        elif artifact_id == "maven-javadoc-plugin":
            # BUGFIX: iterate over a snapshot. The original removed
            # <executions> from the live childNodes NodeList while iterating
            # it, which can silently skip the following sibling node.
            for child in list(plugin_node.childNodes):
                if child.nodeName == "configuration":
                    child.appendChild(_leaf(dom_tree, "doclint", "none"))
                    child.appendChild(_leaf(dom_tree, "encoding", "UTF-8"))
                elif child.nodeName == "executions":
                    plugin_node.removeChild(child)

    # <properties>: pin the build to Java 8.
    for property_node in root_node.getElementsByTagName("properties"):
        property_node.appendChild(_leaf(dom_tree, "java.version", "1.8"))
        property_node.appendChild(_leaf(dom_tree, "maven.compiler.source", "1.8"))
        property_node.appendChild(_leaf(dom_tree, "maven.compiler.target", "1.8"))

    with open(pom_file_path, 'w') as f:
        dom_tree.writexml(f, addindent='\t', newl='\n', encoding='utf-8')
if __name__ == '__main__':
write_xml()
| 15,891 | 0 | 46 |
feaebe9b14b37510e0730c06e2406a578abc8a0f | 2,415 | py | Python | gdsfactory/routing/utils.py | simbilod/gdsfactory | 4d76db32674c3edb4d16260e3177ee29ef9ce11d | [
"MIT"
] | null | null | null | gdsfactory/routing/utils.py | simbilod/gdsfactory | 4d76db32674c3edb4d16260e3177ee29ef9ce11d | [
"MIT"
] | null | null | null | gdsfactory/routing/utils.py | simbilod/gdsfactory | 4d76db32674c3edb4d16260e3177ee29ef9ce11d | [
"MIT"
] | null | null | null | from typing import Dict, List, Union
from numpy import float64
from gdsfactory.port import Port
def flip(port: Port) -> Port:
    """Return a copy of *port* with flipped orientation.

    Delegates to ``Port.flip``; the input port itself is not modified
    (presumably flip == rotate the orientation by 180 degrees -- confirm
    against ``Port.flip``).
    """
    return port.flip()
def direction_ports_from_list_ports(optical_ports: "List[Port]") -> "Dict[str, List[Port]]":
    """Bucket ports by compass direction (E/N/W/S) from their orientation.

    Side effect: each port's ``orientation`` is normalized in place to the
    [0, 360) range. E/W buckets come back sorted by y, N/S buckets by x.
    """
    buckets: "Dict[str, List[Port]]" = {"E": [], "N": [], "W": [], "S": []}
    for port in optical_ports:
        port.orientation = (port.orientation + 360.0) % 360
        angle = port.orientation
        if angle <= 45.0 or angle >= 315:
            buckets["E"].append(port)
        elif 45.0 <= angle <= 135.0:
            buckets["N"].append(port)
        elif 135.0 <= angle <= 225.0:
            buckets["W"].append(port)
        else:
            buckets["S"].append(port)
    for direction, ports in buckets.items():
        # Sort along the facet: east/west walls by y, north/south walls by x.
        if direction in ("E", "W"):
            ports.sort(key=lambda p: p.y)
        else:
            ports.sort(key=lambda p: p.x)
    return buckets
def check_ports_have_equal_spacing(list_ports: "List[Port]") -> float64:
    """Return the constant center-to-center separation between the ports.

    Ports facing E/W (orientation 0 or 180) are spaced along y; any other
    orientation is spaced along x. Separations are compared after rounding
    to 5 decimal places.

    Raises:
        ValueError: if *list_ports* is not a non-empty list, or the
            separations are not all equal.
    """
    if not isinstance(list_ports, list):
        raise ValueError(f"list_ports should be a list of ports, got {list_ports}")
    if not list_ports:
        raise ValueError("list_ports should not be empty")

    orientation = get_list_ports_angle(list_ports)
    if orientation in [0, 180]:
        xys = [p.y for p in list_ports]
    else:
        xys = [p.x for p in list_ports]

    seps = [round(abs(c2 - c1), 5) for c1, c2 in zip(xys[1:], xys[:-1])]
    different_seps = set(seps)
    if len(different_seps) > 1:
        # BUGFIX: the original string lacked the f-prefix, so the literal
        # "{different_seps}" placeholder was raised instead of the values.
        raise ValueError(f"Ports should have the same separation. Got {different_seps}")
    # NOTE(review): with a single port, seps is empty and pop() raises
    # KeyError -- pre-existing behavior, kept as-is.
    return different_seps.pop()
def get_list_ports_angle(list_ports: "List[Port]") -> "Union[float64, int, None]":
    """Return the shared orientation (degrees) of *list_ports*.

    Returns None for an empty list (the original annotation omitted None,
    which this fixes).

    Raises:
        ValueError: if the ports do not all share the same orientation.
    """
    if not list_ports:
        return None
    if len({p.orientation for p in list_ports}) > 1:
        raise ValueError(f"All port angles should be the same. Got {list_ports}")
    return list_ports[0].orientation
if __name__ == "__main__":
import gdsfactory as gf
c = gf.components.mmi1x2()
d = direction_ports_from_list_ports(c.get_ports_list())
c.show()
| 33.082192 | 88 | 0.643064 | from typing import Dict, List, Union
from numpy import float64
from gdsfactory.port import Port
def flip(port: Port) -> Port:
    """Return a copy of *port* with flipped orientation.

    Delegates to ``Port.flip``; the input port itself is not modified
    (presumably flip == rotate the orientation by 180 degrees -- confirm
    against ``Port.flip``).
    """
    return port.flip()
def direction_ports_from_list_ports(optical_ports: List[Port]) -> Dict[str, List[Port]]:
    """Group ports into E/N/W/S buckets by their orientation angle.

    Side effect: each port's ``orientation`` is normalized in place to the
    [0, 360) range. E/W buckets are sorted by y, N/S buckets by x.
    """
    direction_ports = {x: [] for x in ["E", "N", "W", "S"]}
    for p in optical_ports:
        # Normalize the angle in place so the range checks below hold.
        p.orientation = (p.orientation + 360.0) % 360
        if p.orientation <= 45.0 or p.orientation >= 315:
            direction_ports["E"].append(p)
        elif p.orientation <= 135.0 and p.orientation >= 45.0:
            direction_ports["N"].append(p)
        elif p.orientation <= 225.0 and p.orientation >= 135.0:
            direction_ports["W"].append(p)
        else:
            direction_ports["S"].append(p)
    for direction, list_ports in list(direction_ports.items()):
        # Sort along the facet: east/west walls by y, north/south walls by x.
        if direction in ["E", "W"]:
            list_ports.sort(key=lambda p: p.y)
        if direction in ["S", "N"]:
            list_ports.sort(key=lambda p: p.x)
    return direction_ports
def check_ports_have_equal_spacing(list_ports: "List[Port]") -> float64:
    """Return the constant center-to-center separation between the ports.

    Ports facing E/W (orientation 0 or 180) are spaced along y; any other
    orientation is spaced along x. Separations are compared after rounding
    to 5 decimal places.

    Raises:
        ValueError: if *list_ports* is not a non-empty list, or the
            separations are not all equal.
    """
    if not isinstance(list_ports, list):
        raise ValueError(f"list_ports should be a list of ports, got {list_ports}")
    if not list_ports:
        raise ValueError("list_ports should not be empty")

    orientation = get_list_ports_angle(list_ports)
    if orientation in [0, 180]:
        xys = [p.y for p in list_ports]
    else:
        xys = [p.x for p in list_ports]

    seps = [round(abs(c2 - c1), 5) for c1, c2 in zip(xys[1:], xys[:-1])]
    different_seps = set(seps)
    if len(different_seps) > 1:
        # BUGFIX: the original string lacked the f-prefix, so the literal
        # "{different_seps}" placeholder was raised instead of the values.
        raise ValueError(f"Ports should have the same separation. Got {different_seps}")
    # NOTE(review): with a single port, seps is empty and pop() raises
    # KeyError -- pre-existing behavior, kept as-is.
    return different_seps.pop()
def get_list_ports_angle(list_ports: "List[Port]") -> "Union[float64, int, None]":
    """Return the shared orientation (degrees) of *list_ports*.

    Returns None for an empty list (the original annotation omitted None,
    which this fixes).

    Raises:
        ValueError: if the ports do not all share the same orientation.
    """
    if not list_ports:
        return None
    if len({p.orientation for p in list_ports}) > 1:
        raise ValueError(f"All port angles should be the same. Got {list_ports}")
    return list_ports[0].orientation
if __name__ == "__main__":
import gdsfactory as gf
c = gf.components.mmi1x2()
d = direction_ports_from_list_ports(c.get_ports_list())
c.show()
| 0 | 0 | 0 |
f6370b792886e10db4fb910a3841bee28d30f3bc | 622 | py | Python | authentications.py | Zaddyp/elijahzbot | 906034b9de5c3dfdd65d0bce772ea4f916693291 | [
"Apache-2.0"
] | null | null | null | authentications.py | Zaddyp/elijahzbot | 906034b9de5c3dfdd65d0bce772ea4f916693291 | [
"Apache-2.0"
] | null | null | null | authentications.py | Zaddyp/elijahzbot | 906034b9de5c3dfdd65d0bce772ea4f916693291 | [
"Apache-2.0"
] | null | null | null | import tweepy
from config import create_api
# auth = tweepy.OAuthHandler("CONSUMER_KEY", "CONSUMER_SECRET")
# auth.set_access_token("ACCESS_TOKEN", "ACCESS_TOKEN_SECRET")
| 32.736842 | 85 | 0.699357 | import tweepy
from config import create_api
# auth = tweepy.OAuthHandler("CONSUMER_KEY", "CONSUMER_SECRET")
# auth.set_access_token("ACCESS_TOKEN", "ACCESS_TOKEN_SECRET")
def authentication():
    """Build an authenticated Tweepy API client and return it.

    The original version constructed the client but never returned it (the
    function returned None), making it useless to callers; it now returns
    the API object. Existing callers that ignored the return value are
    unaffected.
    """
    # SECURITY: OAuth credentials are hard-coded in source. They should be
    # moved to environment variables / a secrets store, and these tokens
    # revoked, since they are exposed in version control.
    auth = tweepy.OAuthHandler("eLz5PW2HhVvfeJ2hmKjlZmP6g",
                               "BDGnsOitwOcZJWdvvSwTr5iI2OKGglinEHT6glhCJrpLbHl1GT")
    auth.set_access_token("1445122551773110273-OhjDrVIbWb4yKcbew3TfkZJLgPJUNt",
                          "RPAzEmtTwiRRxlJfWJJegXbPQkvwH5Q17s8SeUzXhURXz")
    # wait_on_rate_limit makes tweepy sleep through rate-limit windows
    # instead of raising.
    api = tweepy.API(auth, wait_on_rate_limit=True)
    return api
| 416 | 0 | 25 |
73ac25e316ac68a968a3aeba5aa176813b132bbf | 596 | py | Python | trace_cockpit/middleware.py | guettli/django-trace-cockpit | a36eabb2d54d5f0071b0e9b497062186cb66deca | [
"MIT"
] | 1 | 2021-07-25T18:33:22.000Z | 2021-07-25T18:33:22.000Z | trace_cockpit/middleware.py | guettli/stoic-trace-cockpit | a36eabb2d54d5f0071b0e9b497062186cb66deca | [
"MIT"
] | 6 | 2021-09-16T20:11:22.000Z | 2021-09-27T20:20:32.000Z | trace_cockpit/middleware.py | guettli/stoic-trace-cockpit | a36eabb2d54d5f0071b0e9b497062186cb66deca | [
"MIT"
] | null | null | null | from contextlib import ExitStack
from trace_cockpit.models import TraceConfig
| 29.8 | 75 | 0.686242 | from contextlib import ExitStack
from trace_cockpit.models import TraceConfig
class TraceMiddleware:
    """Django middleware that hands matching requests to an active TraceConfig.

    The first active config whose ``do_you_want_to_trace`` accepts the
    request wins; every other request passes straight through to the normal
    response cycle.
    """

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        matching = (
            config
            for config in TraceConfig.objects.filter(is_active=True)
            if config.do_you_want_to_trace(request)
        )
        config = next(matching, None)
        if config is None:
            return self.get_response(request)
        # First match wins. Unfortunately Python does not support
        # several traces at once :-(
        return config.trace_get_response(self.get_response, request)[1]
b90a694ef6a89af6739d139454f0acb7b03bcac6 | 16,216 | py | Python | iconservice/iiss/reward_calc/storage.py | bayeshack2016/icon-service | 36cab484d2e41548d7f2f74526f127ee3a4423fc | [
"Apache-2.0"
] | 52 | 2018-08-24T02:28:43.000Z | 2021-07-06T04:44:22.000Z | iconservice/iiss/reward_calc/storage.py | bayeshack2016/icon-service | 36cab484d2e41548d7f2f74526f127ee3a4423fc | [
"Apache-2.0"
] | 62 | 2018-09-17T06:59:16.000Z | 2021-12-15T06:02:51.000Z | iconservice/iiss/reward_calc/storage.py | bayeshack2016/icon-service | 36cab484d2e41548d7f2f74526f127ee3a4423fc | [
"Apache-2.0"
] | 35 | 2018-09-14T02:42:10.000Z | 2022-02-05T10:34:46.000Z | # -*- coding: utf-8 -*-
# Copyright 2019 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
from collections import namedtuple
from typing import TYPE_CHECKING, Optional, Tuple, List, Set
from iconcommons import Logger
from ..reward_calc.msg_data import Header, TxData, PRepsData, TxType, make_block_produce_info_key
from ...base.exception import DatabaseException, InternalServiceErrorException
from ...database.db import KeyValueDatabase
from ...icon_constant import (
DATA_BYTE_ORDER, Revision, RC_DATA_VERSION_TABLE, RC_DB_VERSION_0,
IISS_LOG_TAG, ROLLBACK_LOG_TAG
)
from ...iiss.reward_calc.data_creator import DataCreator
from ...utils import bytes_to_hex
from ...utils.msgpack_for_db import MsgPackForDB
if TYPE_CHECKING:
from ...base.address import Address
from ...database.wal import IissWAL
from ..reward_calc.msg_data import Data, DelegationInfo
from ...iconscore.icon_score_context import IconScoreContext
RewardCalcDBInfo = namedtuple('RewardCalcDBInfo', ['path', 'block_height'])
class Storage(object):
"""Manages RC DB which Reward Calculator will use to calculate a reward for each address
"""
CURRENT_IISS_DB_NAME = "current_db"
STANDBY_IISS_DB_NAME_PREFIX = "standby_rc_db"
IISS_RC_DB_NAME_PREFIX = "iiss_rc_db"
KEY_FOR_GETTING_LAST_TRANSACTION_INDEX = b'last_transaction_index'
KEY_FOR_CALC_RESPONSE_FROM_RC = b'calc_response_from_rc'
KEY_FOR_VERSION_AND_REVISION = b'version_and_revision'
@property
@classmethod
@classmethod
@classmethod
def close(self):
"""Close the embedded database.
"""
if self._db:
self._db.close()
self._db = None
@staticmethod
# todo: naming
@staticmethod
def replace_db(self, block_height: int) -> 'RewardCalcDBInfo':
"""
1. Rename current_db to standby_db_{block_height}
2. Create a new current_db for the next calculation period
:param block_height: End block height of the current calc period
:return:
"""
# rename current db -> standby db
assert block_height > 0
self._db.close()
standby_db_path: str = self.rename_current_db_to_standby_db(self._path, block_height)
self._db = self.create_current_db(self._path)
return RewardCalcDBInfo(standby_db_path, block_height)
@classmethod
def finalize_iiss_db(cls,
prev_end_bh: int,
current_db: 'KeyValueDatabase',
prev_db_path: str):
"""
Finalize iiss db before sending to reward calculator (i.e. RC). Process is below
1. Move last Block produce data to previous iiss_db which is to be sent to RC
2. db compaction
:param prev_end_bh: end block height of previous term
:param current_db: newly created db
:param prev_db_path: iiss_db path which is to be finalized and sent to RC (must has been closed)
:return:
"""
bp_key: bytes = make_block_produce_info_key(prev_end_bh)
prev_db: 'KeyValueDatabase' = KeyValueDatabase.from_path(prev_db_path)
cls._move_data_from_current_db_to_prev_db(bp_key,
current_db,
prev_db)
prev_db.close()
cls._process_db_compaction(prev_db_path)
@classmethod
@classmethod
def _process_db_compaction(cls, path: str):
"""
There is compatibility issue between C++ levelDB and go levelDB.
To solve it, should make DB being compacted before reading (from RC).
:param path: DB path to compact
:return:
"""
db = KeyValueDatabase.from_path(path)
db.close()
@classmethod
@classmethod
@classmethod
def get_total_elected_prep_delegated_snapshot(self) -> int:
"""
total_elected_prep_delegated_snapshot =
the delegated amount which the elected P-Reps received at the beginning of this term
- the delegated amount which unregistered P-Reps received in this term
This function is only intended for state backward compatibility
and not used any more after revision is set to 7.
"""
unreg_preps: Set['Address'] = set()
db = self._db.get_sub_db(TxData.PREFIX)
for k, v in db.iterator():
data: 'TxData' = TxData.from_bytes(v)
if data.type == TxType.PREP_UNREGISTER:
unreg_preps.add(data.address)
db = self._db.get_sub_db(PRepsData.PREFIX)
preps: Optional[List['DelegationInfo']] = None
for k, v in db.iterator():
data: 'PRepsData' = PRepsData.from_bytes(k, v)
preps = data.prep_list
break
ret = 0
if preps:
for info in preps:
if info.address not in unreg_preps:
ret += info.value
Logger.info(tag=IISS_LOG_TAG,
msg=f"get_total_elected_prep_delegated_snapshot load: {ret}")
return ret
class IissDBNameRefactor(object):
"""Change iiss_db name: remove revision from iiss_db name
"""
_DB_NAME_PREFIX = Storage.IISS_RC_DB_NAME_PREFIX
@classmethod
@classmethod
@classmethod
| 38.794258 | 116 | 0.653182 | # -*- coding: utf-8 -*-
# Copyright 2019 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
from collections import namedtuple
from typing import TYPE_CHECKING, Optional, Tuple, List, Set
from iconcommons import Logger
from ..reward_calc.msg_data import Header, TxData, PRepsData, TxType, make_block_produce_info_key
from ...base.exception import DatabaseException, InternalServiceErrorException
from ...database.db import KeyValueDatabase
from ...icon_constant import (
DATA_BYTE_ORDER, Revision, RC_DATA_VERSION_TABLE, RC_DB_VERSION_0,
IISS_LOG_TAG, ROLLBACK_LOG_TAG
)
from ...iiss.reward_calc.data_creator import DataCreator
from ...utils import bytes_to_hex
from ...utils.msgpack_for_db import MsgPackForDB
if TYPE_CHECKING:
from ...base.address import Address
from ...database.wal import IissWAL
from ..reward_calc.msg_data import Data, DelegationInfo
from ...iconscore.icon_score_context import IconScoreContext
# (path, block_height): location of a standby/iiss reward-calc DB on disk and
# the end block height of the calculation period it covers.
RewardCalcDBInfo = namedtuple('RewardCalcDBInfo', ['path', 'block_height'])
def get_rc_version(revision: int) -> int:
    """Return the RC data version that applies to *revision*.

    Walks downward from *revision* to ``Revision.IISS`` looking for the
    first revision that has an entry in ``RC_DATA_VERSION_TABLE``; falls
    back to ``RC_DB_VERSION_0`` when none is found (including when
    *revision* predates IISS).
    """
    for rev in range(revision, Revision.IISS.value - 1, -1):
        mapped: int = RC_DATA_VERSION_TABLE.get(rev, -1)
        if mapped > -1:
            return mapped
    return RC_DB_VERSION_0
def get_version_and_revision(db: 'KeyValueDatabase') -> Tuple[int, int]:
    """Read the stored (version, revision) pair from *db*.

    Returns ``(-1, -1)`` when the key has never been written.
    """
    raw = db.get(Storage.KEY_FOR_VERSION_AND_REVISION)
    if raw is None:
        return -1, -1
    version, revision = MsgPackForDB.loads(raw)
    return version, revision
class Storage(object):
    """Manages RC DB which Reward Calculator will use to calculate a reward for each address
    """
    # Directory names used under the rc_data_path root.
    CURRENT_IISS_DB_NAME = "current_db"
    STANDBY_IISS_DB_NAME_PREFIX = "standby_rc_db"
    IISS_RC_DB_NAME_PREFIX = "iiss_rc_db"
    # Raw keys stored inside current_db.
    KEY_FOR_GETTING_LAST_TRANSACTION_INDEX = b'last_transaction_index'
    KEY_FOR_CALC_RESPONSE_FROM_RC = b'calc_response_from_rc'
    KEY_FOR_VERSION_AND_REVISION = b'version_and_revision'
    def __init__(self):
        # Root path that contains current_db / standby / iiss dbs.
        self._path: str = ""
        self._db: Optional['KeyValueDatabase'] = None
        # 'None' if open() is not called else 'int'
        self._db_iiss_tx_index: int = -1
    def open(self, context: 'IconScoreContext', path: str):
        """Open current_db under *path* and backfill missing metadata.

        :raises DatabaseException: when *path* does not exist
        """
        revision: int = context.revision
        if not os.path.exists(path):
            raise DatabaseException(f"Invalid IISS DB path: {path}")
        self._path = path
        self._db = self.create_current_db(path)
        self._db_iiss_tx_index = self._load_last_transaction_index()
        Logger.info(tag=IISS_LOG_TAG, msg=f"last_transaction_index on open={self._db_iiss_tx_index}")
        # todo: check side effect of WAL
        self._supplement_db(context, revision)
    def rollback(self, _context: 'IconScoreContext', block_height: int, block_hash: bytes):
        """Re-open current_db after an on-disk rollback.

        Must be called while the db is closed; raises otherwise.
        """
        Logger.info(tag=ROLLBACK_LOG_TAG,
                    msg=f"rollback() start: block_height={block_height} block_hash={bytes_to_hex(block_hash)}")
        if self._db is not None:
            raise InternalServiceErrorException("current_db has been opened on rollback")
        if not os.path.exists(self._path):
            raise DatabaseException(f"Invalid IISS DB path: {self._path}")
        self._db = self.create_current_db(self._path)
        self._db_iiss_tx_index = self._load_last_transaction_index()
        Logger.info(tag=IISS_LOG_TAG, msg=f"last_transaction_index on open={self._db_iiss_tx_index}")
        Logger.info(tag=ROLLBACK_LOG_TAG, msg="rollback() end")
    @property
    def key_value_db(self) -> 'KeyValueDatabase':
        # Underlying current_db handle (None when closed).
        return self._db
    def _supplement_db(self, context: 'IconScoreContext', revision: int):
        # Supplement db which is made by previous icon service version (as there is no version, revision and header)
        if revision < Revision.IISS.value:
            return
        rc_version, _ = self.get_version_and_revision()
        if rc_version == -1:
            self._put_version_and_revision(revision)
        # On the first change point.
        # We have to put Header for RC
        if self._db.get(Header.PREFIX) is None:
            rc_version, rc_revision = self.get_version_and_revision()
            end_block_height: int = context.storage.iiss.get_end_block_height_of_calc(context)
            calc_period: int = context.storage.iiss.get_calc_period(context)
            prev_end_calc_block_height: int = end_block_height - calc_period
            # if this point is new calc start point ...
            # we have to set block height in header data.
            if prev_end_calc_block_height == context.block.height:
                end_block_height: int = context.block.height
            header: 'Header' = DataCreator.create_header(rc_version, end_block_height, rc_revision)
            self.put_data_directly(header)
            Logger.debug(tag=IISS_LOG_TAG, msg=f"No header data. Put Header to db on open: {str(header)}")
    @classmethod
    def get_standby_rc_db_name(cls, block_height: int) -> str:
        """Return the standby db directory name for *block_height*."""
        return cls._get_db_name(cls.STANDBY_IISS_DB_NAME_PREFIX, block_height)
    @classmethod
    def get_iiss_rc_db_name(cls, block_height: int) -> str:
        """Return the finalized iiss db directory name for *block_height*."""
        return cls._get_db_name(cls.IISS_RC_DB_NAME_PREFIX, block_height)
    @classmethod
    def _get_db_name(cls, prefix: str, block_height: int) -> str:
        # Naming scheme shared by standby and iiss db directories.
        return f"{prefix}_{block_height}"
    def put_data_directly(self, iiss_data: 'Data', tx_index: Optional[int] = None):
        """Write one IISS data record, bypassing the batch/WAL path.

        TxData keys embed *tx_index*; all other record types build their
        own key without it.
        """
        if isinstance(iiss_data, TxData):
            key: bytes = iiss_data.make_key(tx_index)
            value: bytes = iiss_data.make_value()
        else:
            key: bytes = iiss_data.make_key()
            value: bytes = iiss_data.make_value()
        self._db.put(key, value)
    def close(self):
        """Close the embedded database.
        """
        if self._db:
            self._db.close()
            self._db = None
    def put_calc_response_from_rc(self, iscore: int, block_height: int, state_hash: bytes):
        """Persist the CALCULATE response received from the Reward Calculator."""
        version = 1
        response_from_rc: bytes = MsgPackForDB.dumps([version, iscore, block_height, state_hash])
        self._db.put(self.KEY_FOR_CALC_RESPONSE_FROM_RC, response_from_rc)
    def get_calc_response_from_rc(self) -> Tuple[int, int, Optional[bytes]]:
        """Return (iscore, block_height, state_hash) from the stored RC response.

        Returns ``(-1, -1, None)`` when no response has been stored.
        Version 0 records predate state_hash, so it is None for them.
        """
        response_from_rc: Optional[bytes] = self._db.get(self.KEY_FOR_CALC_RESPONSE_FROM_RC)
        if response_from_rc is None:
            return -1, -1, None
        response_from_rc: list = MsgPackForDB.loads(response_from_rc)
        version = response_from_rc[0]
        if version == 0:
            iscore = response_from_rc[1]
            block_height = response_from_rc[2]
            state_hash = None
        elif version == 1:
            iscore = response_from_rc[1]
            block_height = response_from_rc[2]
            state_hash = response_from_rc[3]
        else:
            raise DatabaseException(f"get_calc_response_from_rc invalid version: {version}")
        return iscore, block_height, state_hash
    def get_tx_index(self, start_calc: bool) -> int:
        """Return the last IISS tx index, or -1 at the start of a calc period."""
        tx_index: int = -1
        if start_calc:
            return tx_index
        else:
            return self._db_iiss_tx_index
    @staticmethod
    def put(batch: list, iiss_data: 'Data'):
        """Append *iiss_data* to an in-memory batch (written later by commit)."""
        Logger.debug(tag=IISS_LOG_TAG, msg=f"put data: {str(iiss_data)}")
        batch.append(iiss_data)
    def commit(self, iiss_wal: 'IissWAL'):
        """Flush a write-ahead log into current_db and track its final tx index."""
        self._db.write_batch(iiss_wal)
        self._db_iiss_tx_index = iiss_wal.final_tx_index
        Logger.info(tag=IISS_LOG_TAG, msg=f"final_tx_index={iiss_wal.final_tx_index}")
    # todo: naming
    def _put_version_and_revision(self, revision: int):
        # Store the (rc_version, revision) pair derived from *revision*.
        version: int = get_rc_version(revision)
        version_and_revision: bytes = MsgPackForDB.dumps([version, revision])
        self._db.put(self.KEY_FOR_VERSION_AND_REVISION, version_and_revision)
    def get_version_and_revision(self) -> Tuple[int, int]:
        """Return the stored (version, revision) pair, or (-1, -1) if absent."""
        return get_version_and_revision(self._db)
    def _load_last_transaction_index(self) -> int:
        # -1 means no IISS transaction has been recorded yet.
        encoded_last_index: Optional[bytes] = self._db.get(self.KEY_FOR_GETTING_LAST_TRANSACTION_INDEX)
        if encoded_last_index is None:
            return -1
        else:
            return int.from_bytes(encoded_last_index, DATA_BYTE_ORDER)
    @staticmethod
    def _rename_db(old_db_path: str, new_db_path: str):
        # Move the whole db directory; refuses to overwrite an existing target.
        if os.path.exists(old_db_path) and not os.path.exists(new_db_path):
            shutil.move(old_db_path, new_db_path)
            Logger.info(tag=IISS_LOG_TAG, msg=f"Rename db: {old_db_path} -> {new_db_path}")
        else:
            raise DatabaseException("Cannot create IISS DB because of invalid path. Check both IISS "
                                    "current DB path and IISS DB path")
    def replace_db(self, block_height: int) -> 'RewardCalcDBInfo':
        """
        1. Rename current_db to standby_db_{block_height}
        2. Create a new current_db for the next calculation period
        :param block_height: End block height of the current calc period
        :return:
        """
        # rename current db -> standby db
        assert block_height > 0
        self._db.close()
        standby_db_path: str = self.rename_current_db_to_standby_db(self._path, block_height)
        self._db = self.create_current_db(self._path)
        return RewardCalcDBInfo(standby_db_path, block_height)
    @classmethod
    def finalize_iiss_db(cls,
                         prev_end_bh: int,
                         current_db: 'KeyValueDatabase',
                         prev_db_path: str):
        """
        Finalize iiss db before sending to reward calculator (i.e. RC). Process is below
        1. Move last Block produce data to previous iiss_db which is to be sent to RC
        2. db compaction
        :param prev_end_bh: end block height of previous term
        :param current_db: newly created db
        :param prev_db_path: iiss_db path which is to be finalized and sent to RC (must has been closed)
        :return:
        """
        bp_key: bytes = make_block_produce_info_key(prev_end_bh)
        prev_db: 'KeyValueDatabase' = KeyValueDatabase.from_path(prev_db_path)
        cls._move_data_from_current_db_to_prev_db(bp_key,
                                                  current_db,
                                                  prev_db)
        prev_db.close()
        cls._process_db_compaction(prev_db_path)
    @classmethod
    def _move_data_from_current_db_to_prev_db(cls,
                                              key: bytes,
                                              current_db: 'KeyValueDatabase',
                                              prev_db: 'KeyValueDatabase'):
        # Move a single record; no-op when the key is absent from current_db.
        value: Optional[bytes] = current_db.get(key)
        if value is None:
            return
        current_db.delete(key)
        prev_db.put(key, value)
    @classmethod
    def _process_db_compaction(cls, path: str):
        """
        There is compatibility issue between C++ levelDB and go levelDB.
        To solve it, should make DB being compacted before reading (from RC).
        :param path: DB path to compact
        :return:
        """
        # Opening and closing the db is what triggers the compaction —
        # presumably done by KeyValueDatabase on open; TODO confirm.
        db = KeyValueDatabase.from_path(path)
        db.close()
    @classmethod
    def create_current_db(cls, rc_data_path: str) -> 'KeyValueDatabase':
        """Open (creating if needed) current_db under *rc_data_path*."""
        current_db_path = os.path.join(rc_data_path, cls.CURRENT_IISS_DB_NAME)
        Logger.info(tag=IISS_LOG_TAG, msg=f"Create new current_db")
        return KeyValueDatabase.from_path(current_db_path, create_if_missing=True)
    @classmethod
    def rename_current_db_to_standby_db(cls, rc_data_path: str, block_height: int) -> str:
        """Rename current_db to its standby name and return the new path."""
        current_db_path: str = os.path.join(rc_data_path, cls.CURRENT_IISS_DB_NAME)
        standby_db_name: str = cls.get_standby_rc_db_name(block_height)
        standby_db_path: str = os.path.join(rc_data_path, standby_db_name)
        cls._rename_db(current_db_path, standby_db_path)
        return standby_db_path
    @classmethod
    def rename_standby_db_to_iiss_db(cls, standby_db_path: str) -> str:
        """Rename a standby db to its iiss name and return the new path."""
        # After change the db name, reward calc manage this db (icon service does not have a authority)
        iiss_db_path: str = cls.IISS_RC_DB_NAME_PREFIX.\
            join(standby_db_path.rsplit(cls.STANDBY_IISS_DB_NAME_PREFIX, 1))
        cls._rename_db(standby_db_path, iiss_db_path)
        return iiss_db_path
    def get_total_elected_prep_delegated_snapshot(self) -> int:
        """Return the snapshot of total delegation to still-registered elected P-Reps.

        total_elected_prep_delegated_snapshot =
        the delegated amount which the elected P-Reps received at the beginning of this term
        - the delegated amount which unregistered P-Reps received in this term
        This function is only intended for state backward compatibility
        and not used any more after revision is set to 7.
        """
        # Addresses of P-Reps that sent an unregister TX during this term.
        unreg_preps: Set['Address'] = set()
        db = self._db.get_sub_db(TxData.PREFIX)
        for k, v in db.iterator():
            data: 'TxData' = TxData.from_bytes(v)
            if data.type == TxType.PREP_UNREGISTER:
                unreg_preps.add(data.address)
        # Only the first PRepsData record (the term-start snapshot) is used.
        db = self._db.get_sub_db(PRepsData.PREFIX)
        preps: Optional[List['DelegationInfo']] = None
        for k, v in db.iterator():
            data: 'PRepsData' = PRepsData.from_bytes(k, v)
            preps = data.prep_list
            break
        ret = 0
        if preps:
            for info in preps:
                if info.address not in unreg_preps:
                    ret += info.value
        Logger.info(tag=IISS_LOG_TAG,
                    msg=f"get_total_elected_prep_delegated_snapshot load: {ret}")
        return ret
class IissDBNameRefactor(object):
    """Change iiss_db name: remove revision from iiss_db name
    """
    _DB_NAME_PREFIX = Storage.IISS_RC_DB_NAME_PREFIX
    @classmethod
    def run(cls, rc_data_path: str) -> int:
        """Strip the trailing revision from every iiss_rc_db_* directory name.

        :param rc_data_path: directory containing the iiss_rc_db_* dirs
        :return: number of directories renamed
        """
        renamed = 0
        with os.scandir(rc_data_path) as entries:
            for entry in entries:
                # Only iiss_rc_db_* directories are candidates.
                if not entry.is_dir() or not entry.name.startswith(cls._DB_NAME_PREFIX):
                    continue
                stripped_name = cls._get_db_name_without_revision(entry.name)
                if not stripped_name:
                    # A name without a revision suffix means the refactoring
                    # has already run; stop scanning.
                    Logger.info(
                        tag=IISS_LOG_TAG,
                        msg=f"Refactoring iiss_db name has been already done: old={entry.name} "
                        f"rc_data_path={rc_data_path}")
                    break
                cls._change_db_name(rc_data_path, entry.name, stripped_name)
                renamed += 1
        return renamed
    @classmethod
    def _change_db_name(cls, rc_data_path: str, old_name: str, new_name: str):
        """Rename one db directory, logging success or failure."""
        if old_name == new_name:
            return
        src = os.path.join(rc_data_path, old_name)
        dst = os.path.join(rc_data_path, new_name)
        try:
            shutil.move(src, dst)
            Logger.info(tag=IISS_LOG_TAG, msg=f"Renaming iiss_db_name succeeded: old={old_name} new={new_name}")
        except BaseException as e:
            Logger.error(tag=IISS_LOG_TAG,
                         msg=f"Failed to rename iiss_db_name: old={old_name} new={new_name} "
                             f"path={rc_data_path} exception={str(e)}")
    @classmethod
    def _get_db_name_without_revision(cls, name: str) -> Optional[str]:
        """Return '<prefix>_<block_height>' or None when there is no revision."""
        # Everything after '<prefix>_' is either '<block_height>' or
        # '<block_height>_<revision>'.
        parts: List[str] = name[len(cls._DB_NAME_PREFIX) + 1:].split("_")
        if len(parts) < 2:
            # No revision part -> nothing to rename.
            return None
        return f"{cls._DB_NAME_PREFIX}_{parts[0]}"
| 9,625 | 0 | 707 |
1de69c5840e9309149c578942c542103584de9f2 | 675 | py | Python | views/views.py | shorethornhill/policygeek | 2e256d8f6733c0783220666508fb1f67ad363f89 | [
"MIT"
] | null | null | null | views/views.py | shorethornhill/policygeek | 2e256d8f6733c0783220666508fb1f67ad363f89 | [
"MIT"
] | 49 | 2020-01-30T04:59:09.000Z | 2021-07-28T04:39:10.000Z | views/views.py | shorethornhill/policygeek | 2e256d8f6733c0783220666508fb1f67ad363f89 | [
"MIT"
] | null | null | null | import django_version
import authorized
import handlers
| 25.961538 | 79 | 0.534815 | import django_version
import authorized
import handlers
class App(handlers.BaseRequestHandler):
    """Page handler that renders the single-page application shell."""
    @authorized.role()
    def get(self, *args, **kwargs):
        """Render index.html with bootstrap data for the client-side app.

        NOTE(review): ``kwargs['d']`` is the template-context dict —
        presumably injected by the @authorized.role() decorator; confirm
        against the decorator implementation.
        """
        # Imported lazily so the secrets module is only needed at request time.
        from settings.secrets import G_MAPS_API_KEY
        # gmods = {
        #     "modules": [
        #     ]
        # }
        d = kwargs.get('d')
        d['constants'] = {
        }
        # Initial client-side store state; the user is serialized only when
        # someone is logged in.
        d['alt_bootstrap'] = {
            "UserStore": {
                'user': self.user.json(is_self=True) if self.user else None
            }
        }
        # d['gautoload'] = urllib.quote_plus(json.dumps(gmods).replace(' ',''))
        d['gmap_api_key'] = G_MAPS_API_KEY
        self.render_template("index.html", **d)
| 527 | 67 | 23 |
e2c6caaf9cae89b0a75c06d22bfbbf201102dc68 | 1,989 | py | Python | muninn/__init__.py | LeoBreebaart/muninn | ac54adbac59aa35af13554c77080d2881154b6a8 | [
"BSD-3-Clause"
] | null | null | null | muninn/__init__.py | LeoBreebaart/muninn | ac54adbac59aa35af13554c77080d2881154b6a8 | [
"BSD-3-Clause"
] | null | null | null | muninn/__init__.py | LeoBreebaart/muninn | ac54adbac59aa35af13554c77080d2881154b6a8 | [
"BSD-3-Clause"
] | null | null | null | #
# Copyright (C) 2014-2021 S[&]T, The Netherlands.
#
from __future__ import absolute_import, division, print_function
__version__ = "5.1"
__copyright__ = "Copyright (C) 2014-2021 S[&]T, The Netherlands."
__all__ = ["Error", "InternalError", "Struct", "Archive", "open", "config_path"]
import os as _os
from muninn.archive import Archive, create as _create_archive
from muninn.exceptions import *
from muninn.struct import Struct
| 32.080645 | 114 | 0.694319 | #
# Copyright (C) 2014-2021 S[&]T, The Netherlands.
#
from __future__ import absolute_import, division, print_function
__version__ = "5.1"
__copyright__ = "Copyright (C) 2014-2021 S[&]T, The Netherlands."
__all__ = ["Error", "InternalError", "Struct", "Archive", "open", "config_path"]
import os as _os
from muninn.archive import Archive, create as _create_archive
from muninn.exceptions import *
from muninn.struct import Struct
def config_path():
    """Return the muninn configuration search path.

    Taken from the MUNINN_CONFIG_PATH environment variable; empty string
    when the variable is not set.
    """
    return _os.getenv("MUNINN_CONFIG_PATH", "")
def open(id="", **kwargs):
    """Open the archive identified by *id* and return an Archive instance.

    Configuration is read from the archive's config file (located via the
    search path) and may be overridden per-section with keyword arguments.
    An empty *id* starts from an empty configuration.
    """
    if id:
        configuration = _read_archive_config_file(_locate_archive_config_file(id))
    else:
        configuration = {}
    _merge_configuration(configuration, kwargs)
    return _create_archive(configuration)
def _merge_configuration(configuration, other_configuration):
for name, value in other_configuration.items():
section = configuration.setdefault(name, {})
section.update(value)
def _read_archive_config_file(path):
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
parser = ConfigParser()
if not parser.read(path):
raise Error("unable to read config file: \"%s\"" % path)
return dict([(name, dict(parser.items(name))) for name in parser.sections()])
def _locate_archive_config_file(archive_id):
    """Find the configuration file '<archive_id>.cfg' on the search path.

    Each colon-separated entry of config_path() is either a file (matched
    by basename) or a directory (searched for the file).  Raises Error on
    an invalid identifier or when no file is found.
    """
    config_file_name = "%s.cfg" % archive_id
    # An identifier containing path separators would escape the search path.
    if _os.path.basename(config_file_name) != config_file_name:
        raise Error("invalid archive identifier: \"%s\"" % archive_id)
    for entry in (p for p in config_path().split(":") if p):
        if _os.path.isfile(entry):
            if _os.path.basename(entry) == config_file_name:
                return entry
        else:
            candidate = _os.path.join(entry, config_file_name)
            if _os.path.isfile(candidate):
                return candidate
    raise Error("configuration file: \"%s\" not found on search path: \"%s\"" % (config_file_name, config_path()))
| 1,434 | 0 | 115 |
ade11b8953fc7149ce9035d99dd15f6bc68e454e | 749 | py | Python | explainaboard/processors/processor.py | abodacs/ExplainaBoard | daf981c44baea5cf01a929b9b3c3713ac63a1aee | [
"MIT"
] | null | null | null | explainaboard/processors/processor.py | abodacs/ExplainaBoard | daf981c44baea5cf01a929b9b3c3713ac63a1aee | [
"MIT"
] | null | null | null | explainaboard/processors/processor.py | abodacs/ExplainaBoard | daf981c44baea5cf01a929b9b3c3713ac63a1aee | [
"MIT"
] | null | null | null | from typing import Any, Iterable, Optional
from explainaboard import feature
from explainaboard.tasks import TaskType
from explainaboard.info import SysOutputInfo
class Processor:
    """Base case for task-based processor"""
    # Declared (not assigned) here; concrete subclasses provide the feature
    # schema and the task type they handle.
    _features: feature.Features
    _task_type: TaskType
| 34.045455 | 83 | 0.715621 | from typing import Any, Iterable, Optional
from explainaboard import feature
from explainaboard.tasks import TaskType
from explainaboard.info import SysOutputInfo
class Processor:
    """Base case for task-based processor"""
    # Declared (not assigned) here; concrete subclasses provide the feature
    # schema and the task type they handle.
    _features: feature.Features
    _task_type: TaskType
    def __init__(self, metadata: dict, system_output_data: Iterable[dict]) -> None:
        """Build SysOutputInfo from *metadata* plus the class feature schema.

        NOTE(review): *system_output_data* is accepted but not stored here —
        presumably consumed by the subclass-installed builder; confirm.
        """
        self._metadata = {**metadata, "features": self._features}
        self._system_output_info = SysOutputInfo.from_dict(self._metadata)
        # should really be a base type of builders
        self._builder: Optional[Any] = None
    def process(self) -> SysOutputInfo:
        """Run the configured builder and return the analysis result.

        Raises NotImplementedError when a subclass has not set self._builder.
        """
        if not self._builder:
            raise NotImplementedError
        return self._builder.run()
| 411 | 0 | 54 |
acfe911d07fe53778beb84ba1b773c08f91d505e | 4,202 | py | Python | kitchen/migrations/0001_initial.py | DenerRodrigues/cheffapp-api | 498e9c96102f9bc777158b7aa07a99d89afa6a39 | [
"MIT"
] | 1 | 2020-03-23T03:21:43.000Z | 2020-03-23T03:21:43.000Z | kitchen/migrations/0001_initial.py | DenerRodrigues/cheffapp-api | 498e9c96102f9bc777158b7aa07a99d89afa6a39 | [
"MIT"
] | 5 | 2021-03-19T01:03:34.000Z | 2021-06-10T18:44:34.000Z | kitchen/migrations/0001_initial.py | DenerRodrigues/chefapp-api | 498e9c96102f9bc777158b7aa07a99d89afa6a39 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.4 on 2020-03-23 02:58
from decimal import Decimal
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import jsonfield.fields
import multiselectfield.db.fields
| 65.65625 | 454 | 0.632556 | # Generated by Django 3.0.4 on 2020-03-23 02:58
from decimal import Decimal
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import jsonfield.fields
import multiselectfield.db.fields
class Migration(migrations.Migration):
    """Initial migration: creates the Chef and FoodRecipe tables.

    Auto-generated by Django; do not hand-edit the operations below
    except through a new migration.
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # Chef: a user-owned kitchen with opening hours and address.
        # NOTE(review): 'days_of_weak' looks like a typo for 'days_of_week';
        # it must stay as-is here to match the model field name.
        migrations.CreateModel(
            name='Chef',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('is_active', models.BooleanField(default=True, help_text='Indicates that the record is active. Instead of deleting the record, uncheck this.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('last_update', models.DateTimeField(blank=True, null=True, verbose_name='last update')),
                ('name', models.CharField(blank=True, max_length=100, null=True, verbose_name='Name')),
                ('description', models.CharField(default='', max_length=250, verbose_name='Description')),
                ('email', models.EmailField(max_length=254, unique=True, verbose_name='Email address')),
                ('phone', models.CharField(blank=True, max_length=50, null=True, verbose_name='Phone')),
                ('address', jsonfield.fields.JSONField(blank=True, default={}, null=True, verbose_name='Address')),
                ('open_at', models.TimeField(verbose_name='Open at')),
                ('close_at', models.TimeField(verbose_name='Close at')),
                ('days_of_weak', multiselectfield.db.fields.MultiSelectField(choices=[('SUNDAY', 'Sunday'), ('MONDAY', 'Monday'), ('TUESDAY', 'Tuesday'), ('WEDNESDAY', 'Wednesday'), ('THURSDAY', 'Thursday'), ('FRIDAY', 'Friday'), ('SATURDAY', 'Saturday')], max_length=56, verbose_name='Days of weak')),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='chefs', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'Chef',
                'verbose_name_plural': 'Chefs',
            },
        ),
        # FoodRecipe: a dish offered by a Chef.
        # NOTE(review): the 'SCNACK' choice key looks like a typo for 'SNACK';
        # kept as-is to match stored data and the model definition.
        migrations.CreateModel(
            name='FoodRecipe',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('is_active', models.BooleanField(default=True, help_text='Indicates that the record is active. Instead of deleting the record, uncheck this.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('last_update', models.DateTimeField(blank=True, null=True, verbose_name='last update')),
                ('name', models.CharField(blank=True, max_length=50, null=True, verbose_name='Name')),
                ('description', models.CharField(default='', max_length=250, verbose_name='Description')),
                ('category', models.CharField(choices=[('OTHERS', 'Others'), ('BRAZILIAN', 'Brazilian'), ('ARABIC', 'Arabic'), ('ASIAN', 'Asian'), ('MEXICAN', 'Mexican'), ('ITALIAN', 'Italian'), ('SCNACK', 'Snack'), ('PACKED_LUNCH', 'Packed lunch'), ('MEAT', 'Meat'), ('PIZZA', 'Pizza'), ('PASTA', 'Pasta'), ('FIT', 'Fit'), ('VEGETARIAN', 'Vegetarian'), ('VEGAN', 'Vegan'), ('DRINK', 'Drink')], default='OTHERS', max_length=50, verbose_name='Category')),
                ('price', models.DecimalField(decimal_places=2, max_digits=10, validators=[django.core.validators.MinValueValidator(Decimal('0.01'))], verbose_name='Price')),
                ('preparation_time', models.TimeField(verbose_name='Preparation time')),
                ('chef', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='food_recipes', to='kitchen.Chef')),
            ],
            options={
                'verbose_name': 'Food Recipe',
                'verbose_name_plural': 'Food Recipes',
            },
        ),
    ]
| 0 | 3,877 | 23 |
43519643fe820a6ddf49410c93da4ae995262d72 | 1,364 | py | Python | lesson23_projects/pen/data/state_gen_v23.py | muzudho/py-state-machine-practice | e31c066f4cf142b6b6c5ff273b56a0f89428c59e | [
"MIT"
] | null | null | null | lesson23_projects/pen/data/state_gen_v23.py | muzudho/py-state-machine-practice | e31c066f4cf142b6b6c5ff273b56a0f89428c59e | [
"MIT"
] | null | null | null | lesson23_projects/pen/data/state_gen_v23.py | muzudho/py-state-machine-practice | e31c066f4cf142b6b6c5ff273b56a0f89428c59e | [
"MIT"
] | null | null | null | """State Generator"""
from lesson17_projects.pen.auto_gen.data.const import A, INIT, IS, PEN, THIS
# States
from lesson18_projects.pen.auto_gen.code.states1.init_this_is_a import InitThisIsAState
from lesson18_projects.pen.auto_gen.code.states1.init_this_is import InitThisIsState
from lesson18_projects.pen.auto_gen.code.states1.init_this import InitThisState
from lesson18_projects.pen.auto_gen.code.states1.init import InitState
from lesson18_projects.pen.auto_gen.code.states1.pen import PenState
# State wrapper
from lesson18_projects.pen.code.states.init import create_init
from lesson18_projects.pen.code.states.init_this import create_init_this
from lesson18_projects.pen.code.states.init_this_is import create_init_this_is
from lesson18_projects.pen.code.states.init_this_is_a import create_init_this_is_a
from lesson18_projects.pen.code.states.pen import create_pen
# Return lambda (factory) functions instead of reusing state instances, so
# that a fresh state object is created every time an entry is accessed.
state_gen_doc = {
    INIT: {
        "": lambda: create_init(InitState()),
        THIS: {
            "": lambda: create_init_this(InitThisState()),
            IS: {
                "": lambda: create_init_this_is(InitThisIsState()),
                A: {
                    "": lambda: create_init_this_is_a(InitThisIsAState()),
                },
            },
        },
    },
    PEN: lambda: create_pen(PenState()),
}
| 38.971429 | 87 | 0.736804 | """State Generator"""
from lesson17_projects.pen.auto_gen.data.const import A, INIT, IS, PEN, THIS
# States
from lesson18_projects.pen.auto_gen.code.states1.init_this_is_a import InitThisIsAState
from lesson18_projects.pen.auto_gen.code.states1.init_this_is import InitThisIsState
from lesson18_projects.pen.auto_gen.code.states1.init_this import InitThisState
from lesson18_projects.pen.auto_gen.code.states1.init import InitState
from lesson18_projects.pen.auto_gen.code.states1.pen import PenState
# State wrapper
from lesson18_projects.pen.code.states.init import create_init
from lesson18_projects.pen.code.states.init_this import create_init_this
from lesson18_projects.pen.code.states.init_this_is import create_init_this_is
from lesson18_projects.pen.code.states.init_this_is_a import create_init_this_is_a
from lesson18_projects.pen.code.states.pen import create_pen
# Return lambda (factory) functions instead of reusing state instances, so
# that a fresh state object is created every time an entry is accessed.
state_gen_doc = {
    INIT: {
        "": lambda: create_init(InitState()),
        THIS: {
            "": lambda: create_init_this(InitThisState()),
            IS: {
                "": lambda: create_init_this_is(InitThisIsState()),
                A: {
                    "": lambda: create_init_this_is_a(InitThisIsAState()),
                },
            },
        },
    },
    PEN: lambda: create_pen(PenState()),
}
| 0 | 0 | 0 |
e82532bf39572b4ac9ff548252aec7862a4e8099 | 4,005 | py | Python | hard-gists/6003874/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 21 | 2019-07-08T08:26:45.000Z | 2022-01-24T23:53:25.000Z | hard-gists/6003874/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 5 | 2019-06-15T14:47:47.000Z | 2022-02-26T05:02:56.000Z | hard-gists/6003874/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 17 | 2019-05-16T03:50:34.000Z | 2021-01-14T14:35:12.000Z | # hotkey_utils.py - bNull
#
# Some useful shortcuts for binding to hotkeys. Current output/hotkeys:
#
# [+] Bound make_dwords to Ctrl-Alt-D
# [+] Bound make_cstrings to Ctrl-Alt-A
# [+] Bound make_offset to Ctrl-Alt-O
import idaapi
import idc
import inspect
def selection_is_valid(selection, ea):
    """Return True when *ea* sits at an endpoint of *selection*.

    ``selection`` is the tuple returned by ``idaapi.read_selection()``:
    ``(flag, start_ea, end_ea)`` with ``end_ea`` exclusive.  If the cursor
    is not at the beginning or the end of the selection, assume that
    something bad has gone wrong and bail out instead of turning a lot of
    important things into dwords.
    """
    # Idiom fix: return the boolean expression directly instead of the
    # if-not/else True/False ladder.
    return ea == selection[1] or ea == selection[2] - 1
def cool_to_clobber(ea):
    """Verify whether or not the byte is somethng that we'll regret clobbering at
    some later point
    """
    # Currently, just check to see if there's an instruction defined there.
    # TODO: Check for additional things would not be cool-to-clobber.
    # idc.GetMnem is falsy when no instruction is defined at ea.
    return not idc.GetMnem(ea)
def get_selected_bytes():
    """Return the currently highlighted byte range, or None if it is invalid.

    The returned value is the (flag, start_ea, end_ea) tuple from
    idaapi.read_selection().
    NOTE: read_selection appears to be a fickle bitch. You absolutely have to
    select more than one line at a time in order for it to work as expected.
    """
    selected = idaapi.read_selection()
    curr_ea = idc.ScreenEA()
    print "[+] Processing range: %x - %x" % (selected[1],selected[2])
    # refer to selection_is_valid comments regarding the need for this check
    if (selection_is_valid(selected, curr_ea)):
        return selected
    else:
        return None
def make_cstrings():
    """Highlight a range and turn it into c-style strings
    NOTE: read_selection appears to be a fickle bitch. You absolutely have to
    select more than one line at a time in order for it to work as expected.
    """
    # TODO check to verify that each byte is valid ascii
    selected = get_selected_bytes()
    if selected:
        # Scan byte-by-byte, accumulating a run until a NUL terminator.
        curr_start = selected[1]
        curr_length = 0
        for ea in range(selected[1], selected[2]):
            if not cool_to_clobber(ea):
                print "[-] Error: Something that we shouldn't clobber at 0x%x" % ea
                break
            curr_byte = idaapi.get_byte(ea)
            curr_length += 1
            # A NUL ends the current run; define it as an ASCII string only
            # when it contains at least one non-NUL byte.
            if curr_byte == 0:
                if curr_length > 1:
                    idaapi.doASCI(curr_start,curr_length)
                    curr_length = 0
                    curr_start = ea + 1
                else:
                    curr_length = 0
                    curr_start = ea + 1
    else:
        print "[-] Error: EA is not currently a selection endpoint %x" % idc.ScreenEA()
def make_offset():
    """Resolve an offset to a pointer
    For some reason, it seems as though IDA will not auto-define a pointer DWORD. Ex:
    .rodata:08E30000 dd 8271234h
    In the case that 0x8271234 is actually a function, resolving the offset will
    result in:
    .rodata:08E30000 dd offset _ZN29ClassAD1Ev ; ClassA::~ClassA()
    """
    # Convert the first operand at the cursor position into an offset.
    idc.OpOffset(idc.ScreenEA(),0)
load_hotkeys() | 33.375 | 87 | 0.602747 | # hotkey_utils.py - bNull
#
# Some useful shortcuts for binding to hotkeys. Current output/hotkeys:
#
# [+] Bound make_dwords to Ctrl-Alt-D
# [+] Bound make_cstrings to Ctrl-Alt-A
# [+] Bound make_offset to Ctrl-Alt-O
import idaapi
import idc
import inspect
def selection_is_valid(selection, ea):
    """Return True when *ea* sits at an endpoint of *selection*.

    ``selection`` is the tuple returned by ``idaapi.read_selection()``:
    ``(flag, start_ea, end_ea)`` with ``end_ea`` exclusive.  If the cursor
    is not at the beginning or the end of the selection, assume that
    something bad has gone wrong and bail out instead of turning a lot of
    important things into dwords.
    """
    # Idiom fix: return the boolean expression directly instead of the
    # if-not/else True/False ladder.
    return ea == selection[1] or ea == selection[2] - 1
def cool_to_clobber(ea):
    """Verify whether or not the byte is somethng that we'll regret clobbering at
    some later point
    """
    # Currently, just check to see if there's an instruction defined there.
    # TODO: Check for additional things would not be cool-to-clobber.
    # idc.GetMnem is falsy when no instruction is defined at ea.
    return not idc.GetMnem(ea)
def get_selected_bytes():
    """Return the currently highlighted byte range, or None if it is invalid.

    The returned value is the (flag, start_ea, end_ea) tuple from
    idaapi.read_selection().
    NOTE: read_selection appears to be a fickle bitch. You absolutely have to
    select more than one line at a time in order for it to work as expected.
    """
    selected = idaapi.read_selection()
    curr_ea = idc.ScreenEA()
    print "[+] Processing range: %x - %x" % (selected[1],selected[2])
    # refer to selection_is_valid comments regarding the need for this check
    if (selection_is_valid(selected, curr_ea)):
        return selected
    else:
        return None
def make_dwords():
    """Turn the currently highlighted range into 4-byte dwords.

    Processes the selection in 4-byte steps and stops at the first address
    that is not safe to clobber.
    """
    selected = get_selected_bytes()
    if selected:
        for ea in range(selected[1], selected[2], 4):
            if not cool_to_clobber(ea):
                print "[-] Error: Something that we shouldn't clobber at 0x%x" % ea
                break
            idaapi.doDwrd(ea,4)
            print "[+] Processed %x" % ea
    else:
        print "[-] Error: EA is not currently a selection endpoint %x" % idc.ScreenEA()
def make_cstrings():
    """Highlight a range and turn it into c-style strings
    NOTE: read_selection appears to be a fickle bitch. You absolutely have to
    select more than one line at a time in order for it to work as expected.
    """
    # TODO check to verify that each byte is valid ascii
    selected = get_selected_bytes()
    if selected:
        # Scan byte-by-byte, accumulating a run until a NUL terminator.
        curr_start = selected[1]
        curr_length = 0
        for ea in range(selected[1], selected[2]):
            if not cool_to_clobber(ea):
                print "[-] Error: Something that we shouldn't clobber at 0x%x" % ea
                break
            curr_byte = idaapi.get_byte(ea)
            curr_length += 1
            # A NUL ends the current run; define it as an ASCII string only
            # when it contains at least one non-NUL byte.
            if curr_byte == 0:
                if curr_length > 1:
                    idaapi.doASCI(curr_start,curr_length)
                    curr_length = 0
                    curr_start = ea + 1
                else:
                    curr_length = 0
                    curr_start = ea + 1
    else:
        print "[-] Error: EA is not currently a selection endpoint %x" % idc.ScreenEA()
def make_offset():
    """Convert the operand at the cursor into an offset expression.

    IDA will not always auto-define a pointer DWORD, e.g.::

        .rodata:08E30000 dd 8271234h

    If 0x8271234 is actually a function, resolving the offset yields::

        .rodata:08E30000 dd offset _ZN29ClassAD1Ev ; ClassA::~ClassA()
    """
    here = idc.ScreenEA()
    idc.OpOffset(here, 0)
def load_hotkeys():
    """Bind this script's IDA hotkeys to their handler functions."""
    ENABLED_HOTKEYS = [
        ("Ctrl-Alt-D", make_dwords),
        ("Ctrl-Alt-A", make_cstrings),
        ("Ctrl-Alt-O", make_offset)
    ]
    for func in ENABLED_HOTKEYS:
        # NOTE(review): this relies on the alphabetical ordering of
        # inspect.getmembers() to recover a printable name for the handler —
        # fragile; func[1].__name__ would be the direct way. Confirm before use.
        func_name = inspect.getmembers(func[1])[-1][1]
        if idaapi.add_hotkey(func[0], func[1]):
            print "[+] Bound %s to %s" % (func_name, func[0])
        else:
            print "[-] Error: Unable to bind %s to %s" % (func_name, func[0])
# Register the hotkeys as soon as the script is loaded into IDA.
load_hotkeys()
45bbe8eb7e60ebefb00c3d9a54ce29367cad6ba3 | 6,242 | py | Python | tests/chainer_tests/functions_tests/loss_tests/test_softmax_cross_entropy.py | muupan/chainer | 038c0d1195c9479335d4223f42dec8bc5830327a | [
"MIT"
] | null | null | null | tests/chainer_tests/functions_tests/loss_tests/test_softmax_cross_entropy.py | muupan/chainer | 038c0d1195c9479335d4223f42dec8bc5830327a | [
"MIT"
] | 1 | 2016-11-09T06:32:32.000Z | 2016-11-09T10:20:04.000Z | tests/chainer_tests/functions_tests/loss_tests/test_softmax_cross_entropy.py | muupan/chainer | 038c0d1195c9479335d4223f42dec8bc5830327a | [
"MIT"
] | null | null | null | import math
import unittest
import numpy
import six
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
# Discover and run this module's tests under Chainer's test runner.
testing.run_module(__name__, __file__)
| 30.90099 | 77 | 0.627683 | import math
import unittest
import numpy
import six
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
class TestSoftmaxCrossEntropy(unittest.TestCase):
    """Checks softmax_cross_entropy on a (4, 3) batch against a hand-computed
    reference loss; target entries equal to -1 are ignored."""
    def setUp(self):
        self.x = numpy.random.uniform(-1, 1, (4, 3)).astype(numpy.float32)
        self.t = numpy.random.randint(0, 3, (4,)).astype(numpy.int32)
    def check_forward(self, x_data, t_data, use_cudnn=True):
        x = chainer.Variable(x_data)
        t = chainer.Variable(t_data)
        loss = functions.softmax_cross_entropy(x, t, use_cudnn)
        self.assertEqual(loss.data.shape, ())
        self.assertEqual(loss.data.dtype, numpy.float32)
        loss_value = float(cuda.to_cpu(loss.data))
        # Compute expected value: -log softmax probability of the true class,
        # averaged over the non-ignored (-1) samples.
        y = numpy.exp(self.x)
        loss_expect = 0.0
        count = 0
        for i in six.moves.range(y.shape[0]):
            if self.t[i] == -1:
                continue
            loss_expect -= math.log(y[i, self.t[i]] / y[i].sum())
            count += 1
        # All samples ignored -> defined to be zero loss.
        if count == 0:
            loss_expect = 0.0
        else:
            loss_expect /= count
        self.assertAlmostEqual(loss_expect, loss_value, places=5)
    @condition.retry(3)
    def test_forward_cpu(self):
        self.check_forward(self.x, self.t)
    @attr.cudnn
    @condition.retry(3)
    def test_forward_gpu(self):
        self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.t))
    @attr.gpu
    @condition.retry(3)
    def test_forward_gpu_no_cudnn(self):
        self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.t), False)
    def check_backward(self, x_data, t_data, use_cudnn=True):
        x = chainer.Variable(x_data)
        t = chainer.Variable(t_data)
        loss = functions.softmax_cross_entropy(x, t, use_cudnn)
        loss.backward()
        # Integer targets must not receive a gradient.
        self.assertEqual(None, t.grad)
        func = loss.creator
        f = lambda: func.forward((x.data, t.data))
        gx, = gradient_check.numerical_grad(f, (x.data,), (1,), eps=0.02)
        gradient_check.assert_allclose(gx, x.grad, atol=1e-4)
    @condition.retry(3)
    def test_backward_cpu(self):
        self.check_backward(self.x, self.t)
    @attr.cudnn
    @condition.retry(3)
    def test_backward_gpu(self):
        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.t))
    @attr.gpu
    @condition.retry(3)
    def test_backward_gpu_no_cudnn(self):
        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.t), False)
class TestReplicatedSoftmaxCrossEntropy1(TestSoftmaxCrossEntropy):
    """Same check with one replicated axis — input (4, 3, 2), targets (4, 2) —
    and normalize=True (average over all non-ignored entries)."""
    def setUp(self):
        self.x = numpy.random.uniform(-1, 1, (4, 3, 2)).astype(numpy.float32)
        self.t = numpy.random.randint(0, 3, (4, 2)).astype(numpy.int32)
    def check_forward(self, x_data, t_data, use_cudnn=True):
        x = chainer.Variable(x_data)
        t = chainer.Variable(t_data)
        loss = functions.softmax_cross_entropy(
            x, t, use_cudnn, normalize=True)
        self.assertEqual(loss.data.shape, ())
        self.assertEqual(loss.data.dtype, numpy.float32)
        loss_value = float(cuda.to_cpu(loss.data))
        # Compute expected value over batch x replicated-position pairs.
        y = numpy.exp(self.x)
        loss_expect = 0.0
        count = 0
        for i in six.moves.range(y.shape[0]):
            for k in six.moves.range(y.shape[2]):
                if self.t[i, k] == -1:
                    continue
                loss_expect -= math.log(
                    y[i, self.t[i, k], k] / y[i, :, k].sum())
                count += 1
        if count == 0:
            loss_expect = 0.0
        else:
            loss_expect /= count
        self.assertAlmostEqual(loss_expect, loss_value, places=4)
class TestReplicatedSoftmaxCrossEntropy2(TestSoftmaxCrossEntropy):
    """Same check with two replicated axes — input (4, 3, 2, 5), targets
    (4, 2, 5) — and normalize=False (divide by the batch size only)."""
    def setUp(self):
        self.x = numpy.random.uniform(
            -1, 1, (4, 3, 2, 5)).astype(numpy.float32)
        self.t = numpy.random.randint(0, 3, (4, 2, 5)).astype(numpy.int32)
    def check_forward(self, x_data, t_data, use_cudnn=True):
        x = chainer.Variable(x_data)
        t = chainer.Variable(t_data)
        loss = functions.softmax_cross_entropy(
            x, t, use_cudnn, normalize=False)
        self.assertEqual(loss.data.shape, ())
        self.assertEqual(loss.data.dtype, numpy.float32)
        loss_value = float(cuda.to_cpu(loss.data))
        # Compute expected value; normalize=False divides by the batch size,
        # not by the number of counted entries.
        y = numpy.exp(self.x)
        loss_expect = 0.0
        for i in six.moves.range(y.shape[0]):
            for k in six.moves.range(y.shape[2]):
                for l in six.moves.range(y.shape[3]):
                    if self.t[i, k, l] == -1:
                        continue
                    loss_expect -= math.log(
                        y[i, self.t[i, k, l], k, l] / y[i, :, k, l].sum())
        loss_expect /= y.shape[0]
        self.assertAlmostEqual(loss_expect, loss_value, places=4)
class TestSoftmaxCrossEntropyWithIgnoreLabel(TestSoftmaxCrossEntropy):
    # One target entry set to the ignore label (-1); its loss must be skipped.
    def setUp(self):
        super(TestSoftmaxCrossEntropyWithIgnoreLabel, self).setUp()
        self.t[2] = -1
class TestSoftmaxCrossEntropyIgnoreAll(TestSoftmaxCrossEntropy):
    # Every target ignored; the expected loss degenerates to zero.
    def setUp(self):
        super(TestSoftmaxCrossEntropyIgnoreAll, self).setUp()
        self.t[:] = -1
class TestReplicatedSoftmaxCrossEntropy1IgnoreLabel(
        TestReplicatedSoftmaxCrossEntropy1):
    # One ignored entry in the replicated (one extra axis) variant.
    def setUp(self):
        super(TestReplicatedSoftmaxCrossEntropy1IgnoreLabel, self).setUp()
        self.t[0, 1] = -1
class TestReplicatedSoftmaxCrossEntropy2IgnoreLabel(
        TestReplicatedSoftmaxCrossEntropy2):
    # One ignored entry in the replicated (two extra axes) variant.
    def setUp(self):
        super(TestReplicatedSoftmaxCrossEntropy2IgnoreLabel, self).setUp()
        self.t[0, 1, 2] = -1
class TestReplicatedSoftmaxCrossEntropy1IgnoreAll(
        TestReplicatedSoftmaxCrossEntropy1):
    # All entries ignored in the one-extra-axis variant.
    def setUp(self):
        super(TestReplicatedSoftmaxCrossEntropy1IgnoreAll, self).setUp()
        self.t[:] = -1
class TestReplicatedSoftmaxCrossEntropy2IgnoreAll(
        TestReplicatedSoftmaxCrossEntropy2):
    # All entries ignored in the two-extra-axes variant.
    def setUp(self):
        super(TestReplicatedSoftmaxCrossEntropy2IgnoreAll, self).setUp()
        self.t[:] = -1
# Discover and run this module's tests under Chainer's test runner.
testing.run_module(__name__, __file__)
| 4,500 | 957 | 477 |
b37784edfd5f06e8e68059d5166bf0c313cd8900 | 2,119 | py | Python | python/ql/test/query-tests/Security/CWE-022/tarslip.py | vadi2/codeql | a806a4f08696d241ab295a286999251b56a6860c | [
"MIT"
] | 4,036 | 2020-04-29T00:09:57.000Z | 2022-03-31T14:16:38.000Z | python/ql/test/query-tests/Security/CWE-022/tarslip.py | vadi2/codeql | a806a4f08696d241ab295a286999251b56a6860c | [
"MIT"
] | 2,970 | 2020-04-28T17:24:18.000Z | 2022-03-31T22:40:46.000Z | python/ql/test/query-tests/Security/CWE-022-TarSlip/tarslip.py | ScriptBox99/github-codeql | 2ecf0d3264db8fb4904b2056964da469372a235c | [
"MIT"
] | 794 | 2020-04-29T00:28:25.000Z | 2022-03-30T08:21:46.000Z | #!/usr/bin/python
import tarfile
# NOTE(review): this is a CodeQL "TarSlip" (CWE-022) test fixture — it
# deliberately mixes unsafe and sanitized tar-extraction patterns for the
# analyzer to flag. Do not "fix" the unsafe patterns; query tests depend on them.
# NOTE(review): `sys`, `os` and `safemembers` are referenced below but not
# imported/defined in this copy — confirm against the upstream test file.
unsafe_filename_tar = sys.argv[1]
safe_filename_tar = "safe_path.tar"
tar = tarfile.open(safe_filename_tar)
for entry in tar:
    tar.extract(entry)
tar = tarfile.open(unsafe_filename_tar)
tar.extractall()
tar.close()
tar = tarfile.open(unsafe_filename_tar)
for entry in tar:
    tar.extract(entry)
tar = tarfile.open(safe_filename_tar)
tar.extractall()
tar.close()
# Sanitized: rejects absolute paths and '..' components before extraction.
tar = tarfile.open(unsafe_filename_tar)
for entry in tar:
    if os.path.isabs(entry.name) or ".." in entry.name:
        raise ValueError("Illegal tar archive entry")
    tar.extract(entry, "/tmp/unpack/")
# Part Sanitized: checks '..' but still allows absolute paths.
tar = tarfile.open(unsafe_filename_tar)
for entry in tar:
    if ".." in entry.name:
        raise ValueError("Illegal tar archive entry")
    tar.extract(entry, "/tmp/unpack/")
# Unsanitized members
tar = tarfile.open(unsafe_filename_tar)
tar.extractall(members=tar)
# Sanitize members
tar = tarfile.open(unsafe_filename_tar)
tar.extractall(members=safemembers(tar))
# Wrong sanitizer (is missing not)
tar = tarfile.open(unsafe_filename_tar)
for entry in tar:
    if os.path.isabs(entry.name) or ".." in entry.name:
        tar.extract(entry, "/tmp/unpack/")
# OK Sanitized using not
tar = tarfile.open(unsafe_filename_tar)
for entry in tar:
    if not (os.path.isabs(entry.name) or ".." in entry.name):
        tar.extract(entry, "/tmp/unpack/")
# The following two variants are included by purpose, since by default there is a
# difference in handling `not x` and `not (x or False)` when overriding
# Sanitizer.sanitizingEdge. We want to ensure we handle both consistently.
# Not reported, although vulnerable to '..'
tar = tarfile.open(unsafe_filename_tar)
for entry in tar:
    if not (os.path.isabs(entry.name) or False):
        tar.extract(entry, "/tmp/unpack/")
# Not reported, although vulnerable to '..'
tar = tarfile.open(unsafe_filename_tar)
for entry in tar:
    if not os.path.isabs(entry.name):
        tar.extract(entry, "/tmp/unpack/")
import tarfile
# NOTE(review): this is a CodeQL "TarSlip" (CWE-022) test fixture — it
# deliberately mixes unsafe and sanitized tar-extraction patterns for the
# analyzer to flag. Do not "fix" the unsafe patterns; query tests depend on them.
# NOTE(review): `sys` and `os` are referenced below but not imported here —
# confirm against the upstream test file.
unsafe_filename_tar = sys.argv[1]
safe_filename_tar = "safe_path.tar"
tar = tarfile.open(safe_filename_tar)
for entry in tar:
    tar.extract(entry)
tar = tarfile.open(unsafe_filename_tar)
tar.extractall()
tar.close()
tar = tarfile.open(unsafe_filename_tar)
for entry in tar:
    tar.extract(entry)
tar = tarfile.open(safe_filename_tar)
tar.extractall()
tar.close()
# Sanitized: rejects absolute paths and '..' components before extraction.
tar = tarfile.open(unsafe_filename_tar)
for entry in tar:
    if os.path.isabs(entry.name) or ".." in entry.name:
        raise ValueError("Illegal tar archive entry")
    tar.extract(entry, "/tmp/unpack/")
# Part Sanitized: checks '..' but still allows absolute paths.
tar = tarfile.open(unsafe_filename_tar)
for entry in tar:
    if ".." in entry.name:
        raise ValueError("Illegal tar archive entry")
    tar.extract(entry, "/tmp/unpack/")
# Unsanitized members
tar = tarfile.open(unsafe_filename_tar)
tar.extractall(members=tar)
# Sanitize members
def safemembers(members):
    """Yield only members that pass `badpath`.

    NOTE(review): `badpath` is undefined in this file and the bare `raise`
    outside an except clause would raise RuntimeError — appears intentional
    fixture scaffolding; confirm against the upstream test file.
    """
    for info in members:
        if badpath(info):
            raise
        yield info
tar = tarfile.open(unsafe_filename_tar)
tar.extractall(members=safemembers(tar))
# Wrong sanitizer (is missing not)
tar = tarfile.open(unsafe_filename_tar)
for entry in tar:
    if os.path.isabs(entry.name) or ".." in entry.name:
        tar.extract(entry, "/tmp/unpack/")
# OK Sanitized using not
tar = tarfile.open(unsafe_filename_tar)
for entry in tar:
    if not (os.path.isabs(entry.name) or ".." in entry.name):
        tar.extract(entry, "/tmp/unpack/")
# The following two variants are included by purpose, since by default there is a
# difference in handling `not x` and `not (x or False)` when overriding
# Sanitizer.sanitizingEdge. We want to ensure we handle both consistently.
# Not reported, although vulnerable to '..'
tar = tarfile.open(unsafe_filename_tar)
for entry in tar:
    if not (os.path.isabs(entry.name) or False):
        tar.extract(entry, "/tmp/unpack/")
# Not reported, although vulnerable to '..'
tar = tarfile.open(unsafe_filename_tar)
for entry in tar:
    if not os.path.isabs(entry.name):
        tar.extract(entry, "/tmp/unpack/")
052feac4535cc396c41ce17d6bc31c6a6fbaaa90 | 1,332 | py | Python | Scraping/Scraping_Instagram/setup.py | ghassen1302/Interview_Code_Demonstration | fd9e2b313d3203e79e4f40bd52f82365508126d2 | [
"Apache-2.0"
] | null | null | null | Scraping/Scraping_Instagram/setup.py | ghassen1302/Interview_Code_Demonstration | fd9e2b313d3203e79e4f40bd52f82365508126d2 | [
"Apache-2.0"
] | null | null | null | Scraping/Scraping_Instagram/setup.py | ghassen1302/Interview_Code_Demonstration | fd9e2b313d3203e79e4f40bd52f82365508126d2 | [
"Apache-2.0"
] | null | null | null | import os
import io
import sys
import setuptools
from setuptools import setup
version = "1.0.0"
with io.open('README.md', 'r', encoding='utf-8') as readme_file:
readme = readme_file.read()
if sys.argv[-1] == 'readme':
print(readme)
sys.exit()
def parse_requirements(filename):
    """Load requirements from a pip requirements file.

    Blank lines and ``#`` comment lines are skipped.

    :param filename: path of the requirements file to read.
    :return: list of requirement strings, stripped of surrounding whitespace.
    """
    # Use a context manager so the file handle is closed deterministically;
    # the original left the file open until garbage collection.
    with open(filename) as req_file:
        lineiter = (line.strip() for line in req_file)
        return [line for line in lineiter if line and not line.startswith("#")]
# parse_requirements() returns a list of requirement strings (the comment in
# the original claiming pip InstallRequirement objects was stale).
install_reqs = parse_requirements('./requirements.txt')
# e.g. ['django==1.5.1', 'mezzanine==1.4.6']; drop pip option lines ("-...").
reqs = [str(ir) for ir in install_reqs if not str(ir).startswith("-") ]
setup(
    name='insta_crawler',
    version=version,
    description=('Scraping Instagram profils'),
    long_description=readme,
    long_description_content_type='text/markdown',
    author='Ghassen Chaabouni',
    author_email='ghassen1302@live.com',
    packages=[
        'insta_crawler',
    ],
    # packages= setuptools.find_packages(),
    package_dir={'insta_crawler': 'insta_crawler'},
    include_package_data=True,
    install_requires=reqs,
    license='MIT',
    # Installs a console command `insta` mapped to the package's __main__.
    entry_points={
        'console_scripts': [
            'insta=insta_crawler.__main__:main',
        ]
    }
)
| 26.117647 | 78 | 0.678679 | import os
import io
import sys
import setuptools
from setuptools import setup
version = "1.0.0"
# Long description for PyPI is taken verbatim from the README.
with io.open('README.md', 'r', encoding='utf-8') as readme_file:
    readme = readme_file.read()
# Convenience: `python setup.py readme` prints the README and exits.
if sys.argv[-1] == 'readme':
    print(readme)
    sys.exit()
def parse_requirements(filename):
    """Load requirements from a pip requirements file.

    Blank lines and ``#`` comment lines are skipped.

    :param filename: path of the requirements file to read.
    :return: list of requirement strings, stripped of surrounding whitespace.
    """
    # Use a context manager so the file handle is closed deterministically;
    # the original left the file open until garbage collection.
    with open(filename) as req_file:
        lineiter = (line.strip() for line in req_file)
        return [line for line in lineiter if line and not line.startswith("#")]
# parse_requirements() returns a list of requirement strings (the comment in
# the original claiming pip InstallRequirement objects was stale).
install_reqs = parse_requirements('./requirements.txt')
# e.g. ['django==1.5.1', 'mezzanine==1.4.6']; drop pip option lines ("-...").
reqs = [str(ir) for ir in install_reqs if not str(ir).startswith("-") ]
setup(
    name='insta_crawler',
    version=version,
    description=('Scraping Instagram profils'),
    long_description=readme,
    long_description_content_type='text/markdown',
    author='Ghassen Chaabouni',
    author_email='ghassen1302@live.com',
    packages=[
        'insta_crawler',
    ],
    # packages= setuptools.find_packages(),
    package_dir={'insta_crawler': 'insta_crawler'},
    include_package_data=True,
    install_requires=reqs,
    license='MIT',
    # Installs a console command `insta` mapped to the package's __main__.
    entry_points={
        'console_scripts': [
            'insta=insta_crawler.__main__:main',
        ]
    }
)
| 0 | 0 | 0 |
4ce072df82973ee5f57dfd07b392ad5b46a49611 | 560 | py | Python | meine_stadt_transparent/settings/env.py | cyroxx/meine-stadt-transparent | d5a3f03a29a1bb97ce50ac5257d8bbd5208d9218 | [
"MIT"
] | null | null | null | meine_stadt_transparent/settings/env.py | cyroxx/meine-stadt-transparent | d5a3f03a29a1bb97ce50ac5257d8bbd5208d9218 | [
"MIT"
] | null | null | null | meine_stadt_transparent/settings/env.py | cyroxx/meine-stadt-transparent | d5a3f03a29a1bb97ce50ac5257d8bbd5208d9218 | [
"MIT"
] | null | null | null | import sys
from pathlib import Path
import environ
# Bootstrap the settings environment (django-environ, presumably — confirm).
env = environ.Env()
env_file = ".env"
# This works good enough for the console, pycharm and travis ci
TESTING = sys.argv[1:2] == ["test"] or "pytest" in sys.modules
# ENV_PATH overrides the default .env file location explicitly.
if env.str("ENV_PATH", None):
    env_file = env.str("ENV_PATH")
    assert Path(env_file).is_file()
elif TESTING:
    # This anchoring allows to run tests below the project root
    env_file = Path(__file__).parent.parent.parent.joinpath("etc/test.env")
    assert env_file.is_file(), "The test env is missing"
env.read_env(str(env_file))
| 28 | 75 | 0.716071 | import sys
from pathlib import Path
import environ
# Bootstrap the settings environment (django-environ, presumably — confirm).
env = environ.Env()
env_file = ".env"
# This works good enough for the console, pycharm and travis ci
TESTING = sys.argv[1:2] == ["test"] or "pytest" in sys.modules
# ENV_PATH overrides the default .env file location explicitly.
if env.str("ENV_PATH", None):
    env_file = env.str("ENV_PATH")
    assert Path(env_file).is_file()
elif TESTING:
    # This anchoring allows to run tests below the project root
    env_file = Path(__file__).parent.parent.parent.joinpath("etc/test.env")
    assert env_file.is_file(), "The test env is missing"
env.read_env(str(env_file))
| 0 | 0 | 0 |
e00d451c0b80119f3e71be00f9ef075bcca0ef84 | 110 | py | Python | aioldap/__init__.py | vladislavste/aioldap | 2f3f7f237c5bc27dbb5b42f6f97b262d61972dc1 | [
"Apache-2.0"
] | null | null | null | aioldap/__init__.py | vladislavste/aioldap | 2f3f7f237c5bc27dbb5b42f6f97b262d61972dc1 | [
"Apache-2.0"
] | null | null | null | aioldap/__init__.py | vladislavste/aioldap | 2f3f7f237c5bc27dbb5b42f6f97b262d61972dc1 | [
"Apache-2.0"
] | null | null | null | from .connection import LDAPConnection, Server
# Public API of the package: names re-exported from .connection above.
__all__ = ['LDAPConnection', 'Server']
__version__ = '0.4.2'
| 18.333333 | 46 | 0.736364 | from .connection import LDAPConnection, Server
# Public API of the package: names re-exported from .connection above.
__all__ = ['LDAPConnection', 'Server']
__version__ = '0.4.2'
| 0 | 0 | 0 |
12a2916e8aabcbbdd3563b658841e139b5d828e9 | 10,012 | py | Python | craid/bgsBuddy/load.py | HausReport/ClubRaiders | 88bd64d2512302ca2b391b48979b6e88b092eb92 | [
"BSD-3-Clause"
] | null | null | null | craid/bgsBuddy/load.py | HausReport/ClubRaiders | 88bd64d2512302ca2b391b48979b6e88b092eb92 | [
"BSD-3-Clause"
] | 2 | 2020-05-28T13:30:08.000Z | 2020-06-02T14:12:04.000Z | craid/bgsBuddy/load.py | HausReport/ClubRaiders | 88bd64d2512302ca2b391b48979b6e88b092eb92 | [
"BSD-3-Clause"
] | null | null | null | """
Example EDMC plugin.
It adds a single button to the EDMC interface that displays the number of times it has been clicked.
"""
# Copyright (c) 2020 Club Raiders Project
# https://github.com/HausReport/ClubRaiders
#
# SPDX-License-Identifier: BSD-3-Clause
import logging
import os
import tkinter as tk
from typing import Optional
# EDMC-provided modules only exist when running inside EDMC; allow import to
# fail so the file can at least be loaded outside of it.
try:
    import myNotebook as nb
    from config import appname, config
except ImportError:
    pass
import GlobalDictionaries
from helpers.DiscordReporter import DiscordReporter
# Initialize logging/addresses before importing helpers (presumably so they
# can use the configured logger at import time — confirm).
GlobalDictionaries.init_logger()
GlobalDictionaries.load_addresses()
from helpers.DailyPlan import DailyPlan
from helpers.DailyPlans import DailyPlans
from helpers.LogReporter import LogReporter
logger = GlobalDictionaries.logger
logReporter: LogReporter = LogReporter(logger)
logger.info("Test log msg")
logging.info("This is a second log msg")
class BgsBuddy:
    """
    Implements the EDMC plugin interface for BGS Buddy.
    It adds a button to the EDMC UI that displays the number of times it has
    been clicked, and a preference to set the number directly.
    """
    def __init__(self) -> None:
        """Read the persisted click count from EDMC's config store.

        Restored: this initializer was missing here although
        setup_preferences() and setup_main_ui() read self.click_count
        (it exists in the duplicate copy of this module later in the file).
        """
        # Be sure to use names that wont collide in our config variables
        self.click_count: Optional[tk.StringVar] = tk.StringVar(value=str(config.getint('click_counter_count')))
        logger.info("BGS Buddy instantiated")
    def on_load(self) -> str:
        """
        on_load is called by plugin_start3 below.
        It is the first point EDMC interacts with our code after loading our module.
        :return: The name of the plugin, which will be used by EDMC for logging and for the settings window
        """
        return GlobalDictionaries.plugin_name
    def on_unload(self) -> None:
        """
        on_unload is called by plugin_stop below.
        It is the last thing called before EDMC shuts down.
        Note that blocking code here will hold the shutdown process.
        """
        self.on_preferences_closed("", False)  # Save our prefs
    def setup_preferences(self, parent: nb.Notebook, cmdr: str, is_beta: bool) -> Optional[tk.Frame]:
        """
        setup_preferences is called by plugin_prefs below.
        It is where we can setup our own settings page in EDMC's settings window. Our tab is defined for us.
        :param parent: the tkinter parent that our returned Frame will want to inherit from
        :param cmdr: The current ED Commander
        :param is_beta: Whether or not EDMC is currently marked as in beta mode
        :return: The frame to add to the settings window
        """
        current_row = 0
        frame = nb.Frame(parent)
        # setup our config in a "Click Count: number" row
        nb.Label(frame, text='Click Count').grid(row=current_row)
        nb.Entry(frame, textvariable=self.click_count).grid(row=current_row, column=1)
        current_row += 1  # Always increment our row counter, makes for far easier tkinter design.
        return frame
    def on_preferences_closed(self, cmdr: str, is_beta: bool) -> None:
        """
        on_preferences_closed is called by prefs_changed below.
        It is called when the preferences dialog is dismissed by the user.
        :param cmdr: The current ED Commander
        :param is_beta: Whether or not EDMC is currently marked as in beta mode
        """
        config.set('click_counter_count', self.click_count.get())
    def setup_main_ui(self, parent: tk.Frame) -> tk.Frame:
        """
        Create our entry on the main EDMC UI.
        This is called by plugin_app below.
        :param parent: EDMC main window Tk
        :return: Our frame
        """
        current_row = 0
        frame = tk.Frame(parent)
        button = tk.Button(
            frame,
            text="Count me",
            command=lambda: self.click_count.set(str(int(self.click_count.get()) + 1))
        )
        button.grid(row=current_row)
        current_row += 1
        nb.Label(frame, text="Count:").grid(row=current_row, sticky=tk.W)
        nb.Label(frame, textvariable=self.click_count).grid(row=current_row, column=1)
        return frame
# True once the commander name has been forwarded to the daily plans.
cmdrNameSet = False
cc = BgsBuddy()
# Hard-coded sample daily plans: (system, faction to support, opposing faction).
samplePlan: DailyPlan = DailyPlan("LHS 2477", "Federal Reclamation Co", "Hodack Prison Colony")
samplePlan.addMissionInfluenceGoal(60)
samplePlan.addBountyGoal(16000000)
samplePlan.addCartographyGoal(8000000)
samplePlan.addTradeProfitGoal(16000000)
samplePlan2: DailyPlan = DailyPlan("HR 5975", "Beyond Infinity Corporation", "Wreaken Construction")
samplePlan2.addMissionInfluenceGoal(60)
samplePlan2.addBountyGoal(16000000)
samplePlan2.addCartographyGoal(8000000)
samplePlan2.addTradeProfitGoal(16000000)
samplePlan3: DailyPlan = DailyPlan("LAWD 26", "Minutemen", "Sirius Corporation")
samplePlan3.addMissionInfluenceGoal(90)
samplePlan3.addBountyGoal(16000000)
samplePlan3.addCartographyGoal(8000000)
samplePlan3.addTradeProfitGoal(0)
samplePlan3.addTradeLossGoal(16000000)
samplePlan3.addMurderGoal(32)
# SECURITY(review): a live Discord webhook URL (a secret) is committed in
# source; it should be rotated and loaded from config instead.
samplePlan3.setHookUrl("https://discordapp.com/api/webhooks/785228043128012820/uFmUix9PqWhh1cAoYYx1Hsh43VVmGPwCnNQlq5is1vBhqKUTeC2h0-VgDXfmQttuq9UX")
dailyPlans: DailyPlans = DailyPlans(logReporter)
dailyPlans.addPlan(samplePlan)
dailyPlans.addPlan(samplePlan2)
dailyPlans.addPlan(samplePlan3)
disco = DiscordReporter(logger)
dailyPlans.addReporter(disco)
#
# Direct EDMC callbacks to class
#
# Note that all of these could be simply replaced with something like:
# plugin_start3 = cc.on_load
| 39.262745 | 149 | 0.694966 | """
Example EDMC plugin.
It adds a single button to the EDMC interface that displays the number of times it has been clicked.
"""
# Copyright (c) 2020 Club Raiders Project
# https://github.com/HausReport/ClubRaiders
#
# SPDX-License-Identifier: BSD-3-Clause
import logging
import os
import tkinter as tk
from typing import Optional
# EDMC-provided modules only exist when running inside EDMC; allow import to
# fail so the file can at least be loaded outside of it.
try:
    import myNotebook as nb
    from config import appname, config
except ImportError:
    pass
import GlobalDictionaries
from helpers.DiscordReporter import DiscordReporter
# Initialize logging/addresses before importing helpers (presumably so they
# can use the configured logger at import time — confirm).
GlobalDictionaries.init_logger()
GlobalDictionaries.load_addresses()
from helpers.DailyPlan import DailyPlan
from helpers.DailyPlans import DailyPlans
from helpers.LogReporter import LogReporter
logger = GlobalDictionaries.logger
logReporter: LogReporter = LogReporter(logger)
logger.info("Test log msg")
logging.info("This is a second log msg")
class BgsBuddy:
    """
    Implements the EDMC plugin interface for BGS Buddy (docstring previously
    referred to "ClickCounter", the EDMC example this was adapted from).
    It adds a button to the EDMC UI that displays the number of times it has
    been clicked, and a preference to set the number directly.
    """
    def __init__(self) -> None:
        # Be sure to use names that wont collide in our config variables
        self.click_count: Optional[tk.StringVar] = tk.StringVar(value=str(config.getint('click_counter_count')))
        logger.info("BGS Buddy instantiated")
    def on_load(self) -> str:
        """
        on_load is called by plugin_start3 below.
        It is the first point EDMC interacts with our code after loading our module.
        :return: The name of the plugin, which will be used by EDMC for logging and for the settings window
        """
        return GlobalDictionaries.plugin_name
    def on_unload(self) -> None:
        """
        on_unload is called by plugin_stop below.
        It is the last thing called before EDMC shuts down. :1
        Note that blocking code here will hold the shutdown process.
        """
        self.on_preferences_closed("", False)  # Save our prefs
    def setup_preferences(self, parent: nb.Notebook, cmdr: str, is_beta: bool) -> Optional[tk.Frame]:
        """
        setup_preferences is called by plugin_prefs below.
        It is where we can setup our own settings page in EDMC's settings window. Our tab is defined for us.
        :param parent: the tkinter parent that our returned Frame will want to inherit from
        :param cmdr: The current ED Commander
        :param is_beta: Whether or not EDMC is currently marked as in beta mode
        :return: The frame to add to the settings window
        """
        current_row = 0
        frame = nb.Frame(parent)
        # setup our config in a "Click Count: number"
        nb.Label(frame, text='Click Count').grid(row=current_row)
        nb.Entry(frame, textvariable=self.click_count).grid(row=current_row, column=1)
        current_row += 1  # Always increment our row counter, makes for far easier tkinter design.
        return frame
    def on_preferences_closed(self, cmdr: str, is_beta: bool) -> None:
        """
        on_preferences_closed is called by prefs_changed below.
        It is called when the preferences dialog is dismissed by the user.
        :param cmdr: The current ED Commander
        :param is_beta: Whether or not EDMC is currently marked as in beta mode
        """
        config.set('click_counter_count', self.click_count.get())
    def setup_main_ui(self, parent: tk.Frame) -> tk.Frame:
        """
        Create our entry on the main EDMC UI.
        This is called by plugin_app below.
        :param parent: EDMC main window Tk
        :return: Our frame
        """
        current_row = 0
        frame = tk.Frame(parent)
        button = tk.Button(
            frame,
            text="Count me",
            command=lambda: self.click_count.set(str(int(self.click_count.get()) + 1))
        )
        button.grid(row=current_row)
        current_row += 1
        nb.Label(frame, text="Count:").grid(row=current_row, sticky=tk.W)
        nb.Label(frame, textvariable=self.click_count).grid(row=current_row, column=1)
        return frame
# True once the commander name has been forwarded to the daily plans.
cmdrNameSet = False
cc = BgsBuddy()
# Hard-coded sample daily plans: (system, faction to support, opposing faction).
samplePlan: DailyPlan = DailyPlan("LHS 2477", "Federal Reclamation Co", "Hodack Prison Colony")
samplePlan.addMissionInfluenceGoal(60)
samplePlan.addBountyGoal(16000000)
samplePlan.addCartographyGoal(8000000)
samplePlan.addTradeProfitGoal(16000000)
samplePlan2: DailyPlan = DailyPlan("HR 5975", "Beyond Infinity Corporation", "Wreaken Construction")
samplePlan2.addMissionInfluenceGoal(60)
samplePlan2.addBountyGoal(16000000)
samplePlan2.addCartographyGoal(8000000)
samplePlan2.addTradeProfitGoal(16000000)
samplePlan3: DailyPlan = DailyPlan("LAWD 26", "Minutemen", "Sirius Corporation")
samplePlan3.addMissionInfluenceGoal(90)
samplePlan3.addBountyGoal(16000000)
samplePlan3.addCartographyGoal(8000000)
samplePlan3.addTradeProfitGoal(0)
samplePlan3.addTradeLossGoal(16000000)
samplePlan3.addMurderGoal(32)
# SECURITY(review): a live Discord webhook URL (a secret) is committed in
# source; it should be rotated and loaded from config instead.
samplePlan3.setHookUrl("https://discordapp.com/api/webhooks/785228043128012820/uFmUix9PqWhh1cAoYYx1Hsh43VVmGPwCnNQlq5is1vBhqKUTeC2h0-VgDXfmQttuq9UX")
dailyPlans: DailyPlans = DailyPlans(logReporter)
dailyPlans.addPlan(samplePlan)
dailyPlans.addPlan(samplePlan2)
dailyPlans.addPlan(samplePlan3)
disco = DiscordReporter(logger)
dailyPlans.addReporter(disco)
#
# Direct EDMC callbacks to class
#
# Note that all of these could be simply replaced with something like:
# plugin_start3 = cc.on_load
# Thin module-level wrappers: EDMC looks these functions up by name and the
# wrappers delegate to the single BgsBuddy instance `cc`.
def plugin_start3(plugin_dir: str) -> str:
    return cc.on_load()
def plugin_stop() -> None:
    return cc.on_unload()
def plugin_prefs(parent: nb.Notebook, cmdr: str, is_beta: bool) -> Optional[tk.Frame]:
    return cc.setup_preferences(parent, cmdr, is_beta)
def prefs_changed(cmdr: str, is_beta: bool) -> None:
    return cc.on_preferences_closed(cmdr, is_beta)
def plugin_app(parent: tk.Frame) -> Optional[tk.Frame]:
    return cc.setup_main_ui(parent)
def journal_entry(cmdr, is_beta, system, station, entry, state):
    """EDMC journal hook: route Elite Dangerous journal events to the plans.

    Tracks the current system/station/controlling faction and forwards the
    BGS-relevant events (missions, exploration data, bounties, trade, ship
    targeting, murder) to ``dailyPlans`` / ``GlobalDictionaries``.

    :param cmdr: current commander name
    :param entry: the journal event as a dict (key schema per Frontier's
                  journal; only keys read below are relied upon)
    """
    global cmdrNameSet
    event = entry['event']
    if not cmdrNameSet:
        # Fix: the flag was never set, so setCommanderName() ran for every
        # single journal event instead of once per session.
        dailyPlans.setCommanderName(cmdr)
        cmdrNameSet = True
    if event == 'Docked' or (event == 'Location' and entry['Docked']):
        # Docked (or a Location event while docked): record where we are and
        # which faction controls the station.
        stationFaction = entry['StationFaction']
        systemAddress = str(entry['SystemAddress'])
        systemName = entry['StarSystem']
        stationFactionName = stationFaction['Name']
        dailyPlans.setCurrentSystem(systemName)
        dailyPlans.setCurrentStation(station)
        dailyPlans.setCurrentStationFaction(stationFactionName)
        GlobalDictionaries.add_system_and_address(systemName, systemAddress)
        logger.info(f"Docked: Setting system={systemName}, station={station}, stationFaction={stationFaction}.")
        GlobalDictionaries.clear_target_dictionary()
    elif event == 'Undocked':
        dailyPlans.setCurrentStation(None)
        dailyPlans.setCurrentStationFaction(None)
        logger.info("Undocked: Setting station & stationFaction to none.")
    elif event == 'Location':
        # Location while not docked (docked case handled above).
        systemName = entry['StarSystem']
        systemAddress = str(entry['SystemAddress'])
        dailyPlans.setCurrentSystem(systemName)
        dailyPlans.setCurrentStation(None)
        dailyPlans.setCurrentStationFaction(None)
        GlobalDictionaries.add_system_and_address(systemName, systemAddress)
        logger.info(f"Other location: Setting system={systemName}, station=None, stationFaction=None.")
    elif event == 'MissionCompleted':  # get mission influence value
        dailyPlans.checkMissionSuccess(entry)
        logger.info("Mission completed.")
    elif (event == 'SellExplorationData') or (event == 'MultiSellExplorationData'):  # get carto data value
        dailyPlans.checkCartography(entry)
        logger.info("Sell Exploration Data.")
    elif event == 'RedeemVoucher' and entry['Type'] == 'bounty':  # bounties collected
        dailyPlans.checkBounty(entry)
        logger.info("Redeem Bounty.")
    elif event == 'MarketSell':  # Trade Profit
        dailyPlans.checkTrade(entry)
        logger.info("Trade.")
    elif event == 'ShipTargeted':  # Target ship
        # Remember which faction a targeted pilot belongs to (used later when
        # judging kills); both keys are only present on detailed scans.
        if 'PilotName_Localised' in entry and 'Faction' in entry:
            pilotName = entry['PilotName_Localised']
            pilotFaction = entry['Faction']
            logger.info(f"Targeted: {pilotName} from {pilotFaction}")
            GlobalDictionaries.add_target_faction(pilotName, pilotFaction)
    elif event == 'CommitCrime' and entry['CrimeType'] == 'murder':  # Clean Murder
        dailyPlans.checkMurder(entry)
    elif event == 'FSDJump' or event == 'CarrierJump':  # get factions at jump
        #
        # Update system stuff
        #
        systemName = entry['StarSystem']
        systemAddress = str(entry['SystemAddress'])
        dailyPlans.setCurrentSystem(systemName)
        dailyPlans.setCurrentStation(None)
        dailyPlans.setCurrentStationFaction(None)
        GlobalDictionaries.add_system_and_address(systemName, systemAddress)
        logger.info(f"{event}: Setting system={systemName}, station=None, stationFaction=None.")
        GlobalDictionaries.clear_target_dictionary()
        # FIXME: Not sure we'd need list of local faction names
        # FIXME: Having a list of faction states, however would be useful for
        # boom/investment bonuses, detecting war/civil war/exotic states
        #
        # Update faction stuff
        #
        # this.FactionNames = []
        # this.FactionStates = {'Factions': []}
        # z = 0
        # for i in entry['Factions']:
        #     if i['Name'] == "Pilots' Federation Local Branch":
        #         continue
        #
        #     this.FactionNames.append(i['Name'])
        #     this.FactionStates['Factions'].append(
        #         {'Faction': i['Name'], 'Happiness': i['Happiness_Localised'], 'States': []})
        #
        #     try:
        #         for x in i['ActiveStates']:
        #             this.FactionStates['Factions'][z]['States'].append({'State': x['State']})
        #     except KeyError:
        #         this.FactionStates['Factions'][z]['States'].append({'State': 'None'})
        #     z += 1
0cb168472c1c55ed0c742b217667a02bb1fbb4f1 | 6,832 | py | Python | conda_build/jinja_context.py | mbargull/conda-build | ebc56f48196774301863fecbe98a32a7ded6eb7e | [
"BSD-3-Clause"
] | null | null | null | conda_build/jinja_context.py | mbargull/conda-build | ebc56f48196774301863fecbe98a32a7ded6eb7e | [
"BSD-3-Clause"
] | null | null | null | conda_build/jinja_context.py | mbargull/conda-build | ebc56f48196774301863fecbe98a32a7ded6eb7e | [
"BSD-3-Clause"
] | null | null | null | '''
Created on Jan 16, 2014
@author: sean
'''
from __future__ import absolute_import, division, print_function
from functools import partial
import json
import logging
import os
import sys
import jinja2
from .conda_interface import PY3
from .environ import get_dict as get_environ
from .metadata import select_lines, ns_cfg
log = logging.getLogger(__file__)
class UndefinedNeverFail(jinja2.Undefined):
    """
    A class for Undefined jinja variables.
    This is even less strict than the default jinja2.Undefined class,
    because it permits things like {{ MY_UNDEFINED_VAR[:2] }} and
    {{ MY_UNDEFINED_VAR|int }}. This can mask lots of errors in jinja templates, so it
    should only be used for a first-pass parse, when you plan on running a 'strict'
    second pass later.
    """
    # Class-level registry of undefined variable names encountered while parsing.
    all_undefined_names = []
    # Every arithmetic/comparison/indexing/call dunder yields another
    # UndefinedNeverFail carrying the same hint/obj/name/exc, so any expression
    # over an undefined variable stays silently "undefined" instead of raising.
    __add__ = __radd__ = __mul__ = __rmul__ = __div__ = __rdiv__ = \
        __truediv__ = __rtruediv__ = __floordiv__ = __rfloordiv__ = \
        __mod__ = __rmod__ = __pos__ = __neg__ = __call__ = \
        __getitem__ = __lt__ = __le__ = __gt__ = __ge__ = \
        __complex__ = __pow__ = __rpow__ = \
        lambda self, *args, **kwargs: UndefinedNeverFail(hint=self._undefined_hint,
                                                         obj=self._undefined_obj,
                                                         name=self._undefined_name,
                                                         exc=self._undefined_exception)
    # String conversions collapse to the empty string; numeric casts to 0/0.0.
    __str__ = __repr__ = \
        lambda *args, **kwargs: u''
    __int__ = lambda _: 0
    __float__ = lambda _: 0.0
class FilteredLoader(jinja2.BaseLoader):
    """
    A pass-through for the given loader, except that the loaded source is
    filtered according to any metadata selectors in the source text.
    """
    # NOTE(review): wraps another loader and applies select_lines() to its
    # get_source() output (the full implementation appears later in this file).
def context_processor(initial_metadata, recipe_dir, config, permit_undefined_jinja):
    """
    Return a dictionary to use as context for jinja templates.
    initial_metadata: Augment the context with values from this MetaData object.
                      Used to bootstrap metadata contents via multiple parsing passes.
    """
    # Base context: conda-build's environment dict for this metadata/config.
    ctx = get_environ(config=config, m=initial_metadata)
    # os.environ overlaid with the conda-build environment, exposed as 'environ'.
    environ = dict(os.environ)
    environ.update(get_environ(config=config, m=initial_metadata))
    # Pre-bind config/recipe args so templates can call the loaders with no args.
    ctx.update(
        load_setup_py_data=partial(load_setup_py_data, config=config, recipe_dir=recipe_dir,
                                   permit_undefined_jinja=permit_undefined_jinja),
        # maintain old alias for backwards compatibility:
        load_setuptools=partial(load_setuptools, config=config, recipe_dir=recipe_dir,
                                permit_undefined_jinja=permit_undefined_jinja),
        load_npm=load_npm,
        environ=environ)
    return ctx
| 37.745856 | 100 | 0.650615 | '''
Created on Jan 16, 2014
@author: sean
'''
from __future__ import absolute_import, division, print_function
from functools import partial
import json
import logging
import os
import sys
import jinja2
from .conda_interface import PY3
from .environ import get_dict as get_environ
from .metadata import select_lines, ns_cfg
log = logging.getLogger(__file__)
class UndefinedNeverFail(jinja2.Undefined):
    """
    A class for Undefined jinja variables.
    This is even less strict than the default jinja2.Undefined class,
    because it permits things like {{ MY_UNDEFINED_VAR[:2] }} and
    {{ MY_UNDEFINED_VAR|int }}. This can mask lots of errors in jinja templates, so it
    should only be used for a first-pass parse, when you plan on running a 'strict'
    second pass later.
    """
    # Class-level registry: every undefined name seen is appended in __init__.
    all_undefined_names = []
    def __init__(self, hint=None, obj=jinja2.runtime.missing, name=None,
                 exc=jinja2.exceptions.UndefinedError):
        # Record the name so callers can inspect which variables were missing
        # after a first-pass parse.
        UndefinedNeverFail.all_undefined_names.append(name)
        jinja2.Undefined.__init__(self, hint, obj, name, exc)
    # Every arithmetic/comparison/indexing/call dunder yields another
    # UndefinedNeverFail carrying the same hint/obj/name/exc, so any expression
    # over an undefined variable stays silently "undefined" instead of raising.
    __add__ = __radd__ = __mul__ = __rmul__ = __div__ = __rdiv__ = \
        __truediv__ = __rtruediv__ = __floordiv__ = __rfloordiv__ = \
        __mod__ = __rmod__ = __pos__ = __neg__ = __call__ = \
        __getitem__ = __lt__ = __le__ = __gt__ = __ge__ = \
        __complex__ = __pow__ = __rpow__ = \
        lambda self, *args, **kwargs: UndefinedNeverFail(hint=self._undefined_hint,
                                                         obj=self._undefined_obj,
                                                         name=self._undefined_name,
                                                         exc=self._undefined_exception)
    # String conversions collapse to the empty string; numeric casts to 0/0.0.
    __str__ = __repr__ = \
        lambda *args, **kwargs: u''
    __int__ = lambda _: 0
    __float__ = lambda _: 0.0
    def __getattr__(self, k):
        # Attribute access on an undefined also stays undefined; the dotted
        # path is accumulated in the name for better diagnostics.
        try:
            return object.__getattr__(self, k)
        except AttributeError:
            return UndefinedNeverFail(hint=self._undefined_hint,
                                      obj=self._undefined_obj,
                                      name=self._undefined_name + '.' + k,
                                      exc=self._undefined_exception)
class FilteredLoader(jinja2.BaseLoader):
    """
    A pass-through for the given loader, except that the loaded source is
    filtered according to any metadata selectors in the source text.
    """
    def __init__(self, unfiltered_loader, config):
        self.config = config
        self._unfiltered_loader = unfiltered_loader
        # Template listing is delegated straight to the wrapped loader.
        self.list_templates = unfiltered_loader.list_templates
    def get_source(self, environment, template):
        """Load the template, then drop lines whose selectors don't match."""
        source, filename, uptodate = self._unfiltered_loader.get_source(environment, template)
        filtered = select_lines(source, ns_cfg(self.config))
        return filtered, filename, uptodate
def load_setup_py_data(config, setup_file='setup.py', from_recipe_dir=False, recipe_dir=None,
                       permit_undefined_jinja=True):
    """Execute a recipe's setup.py with a stubbed-out setup() and return the
    keyword arguments it was called with.

    config: conda-build config object (work_dir is consulted).
    setup_file: path to the setup script, absolute or relative.
    from_recipe_dir/recipe_dir: resolve setup_file relative to the recipe.
    permit_undefined_jinja: when the file cannot be located yet, return {}
        instead of raising RuntimeError.
    Returns the captured dict, or None if setup() was never called.
    """
    _setuptools_data = {}

    def setup(**kw):
        # Capture everything the recipe's setup.py passes to setup().
        _setuptools_data.update(kw)

    import setuptools
    import distutils.core
    cd_to_work = False
    cwd = None
    # BUG FIX: copy sys.path instead of aliasing it.  The old code did
    # `path_backup = sys.path`, so the later "restore" assigned the very same
    # (mutated) list back and the inserted work_dir leaked into sys.path.
    path_backup = list(sys.path)
    if from_recipe_dir and recipe_dir:
        setup_file = os.path.abspath(os.path.join(recipe_dir, setup_file))
    elif os.path.exists(config.work_dir):
        cd_to_work = True
        cwd = os.getcwd()
        os.chdir(config.work_dir)
        if not os.path.isabs(setup_file):
            setup_file = os.path.join(config.work_dir, setup_file)
        # this is very important - or else if versioneer or otherwise is in the start folder,
        # things will pick up the wrong versioneer/whatever!
        sys.path.insert(0, config.work_dir)
    else:
        message = ("Did not find setup.py file in manually specified location, and source "
                   "not downloaded yet.")
        if permit_undefined_jinja:
            log.debug(message)
            return {}
        else:
            raise RuntimeError(message)
    # Patch setuptools, distutils (and numpy.distutils when importable) so the
    # recipe's setup() call lands in the capturing stub above.
    setuptools_setup = setuptools.setup
    distutils_setup = distutils.core.setup
    numpy_setup = None
    try:
        import numpy.distutils.core
        numpy_setup = numpy.distutils.core.setup
        numpy.distutils.core.setup = setup
    except ImportError:
        log.debug("Failed to import numpy for setup patch. Is numpy installed?")
    setuptools.setup = distutils.core.setup = setup
    ns = {
        '__name__': '__main__',
        '__doc__': None,
        '__file__': setup_file,
    }
    try:
        if os.path.isfile(setup_file):
            code = compile(open(setup_file).read(), setup_file, 'exec', dont_inherit=1)
            exec(code, ns, ns)
    finally:
        # BUG FIX: always undo the monkey-patches, cwd change and sys.path
        # insertion, even when the setup.py raises (previously an exception
        # left all of them in place for the rest of the process).
        distutils.core.setup = distutils_setup
        setuptools.setup = setuptools_setup
        if numpy_setup:
            numpy.distutils.core.setup = numpy_setup
        if cd_to_work:
            os.chdir(cwd)
        # remove our workdir from sys.path
        sys.path = path_backup
    return _setuptools_data if _setuptools_data else None
def load_setuptools(config, setup_file='setup.py', from_recipe_dir=False, recipe_dir=None,
                    permit_undefined_jinja=True):
    """Deprecated alias for :func:`load_setup_py_data`, kept so old recipes
    keep working.  Logs a deprecation notice and delegates unchanged."""
    # logging's warn() is a deprecated alias of warning(); use the real method.
    log.warning("Deprecation notice: the load_setuptools function has been renamed to "
                "load_setup_py_data.  load_setuptools will be removed in a future release.")
    return load_setup_py_data(config=config, setup_file=setup_file, from_recipe_dir=from_recipe_dir,
                              recipe_dir=recipe_dir, permit_undefined_jinja=permit_undefined_jinja)
def load_npm():
    """Read and parse ``package.json`` from the current working directory."""
    # json module expects bytes in Python 2 and str in Python 3.
    if PY3:
        open_kwargs = {'mode': 'r', 'encoding': 'utf-8'}
    else:
        open_kwargs = {'mode': 'rb'}
    with open('package.json', **open_kwargs) as handle:
        return json.load(handle)
def context_processor(initial_metadata, recipe_dir, config, permit_undefined_jinja):
    """
    Return a dictionary to use as context for jinja templates.
    initial_metadata: Augment the context with values from this MetaData object.
                      Used to bootstrap metadata contents via multiple parsing passes.
    """
    # Expose os.environ overlaid with the conda-build environment as 'environ'.
    environ = dict(os.environ)
    environ.update(get_environ(config=config, m=initial_metadata))
    # Pre-bind config/recipe args so templates can call the loaders with no args.
    bound_load_setup_py_data = partial(load_setup_py_data, config=config, recipe_dir=recipe_dir,
                                       permit_undefined_jinja=permit_undefined_jinja)
    # maintain old alias for backwards compatibility:
    bound_load_setuptools = partial(load_setuptools, config=config, recipe_dir=recipe_dir,
                                    permit_undefined_jinja=permit_undefined_jinja)
    ctx = get_environ(config=config, m=initial_metadata)
    ctx.update(load_setup_py_data=bound_load_setup_py_data,
               load_setuptools=bound_load_setuptools,
               load_npm=load_npm,
               environ=environ)
    return ctx
| 3,898 | 0 | 177 |
caf0c7d25879744f325117bb5c6962bdc25ccef8 | 1,383 | py | Python | publications/PrADA/experiments/income_census/train_census_no_fg_target_finetune.py | UMDataScienceLab/research | 279ee21444817903cb9ef9dc9d9583a502865336 | [
"Apache-2.0"
] | 49 | 2020-11-04T03:15:59.000Z | 2022-03-23T12:21:15.000Z | publications/PrADA/experiments/income_census/train_census_no_fg_target_finetune.py | UMDataScienceLab/research | 279ee21444817903cb9ef9dc9d9583a502865336 | [
"Apache-2.0"
] | 2 | 2021-09-12T02:36:42.000Z | 2021-11-25T13:19:58.000Z | publications/PrADA/experiments/income_census/train_census_no_fg_target_finetune.py | UMDataScienceLab/research | 279ee21444817903cb9ef9dc9d9583a502865336 | [
"Apache-2.0"
] | 11 | 2020-11-11T12:14:49.000Z | 2022-03-08T16:17:05.000Z | import argparse
from experiments.income_census.train_config import fine_tune_hyperparameters, data_hyperparameters
from experiments.income_census.train_census_no_fg_adapt_pretrain import create_no_fg_census_global_model
from experiments.income_census.train_census_utils import finetune_census
if __name__ == "__main__":
    # CLI entry point: fine-tune a pre-trained no-feature-group census model.
    parser = argparse.ArgumentParser("census_no-fg_target_fine_tune")
    # ID of the earlier pre-training run whose checkpoint will be loaded.
    parser.add_argument('--pretrain_task_id', type=str)
    args = parser.parse_args()
    pretrain_task_id = args.pretrain_task_id
    print(f"[INFO] fine-tune pre-trained model with pretrain task id : {pretrain_task_id}")
    # Directory holding the pre-trained model checkpoints.
    census_pretain_model_root_dir = data_hyperparameters['census_no-fg_pretrained_model_dir']
    init_model, census_finetune_target_model_root_dir = get_finetune_model_meta()
    task_id = finetune_census(pretrain_task_id,
                              census_pretain_model_root_dir,
                              census_finetune_target_model_root_dir,
                              fine_tune_hyperparameters,
                              data_hyperparameters,
                              init_model)
    print(f"[INFO] finetune task id:{task_id}")
| 44.612903 | 104 | 0.739696 | import argparse
from experiments.income_census.train_config import fine_tune_hyperparameters, data_hyperparameters
from experiments.income_census.train_census_no_fg_adapt_pretrain import create_no_fg_census_global_model
from experiments.income_census.train_census_utils import finetune_census
def get_finetune_model_meta():
    """Return the freshly built global model and the root directory where
    fine-tuned target models are stored."""
    target_root_dir = data_hyperparameters['census_no-fg_ft_target_model_dir']
    global_model = create_no_fg_census_global_model()
    return global_model, target_root_dir
if __name__ == "__main__":
    # CLI entry point: fine-tune a pre-trained no-feature-group census model.
    parser = argparse.ArgumentParser("census_no-fg_target_fine_tune")
    # ID of the earlier pre-training run whose checkpoint will be loaded.
    parser.add_argument('--pretrain_task_id', type=str)
    args = parser.parse_args()
    pretrain_task_id = args.pretrain_task_id
    print(f"[INFO] fine-tune pre-trained model with pretrain task id : {pretrain_task_id}")
    # Directory holding the pre-trained model checkpoints.
    census_pretain_model_root_dir = data_hyperparameters['census_no-fg_pretrained_model_dir']
    init_model, census_finetune_target_model_root_dir = get_finetune_model_meta()
    task_id = finetune_census(pretrain_task_id,
                              census_pretain_model_root_dir,
                              census_finetune_target_model_root_dir,
                              fine_tune_hyperparameters,
                              data_hyperparameters,
                              init_model)
    print(f"[INFO] finetune task id:{task_id}")
| 187 | 0 | 23 |
52002f1ba9dded1bc6812a61610c05b8c8517de5 | 1,960 | py | Python | gmid2/scripts/st_wmbmm_bw.py | junkyul/gmid2-public | 363472b8b69212dd6a9dac61d3e5d23936a5a6d2 | [
"MIT"
] | null | null | null | gmid2/scripts/st_wmbmm_bw.py | junkyul/gmid2-public | 363472b8b69212dd6a9dac61d3e5d23936a5a6d2 | [
"MIT"
] | null | null | null | gmid2/scripts/st_wmbmm_bw.py | junkyul/gmid2-public | 363472b8b69212dd6a9dac61d3e5d23936a5a6d2 | [
"MIT"
] | 1 | 2020-12-28T20:06:37.000Z | 2020-12-28T20:06:37.000Z | PRJ_PATH = "/home/junkyul/conda/gmid2"
import sys
sys.path.append(PRJ_PATH)
import os
import time
from pprint import PrettyPrinter
pp = PrettyPrinter(indent=4)
from gmid2.global_constants import *
from gmid2.basics.uai_files import read_limid, read_svo
from gmid2.basics.directed_network import DecisionNetwork
from gmid2.basics.graphical_model import GraphicalModel
from gmid2.inference.submodel import submodel_tree_decomposition
from gmid2.inference.st_wmbmm_bw import StWMBMMBw
if __name__ == "__main__":
    # Usage: st_wmbmm_bw.py <problem.uai> <ibound>; with no arguments, fall
    # back to a bundled synthetic MDP instance and ibound=1.
    if len(sys.argv) > 1:
        file_path = sys.argv[1]
        ibound = int(sys.argv[2])
    else:
        TEST_PATH = os.path.join(BENCHMARK_DIR, "synthetic")
        f = "mdp1-4_2_2_5.uai"
        file_path = os.path.join(TEST_PATH, f)
        ibound = 1
    run(file_path, ibound)
| 30.625 | 103 | 0.672959 | PRJ_PATH = "/home/junkyul/conda/gmid2"
import sys
sys.path.append(PRJ_PATH)
import os
import time
from pprint import PrettyPrinter
pp = PrettyPrinter(indent=4)
from gmid2.global_constants import *
from gmid2.basics.uai_files import read_limid, read_svo
from gmid2.basics.directed_network import DecisionNetwork
from gmid2.basics.graphical_model import GraphicalModel
from gmid2.inference.submodel import submodel_tree_decomposition
from gmid2.inference.st_wmbmm_bw import StWMBMMBw
def run(file_path, ibound):
    """Run submodel-tree WMB-MM (backward) on one .uai influence diagram.

    file_path: path to the ``.uai`` problem file.
    ibound:    i-bound passed to StWMBMMBw (mini-bucket clustering size).
    Prints per-phase timings and returns the computed bound (printed as "ub").
    """
    print("{}\t\t{}".format(StWMBMMBw.__name__, ibound))
    # Problem name = basename without the .uai extension (used only for logs).
    f = file_path.split("/")[-1].replace(".uai", "")
    print("\nSTART {}\t\t{}".format(f, time.ctime(time.time())))
    file_name = file_path.replace(".uai", "")
    file_info = read_limid(file_name, skip_table=False)
    gm = GraphicalModel()
    gm.build(file_info)
    gm.convert_prob_to_log()  # conversion is done here!
    gm.convert_util_to_alpha(1.0)
    dn = DecisionNetwork()
    dn.build(file_info)
    # Phase 1: decompose the decision network into a submodel tree (timed).
    t0 = time.time()
    st = submodel_tree_decomposition(dn)
    print("st\t\t{}".format(time.time()-t0))
    print("roots\t\t{}".format(len(dn.value_nids) + 1))
    st_mp = StWMBMMBw(gm, st, i_bound=ibound, alpha=1.0)  # __init__ for message passing and nx.DiGraph
    # Phase 2: build the message graph (timed separately from propagation).
    t0 = time.time()
    st_mp.build_message_graph()
    print("build\t\t{}".format(time.time() - t0))
    st_mp.schedule()
    st_mp.init_propagate()
    # Phase 3: propagate messages and read off the bound.
    t0 = time.time()
    st_mp.propagate_iter()
    bound = st_mp.bounds()
    print("prop\t\t{}".format(time.time() - t0))
    print("ub\t\t{}".format(bound))
    print("END {}\t\t{}".format(f, time.ctime(time.time())))
    return bound
if __name__ == "__main__":
    # Usage: st_wmbmm_bw.py <problem.uai> <ibound>; with no arguments, fall
    # back to a bundled synthetic MDP instance and ibound=1.
    if len(sys.argv) > 1:
        file_path = sys.argv[1]
        ibound = int(sys.argv[2])
    else:
        TEST_PATH = os.path.join(BENCHMARK_DIR, "synthetic")
        f = "mdp1-4_2_2_5.uai"
        file_path = os.path.join(TEST_PATH, f)
        ibound = 1
    run(file_path, ibound)
| 1,135 | 0 | 23 |
ce9dcc518fa49600c1232fcccf8ba47b2963da17 | 3,150 | py | Python | django_project/blog/views.py | nikki259/Blog-Made-Easy | cebc418101fc9e9638fb05d0d59895abf51ea695 | [
"bzip2-1.0.6"
] | null | null | null | django_project/blog/views.py | nikki259/Blog-Made-Easy | cebc418101fc9e9638fb05d0d59895abf51ea695 | [
"bzip2-1.0.6"
] | null | null | null | django_project/blog/views.py | nikki259/Blog-Made-Easy | cebc418101fc9e9638fb05d0d59895abf51ea695 | [
"bzip2-1.0.6"
] | null | null | null | from django.shortcuts import render, get_object_or_404
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.contrib.auth.models import User
from django.views.generic import (
ListView,
DetailView,
CreateView,
UpdateView,
DeleteView
)
from .models import Post
from gensim.summarization import keywords
from sumy.parsers.plaintext import PlaintextParser
from sumy.nlp.tokenizers import Tokenizer
from sumy.summarizers.lex_rank import LexRankSummarizer
| 28.636364 | 157 | 0.681905 | from django.shortcuts import render, get_object_or_404
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.contrib.auth.models import User
from django.views.generic import (
ListView,
DetailView,
CreateView,
UpdateView,
DeleteView
)
from .models import Post
from gensim.summarization import keywords
from sumy.parsers.plaintext import PlaintextParser
from sumy.nlp.tokenizers import Tokenizer
from sumy.summarizers.lex_rank import LexRankSummarizer
def home(request):
    """Render the blog home page with every post."""
    return render(request, 'blog/home.html', {'mlp': Post.objects.all()})
class PostListView(ListView):
    """Paginated list of all posts, newest first (home page view)."""
    model = Post
    template_name = 'blog/home.html' # <app>/<model>_<viewtype>.html
    # Template refers to the post list as 'mlp'.
    context_object_name = 'mlp'
    ordering = ['-date_posted']
    paginate_by = 4
class UserPostListView(ListView):
    """Paginated list of one author's posts, newest first."""
    model = Post
    template_name = 'blog/user_posts.html' # <app>/<model>_<viewtype>.html
    context_object_name = 'posts'
    paginate_by = 2
    def get_queryset(self):
        """Posts by the user named in the URL; 404 when no such user exists."""
        user = get_object_or_404(User, username=self.kwargs.get('username'))
        return Post.objects.filter(author=user).order_by('-date_posted')
class PostDetailView(DetailView):
    """Render a single Post."""
    model = Post
class PostCreateView(LoginRequiredMixin, CreateView):
    """Create a post; auto-fills author, text summary and keywords on save."""
    model = Post
    fields = ['title', 'content']
    def form_valid(self, form):
        """Stamp the logged-in user as author and derive text_summary /
        keywords from the post content before saving."""
        form.instance.author = self.request.user
        content = form.instance.content
        # print('\nKeywords:')
        # Extractive summary via sumy's LexRank: keep the 2 top sentences.
        # NOTE(review): sentences are concatenated without a separator below;
        # confirm that is the intended display format.
        parser = PlaintextParser.from_string(content, Tokenizer("english"))
        summarizer = LexRankSummarizer()
        summary = summarizer(parser.document, 2)
        contextsummary = ''
        for sentence in summary:
            contextsummary += str(sentence)
        form.instance.text_summary = contextsummary
        # gensim keyword extraction; ratio=0.1 caps how much text is kept.
        form.instance.keywords = keywords(content, ratio=0.1)
        return super().form_valid(form)
class PostUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
    """Edit a post; only its author passes the access test."""
    model = Post
    fields = ['title', 'content']
    def form_valid(self, form):
        """Re-stamp the logged-in user as author before saving."""
        form.instance.author = self.request.user
        return super().form_valid(form)
    def test_func(self):
        """Grant access only when the requester authored the post."""
        return self.get_object().author == self.request.user
class PostDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
    """Delete a post (author only); redirects to the home page afterwards."""
    model = Post
    success_url = '/'
    def test_func(self):
        """Grant access only when the requester authored the post."""
        return self.get_object().author == self.request.user
def about(request):
    """Render the static about page."""
    context = {'title': 'About'}
    return render(request, 'blog/about.html', context)
def contact(request):
    """Render the static contact page."""
    context = {'title': 'Contact'}
    return render(request, 'blog/contact.html', context)
def search_post(request):
    """Search posts whose keywords or title contain the submitted query.

    POST: reads ``search_query`` from the form and renders the matches.
    Any other method renders an empty result set.
    """
    if request.method == 'POST':
        data = request.POST.get('search_query')
        context = {
            'mlp': Post.objects.filter(keywords__icontains=data).order_by('date_posted') | Post.objects.filter(title__icontains=data).order_by('date_posted')
        }
        return render(request, 'blog/search_results.html', context)
    # BUG FIX: the view previously fell through and returned None for non-POST
    # requests, which makes Django raise (a view must return an HttpResponse).
    return render(request, 'blog/search_results.html', {'mlp': Post.objects.none()})
| 1,621 | 781 | 230 |
808cbc7d31f0d2080f943c3d421209db59ecc136 | 996 | py | Python | get_coords.py | mdiscenza/gallery_hop_2 | b4cb35db336d7fa1d49d1c392610743ee2da6ff4 | [
"MIT"
] | null | null | null | get_coords.py | mdiscenza/gallery_hop_2 | b4cb35db336d7fa1d49d1c392610743ee2da6ff4 | [
"MIT"
] | null | null | null | get_coords.py | mdiscenza/gallery_hop_2 | b4cb35db336d7fa1d49d1c392610743ee2da6ff4 | [
"MIT"
] | null | null | null | import mysql.connector
from geopy.geocoders import Nominatim
from instagram.client import InstagramAPI
import json
import urllib2
import flickr
# SECURITY NOTE(review): API keys and database credentials are hard-coded
# below; move them to environment variables / config and rotate them.
INSTAGRAM_CLIENT_ID = '5d56eb1e594c420997c394d1dca7fcea'
INSTAGRAM_CLIENT_SECRET = 'd0d78baa1e4e4f4b8af9fd9588379968'
api = InstagramAPI(client_id=INSTAGRAM_CLIENT_ID,client_secret=INSTAGRAM_CLIENT_SECRET)
cnx = mysql.connector.connect(user='galleryhop', password='galleryhop', host='galleryhop2.crflf9mu2uwj.us-east-1.rds.amazonaws.com',database='galleryhop2')
cursor = cnx.cursor()
cursor.execute("""select * from galleries""")
# Geocode each gallery row (address presumably in column 5 -- confirm against
# the galleries schema) into (latitude, longitude) pairs.
geolocator = Nominatim()
coords = []
for row in cursor:
    try:
        location = geolocator.geocode(row[5]+' NYC')
        coords.append((location.latitude,location.longitude))
    except:
        # NOTE(review): bare except hides geocoding failures; narrow it.
        print 'error'
print coords
# For each geocoded gallery, fetch up to 5 nearby Flickr photos and print
# their static image URLs.
for i in coords:
    photos = flickr.photos_search(lat=i[0],lon=i[1],per_page=5,radius=0.25)
    for p in photos:
        # NOTE(review): 'str' shadows the builtin; rename when editing.
        str = 'https://farm'+p.farm+'.staticflickr.com/'+p.server+'/'+p.id+'_'+p.secret+'.jpg'
        print str
| 26.918919 | 155 | 0.768072 | import mysql.connector
from geopy.geocoders import Nominatim
from instagram.client import InstagramAPI
import json
import urllib2
import flickr
# SECURITY NOTE(review): API keys and database credentials are hard-coded
# below; move them to environment variables / config and rotate them.
INSTAGRAM_CLIENT_ID = '5d56eb1e594c420997c394d1dca7fcea'
INSTAGRAM_CLIENT_SECRET = 'd0d78baa1e4e4f4b8af9fd9588379968'
api = InstagramAPI(client_id=INSTAGRAM_CLIENT_ID,client_secret=INSTAGRAM_CLIENT_SECRET)
cnx = mysql.connector.connect(user='galleryhop', password='galleryhop', host='galleryhop2.crflf9mu2uwj.us-east-1.rds.amazonaws.com',database='galleryhop2')
cursor = cnx.cursor()
cursor.execute("""select * from galleries""")
# Geocode each gallery row (address presumably in column 5 -- confirm against
# the galleries schema) into (latitude, longitude) pairs.
geolocator = Nominatim()
coords = []
for row in cursor:
    try:
        location = geolocator.geocode(row[5]+' NYC')
        coords.append((location.latitude,location.longitude))
    except:
        # NOTE(review): bare except hides geocoding failures; narrow it.
        print 'error'
print coords
# For each geocoded gallery, fetch up to 5 nearby Flickr photos and print
# their static image URLs.
for i in coords:
    photos = flickr.photos_search(lat=i[0],lon=i[1],per_page=5,radius=0.25)
    for p in photos:
        # NOTE(review): 'str' shadows the builtin; rename when editing.
        str = 'https://farm'+p.farm+'.staticflickr.com/'+p.server+'/'+p.id+'_'+p.secret+'.jpg'
        print str
| 0 | 0 | 0 |
74265566e707c2cb9dfce9f512df8b2fd9a4d504 | 1,481 | py | Python | calamari_ocr/ocr/backends/ctc_decoder/fuzzy_ctc_decoder.py | Nesbi/calamari | 25eb872118d15d0740f702ef42ef6f785e1a5858 | [
"Apache-2.0"
] | null | null | null | calamari_ocr/ocr/backends/ctc_decoder/fuzzy_ctc_decoder.py | Nesbi/calamari | 25eb872118d15d0740f702ef42ef6f785e1a5858 | [
"Apache-2.0"
] | null | null | null | calamari_ocr/ocr/backends/ctc_decoder/fuzzy_ctc_decoder.py | Nesbi/calamari | 25eb872118d15d0740f702ef42ef6f785e1a5858 | [
"Apache-2.0"
] | null | null | null | from calamari_ocr.ocr.backends.ctc_decoder.ctc_decoder import CTCDecoder
import numpy as np
if __name__ == "__main__":
    # Smoke test: decode a tiny 5-frame, 3-label probability matrix.
    d = FuzzyCTCDecoder()
    r = d.decode(np.array(np.transpose([[0.8, 0, 0.7, 0.2, 0.1], [0.1, 0.4, 0.2, 0.7, 0.8], [0.1, 0.6, 0.1, 0.1, 0.1]])))
    print(r)
| 37.025 | 121 | 0.573261 | from calamari_ocr.ocr.backends.ctc_decoder.ctc_decoder import CTCDecoder
import numpy as np
class FuzzyCTCDecoder(CTCDecoder):
    """CTC decoder that treats frames whose blank probability clears a
    threshold as character boundaries; each remaining run of consecutive
    frames becomes one character (the label with the highest per-segment
    peak probability)."""
    def __init__(self, blank=0, blank_threshold=0.7, alternatives_threshold=0.0001):
        # blank: column index of the CTC blank label in the prob. matrix.
        # blank_threshold: frames with P(blank) >= this are treated as gaps.
        # alternatives_threshold: forwarded to find_alternatives().
        super().__init__()
        self._blank = blank
        self._blank_threshold = blank_threshold
        self._alternatives_threshold = alternatives_threshold
    def decode(self, probabilities):
        """Decode a (time, labels) probability matrix into labelled segments
        and hand them to find_alternatives() for final scoring."""
        blanks = probabilities[:, self._blank] >= self._blank_threshold
        sentence = []
        # where blank is True 'character changes' are expected
        for idx in range(len(blanks)):
            if not blanks[idx]:
                if len(sentence) == 0:
                    sentence.append((-1, idx, idx + 1))
                else:
                    _, start, end = sentence[-1]
                    if end == idx:
                        # Frame adjoins the previous segment: extend it.
                        del sentence[-1]
                        sentence.append((-1, start, idx + 1))
                    else:
                        # Blank gap since the last segment: start a new char.
                        sentence.append((-1, idx, idx + 1))
        # get the best char in each range
        sentence = [(np.argmax(np.max(probabilities[start:end], axis=0)), start, end) for _, start, end in sentence]
        return self.find_alternatives(probabilities, sentence, self._alternatives_threshold)
if __name__ == "__main__":
    # Smoke test: decode a tiny 5-frame, 3-label probability matrix.
    d = FuzzyCTCDecoder()
    r = d.decode(np.array(np.transpose([[0.8, 0, 0.7, 0.2, 0.1], [0.1, 0.4, 0.2, 0.7, 0.8], [0.1, 0.6, 0.1, 0.1, 0.1]])))
    print(r)
| 1,108 | 13 | 76 |
3f725fec6d54dcc16fa843c1ef69ca10bddc0b4e | 521 | py | Python | itembase/core/forms/item_forms.py | wedwardbeck/ibase | 5647fa5aff6c1bdc99b6c93884ff0d5aef17d85b | [
"MIT"
] | null | null | null | itembase/core/forms/item_forms.py | wedwardbeck/ibase | 5647fa5aff6c1bdc99b6c93884ff0d5aef17d85b | [
"MIT"
] | 9 | 2020-01-17T14:16:08.000Z | 2020-02-18T15:07:40.000Z | itembase/core/forms/item_forms.py | wedwardbeck/ibase | 5647fa5aff6c1bdc99b6c93884ff0d5aef17d85b | [
"MIT"
] | null | null | null | from django.forms import ModelForm
from itembase.core.models import UnitOfMeasure, VendorItem
| 19.296296 | 58 | 0.512476 | from django.forms import ModelForm
from itembase.core.models import UnitOfMeasure, VendorItem
class UOMForm(ModelForm):
    """Create/edit form for a UnitOfMeasure."""

    class Meta:
        model = UnitOfMeasure
        fields = ["name", "abbreviation", "description"]
class VendorItemForm(ModelForm):
    """Create/edit form for a VendorItem."""

    class Meta:
        model = VendorItem
        fields = [
            "item_number",
            "description",
            "vendor",
            "uom",
            "pack_count",
            "status",
        ]
| 0 | 378 | 46 |
adc9bc1cfe8fa3e2cf3c34be62916759313042a0 | 1,355 | py | Python | test/utils/test_json.py | HansBug/pji | 449d171cea0c03f4c302da886988f36f70e34ee6 | [
"Apache-2.0"
] | null | null | null | test/utils/test_json.py | HansBug/pji | 449d171cea0c03f4c302da886988f36f70e34ee6 | [
"Apache-2.0"
] | null | null | null | test/utils/test_json.py | HansBug/pji | 449d171cea0c03f4c302da886988f36f70e34ee6 | [
"Apache-2.0"
] | null | null | null | import io
import json
import tempfile
import pytest
import yaml
from yaml.parser import ParserError as YamlParserError
from pji.utils import auto_load_json, JsonLoadError
@pytest.mark.unittest
| 33.04878 | 75 | 0.599262 | import io
import json
import tempfile
import pytest
import yaml
from yaml.parser import ParserError as YamlParserError
from pji.utils import auto_load_json, JsonLoadError
@pytest.mark.unittest
class TestUtilsJson:
    """Tests for pji.utils.auto_load_json (accepts JSON or YAML streams)."""
    def test_auto_load_json(self):
        # Both JSON and YAML byte streams should parse to the same dict.
        with io.BytesIO(json.dumps({'a': 233, 'b': -1}).encode()) as f:
            assert auto_load_json(f) == {'a': 233, 'b': -1}
        with io.BytesIO(yaml.safe_dump({'a': 233, 'b': -1}).encode()) as f:
            assert auto_load_json(f) == {'a': 233, 'b': -1}
    def test_auto_load_json_invalid(self):
        # Unparseable input raises JsonLoadError wrapping the YAML parse error.
        with pytest.raises(JsonLoadError) as ei:
            with io.BytesIO(b'[this]is invalid') as f:
                auto_load_json(f)
        err = ei.value
        assert isinstance(err.exception, YamlParserError)
    def test_auto_load_json_with_real_file(self):
        # Same behaviour when reading from an on-disk file object.
        with tempfile.NamedTemporaryFile('w') as tmpfile:
            json.dump({'a': 233, 'b': -1}, tmpfile)
            tmpfile.flush()
            with open(tmpfile.name, 'rb') as file:
                assert auto_load_json(file) == {'a': 233, 'b': -1}
        with tempfile.NamedTemporaryFile('w') as tmpfile:
            yaml.safe_dump({'a': 233, 'b': -1}, tmpfile)
            tmpfile.flush()
            with open(tmpfile.name, 'rb') as file:
                assert auto_load_json(file) == {'a': 233, 'b': -1}
| 1,057 | -1 | 102 |
fcdf4ce6300238e5df22a7fa370e993820dbc6aa | 96 | py | Python | jsonauthenticator/__init__.py | pardo-bsso/jupyterhub-jsonauth | 927177997201d9c84193d429335c1c862d3d5925 | [
"BSD-3-Clause"
] | null | null | null | jsonauthenticator/__init__.py | pardo-bsso/jupyterhub-jsonauth | 927177997201d9c84193d429335c1c862d3d5925 | [
"BSD-3-Clause"
] | null | null | null | jsonauthenticator/__init__.py | pardo-bsso/jupyterhub-jsonauth | 927177997201d9c84193d429335c1c862d3d5925 | [
"BSD-3-Clause"
] | 1 | 2020-04-07T13:22:13.000Z | 2020-04-07T13:22:13.000Z | from jsonauthenticator.jsonauthenticator import JsonAuthenticator
# BUG FIX: __all__ entries must be strings; listing the class object makes
# `from jsonauthenticator import *` raise a TypeError.
__all__ = ["JsonAuthenticator"]
| 32 | 65 | 0.885417 | from jsonauthenticator.jsonauthenticator import JsonAuthenticator
# BUG FIX: __all__ entries must be strings; listing the class object makes
# `from jsonauthenticator import *` raise a TypeError.
__all__ = ["JsonAuthenticator"]
| 0 | 0 | 0 |
315e21a14e703da874d1262f29bb61d3a86a96d1 | 5,347 | py | Python | shortcode.py | nprapps/idp-georgia | 316eba6195b7f410567a7e11eb4811ff7cba54cc | [
"Unlicense"
] | 1 | 2017-04-15T01:48:27.000Z | 2017-04-15T01:48:27.000Z | shortcode.py | nprapps/idp-georgia | 316eba6195b7f410567a7e11eb4811ff7cba54cc | [
"Unlicense"
] | 153 | 2017-04-14T18:06:26.000Z | 2017-06-02T13:08:09.000Z | shortcode.py | nprapps/idp-georgia | 316eba6195b7f410567a7e11eb4811ff7cba54cc | [
"Unlicense"
] | 1 | 2021-02-18T11:15:52.000Z | 2021-02-18T11:15:52.000Z | #!/usr/bin/env python
# _*_ coding:utf-8 _*_
import app_config
import datetime
import logging
import requests
import shortcodes
from render_utils import make_context
from PIL import Image
from StringIO import StringIO
from functools import partial
from jinja2 import Environment, FileSystemLoader
from pymongo import MongoClient
# Remote image URLs are built as "<base image url>/<image id>".
IMAGE_URL_TEMPLATE = '%s/%s'
# Shortcode tags that resolve to a single image (ratio/url context added).
IMAGE_TYPES = ['image', 'asset-image']
# Shortcode tags that resolve to a multi-image collage.
COLLAGE_TYPES = ['collage2']
# Per-tag default template context; shortcode keyword args override these.
SHORTCODE_DICT = {
    'image': {
        'caption': '',
        'width': '100%',
        'format': 'centered'
    },
    'collage2': {
        'caption': '',
        'width': '100%',
        'format': 'centered'
    },
    'asset-image': {
        'caption': '',
        'width': '100%',
        'format': 'centered'
    },
    'idpgraphic': {},
    'video': {},
}
# One Jinja template per tag lives at templates/shortcodes/<tag>.html.
env = Environment(loader=FileSystemLoader('templates/shortcodes'))
logging.basicConfig(format=app_config.LOG_FORMAT)
logger = logging.getLogger(__name__)
logger.setLevel(app_config.LOG_LEVEL)
def _process_id(url, tag):
"""
Extract an ID from a url (or just return the URL).
"""
if tag == 'tweet':
parts = url.split('/')
return parts[5]
else:
return url
def _get_extra_context(id, tag):
    """Collect tag-specific template context (ratio/url for image tags)."""
    if tag in IMAGE_TYPES:
        return dict(_get_image_context(id, tag))
    return dict()
def _get_collage_extra_context(pargs, tag):
    """Collect collage-specific context (per-image ratios and urls)."""
    if tag in COLLAGE_TYPES:
        return dict(_get_collage_context(pargs))
    return dict()
def _handler(context, content, pargs, kwargs, tag, defaults):
    """
    Default handler all other handlers inherit from.

    Builds the template context for one shortcode occurrence (positional
    args -> image/collage context, then tag defaults, then keyword args)
    and renders templates/shortcodes/<tag>.html with it.
    """
    if pargs:
        if tag in COLLAGE_TYPES:
            # Collages take a list of image ids as positional args.
            template_context = dict()
            extra_context = _get_collage_extra_context(pargs, tag)
            template_context.update(extra_context)
        else:
            # Single-asset tags: first positional arg is the url/id.
            id = _process_id(pargs[0], tag)
            template_context = dict(url=pargs[0],
                                    id=id)
            extra_context = _get_extra_context(id, tag)
            template_context.update(extra_context)
    else:
        template_context = dict()
    if tag == 'idpgraphic':
        # Graphics pull in the shared app context from render_utils.make_context().
        template_context.update(make_context())
    # Later updates win: tag defaults first, then explicit keyword arguments.
    template_context.update(defaults)
    template_context.update(kwargs)
    template = env.get_template('%s.html' % tag)
    output = template.render(**template_context)
    return output
"""
Register handlers
"""
parser = shortcodes.Parser()
for tag, defaults in SHORTCODE_DICT.items():
tag_handler = partial(_handler, tag=tag, defaults=defaults)
parser.register(tag_handler, tag)
def process_shortcode(tag):
    """
    Generates html from shortcode.

    `tag` is a parsed markup node (anything exposing get_text()).
    Returns rendered HTML, or '' when rendering fails (errors are logged).
    """
    # Replace unicode <br>
    # Replace rquote to normal quotation marks
    text = tag.get_text()
    text = text.replace(u'\xa0', u' ')
    text = text.replace(u'\u201D', u'"')
    text = text.replace(u'\u201C', u'"')
    try:
        return parser.parse(text)
    except shortcodes.RenderingError as e:
        logger.error('Could not render short code in: "%s"' % text)
        logger.error('cause: %s' % e.__cause__)
        return ''
def _get_image_context(id, tag):
    """
    Download image and get/cache aspect ratio.

    Returns {'ratio': height/width*100 rounded to 2dp} for local assets,
    or {'ratio': ..., 'url': ...} for remote images (ratio cached in MongoDB).
    """
    if (tag == 'asset-image'):
        # Local asset: measure the file under www/ directly; nothing is cached.
        image = Image.open('www/%s' % id)
        ratio = float(image.height) / float(image.width)
        ratio = round(ratio * 100, 2)
        return dict(ratio=ratio)
    url = IMAGE_URL_TEMPLATE % (app_config.IMAGE_URL, id)
    client = MongoClient(app_config.MONGODB_URL)
    database = client['idp-georgia']
    collection = database.images
    result = collection.find_one({'_id': id})
    if not result:
        # Cache miss: download the image once and persist its aspect ratio.
        logger.info('image %s: uncached, downloading %s' % (id, url))
        response = requests.get(url)
        image = Image.open(StringIO(response.content))
        ratio = float(image.height) / float(image.width)
        collection.insert({
            '_id': id,
            'date': datetime.datetime.utcnow(),
            'ratio': ratio,
        })
    else:
        logger.info('image %s: retrieved from cache' % id)
        ratio = result['ratio']
    # Expressed as a percentage, presumably for a CSS padding-bottom
    # aspect-ratio trick -- confirm against the templates.
    ratio = round(ratio * 100, 2)
    return dict(ratio=ratio, url=url)
def _get_collage_context(pargs):
    """
    Download image and get/cache aspect ratio.

    For each image id in pargs, adds 'url<i>' and 'ratio<i>' keys so collage
    templates can address images by position.
    """
    ratios = {}
    for ix, id in enumerate(pargs):
        url = IMAGE_URL_TEMPLATE % (app_config.IMAGE_URL, id)
        ratios['url%s' % ix] = url
        client = MongoClient(app_config.MONGODB_URL)
        database = client['idp-georgia']
        collection = database.images
        result = collection.find_one({'_id': id})
        if not result:
            # Cache miss: download once and persist the aspect ratio.
            logger.info('image %s: uncached, downloading %s' % (id, url))
            response = requests.get(url)
            image = Image.open(StringIO(response.content))
            ratio = float(image.height) / float(image.width)
            collection.insert({
                '_id': id,
                'date': datetime.datetime.utcnow(),
                'ratio': ratio,
            })
        else:
            logger.info('image %s: retrieved from cache' % id)
            ratio = result['ratio']
        # Percentage form, rounded to 2dp, keyed by position in the collage.
        ratio = round(ratio * 100, 2)
        ratios['ratio%s' % ix] = ratio
    return ratios
| 27.561856 | 73 | 0.601459 | #!/usr/bin/env python
# _*_ coding:utf-8 _*_
import app_config
import datetime
import logging
import requests
import shortcodes
from render_utils import make_context
from PIL import Image
from StringIO import StringIO
from functools import partial
from jinja2 import Environment, FileSystemLoader
from pymongo import MongoClient
# Remote image URLs are built as "<base image url>/<image id>".
IMAGE_URL_TEMPLATE = '%s/%s'
# Shortcode tags that resolve to a single image (ratio/url context added).
IMAGE_TYPES = ['image', 'asset-image']
# Shortcode tags that resolve to a multi-image collage.
COLLAGE_TYPES = ['collage2']
# Per-tag default template context; shortcode keyword args override these.
SHORTCODE_DICT = {
    'image': {
        'caption': '',
        'width': '100%',
        'format': 'centered'
    },
    'collage2': {
        'caption': '',
        'width': '100%',
        'format': 'centered'
    },
    'asset-image': {
        'caption': '',
        'width': '100%',
        'format': 'centered'
    },
    'idpgraphic': {},
    'video': {},
}
# One Jinja template per tag lives at templates/shortcodes/<tag>.html.
env = Environment(loader=FileSystemLoader('templates/shortcodes'))
logging.basicConfig(format=app_config.LOG_FORMAT)
logger = logging.getLogger(__name__)
logger.setLevel(app_config.LOG_LEVEL)
def _process_id(url, tag):
    """
    Extract an ID from a url (or just return the URL).
    """
    if tag == 'tweet':
        # Tweet status URLs carry the ID as the 6th '/'-separated component.
        parts = url.split('/')
        return parts[5]
    else:
        return url
def _get_extra_context(id, tag):
    """
    Collect tag-specific template context (ratio/url for image tags).
    """
    extra = dict()
    if tag in IMAGE_TYPES:
        # Image tags additionally need the aspect-ratio (and url) context.
        extra.update(_get_image_context(id, tag))
    return extra
def _get_collage_extra_context(pargs, tag):
    """
    Collect collage-specific context (per-image ratios and urls).
    """
    extra = dict()
    if tag in COLLAGE_TYPES:
        # Collage tags need one url/ratio pair per image id in pargs.
        extra.update(_get_collage_context(pargs))
    return extra
def _handler(context, content, pargs, kwargs, tag, defaults):
    """
    Default handler all other handlers inherit from.

    Builds the template context for one shortcode occurrence (positional
    args -> image/collage context, then tag defaults, then keyword args)
    and renders templates/shortcodes/<tag>.html with it.
    """
    if pargs:
        if tag in COLLAGE_TYPES:
            # Collages take a list of image ids as positional args.
            template_context = dict()
            extra_context = _get_collage_extra_context(pargs, tag)
            template_context.update(extra_context)
        else:
            # Single-asset tags: first positional arg is the url/id.
            id = _process_id(pargs[0], tag)
            template_context = dict(url=pargs[0],
                                    id=id)
            extra_context = _get_extra_context(id, tag)
            template_context.update(extra_context)
    else:
        template_context = dict()
    if tag == 'idpgraphic':
        # Graphics pull in the shared app context from render_utils.make_context().
        template_context.update(make_context())
    # Later updates win: tag defaults first, then explicit keyword arguments.
    template_context.update(defaults)
    template_context.update(kwargs)
    template = env.get_template('%s.html' % tag)
    output = template.render(**template_context)
    return output
"""
Register handlers
"""
parser = shortcodes.Parser()
for tag, defaults in SHORTCODE_DICT.items():
tag_handler = partial(_handler, tag=tag, defaults=defaults)
parser.register(tag_handler, tag)
def process_shortcode(tag):
    """
    Generates html from shortcode

    Normalises a few unicode characters in the tag's text before handing
    it to the module-level shortcode parser; returns '' when rendering
    fails (the failure is logged).
    """
    # Replace non-breaking spaces with regular spaces
    # Replace curly (smart) quotes with straight quotation marks
    text = tag.get_text()
    text = text.replace(u'\xa0', u' ')
    text = text.replace(u'\u201D', u'"')
    text = text.replace(u'\u201C', u'"')
    try:
        return parser.parse(text)
    except shortcodes.RenderingError as e:
        logger.error('Could not render short code in: "%s"' % text)
        logger.error('cause: %s' % e.__cause__)
        return ''
def _get_image_context(id, tag):
    """
    Download image and get/cache aspect ratio.

    Returns dict(ratio=..., url=...) where ratio is height/width as a
    percentage rounded to 2 decimals. Local asset images are measured
    directly; remote images are downloaded once and their raw ratio is
    cached in MongoDB keyed by the image id.
    """
    if (tag == 'asset-image'):
        # Local asset: read straight from the www/ directory, no caching.
        image = Image.open('www/%s' % id)
        ratio = float(image.height) / float(image.width)
        ratio = round(ratio * 100, 2)
        return dict(ratio=ratio)
    url = IMAGE_URL_TEMPLATE % (app_config.IMAGE_URL, id)
    client = MongoClient(app_config.MONGODB_URL)
    database = client['idp-georgia']
    collection = database.images
    result = collection.find_one({'_id': id})
    if not result:
        logger.info('image %s: uncached, downloading %s' % (id, url))
        response = requests.get(url)
        # NOTE(review): StringIO on response.content is Python 2 style —
        # under Python 3 this would need io.BytesIO; confirm runtime.
        image = Image.open(StringIO(response.content))
        ratio = float(image.height) / float(image.width)
        # Cache the raw (unscaled) ratio for future requests.
        collection.insert({
            '_id': id,
            'date': datetime.datetime.utcnow(),
            'ratio': ratio,
        })
    else:
        logger.info('image %s: retrieved from cache' % id)
        ratio = result['ratio']
    ratio = round(ratio * 100, 2)
    return dict(ratio=ratio, url=url)
def _get_collage_context(pargs):
    """
    Download image and get/cache aspect ratio.

    For each image id in ``pargs`` adds 'url<i>' and 'ratio<i>' entries
    to the returned dict; ratios are height/width percentages, cached in
    MongoDB exactly like _get_image_context.
    """
    ratios = {}
    for ix, id in enumerate(pargs):
        url = IMAGE_URL_TEMPLATE % (app_config.IMAGE_URL, id)
        ratios['url%s' % ix] = url
        # NOTE(review): a new MongoClient is opened per image — could be
        # hoisted out of the loop; confirm before changing behavior.
        client = MongoClient(app_config.MONGODB_URL)
        database = client['idp-georgia']
        collection = database.images
        result = collection.find_one({'_id': id})
        if not result:
            logger.info('image %s: uncached, downloading %s' % (id, url))
            response = requests.get(url)
            image = Image.open(StringIO(response.content))
            ratio = float(image.height) / float(image.width)
            # Cache the raw (unscaled) ratio for future requests.
            collection.insert({
                '_id': id,
                'date': datetime.datetime.utcnow(),
                'ratio': ratio,
            })
        else:
            logger.info('image %s: retrieved from cache' % id)
            ratio = result['ratio']
        ratio = round(ratio * 100, 2)
        ratios['ratio%s' % ix] = ratio
    return ratios
| 0 | 0 | 0 |
a8823f6bbf443951e2e46c1a81b03e45befd69e6 | 954 | py | Python | registry/smart_contract/migrations/0018_auto_20180727_1202.py | RustamSultanov/Python-test-registry- | 1d779a8135567a0b3aeca0151b2d7f0905014e88 | [
"MIT"
] | 1 | 2019-01-16T14:52:37.000Z | 2019-01-16T14:52:37.000Z | registry/smart_contract/migrations/0018_auto_20180727_1202.py | RustamSultanov/Python-test-registry- | 1d779a8135567a0b3aeca0151b2d7f0905014e88 | [
"MIT"
] | 8 | 2019-10-21T16:18:33.000Z | 2021-06-08T20:33:14.000Z | registry/smart_contract/migrations/0018_auto_20180727_1202.py | RustamSultanov/Python-test-registry- | 1d779a8135567a0b3aeca0151b2d7f0905014e88 | [
"MIT"
] | null | null | null | # Generated by Django 2.0.7 on 2018-07-27 12:02
from django.db import migrations, models
import django.db.models.deletion
| 31.8 | 139 | 0.642558 | # Generated by Django 2.0.7 on 2018-07-27 12:02
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('smart_contract', '0017_comment_another_employee'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='another_employee',
field=models.ManyToManyField(related_name='another_employee', to='smart_contract.UserAccept'),
),
migrations.AlterField(
model_name='comment',
name='employee',
field=models.ManyToManyField(related_name='employee_list', to='smart_contract.UserAccept'),
),
migrations.AlterField(
model_name='comment',
name='init_user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='init_user', to='smart_contract.UserAccept'),
),
]
| 0 | 807 | 23 |
f7581c8b846fd9de9dcdd1e8c19cb593ed1e4f94 | 1,509 | py | Python | test/test_api.py | frodo19/script.module.inputstreamhelper | 123e3f4050a888adb864d5e46ca7bdaa63da1a5f | [
"MIT"
] | null | null | null | test/test_api.py | frodo19/script.module.inputstreamhelper | 123e3f4050a888adb864d5e46ca7bdaa63da1a5f | [
"MIT"
] | null | null | null | test/test_api.py | frodo19/script.module.inputstreamhelper | 123e3f4050a888adb864d5e46ca7bdaa63da1a5f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright: (c) 2019, Dag Wieers (@dagwieers) <dag@wieers.com>
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# pylint: disable=invalid-name,missing-docstring
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import unittest
import default
xbmc = __import__('xbmc')
xbmcaddon = __import__('xbmcaddon')
xbmcgui = __import__('xbmcgui')
xbmcvfs = __import__('xbmcvfs')
if __name__ == '__main__':
unittest.main()
| 30.795918 | 103 | 0.670643 | # -*- coding: utf-8 -*-
# Copyright: (c) 2019, Dag Wieers (@dagwieers) <dag@wieers.com>
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# pylint: disable=invalid-name,missing-docstring
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import unittest
import default
xbmc = __import__('xbmc')
xbmcaddon = __import__('xbmcaddon')
xbmcgui = __import__('xbmcgui')
xbmcvfs = __import__('xbmcvfs')
class TestApi(unittest.TestCase):
    # Each test drives the add-on entry point (default.run) with the argv
    # it would receive; with no assertions, a test passes as long as no
    # exception is raised.
    @staticmethod
    def test_settings():
        # No action argument.
        default.run(['default.py'])
    @staticmethod
    def test_widevine_install():
        default.run(['default.py', 'widevine_install'])
    @staticmethod
    def test_widevine_remove():
        default.run(['default.py', 'widevine_remove'])
    @staticmethod
    def test_about():
        default.run(['default.py', 'info'])
    @staticmethod
    def test_check_inputstream():
        # Start from a clean cached Widevine config.
        if os.path.exists('test/cdm/widevine_config.json'):
            os.remove('test/cdm/widevine_config.json')
        default.run(['default.py', 'check_inputstream', 'mpd', 'com.widevine.alpha'])
        default.run(['default.py', 'check_inputstream', 'hls', 'widevine'])
        default.run(['default.py', 'check_inputstream', 'hls'])
        default.run(['default.py', 'check_inputstream', 'rtmp'])
        # Extra trailing args — presumably exercising tolerance of
        # surplus argv; confirm intent with the add-on entry point.
        default.run(['default.py', 'check_inputstream', 'mpd', 'widevine', 'butter', 'cheese', 'eggs'])
if __name__ == '__main__':
unittest.main()
| 717 | 237 | 23 |
f3208fee03eb157ed12accf40298fcab8dcac57c | 2,653 | py | Python | wikiconv/conversation_reconstruction/construct_utils/utils/third_party/rev_clean.py | CyberFlameGO/wikidetox | 60ee914c8bb81bada0847a3676e0bf24a6e35221 | [
"Apache-2.0"
] | 66 | 2017-09-10T12:47:37.000Z | 2022-03-18T01:33:10.000Z | wikiconv/conversation_reconstruction/construct_utils/utils/third_party/rev_clean.py | CyberFlameGO/wikidetox | 60ee914c8bb81bada0847a3676e0bf24a6e35221 | [
"Apache-2.0"
] | 82 | 2017-09-12T13:01:59.000Z | 2021-11-10T19:40:01.000Z | wikiconv/conversation_reconstruction/construct_utils/utils/third_party/rev_clean.py | CyberFlameGO/wikidetox | 60ee914c8bb81bada0847a3676e0bf24a6e35221 | [
"Apache-2.0"
] | 20 | 2017-11-02T21:23:35.000Z | 2022-03-09T01:30:58.000Z | # -*- coding: utf-8 -*-
"""Revision cleaning utilties.
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-------------------------------------------------------------------------------
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import re
import bs4
months = [
'January',
'February',
'March',
'April',
'May',
'June',
'July',
'August',
'September',
'October',
'November',
'December',
'Jan',
'Feb',
'Mar',
'Apr',
'May',
'Jun',
'Jul',
'Aug',
'SJep',
'Oct',
'Nov',
'Dec',
]
month_or = '|'.join(months)
date_p = re.compile(r'\d\d:\d\d,( \d?\d)? (%s)( \d?\d)?,? \d\d\d\d (\(UTC\))?' %
month_or)
pre_sub_patterns = [(r'\[\[Image:.*?\]\]', ''), (r'\[\[File:.*?\]\]', ''),
(r'\[\[User:.*?\]\]', ''), (r'\[\[user:.*?\]\]', ''),
(r'\(?\[\[User talk:.*?\]\]\)?', ''),
(r'\(?\[\[user talk:.*?\]\]\)?', ''),
(r'\(?\[\[User Talk:.*?\]\]\)?', ''),
(r'\(?\[\[User_talk:.*?\]\]\)?', ''),
(r'\(?\[\[user_talk:.*?\]\]\)?', ''),
(r'\(?\[\[User_Talk:.*?\]\]\)?', ''),
(r'\(?\[\[Special:Contributions.*?\]\]\)?', '')]
post_sub_patterns = [('--', ''), (' :', ' '),
('—Preceding .* comment added by •', '')]
def clean_html(rev):
  """Strip signature timestamps and HTML markup from a revision string."""
  # Drop the wiki signature timestamps first.
  text = date_p.sub('', rev)
  # Reduce HTML to plain text; keep the raw string if parsing fails.
  try:
    text = bs4.BeautifulSoup(text, 'html.parser').get_text()
  except: # pylint: disable=bare-except
    pass
  # Collapse newline runs, then keep only non-blank, stripped lines.
  text = re.sub(r'\n+', '\n', str(text))
  kept = [line.strip() for line in text.splitlines() if line.strip()]
  if not kept:
    return ''
  return '\n'.join(kept) + '\n'
| 26.267327 | 80 | 0.505089 | # -*- coding: utf-8 -*-
"""Revision cleaning utilties.
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-------------------------------------------------------------------------------
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import re
import bs4
months = [
'January',
'February',
'March',
'April',
'May',
'June',
'July',
'August',
'September',
'October',
'November',
'December',
'Jan',
'Feb',
'Mar',
'Apr',
'May',
'Jun',
'Jul',
'Aug',
'SJep',
'Oct',
'Nov',
'Dec',
]
month_or = '|'.join(months)
date_p = re.compile(r'\d\d:\d\d,( \d?\d)? (%s)( \d?\d)?,? \d\d\d\d (\(UTC\))?' %
month_or)
pre_sub_patterns = [(r'\[\[Image:.*?\]\]', ''), (r'\[\[File:.*?\]\]', ''),
(r'\[\[User:.*?\]\]', ''), (r'\[\[user:.*?\]\]', ''),
(r'\(?\[\[User talk:.*?\]\]\)?', ''),
(r'\(?\[\[user talk:.*?\]\]\)?', ''),
(r'\(?\[\[User Talk:.*?\]\]\)?', ''),
(r'\(?\[\[User_talk:.*?\]\]\)?', ''),
(r'\(?\[\[user_talk:.*?\]\]\)?', ''),
(r'\(?\[\[User_Talk:.*?\]\]\)?', ''),
(r'\(?\[\[Special:Contributions.*?\]\]\)?', '')]
post_sub_patterns = [('--', ''), (' :', ' '),
('—Preceding .* comment added by •', '')]
def clean_html(rev):
  """Clean revision HTML.

  Removes wiki signature timestamps, strips HTML markup with
  BeautifulSoup, then collapses blank lines; returns '' when no content
  remains.
  """
  # Remove timestamp.
  ret = re.sub(date_p, lambda x: '', rev)
  # Strip HTML format.
  try:
    ret = bs4.BeautifulSoup(ret, 'html.parser').get_text()
  except: # pylint: disable=bare-except
    pass
  # Change format for better diff
  ret = re.sub('[\n]+', '\n', str(ret))
  ret = '\n'.join(
      [x.strip() for x in ret.splitlines() if x.strip()]) + '\n'
  if ret == '\n':
    return ''
  return ret
def clean(rev):
  """Apply the wiki-markup substitution patterns to a revision string."""
  text = str(rev)
  # Link/user patterns first, then the media-wiki formatting ones —
  # same order as applying the two lists sequentially.
  for pattern, replacement in pre_sub_patterns + post_sub_patterns:
    text = re.sub(pattern, replacement, text)
  return text
| 174 | 0 | 23 |
d3016514cc782dbcf4527177ae8e6a3b38b60e3e | 4,513 | py | Python | TimetableSplitter.py | TestValleySchool/TimetableSplitter | 8fae94858eb31ee4ce0f189a8c961362c0295cff | [
"Apache-2.0"
] | null | null | null | TimetableSplitter.py | TestValleySchool/TimetableSplitter | 8fae94858eb31ee4ce0f189a8c961362c0295cff | [
"Apache-2.0"
] | null | null | null | TimetableSplitter.py | TestValleySchool/TimetableSplitter | 8fae94858eb31ee4ce0f189a8c961362c0295cff | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
#
# MIS Timetable Splitter
#
# Split a combined HTML exported Student Timetable from a common Management
# Information System (MIS) product, that shall remain nameless, into individual
# per-student files.
#
#
# Copyright 2019 Test Valley School.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from html.parser import HTMLParser
import argparse
import re
import os
# each timetable begins with a <td class="TitleBold">, with "Timetable" written in it, which is convenient enough
# argument parsing
argparser = argparse.ArgumentParser(description='Split a combined HTML exported Student Timetable from a common Management Information System (MIS) product, that shall remain nameless, into individual per-student files.')
argparser.add_argument('-i', '--input', dest='inputfile', help='The input HTML file.', required=True, type=argparse.FileType('r'))
argparser.add_argument('-o', '--output',dest='outputpath', help='The directory for the output files', required=True)
argparser.add_argument('--force', dest='force', help='Allow this script to overwrite files in the output folder.', action='store_true')
# main execution
args = argparser.parse_args()
tt_parser = TimetableParser()
# check output path
if not os.path.exists(args.outputpath):
raise ValueError("The output path specified does not exist.")
if not os.path.isdir(args.outputpath):
raise ValueError("The output path specified is not a directory.")
if not args.force and len(os.listdir(args.outputpath)) > 0:
raise ValueError("The output path is not empty. To allow overwriting of files with the same name, re-run with --force.")
# have the parser identify points at which we will split the HTML file
tt_parser.feed(args.inputfile.read())
# with identified split points, split file into individual items??
args.inputfile.seek(0)
lines = args.inputfile.readlines()
for i in range(0, len(tt_parser.splitpoints)):
currentsplit = tt_parser.splitpoints[i]
currentline = lines[currentsplit[0]-1]
try:
nextsplit = tt_parser.splitpoints[i+1]
except IndexError:
# at the end of the loop, simply split from the current split point to the end of the line
nextsplit = (currentsplit[0]-1, len(currentline))
individual_tt_filename = os.path.join(args.outputpath, tt_parser.titles[i] + '.html')
with open(individual_tt_filename, 'w') as outputfile:
print("Writing " + individual_tt_filename)
# write header
outputfile.write('<html><head><title>' + tt_parser.titles[i] + '</title>')
# write the style tags -- disabled at the moment because WP strips inline style in post body
#outputfile.write('<style type="text/css">')
#outputfile.write(tt_parser.style_data)
#outputfile.write('</style>')
outputfile.write('</head><body>')
# this is hacky to a significant degree, but we split the original file part way through a tag, so we'll re-create
# the table and title class
outputfile.write('<table><tr><td class="TitleBold">')
outputfile.write(currentline[currentsplit[1]:nextsplit[1]])
| 40.294643 | 222 | 0.680035 | #!/usr/bin/env python3
#
# MIS Timetable Splitter
#
# Split a combined HTML exported Student Timetable from a common Management
# Information System (MIS) product, that shall remain nameless, into individual
# per-student files.
#
#
# Copyright 2019 Test Valley School.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from html.parser import HTMLParser
import argparse
import re
import os
class TimetableParser(HTMLParser):
    """HTML parser that locates the start of each student timetable.

    Each timetable begins with a <td class="TitleBold"> whose text starts
    with "Timetable", which is convenient enough to split on. After
    feed() completes, ``splitpoints`` holds the (line, column) position
    of each matching title and ``titles`` the corresponding title text.
    """

    def __init__(self):
        super().__init__()
        self.tt_regex = re.compile(r'Timetable\s*')
        self.splitpoints = []   # (line, offset) of each timetable title
        self.titles = []        # title text, parallel to splitpoints
        self.in_style = False   # True while inside a <style> element
        # Bug fix: initialise style_data so reading it never raises
        # AttributeError when the document has no <style> element.
        self.style_data = ''

    def handle_starttag(self, tag, attrs):
        if tag == 'style':
            self.in_style = True

    def handle_endtag(self, tag):
        # Bug fix: only leave "style mode" when the style element itself
        # closes; previously any end tag reset the flag.
        if tag == 'style':
            self.in_style = False

    def handle_data(self, data):
        if self.in_style:
            self.style_data = data
        elif self.tt_regex.match(data):
            print("Matched a new item '" + data + "' starting at line " + str(self.getpos()[0]) + ", col " + str(self.getpos()[1]))
            self.splitpoints.append(self.getpos())
            self.titles.append(data)
# argument parsing
argparser = argparse.ArgumentParser(description='Split a combined HTML exported Student Timetable from a common Management Information System (MIS) product, that shall remain nameless, into individual per-student files.')
argparser.add_argument('-i', '--input', dest='inputfile', help='The input HTML file.', required=True, type=argparse.FileType('r'))
argparser.add_argument('-o', '--output',dest='outputpath', help='The directory for the output files', required=True)
argparser.add_argument('--force', dest='force', help='Allow this script to overwrite files in the output folder.', action='store_true')
# main execution
args = argparser.parse_args()
tt_parser = TimetableParser()
# check output path: must exist, be a directory, and (unless --force) be empty
if not os.path.exists(args.outputpath):
    raise ValueError("The output path specified does not exist.")
if not os.path.isdir(args.outputpath):
    raise ValueError("The output path specified is not a directory.")
if not args.force and len(os.listdir(args.outputpath)) > 0:
    raise ValueError("The output path is not empty. To allow overwriting of files with the same name, re-run with --force.")
# have the parser identify points at which we will split the HTML file
tt_parser.feed(args.inputfile.read())
# with identified split points, split file into individual items??
args.inputfile.seek(0)
lines = args.inputfile.readlines()
# NOTE(review): the slice below only reads from the line where the current
# split starts; if the next timetable began on a different line, content
# between them would be dropped — confirm the export keeps each timetable
# on a single (very long) line.
for i in range(0, len(tt_parser.splitpoints)):
    currentsplit = tt_parser.splitpoints[i]
    currentline = lines[currentsplit[0]-1]
    try:
        nextsplit = tt_parser.splitpoints[i+1]
    except IndexError:
        # at the end of the loop, simply split from the current split point to the end of the line
        nextsplit = (currentsplit[0]-1, len(currentline))
    individual_tt_filename = os.path.join(args.outputpath, tt_parser.titles[i] + '.html')
    with open(individual_tt_filename, 'w') as outputfile:
        print("Writing " + individual_tt_filename)
        # write header
        outputfile.write('<html><head><title>' + tt_parser.titles[i] + '</title>')
        # write the style tags -- disabled at the moment because WP strips inline style in post body
        #outputfile.write('<style type="text/css">')
        #outputfile.write(tt_parser.style_data)
        #outputfile.write('</style>')
        outputfile.write('</head><body>')
        # this is hacky to a significant degree, but we split the original file part way through a tag, so we'll re-create
        # the table and title class
        outputfile.write('<table><tr><td class="TitleBold">')
        outputfile.write(currentline[currentsplit[1]:nextsplit[1]])
| 619 | 13 | 145 |
c894511092289e7f5fc3972a4c34ec92cd0a7022 | 199 | py | Python | src/covid19_icta/tests/test_scrape.py | nuuuwan/covid19_icta | ca03aa49809a5ab6ede3e0c424b06b5012e01f17 | [
"MIT"
] | null | null | null | src/covid19_icta/tests/test_scrape.py | nuuuwan/covid19_icta | ca03aa49809a5ab6ede3e0c424b06b5012e01f17 | [
"MIT"
] | null | null | null | src/covid19_icta/tests/test_scrape.py | nuuuwan/covid19_icta | ca03aa49809a5ab6ede3e0c424b06b5012e01f17 | [
"MIT"
] | null | null | null | import unittest
from covid19_icta import scrape
if __name__ == '__main__':
unittest.main()
| 15.307692 | 38 | 0.708543 | import unittest
from covid19_icta import scrape
class TestCase(unittest.TestCase):
def test_dump(self):
self.assertTrue(scrape._run())
if __name__ == '__main__':
unittest.main()
| 38 | 13 | 49 |
2156619410a6290cedec6f46d37644938b196d56 | 78 | py | Python | gql_schema_codegen/union/__init__.py | sauldom102/gql_schema_codegen | f3bb813874760a8495e67c770d4622674fef0632 | [
"MIT"
] | 2 | 2022-03-26T20:33:17.000Z | 2022-03-26T23:15:17.000Z | gql_schema_codegen/union/__init__.py | sauldom102/gql_schema_codegen | f3bb813874760a8495e67c770d4622674fef0632 | [
"MIT"
] | null | null | null | gql_schema_codegen/union/__init__.py | sauldom102/gql_schema_codegen | f3bb813874760a8495e67c770d4622674fef0632 | [
"MIT"
] | null | null | null | from .union import UnionType, UnionInfo
__all__ = ['UnionType', 'UnionInfo']
| 19.5 | 39 | 0.74359 | from .union import UnionType, UnionInfo
__all__ = ['UnionType', 'UnionInfo']
| 0 | 0 | 0 |
315349a2602d5d6cfc0c8557fc874c4ddcfca2e3 | 8,177 | py | Python | ECR/list_used_images_having_critical_or_high_severity_findings.py | terratenney/aws-tools | d8ca07d56d812deb819b039752b94a0f1b9e6eb2 | [
"MIT"
] | 8 | 2020-12-27T18:44:17.000Z | 2022-03-10T22:20:28.000Z | ECR/list_used_images_having_critical_or_high_severity_findings.py | terratenney/aws-tools | d8ca07d56d812deb819b039752b94a0f1b9e6eb2 | [
"MIT"
] | 28 | 2020-08-30T02:57:03.000Z | 2021-05-12T09:13:15.000Z | ECR/list_used_images_having_critical_or_high_severity_findings.py | kyhau/arki | b5d6b160ef0780032f231362158dd9dd892f4e8e | [
"MIT"
] | 8 | 2020-09-03T19:00:13.000Z | 2022-03-31T05:31:35.000Z | """
List items of
```
{
"AccountId": string,
"ClusterArn": string,
"HighSeverityCount": sum-of-critical-and-high-severity,
"Image": string,
"LastStatus": string,
"LaunchType": string,
"Region": string,
"TaskArn": string
}
```
"""
from boto3.session import Session
from botocore.exceptions import ClientError
import boto3
import click
from collections import defaultdict
import logging
import re
from helper.aws import AwsApiHelper
from helper.ser import dump_json
logging.getLogger().setLevel(logging.INFO)
@click.command()
@click.option("--x-role-name", "-x", help="Name of a cross account role for accessing cross account images")
@click.option("--profile", "-p", help="AWS profile name. Use profiles in ~/.aws if not specified.")
@click.option("--region", "-r", default="ap-southeast-2", show_default=True, help="AWS Region. Use 'all' for all regions.")
if __name__ == "__main__":
main()
| 41.507614 | 123 | 0.594961 | """
List items of
```
{
"AccountId": string,
"ClusterArn": string,
"HighSeverityCount": sum-of-critical-and-high-severity,
"Image": string,
"LastStatus": string,
"LaunchType": string,
"Region": string,
"TaskArn": string
}
```
"""
from boto3.session import Session
from botocore.exceptions import ClientError
import boto3
import click
from collections import defaultdict
import logging
import re
from helper.aws import AwsApiHelper
from helper.ser import dump_json
logging.getLogger().setLevel(logging.INFO)
def get_session_for_account(account_id, role_name):
    """Assume `role_name` in `account_id` via STS and return a boto3 Session.

    Returns None (logging at debug level) if the role cannot be assumed.
    """
    try:
        role_arn = f"arn:aws:iam::{account_id}:role/{role_name}"
        ret = boto3.client("sts").assume_role(RoleArn=role_arn, RoleSessionName="image_scan_checker")
        # Build a Session from the temporary credentials returned by STS.
        cred = {
            "aws_access_key_id": ret["Credentials"]["AccessKeyId"],
            "aws_secret_access_key": ret["Credentials"]["SecretAccessKey"],
            "aws_session_token": ret["Credentials"]["SessionToken"]
        }
        return Session(**cred)
    except Exception as e:
        logging.debug(f"Failed to assume role {role_name} in {account_id}: {e}")
        return
class EcrHelper():
    """Resolves ECR images to their scan findings and caches the results.

    Cache values in `_global_ecr_checked_images`, keyed by full image ref:
      >= 0 : number of CRITICAL + HIGH findings
      -2   : repository not found (e.g. the tag/repo was deleted)
      -3   : the registry account could not be accessed
    (-1 is used by callers to mean "image was None / never checked".)
    """

    def __init__(self, cross_account_role_name):
        # Role assumed in foreign accounts to read their ECR scan findings.
        self._cross_account_role_name = cross_account_role_name
        self._global_ecr_checked_images = {}

    def check_image_scan_findings(self, images, region, src_account_id, src_session):
        """Populate the findings cache for every ECR image in `images`.

        Images registered in `src_account_id` are queried with
        `src_session`; other accounts are reached via the configured
        cross-account role.
        """
        for ecr_account_id, image_info_list in self.group_images_by_account(images).items():
            if ecr_account_id == src_account_id:
                session_2 = src_session
            else:
                # Bug fix: pass the configured cross-account role name here;
                # previously `region` was passed where the role name belongs,
                # so cross-account lookups could never assume the role.
                session_2 = get_session_for_account(ecr_account_id, self._cross_account_role_name)
                if session_2 is None:
                    logging.info(f"Do not have access to {ecr_account_id}. Skipped")
                    for image_info in image_info_list:
                        self._global_ecr_checked_images[image_info["image"]] = -3
                    continue
            ecr_client = session_2.client("ecr", region_name=region)
            for image_info in image_info_list:
                self.describe_image_scan_findings(ecr_client, ecr_account_id, image_info)

    def describe_image_scan_findings(self, ecr_client, ecr_account_id, image_info):
        """Sum CRITICAL+HIGH findings for one image and record it in the cache."""
        high_cnt = 0
        params = {
            "registryId": ecr_account_id,
            "repositoryName": image_info["repository"],
            "imageId": image_info["image_id"],
            "maxResults": 1000,
        }
        try:
            for item in ecr_client.get_paginator("describe_image_scan_findings").paginate(**params):
                if item["imageScanStatus"]["status"] == "COMPLETE":
                    severity_counts = item["imageScanFindings"]["findingSeverityCounts"]
                    high_cnt = high_cnt + severity_counts.get("CRITICAL", 0) + severity_counts.get("HIGH", 0)
            self._global_ecr_checked_images[image_info["image"]] = high_cnt
        except ClientError as e:
            # A deleted repository/tag is recorded as -2; everything else
            # is only logged (the image stays unchecked).
            if e.response["Error"]["Code"] in ["RepositoryNotFoundException"]:
                self._global_ecr_checked_images[image_info["image"]] = -2
            logging.error(f"Unable to process {params}: {e}")

    def group_images_by_account(self, images):
        """Group unchecked ECR image refs by their registry account id.

        Returns {account_id: [{"image", "repository", "image_id"}, ...]}.
        Non-ECR references and already-cached images are skipped; an
        untagged reference defaults to the "latest" tag.
        """
        acc_images_dict = defaultdict(list)
        for image in set(images):
            if image in self._global_ecr_checked_images:
                # skip if we have already checked it
                continue
            # Possible formats: repository-url/image, repository-url/image:tag, repository-url/image@digest
            matched = re.search("(\d+)\.dkr\.ecr\..+?\.amazonaws\.com\/([^:@]+)([:|@]*)(.*)$", image)
            if matched is None:
                continue
            ecr_account_id, repository, separator = matched.groups()[0], matched.groups()[1], matched.groups()[2]
            if ecr_account_id is None:
                continue
            image_tag = image_digest = None
            if separator == ":":  # tag
                image_tag = matched.groups()[3]
            elif separator == "@":  # digest
                image_digest = matched.groups()[3]
            else:
                image_tag = "latest"
            image_id = {}
            if image_tag not in (None, ""):
                image_id["imageTag"] = image_tag
            elif image_digest not in (None, ""):
                image_id["imageDigest"] = image_digest
            acc_images_dict[ecr_account_id].append({"image": image, "repository": repository, "image_id": image_id})
        return acc_images_dict
class EcsHelper(AwsApiHelper):
    """Walks ECS clusters/tasks and prints one JSON record per container
    image, annotated with its CRITICAL+HIGH ECR scan finding count."""
    def __init__(self, ecr_helper):
        super().__init__()
        # Shared EcrHelper providing the image -> severity-count cache.
        self._ecr_helper = ecr_helper
    def lookup_image_details(self, results, region, account_id, session):
        """Annotate each result with HighSeverityCount (and ImageScanError
        for images that could not be checked)."""
        self._ecr_helper.check_image_scan_findings(
            [item["Image"] for item in results if item["Image"] is not None], region, account_id, session)
        for item in results:
            # -1 (cache miss) means the image was None / never checked.
            high_severity_cnt = self._ecr_helper._global_ecr_checked_images.get(item["Image"], -1)
            if high_severity_cnt == -1:
                item["ImageScanError"] = "NullImage"
            elif high_severity_cnt == -2:
                item["ImageScanError"] = "RepositoryNotFound" # e.g Tag is removed
            elif high_severity_cnt == -3:
                item["ImageScanError"] = "AccessDenied"
            # Error sentinels are clamped to 0 for the reported count.
            item["HighSeverityCount"] = max(high_severity_cnt, 0)
        return results if results is not None else []
    def process_request(self, session, account_id, region, kwargs):
        """Collect container images from all ECS tasks in this account/region
        and print each annotated record as JSON."""
        client = session.client("ecs", region_name=region)
        results = []
        cluster_arns = [item for item in self.paginate(client, "list_clusters")]
        for cluster_arn in cluster_arns:
            task_arns = [item for item in self.paginate(client, "list_tasks", {"cluster": cluster_arn})]
            while len(task_arns) > 0:
                # Note that `tasks` supports a list of up to 100 task IDs or full ARN entries.
                check_tasks = task_arns[0:min(100, len(task_arns))]
                for task in client.describe_tasks(cluster=cluster_arn, tasks=check_tasks)["tasks"]:
                    # Retrieve Image in running containers, or container definitions (if it's
                    # Fargate or the running containers (task["containers"]) has "Image"==None)
                    containers = task["containers"]
                    none_image_found = any(c.get("image") is None for c in containers)
                    if task["launchType"] == "FARGATE" or none_image_found:
                        resp = client.describe_task_definition(
                            taskDefinition=task["taskDefinitionArn"])
                        containers = resp["taskDefinition"]["containerDefinitions"]
                    for container in containers:
                        data = {
                            "Image": container.get("image"), # can have None image
                            "ClusterArn": task["clusterArn"],
                            "LaunchType": task["launchType"],
                            "TaskArn": task["taskArn"],
                            "LastStatus": task["lastStatus"],
                            "AccountId": account_id,
                            "Region": region,
                        }
                        results.append(data)
                task_arns = task_arns[100:]
        for result in self.lookup_image_details(results, region, account_id, session):
            print(dump_json(result))
@click.command()
@click.option("--x-role-name", "-x", help="Name of a cross account role for accessing cross account images")
@click.option("--profile", "-p", help="AWS profile name. Use profiles in ~/.aws if not specified.")
@click.option("--region", "-r", default="ap-southeast-2", show_default=True, help="AWS Region. Use 'all' for all regions.")
# CLI entry point: prints one JSON object per running ECS container image,
# including its CRITICAL+HIGH ECR scan finding count. (Kept as a comment
# rather than a docstring so click's --help output is unchanged.)
def main(x_role_name, profile, region):
    ecr_helper = EcrHelper(x_role_name)
    EcsHelper(ecr_helper).start(profile, region, "ecs")
if __name__ == "__main__":
main()
| 6,965 | 6 | 286 |
a1e55371436592a3daa9c792546cfcb188fe65f8 | 6,521 | py | Python | models/segtran_modified/code/receptivefield/plotting.py | indigoYoshimaru/3d-brain-thesis | bc6fd5e85e7e8e88c5a3cccafad098c7f3d7586a | [
"MIT"
] | null | null | null | models/segtran_modified/code/receptivefield/plotting.py | indigoYoshimaru/3d-brain-thesis | bc6fd5e85e7e8e88c5a3cccafad098c7f3d7586a | [
"MIT"
] | null | null | null | models/segtran_modified/code/receptivefield/plotting.py | indigoYoshimaru/3d-brain-thesis | bc6fd5e85e7e8e88c5a3cccafad098c7f3d7586a | [
"MIT"
] | null | null | null | import itertools
from typing import Any, Optional
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.axes_grid1 import make_axes_locatable
from receptivefield.common import estimate_rf_from_gradient
from receptivefield.image import get_default_image
from receptivefield.types import (
ImageShape,
GridPoint,
GridShape,
ReceptiveFieldDescription,
ReceptiveFieldRect,
to_rf_rect,
)
def _plot_rect(
    ax,
    rect: ReceptiveFieldRect,
    color: Any,
    alpha: float = 0.9,
    linewidth: float = 5,
    size: float = 90,
) -> None:
    """
    Plot rectangle and center point.

    :param ax: matplotlib axis
    :param rect: definition of rectangle; (x, y) is the center point
    :param color: color of the rectangle edge and the center point
    :param alpha: rectangle transparency
    :param linewidth: rectangle edge width
    :param size: point size
    """
    # rect.(x, y) is the center, so shift by half the extent for the corner.
    ax.add_patch(
        patches.Rectangle(
            (rect.y - rect.h / 2, rect.x - rect.w / 2),
            rect.h,
            rect.w,
            alpha=alpha,
            fill=False,
            facecolor="white",
            edgecolor=color,
            linewidth=linewidth,
        )
    )
    # Bug fix: draw the point on the axis that was passed in; plt.scatter
    # would target whatever axes happens to be current, which may differ.
    ax.scatter([rect.y], [rect.x], s=size, c=color)
def plot_gradient_field(
    receptive_field_grad: np.ndarray,
    image: np.ndarray = None,
    axis: Optional[Any] = None,
    **plot_params
) -> None:
    """
    Plot gradient map from gradient tensor.
    :param receptive_field_grad: numpy tensor of shape [N, W, H, C]
    :param image: optional image of shape [W, H, 3]
    :param axis: a matplotlib axis object as returned by the e.g. plt.subplot
        function. If not None then axis is used for visualizations otherwise
        default figure is created.
    :param plot_params: additional plot params: figsize=(5, 5)
    """
    # Estimated RF rectangle is computed before the gradient tensor is
    # destructively normalized below.
    receptive_field = estimate_rf_from_gradient(receptive_field_grad)
    # Average over the batch (axis 0) and channel (last) dimensions.
    receptive_field_grad = np.array(receptive_field_grad).mean(0).mean(-1)
    receptive_field_grad /= receptive_field_grad.max()
    # Lift every non-zero gradient by a constant so faint activations
    # remain visible in the colormap.
    receptive_field_grad += (np.abs(receptive_field_grad) > 0) * 0.2
    if image is not None:
        # Blend the normalized gradient map with the background image.
        receptive_field_grad = np.expand_dims(receptive_field_grad, -1)
        receptive_field_grad = 255 / 2 * (receptive_field_grad + 1) + image * 0.5
        receptive_field_grad = receptive_field_grad.astype("uint8")
    if axis is None:
        figsize = plot_params.get("figsize", (5, 5))
        plt.figure(figsize=figsize)
        axis = plt.subplot(111)
    plt.title("Normalized gradient map")
    im = plt.imshow(receptive_field_grad, cmap="coolwarm")
    plt.xlabel("x")
    plt.ylabel("y")
    # Colorbar in a dedicated axis so it does not shrink the image axis.
    divider = make_axes_locatable(axis)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    plt.colorbar(im, cax=cax)
    axis.add_patch(
        patches.Rectangle(
            (
                receptive_field.y - receptive_field.h / 2,
                receptive_field.x - receptive_field.w / 2,
            ),  # lower-left corner (note x/y swap for plotting)
            receptive_field.h,
            receptive_field.w,
            fill=False,
            alpha=0.9,
            linewidth=4,
            edgecolor=(0.2, 0.2, 0.2),
        )
    )
    axis.set_aspect("equal")
    plt.tight_layout()
def plot_receptive_grid(
    input_shape: GridShape,
    output_shape: GridShape,
    rf_params: ReceptiveFieldDescription,
    custom_image: Optional[np.ndarray] = None,
    plot_naive_rf: bool = False,
    axis: Optional[Any] = None,
    **plot_params
) -> None:
    """
    Visualize receptive field grid.
    :param input_shape: an input image shape as an instance of GridShape
    :param output_shape: an output feature map shape
    :param rf_params: an instance of ReceptiveFieldDescription computed for
        this feature map.
    :param custom_image: optional image [height, width, 3] to be plotted as
        a background.
    :param plot_naive_rf: plot naive version of the receptive field. Naive
        version of RF does not take strides, and offsets into considerations,
        it is a simple linear mapping from N points in feature map to pixels
        in the image.
    :param axis: a matplotlib axis object as returned by the e.g. plt.subplot
        function. If not None then axis is used for visualizations otherwise
        default figure is created.
    :param plot_params: additional plot params: figsize=(10, 10)
    """
    if custom_image is None:
        img = get_default_image(shape=ImageShape(input_shape.h, input_shape.w))
    else:
        img = custom_image
    figsize = plot_params.get("figsize", (10, 10))
    # plot image
    if axis is None:
        plt.figure(figsize=figsize)
        axis = plt.subplot(111)
    axis.imshow(img)
    # plot naive receptive field grid
    if plot_naive_rf:
        dw = input_shape.w / output_shape.w
        dh = input_shape.h / output_shape.h
        for i, j in itertools.product(range(output_shape.w), range(output_shape.h)):
            # Only the lower-left corner is needed for the patch.
            x0 = i * dw
            y0 = j * dh
            axis.add_patch(
                patches.Rectangle(
                    (y0, x0),
                    dh,
                    dw,
                    alpha=0.9,
                    fill=False,
                    edgecolor="gray",
                    linewidth=1,
                )
            )
    rf_offset = rf_params.offset
    rf_size = rf_params.size
    rf_stride = rf_params.stride

    # BUGFIX: this helper was missing in this copy of the function even
    # though it is called below (NameError at runtime). It maps a feature
    # map coordinate (i, j) to input-image pixel coordinates.
    def map_point(i: int, j: int):
        return np.array(rf_offset) + np.array([i, j]) * np.array(rf_stride)

    # plot RF grid based on rf params
    points = [
        map_point(i, j)
        for i, j in itertools.product(range(output_shape.w), range(output_shape.h))
    ]
    points = np.array(points)
    axis.scatter(points[:, 1], points[:, 0], marker="o", c=(0.2, 0.9, 0.1, 0.9), s=10)
    # plot receptive field from corner point
    _plot_rect(
        axis,
        rect=to_rf_rect(rf_offset, rf_size),
        color=(0.9, 0.3, 0.2),
        linewidth=5,
        size=90,
    )
    center_point = map_point(output_shape.w // 2, output_shape.h // 2)
    _plot_rect(
        axis,
        rect=to_rf_rect(GridPoint(center_point[0], center_point[1]), rf_size),
        color=(0.1, 0.3, 0.9),
        linewidth=5,
        size=90,
    )
    last_point = map_point(output_shape.w - 1, output_shape.h - 1)
    _plot_rect(
        axis,
        rect=to_rf_rect(GridPoint(last_point[0], last_point[1]), rf_size),
        color=(0.1, 0.9, 0.3),
        linewidth=5,
        size=90,
    )
    axis.set_aspect("equal")
| 30.330233 | 86 | 0.613863 | import itertools
from typing import Any, Optional
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.axes_grid1 import make_axes_locatable
from receptivefield.common import estimate_rf_from_gradient
from receptivefield.image import get_default_image
from receptivefield.types import (
ImageShape,
GridPoint,
GridShape,
ReceptiveFieldDescription,
ReceptiveFieldRect,
to_rf_rect,
)
def _plot_rect(
    ax,
    rect: ReceptiveFieldRect,
    color: Any,
    alpha: float = 0.9,
    linewidth: float = 5,
    size: float = 90,
) -> None:
    """
    Plot rectangle and center point.
    :param ax: matplotlib axis
    :param rect: definition of rectangle (center x/y plus extents w/h)
    :param color: edge and marker color
    :param alpha: rectangle transparency
    :param linewidth: rectangle edge width
    :param size: point size
    """
    # Note the deliberate x/y swap: rect.x/rect.w go on the vertical axis,
    # rect.y/rect.h on the horizontal one.
    ax.add_patch(
        patches.Rectangle(
            (rect.y - rect.h / 2, rect.x - rect.w / 2),
            rect.h,
            rect.w,
            alpha=alpha,
            fill=False,
            facecolor="white",
            edgecolor=color,
            linewidth=linewidth,
        )
    )
    # Center marker drawn through the pyplot state machine.
    plt.scatter([rect.y], [rect.x], s=size, c=color)
def plot_gradient_field(
    receptive_field_grad: np.ndarray,
    image: np.ndarray = None,
    axis: Optional[Any] = None,
    **plot_params
) -> None:
    """
    Plot gradient map from gradient tensor.
    :param receptive_field_grad: numpy tensor of shape [N, W, H, C]
    :param image: optional image of shape [W, H, 3]
    :param axis: a matplotlib axis object as returned by the e.g. plt.subplot
        function. If not None then axis is used for visualizations otherwise
        default figure is created.
    :param plot_params: additional plot params: figsize=(5, 5)
    """
    # RF rectangle is estimated before the tensor is destructively normalized.
    receptive_field = estimate_rf_from_gradient(receptive_field_grad)
    # Average over batch (axis 0) and channel (last axis).
    receptive_field_grad = np.array(receptive_field_grad).mean(0).mean(-1)
    receptive_field_grad /= receptive_field_grad.max()
    # Lift non-zero gradients so faint activations remain visible.
    receptive_field_grad += (np.abs(receptive_field_grad) > 0) * 0.2
    if image is not None:
        # Blend the normalized gradient map with the background image.
        receptive_field_grad = np.expand_dims(receptive_field_grad, -1)
        receptive_field_grad = 255 / 2 * (receptive_field_grad + 1) + image * 0.5
        receptive_field_grad = receptive_field_grad.astype("uint8")
    if axis is None:
        figsize = plot_params.get("figsize", (5, 5))
        plt.figure(figsize=figsize)
        axis = plt.subplot(111)
    plt.title("Normalized gradient map")
    im = plt.imshow(receptive_field_grad, cmap="coolwarm")
    plt.xlabel("x")
    plt.ylabel("y")
    # Colorbar in its own axis so it does not shrink the image axis.
    divider = make_axes_locatable(axis)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    plt.colorbar(im, cax=cax)
    axis.add_patch(
        patches.Rectangle(
            (
                receptive_field.y - receptive_field.h / 2,
                receptive_field.x - receptive_field.w / 2,
            ),  # lower-left corner (note x/y swap for plotting)
            receptive_field.h,
            receptive_field.w,
            fill=False,
            alpha=0.9,
            linewidth=4,
            edgecolor=(0.2, 0.2, 0.2),
        )
    )
    axis.set_aspect("equal")
    plt.tight_layout()
def plot_receptive_grid(
    input_shape: GridShape,
    output_shape: GridShape,
    rf_params: ReceptiveFieldDescription,
    custom_image: Optional[np.ndarray] = None,
    plot_naive_rf: bool = False,
    axis: Optional[Any] = None,
    **plot_params
) -> None:
    """
    Visualize receptive field grid.
    :param input_shape: an input image shape as an instance of GridShape
    :param output_shape: an output feature map shape
    :param rf_params: an instance of ReceptiveFieldDescription computed for
        this feature map.
    :param custom_image: optional image [height, width, 3] to be plotted as
        a background.
    :param plot_naive_rf: plot naive version of the receptive field. Naive
        version of RF does not take strides, and offsets into considerations,
        it is a simple linear mapping from N points in feature map to pixels
        in the image.
    :param axis: a matplotlib axis object as returned by the e.g. plt.subplot
        function. If not None then axis is used for visualizations otherwise
        default figure is created.
    :param plot_params: additional plot params: figsize=(10, 10)
    """
    if custom_image is None:
        img = get_default_image(shape=ImageShape(input_shape.h, input_shape.w))
    else:
        img = custom_image
    figsize = plot_params.get("figsize", (10, 10))
    # plot image
    if axis is None:
        plt.figure(figsize=figsize)
        axis = plt.subplot(111)
    axis.imshow(img)
    # plot naive receptive field grid
    if plot_naive_rf:
        dw = input_shape.w / output_shape.w
        dh = input_shape.h / output_shape.h
        for i, j in itertools.product(range(output_shape.w), range(output_shape.h)):
            # x1/y1 are computed but only the lower-left corner is used.
            x0, x1 = i * dw, (i + 1) * dw
            y0, y1 = j * dh, (j + 1) * dh
            axis.add_patch(
                patches.Rectangle(
                    (y0, x0),
                    dh,
                    dw,
                    alpha=0.9,
                    fill=False,
                    edgecolor="gray",
                    linewidth=1,
                )
            )
    rf_offset = rf_params.offset
    rf_size = rf_params.size
    rf_stride = rf_params.stride
    # map from output grid space to input image
    def map_point(i: int, j: int):
        return np.array(rf_offset) + np.array([i, j]) * np.array(rf_stride)
    # plot RF grid based on rf params
    points = [
        map_point(i, j)
        for i, j in itertools.product(range(output_shape.w), range(output_shape.h))
    ]
    points = np.array(points)
    axis.scatter(points[:, 1], points[:, 0], marker="o", c=(0.2, 0.9, 0.1, 0.9), s=10)
    # plot receptive field from corner point
    _plot_rect(
        axis,
        rect=to_rf_rect(rf_offset, rf_size),
        color=(0.9, 0.3, 0.2),
        linewidth=5,
        size=90,
    )
    # Receptive field of the central feature-map point.
    center_point = map_point(output_shape.w // 2, output_shape.h // 2)
    _plot_rect(
        axis,
        rect=to_rf_rect(GridPoint(center_point[0], center_point[1]), rf_size),
        color=(0.1, 0.3, 0.9),
        linewidth=5,
        size=90,
    )
    # Receptive field of the last feature-map point.
    last_point = map_point(output_shape.w - 1, output_shape.h - 1)
    _plot_rect(
        axis,
        rect=to_rf_rect(GridPoint(last_point[0], last_point[1]), rf_size),
        color=(0.1, 0.9, 0.3),
        linewidth=5,
        size=90,
    )
    axis.set_aspect("equal")
| 85 | 0 | 26 |
734b41896af854cd8a465478ddda45e8ee945260 | 956 | py | Python | environments/functions/bukin6.py | AroMorin/DNNOP | 271e65811fe7cadcffc8155049e256fa78c0c5c6 | [
"MIT"
] | 6 | 2020-01-14T00:01:34.000Z | 2021-12-28T14:31:05.000Z | environments/functions/bukin6.py | AroMorin/DNNOP | 271e65811fe7cadcffc8155049e256fa78c0c5c6 | [
"MIT"
] | null | null | null | environments/functions/bukin6.py | AroMorin/DNNOP | 271e65811fe7cadcffc8155049e256fa78c0c5c6 | [
"MIT"
] | 1 | 2020-09-06T10:44:29.000Z | 2020-09-06T10:44:29.000Z | """Implementation of the Bukin N. 6 function as in the link below. The number of
problem dimensions is arbitrary, as well as the bounds.
https://www.sfu.ca/~ssurjano/bukin6.html
"""
from .function import Function
import torch
| 24.512821 | 80 | 0.59205 | """Implementation of the Bukin N. 6 function as in the link below. The number of
problem dimensions is arbitrary, as well as the bounds.
https://www.sfu.ca/~ssurjano/bukin6.html
"""
from .function import Function
import torch
class Bukin6(Function):
    """Bukin N.6 benchmark function (https://www.sfu.ca/~ssurjano/bukin6.html).

    f(x) = 100 * sqrt(|x1 - 0.01*x0**2|) + 0.01 * |x0 + 10|,
    with global minimum f = 0 at (-10, 1).
    """
    def __init__(self, env_params):
        super(Bukin6, self).__init__(env_params)
        self.x = None  # NP array; current position, set by the base class
        self.x_low = [-15, -5]  # lower domain bounds per dimension
        self.x_high = [-3, 3]  # upper domain bounds per dimension
        self.optimal_x = [-10, 1]  # Location of the global minimum
        self.resolution = 250  # plotting grid resolution
        self.z = None  # Function evaluation
        # Base-class hooks (defined in Function) -- presumably set up the
        # observation/domain/range and the plot; confirm in function.py.
        self.set_observation()
        self.set_domain()
        self.set_range()
        self.init_plot(env_params["data path"])
    def get_func(self):
        """Evaluate the function based on the position attribute."""
        a = 100
        b = self.x[1]-(0.01*(self.x[0]**2))
        c = torch.sqrt(torch.abs(b))
        d = 0.01*torch.abs(self.x[0]+10)
        return (a*c)+d
    #
| 405 | 302 | 23 |
4f0629a25f94f351b3ccbae0e80044e51e76a82f | 664 | py | Python | d2e_share_splitter/sharecontributions/urls.py | developertoentrepreneur/d2e-share-splitter | 3dc406c726a801b507aa0b049fce8a2ab5d1bf2d | [
"MIT"
] | null | null | null | d2e_share_splitter/sharecontributions/urls.py | developertoentrepreneur/d2e-share-splitter | 3dc406c726a801b507aa0b049fce8a2ab5d1bf2d | [
"MIT"
] | 5 | 2022-01-09T07:40:38.000Z | 2022-02-12T19:38:54.000Z | d2e_share_splitter/sharecontributions/urls.py | developertoentrepreneur/d2e_share_splitter | 3dc406c726a801b507aa0b049fce8a2ab5d1bf2d | [
"MIT"
] | null | null | null | from django.urls import path
from d2e_share_splitter.sharecontributions import views
# URL configuration for the sharecontributions app.
app_name = "sharecontributions"  # namespace used by reverse()/url lookups
urlpatterns = [
    # Django Ajax CRUD Operations
    path("contribs/", views.ContribsView.as_view(), name="list_contribs"),
    # path('contribs/log', views.ContribsLog.as_view(), name='log_contribs'),
    path("contribs/create/", views.CreateContrib.as_view(), name="contrib_create"),
    path(
        "contrib/<int:pk>/delete/",
        views.DeleteContrib.as_view(),
        name="contrib_delete",
    ),
    path(
        "contrib/update-form/",
        views.UpdateContribFormView.as_view(),
        name="contrib_form_update",
    ),
]
| 28.869565 | 83 | 0.671687 | from django.urls import path
from d2e_share_splitter.sharecontributions import views
# URL configuration for the sharecontributions app.
app_name = "sharecontributions"  # namespace used by reverse()/url lookups
urlpatterns = [
    # Django Ajax CRUD Operations
    path("contribs/", views.ContribsView.as_view(), name="list_contribs"),
    # path('contribs/log', views.ContribsLog.as_view(), name='log_contribs'),
    path("contribs/create/", views.CreateContrib.as_view(), name="contrib_create"),
    path(
        "contrib/<int:pk>/delete/",
        views.DeleteContrib.as_view(),
        name="contrib_delete",
    ),
    path(
        "contrib/update-form/",
        views.UpdateContribFormView.as_view(),
        name="contrib_form_update",
    ),
]
| 0 | 0 | 0 |
9ca7464fb9148a18192a581a0fd1d96880a4c8d7 | 2,793 | py | Python | hero_trivia.py | healingbrew/HeroesEmojiSlicer | 3c2cab9db8a0afea6b3bf3eb60c2a0e45d840355 | [
"MIT"
] | 1 | 2020-09-18T20:48:10.000Z | 2020-09-18T20:48:10.000Z | hero_trivia.py | healingbrew/HeroesEmojiSlicer | 3c2cab9db8a0afea6b3bf3eb60c2a0e45d840355 | [
"MIT"
] | null | null | null | hero_trivia.py | healingbrew/HeroesEmojiSlicer | 3c2cab9db8a0afea6b3bf3eb60c2a0e45d840355 | [
"MIT"
] | null | null | null | # encoding: utf-8
from Storm.Localized import Strings as LocalizedStrings
from Storm.GameData import Catalog
from sys import argv, stderr
from os.path import exists
# BUGFIX: print_utf8_e (and its stdout sibling) was called below but never
# defined in this copy of the script, which crashed with a NameError before
# any useful output. Both helpers are restored here.
def print_utf8(text):
    """Write *text* to stdout."""
    print(text)


def print_utf8_e(text):
    """Write *text* (errors/diagnostics) to stderr."""
    print(text, file=stderr)


# This file is a CLI tool; refuse to be imported as a module.
if __name__ != '__main__':
    print_utf8_e('herotrivia.py is a CLI file, not a module')
    exit(-1)
if len(argv) < 2:
    print_utf8_e('Usage: python %s path_to_mods_dir [locale]' % (argv[0]))
    exit(1)
RootDir = argv[1]
RootDirLength = len(RootDir)
RootLocale = 'enus'  # default game-strings locale
if len(argv) > 2:
    RootLocale = argv[2]
# Base mod plus every include listed in heroesdata.stormmod.
GameDataList = ['%s/heroesdata.stormmod' % RootDir]
GameDataList += list(map(lambda x: '%s/%s/' % (RootDir, x.get('value').lower()[5:]), Catalog('%s/heroesdata.stormmod/base.stormdata/Includes.xml' % RootDir)))
print('Name, Radius, Inner Radius, Flags, Search')
for gameDataDir in GameDataList:
    gameDataPath = '%s/base.stormdata/GameData.xml' % gameDataDir
    if not exists(gameDataPath):
        print_utf8_e('Catalog stormmod %s does not exist!' % gameDataPath)
        continue
    CLocale = LocalizedStrings({}).Load('%s/%s.stormdata/LocalizedData/GameStrings.txt' % (gameDataDir, RootLocale))
    GameDataCatalog = set(map(lambda x: x.get('path'), Catalog(gameDataPath).findall("Catalog")))
    for CatalogEntry in GameDataCatalog:
        catalogPath = '%s/base.stormdata/%s' % (gameDataDir, CatalogEntry)
        if not exists(catalogPath):
            print_utf8_e('Catalog file %s does not exist!' % catalogPath)
            continue
        CatalogFile = Catalog(catalogPath)
        for CUnit in CatalogFile.findall('CUnit'):
            CUnitId = CUnit.get('id')
            CUnitParent = CUnit.get('parent') or CUnitId
            # Only hero units (plus the RexxarMisha special case) are listed.
            if CUnitParent.startswith('StormHero') is not True and CUnitId != 'RexxarMisha': continue
            CUnitName = CLocale.get("Unit/Name/%s" % CUnitId)
            CUnitRadius = 'Inherited'
            if CUnit.find('Radius') is not None: CUnitRadius = CUnit.find('Radius').get('value')
            CUnitInnerRadius = 'Inherited'
            if CUnit.find('InnerRadius') is not None: CUnitInnerRadius = CUnit.find('InnerRadius').get('value')
            CUnitFlags = list(map(lambda x: x.get('index'), filter(lambda x: x.get('value') == '1', CUnit.findall('HeroPlaystyleFlags'))))
            CUnitFlags += list(filter(lambda x: x is not None and (x.startswith('HeroGeneric') or x == 'UltimateEvolutionInvalidTarget'), map(lambda x: x.get('Link'), CUnit.findall('BehaviorArray'))))
            if len(CUnitFlags) == 0: CUnitFlags = ['Inherited']
            CSearchText = CLocale.get("Hero/AdditionalSearchText/%s" % CUnitId[4:])
            print('%s, %s, %s, %s, %s' % (CUnitName, CUnitRadius, CUnitInnerRadius, ', '.join(CUnitFlags), CSearchText))
| 47.338983 | 200 | 0.659864 | # encoding: utf-8
from Storm.Localized import Strings as LocalizedStrings
from Storm.GameData import Catalog
from sys import argv, stderr
from os.path import exists
def print_utf8(text):
print(text)
def print_utf8_e(text):
print(text, file=stderr)
if __name__ != '__main__':
print_utf8_e('herotrivia.py is a CLI file, not a module')
exit(-1)
if len(argv) < 2:
print_utf8_e('Usage: python %s path_to_mods_dir [locale]' % (argv[0]))
exit(1)
RootDir = argv[1]
RootDirLength = len(RootDir)
RootLocale = 'enus'
if len(argv) > 2:
RootLocale = argv[2]
GameDataList = ['%s/heroesdata.stormmod' % RootDir]
GameDataList += list(map(lambda x: '%s/%s/' % (RootDir, x.get('value').lower()[5:]), Catalog('%s/heroesdata.stormmod/base.stormdata/Includes.xml' % RootDir)))
print('Name, Radius, Inner Radius, Flags, Search')
for gameDataDir in GameDataList:
gameDataPath = '%s/base.stormdata/GameData.xml' % gameDataDir
if not exists(gameDataPath):
print_utf8_e('Catalog stormmod %s does not exist!' % gameDataPath)
continue
CLocale = LocalizedStrings({}).Load('%s/%s.stormdata/LocalizedData/GameStrings.txt' % (gameDataDir, RootLocale))
GameDataCatalog = set(map(lambda x: x.get('path'), Catalog(gameDataPath).findall("Catalog")))
for CatalogEntry in GameDataCatalog:
catalogPath = '%s/base.stormdata/%s' % (gameDataDir, CatalogEntry)
if not exists(catalogPath):
print_utf8_e('Catalog file %s does not exist!' % catalogPath)
continue
CatalogFile = Catalog(catalogPath)
for CUnit in CatalogFile.findall('CUnit'):
CUnitId = CUnit.get('id')
CUnitParent = CUnit.get('parent') or CUnitId
if CUnitParent.startswith('StormHero') is not True and CUnitId != 'RexxarMisha': continue
CUnitName = CLocale.get("Unit/Name/%s" % CUnitId)
CUnitRadius = 'Inherited'
if CUnit.find('Radius') is not None: CUnitRadius = CUnit.find('Radius').get('value')
CUnitInnerRadius = 'Inherited'
if CUnit.find('InnerRadius') is not None: CUnitInnerRadius = CUnit.find('InnerRadius').get('value')
CUnitFlags = list(map(lambda x: x.get('index'), filter(lambda x: x.get('value') == '1', CUnit.findall('HeroPlaystyleFlags'))))
CUnitFlags += list(filter(lambda x: x is not None and (x.startswith('HeroGeneric') or x == 'UltimateEvolutionInvalidTarget'), map(lambda x: x.get('Link'), CUnit.findall('BehaviorArray'))))
if len(CUnitFlags) == 0: CUnitFlags = ['Inherited']
CSearchText = CLocale.get("Hero/AdditionalSearchText/%s" % CUnitId[4:])
print('%s, %s, %s, %s, %s' % (CUnitName, CUnitRadius, CUnitInnerRadius, ', '.join(CUnitFlags), CSearchText))
| 47 | 0 | 46 |
4b42ff639c755881ef8c17f3d435b947e0f8e74c | 255 | py | Python | lint.py | 9kin/codeforces-dl | 70edc1b8942f4a12ef052e0ae6e4331d34be6a71 | [
"MIT"
] | 5 | 2020-10-08T05:17:58.000Z | 2021-05-16T17:42:04.000Z | lint.py | 9kin/cfdl | 70edc1b8942f4a12ef052e0ae6e4331d34be6a71 | [
"MIT"
] | null | null | null | lint.py | 9kin/cfdl | 70edc1b8942f4a12ef052e0ae6e4331d34be6a71 | [
"MIT"
] | null | null | null | import glob
from os import system
# Collect every Python file in the repo root and the cfdl package.
files = glob.glob("*.py") + glob.glob("cfdl/*.py")
# Reformat the whole tree with black first, then sort imports per file.
system("python3 -m black . --line-length 79")
for file in files:
    # system(f"python3 -m pybetter {file}")
    system(f"python3 -m isort {file}")
__all__ = ["files"]
| 23.181818 | 50 | 0.647059 | import glob
from os import system
# Collect every Python file in the repo root and the cfdl package.
files = glob.glob("*.py") + glob.glob("cfdl/*.py")
# Reformat the whole tree with black first, then sort imports per file.
system("python3 -m black . --line-length 79")
for file in files:
    # system(f"python3 -m pybetter {file}")
    system(f"python3 -m isort {file}")
__all__ = ["files"]
| 0 | 0 | 0 |
efb5bbb32c39ad32330d530f8caa7d9f8ccf4b10 | 2,030 | py | Python | gpef/stat/basic_stat.py | zz090923610/gpef | 5bee7afb7df4041d15fd25c02542f0875a70469a | [
"MIT"
] | null | null | null | gpef/stat/basic_stat.py | zz090923610/gpef | 5bee7afb7df4041d15fd25c02542f0875a70469a | [
"MIT"
] | null | null | null | gpef/stat/basic_stat.py | zz090923610/gpef | 5bee7afb7df4041d15fd25c02542f0875a70469a | [
"MIT"
] | null | null | null | import json
import sys
import pandas as pd
# noinspection SpellCheckingInspection
# Example control file printed by generate_example_json(); users feed the
# JSON part back to this tool. BUGFIX: the trailing commas after
# "invalid_cell_as" made the advertised example invalid JSON (json.load
# rejects trailing commas); they are removed.
EXAMPLE_CTRL_JSON = """######### EXAMPLE JSON: #########
{
    "save_path": "./basic_stat.csv",
    "data": [
        {
            "name": "Baseline",
            "csv_path": "./similarity_score.csv",
            "col_name": "baseline",
            "invalid_cell_as": 0
        },
        {
            "name": "Tor",
            "csv_path": "./similarity_score.csv",
            "col_name": "torsocks",
            "invalid_cell_as": 0
        }
    ]
}
"""
def data_prepare(df_path, col_name, invalid_cell_as=None):
    """Load one column of a CSV file as a pandas Series.

    :param df_path: path (or file-like object) readable by pandas.read_csv
    :param col_name: name of the column to extract
    :param invalid_cell_as: optional value used to fill NaN cells
        (coerced to float); when None, NaN cells are kept as-is
    :return: the selected column as a pandas Series
    """
    series = pd.read_csv(df_path)[col_name]
    if invalid_cell_as is None:
        return series
    return series.fillna(float(invalid_cell_as))
if __name__ == '__main__':
    # NOTE(review): main() is not defined anywhere in this excerpt of the
    # module -- running it as a script would raise NameError. Confirm that
    # main (the CLI entry point) is defined elsewhere in the full file.
    main()
| 25.061728 | 104 | 0.56601 | import json
import sys
import pandas as pd
# noinspection SpellCheckingInspection
# Example control file printed by generate_example_json(); users feed the
# JSON part back to this tool. BUGFIX: the trailing commas after
# "invalid_cell_as" made the advertised example invalid JSON (json.load
# rejects trailing commas); they are removed.
EXAMPLE_CTRL_JSON = """######### EXAMPLE JSON: #########
{
    "save_path": "./basic_stat.csv",
    "data": [
        {
            "name": "Baseline",
            "csv_path": "./similarity_score.csv",
            "col_name": "baseline",
            "invalid_cell_as": 0
        },
        {
            "name": "Tor",
            "csv_path": "./similarity_score.csv",
            "col_name": "torsocks",
            "invalid_cell_as": 0
        }
    ]
}
"""
def data_prepare(df_path, col_name, invalid_cell_as=None):
    """
    Load column *col_name* from the CSV file at *df_path* as a pandas
    Series. When *invalid_cell_as* is given, NaN cells are filled with
    float(invalid_cell_as); otherwise NaN cells are kept.
    """
    df = pd.read_csv(df_path)
    df = df[col_name]
    if invalid_cell_as is not None:
        df = df.fillna(float(invalid_cell_as))
    return df
def describe(df):
    """Return df.describe() converted to a dict, augmented with the median."""
    summary = df.describe()
    stats = {key: value for key, value in summary.items()}
    stats["median"] = df.median()
    return stats
def parse_cmd(json_path):
    """Run the stats described by the JSON control file at *json_path*.

    For each entry in the file's "data" list, loads the named CSV column,
    computes describe() + median, prints the per-entry stats, and finally
    writes name/mean/median/std for all entries to commands["save_path"].
    """
    with open(json_path, 'r') as f:
        commands = json.load(f)
    result = []
    for data_single in commands['data']:
        # invalid_cell_as is optional per entry; None means "keep NaN".
        if "invalid_cell_as" in data_single:
            invalid_cell_as = data_single['invalid_cell_as']
        else:
            invalid_cell_as = None
        raw_data = data_prepare(data_single['csv_path'], data_single['col_name'],
                                invalid_cell_as=invalid_cell_as)
        res = describe(raw_data)
        res['name'] = data_single['name']
        print(res)
        result.append(res)
    df = pd.DataFrame(result)
    # Persist only the summary columns, then echo them to stdout.
    df[["name", "mean", "median", "std"]].to_csv(commands["save_path"], index=False)
    print(df[["name", "mean", "median", "std"]])
def generate_example_json():
    """Print the example control-file template to stdout."""
    print(EXAMPLE_CTRL_JSON)
def main():
    """CLI entry point: '-g' prints the example config, otherwise the
    single argument is treated as a path to a JSON control file."""
    if len(sys.argv) != 2:
        print("Usage: basic_stat path/to/conf.json")
        exit()
    if sys.argv[1] == '-g':
        generate_example_json()
    else:
        parse_cmd(sys.argv[1])
if __name__ == '__main__':
    main()
| 1,012 | 0 | 92 |
2c8269117559be4e33981f99162afa6ebcd63fc4 | 4,798 | py | Python | neoasynchttpy/connection.py | emehrkay/neoasynchttpy | 42453f2ddb7498c95f6145ff12ea4926c857d328 | [
"MIT"
] | 3 | 2018-02-22T09:47:57.000Z | 2020-08-07T09:18:11.000Z | neoasynchttpy/connection.py | emehrkay/neoasynchttpy | 42453f2ddb7498c95f6145ff12ea4926c857d328 | [
"MIT"
] | null | null | null | neoasynchttpy/connection.py | emehrkay/neoasynchttpy | 42453f2ddb7498c95f6145ff12ea4926c857d328 | [
"MIT"
] | null | null | null | import aiohttp
import datetime
import json
import logging
from .error import NeoAsyncHTTPyException
from .utils import Timer
logger = logging.getLogger(__name__)
| 26.655556 | 77 | 0.565861 | import aiohttp
import datetime
import json
import logging
from .error import NeoAsyncHTTPyException
from .utils import Timer
logger = logging.getLogger(__name__)
class Request:
    """Record of a single HTTP request: target URI, the statements payload,
    and the creation timestamp."""
    def __init__(self, uri, statements):
        self.uri = uri
        self.statements = statements
        # Unix timestamp of when the request object was created.
        self.date = datetime.datetime.now().timestamp()
class Response:
    """Wrapper around an HTTP response with lazy JSON decoding of its body."""
    def __init__(self, request: Request):
        self.response = None  # underlying HTTP response object
        self.text = None  # raw response body
        self._json = None  # cached result of json.loads(self.text)
        self.request = request
        self.time = None  # elapsed request time, set by the caller
    @property
    def status(self) -> int:
        # HTTP status code of the underlying response.
        return self.response.status
    @property
    def json(self) -> dict:
        # Lazily parse the body; empty body decodes to {}.
        # Note: a body that parses to a falsy value is re-parsed on each
        # access because the cache check is truthiness-based.
        if not self._json:
            self._json = json.loads(self.text) if self.text else {}
        return self._json
    @property
    def results(self) -> list:
        # NOTE(review): default is {} although the annotation says list --
        # callers appear to rely only on truthiness/iteration; confirm.
        return self.json.get('results', {})
    @property
    def errors(self) -> list:
        # May return None when the body carries no 'errors' key.
        return self.json.get('errors')
class Connection:
    """HTTP connection to a database's transactional endpoint.

    Statements are accumulated via statement() and sent in one batch by
    query(); start/commit/rollback of server-side transactions is driven
    through the query() flags and the commit()/rollback() helpers.
    """
    def __init__(self, url: str='127.0.0.1', protocol: str='http',
                 port: int=7474, username: str=None, password: str=None,
                 loop = None):
        self.url = url
        self.protocol = protocol
        self.port = port
        self.username = username
        self.password = password
        self.loop = loop
        # URI of the currently open transaction, or None when none is open.
        self.commit_uri = None
        self.reset()
    def reset(self):
        """Drop all queued statements; returns self for chaining."""
        self.statements = []
        return self
    @property
    def uri(self) -> str:
        """Endpoint that executes and commits in a single request."""
        return '{protocol}://{url}:{port}/db/data/transaction/commit'.format(
            protocol=self.protocol, url=self.url, port=self.port)
    @property
    def transaction_uri(self) -> str:
        """Endpoint that opens a new server-side transaction."""
        return '{protocol}://{url}:{port}/db/data/transaction'.format(
            protocol=self.protocol, url=self.url, port=self.port)
    @property
    def session_kwargs(self) -> dict:
        """Keyword arguments (auth, loop) for aiohttp.ClientSession."""
        kwargs = {}
        if self.username and self.password:
            auth = aiohttp.BasicAuth(login=self.username,
                                     password=self.password)
            kwargs['auth'] = auth
        if self.loop:
            kwargs['loop'] = self.loop
        return kwargs
    def statement(self, statement: str, parameters: dict=None,
                  stats: bool=False):
        """Queue one statement (with optional parameters/stats flag);
        returns self for chaining."""
        part = {
            'statement': statement,
        }
        if parameters:
            part['parameters'] = parameters
        if stats:
            part['includeStats'] = True
        self.statements.append(part)
        return self
    async def query(self, start_transaction: bool=False,
                    commit_transaction: bool=False,
                    rollback_transaction: bool=False) -> Response:
        """Send the queued statements and return a Response.

        :param start_transaction: open a new transaction (keeps it open)
        :param commit_transaction: commit the currently open transaction
        :param rollback_transaction: roll back the open transaction
        :raises NeoAsyncHTTPyException: when committing/rolling back with
            no open transaction
        """
        statements = {
            'statements': self.statements,
        }
        if start_transaction:
            request_uri = self.transaction_uri
        elif commit_transaction or rollback_transaction:
            if not self.commit_uri:
                raise NeoAsyncHTTPyException('No open transaction to close')
            request_uri = self.commit_uri
        else:
            request_uri = self.uri
        # BUGFIX: previously `action` was only defined for the rollback and
        # plain-query branches, so start_transaction/commit_transaction
        # raised NameError. Rollback issues a DELETE; every other mode POSTs
        # the statements.
        if rollback_transaction:
            def action(session):
                return session.delete(request_uri)
        else:
            def action(session):
                return session.post(request_uri, json=statements)
        request = Request(uri=request_uri, statements=statements)
        response = Response(request=request)
        logger.debug('Executing statements:')
        logger.debug(statements)
        with Timer() as timer:
            try:
                sk = self.session_kwargs
                async with aiohttp.ClientSession(**sk) as session:
                    async with action(session) as resp:
                        response.response = resp
                        response.text = await resp.text()
                if start_transaction:
                    # Remember where to commit/rollback this transaction.
                    self.commit_uri = response.json.get('commit')
                if response.errors and self.commit_uri:
                    # Server errors invalidate the open transaction.
                    self.commit_uri = None
            except Exception as e:
                # BUGFIX: use the module logger (was the bare logging root).
                logger.error(e, exc_info=True)

                class _ErrorResponse:
                    # BUGFIX: `status: 500` was only an annotation, so
                    # instances had no .status attribute at all.
                    status = 500

                response.response = _ErrorResponse()
                # BUGFIX: Response.errors is a read-only property backed by
                # the parsed JSON body; assigning to it raised
                # AttributeError. Record the failure in the JSON cache so
                # response.errors reports it.
                response._json = {'errors': [e]}
                self.commit_uri = None
            finally:
                self.reset()
                if commit_transaction or rollback_transaction:
                    self.commit_uri = None
        logger.debug('runtime: {} milliseconds\n'.format(timer.elapsed))
        response.time = timer.elapsed
        return response
    async def commit(self):
        """Commit the currently open transaction."""
        return await self.query(commit_transaction=True)
    async def rollback(self):
        """Roll back the currently open transaction."""
        return await self.query(rollback_transaction=True)
| 4,075 | 459 | 96 |
8644e390166f2682e9bc73b30e0a9ab66c26cb34 | 1,388 | py | Python | setup.py | haskiindahouse/promosales-and-prizes-flask | 5b9ae3e0506d847463e367180f4e784fe835c393 | [
"MIT"
] | 46 | 2016-06-28T10:25:07.000Z | 2019-12-10T20:53:47.000Z | setup.py | haskiindahouse/promosales-and-prizes-flask | 5b9ae3e0506d847463e367180f4e784fe835c393 | [
"MIT"
] | 4 | 2018-02-10T10:53:08.000Z | 2018-11-07T08:11:06.000Z | setup.py | haskiindahouse/promosales-and-prizes-flask | 5b9ae3e0506d847463e367180f4e784fe835c393 | [
"MIT"
] | 9 | 2016-07-20T17:05:46.000Z | 2022-02-15T18:40:17.000Z | import os
import sys
from distutils.sysconfig import get_python_lib
from setuptools import find_packages, setup
# Packaging metadata for setuptools.
# NOTE(review): this setup.py describes the 'arrested' (Flask-Arrested)
# package; confirm it belongs to the repository it was found in.
setup(
    name='arrested',
    version='0.1.3',
    author='Mikey Waites',
    author_email='mike@oldstlabs.com',
    url='https://github.com/mikeywaites/flask-arrested',
    description=('A framework for rapidly building REST APIs in Flask.'),
    license='MIT',
    packages=find_packages(exclude=['tests']),  # ship everything except tests
    include_package_data=True,
    install_requires=['flask'],
    zip_safe=False,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Framework :: Flask',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Internet :: WWW/HTTP :: WSGI',
        'Topic :: Software Development :: Libraries :: Application Frameworks',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ]
)
| 35.589744 | 79 | 0.62536 | import os
import sys
from distutils.sysconfig import get_python_lib
from setuptools import find_packages, setup
# Packaging metadata for setuptools.
# NOTE(review): this setup.py describes the 'arrested' (Flask-Arrested)
# package; confirm it belongs to the repository it was found in.
setup(
    name='arrested',
    version='0.1.3',
    author='Mikey Waites',
    author_email='mike@oldstlabs.com',
    url='https://github.com/mikeywaites/flask-arrested',
    description=('A framework for rapidly building REST APIs in Flask.'),
    license='MIT',
    packages=find_packages(exclude=['tests']),  # ship everything except tests
    include_package_data=True,
    install_requires=['flask'],
    zip_safe=False,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Framework :: Flask',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Internet :: WWW/HTTP :: WSGI',
        'Topic :: Software Development :: Libraries :: Application Frameworks',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ]
)
| 0 | 0 | 0 |
711a490278ad9aa748258d15c11feabf487c3d1d | 2,482 | py | Python | ingredients_tasks/celary.py | sandwichcloud/ingredients.tasks | 23d2772536f07aa5e4787b7ee67dee2f1faedb08 | [
"MIT"
] | null | null | null | ingredients_tasks/celary.py | sandwichcloud/ingredients.tasks | 23d2772536f07aa5e4787b7ee67dee2f1faedb08 | [
"MIT"
] | null | null | null | ingredients_tasks/celary.py | sandwichcloud/ingredients.tasks | 23d2772536f07aa5e4787b7ee67dee2f1faedb08 | [
"MIT"
] | null | null | null | import inspect
import celery
from kombu import Queue, Exchange
from sqlalchemy.engine.url import URL
| 34.472222 | 108 | 0.586624 | import inspect
import celery
from kombu import Queue, Exchange
from sqlalchemy.engine.url import URL
class Messaging(object):
def __init__(self, host, port, username, password, vhost):
self.host = host
self.port = port
self.username = username
self.password = password
self.vhost = vhost
self.app = celery.Celery()
def connect(self):
connect_args = {
'drivername': 'amqp',
'host': self.host,
'port': self.port,
'username': self.username,
'password': self.password,
'database': self.vhost
}
from ingredients_tasks.tasks import image, instance, network, region, zone
include, task_queues, task_routes = self.populate_tasks(image, instance, network, region, zone)
self.app.conf.update(
broker_url=URL(**connect_args).__str__(),
broker_transport_options={
'confirm_publish': True
},
task_acks_late=True,
task_reject_on_worker_lost=True,
task_ignore_result=True,
task_store_errors_even_if_ignored=False,
task_soft_time_limit=300, # 5 minutes
task_time_limit=600, # 10 minutes
worker_prefetch_multiplier=1, # One worker process can only do one type of task at a time
include=include,
task_queues=task_queues,
task_routes=task_routes
)
def start(self, argv):
self.app.start(argv=argv)
def populate_tasks(self, *args):
include = []
task_queues = set()
task_routes = {}
from ingredients_tasks.tasks.tasks import NetworkTask, ImageTask, InstanceTask, RegionTask, ZoneTask
for task_module in args:
include.append(task_module.__name__)
for name, method in inspect.getmembers(task_module):
if method in [NetworkTask, ImageTask, InstanceTask, RegionTask, ZoneTask]:
continue
if hasattr(method, 'apply_async'):
task_queues.add(Queue(name, exchange=Exchange(task_module.__name__), routing_key=name))
task_routes[task_module.__name__ + "." + name] = {
'queue': name,
'exchange': task_module.__name__,
'routing_key': name
}
return include, task_queues, task_routes
| 2,246 | 3 | 130 |
aef1874cba1ba91999e70d76bd372d4964f55506 | 402 | py | Python | examples/station/make_release.py | bjornaa/ladim2 | f6c1be9028ca54370ce33dde25b005d5b0bb4677 | [
"MIT"
] | null | null | null | examples/station/make_release.py | bjornaa/ladim2 | f6c1be9028ca54370ce33dde25b005d5b0bb4677 | [
"MIT"
] | null | null | null | examples/station/make_release.py | bjornaa/ladim2 | f6c1be9028ca54370ce33dde25b005d5b0bb4677 | [
"MIT"
] | null | null | null | # Make a particles.in file for a streak line
# Continuous release from single source
from numpy import linspace

start_time = "1989-05-24T12"
num_particles = 1001

# Release point in grid coordinates
x, y = 115, 100
zmax = 200

# One release depth per particle, from zmax up to the surface
Z = linspace(zmax, 0, num_particles)

# Emit one "<time> <x> <y> <z>" record per particle
with open("station.rls", mode="w") as f:
    for z in Z:
        f.write(f"{start_time:s} {x:7.3f} {y:7.3f} {z:6.2f}\n")
| 22.333333 | 77 | 0.666667 | # Make a particles.in file for a streak line
# Continuous release from single source
from numpy import linspace
start_time = "1989-05-24T12"
num_particles = 1001
# Release point in grid coordinates
x, y = 115, 100
zmax = 200
Z = linspace(zmax, 0, num_particles)
with open("station.rls", mode="w") as f:
for z in Z:
f.write("{:s} {:7.3f} {:7.3f} {:6.2f}\n".format(start_time, x, y, z))
| 0 | 0 | 0 |
8ec7495881a9db747d6064a014e7a97e7403ee47 | 1,045 | py | Python | src/isentropic_flow.py | iwarobots/aerodynamics | 3174dfbf1745e185a577033b4723d877ebe3019b | [
"Apache-2.0"
] | 1 | 2015-05-06T12:32:28.000Z | 2015-05-06T12:32:28.000Z | src/isentropic_flow.py | iwarobots/aerodynamics | 3174dfbf1745e185a577033b4723d877ebe3019b | [
"Apache-2.0"
] | null | null | null | src/isentropic_flow.py | iwarobots/aerodynamics | 3174dfbf1745e185a577033b4723d877ebe3019b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
from __future__ import absolute_import, division
from math import sqrt
from scipy.optimize import brentq
from common import func1, MIN_MACH, MAX_MACH
from constants import GAMMA
| 18.660714 | 66 | 0.582775 | #!/usr/bin/env python
from __future__ import absolute_import, division
from math import sqrt
from scipy.optimize import brentq
from common import func1, MIN_MACH, MAX_MACH
from constants import GAMMA
def m2p(m):
    """Isentropic pressure ratio as a function of Mach number ``m``."""
    exponent = -(GAMMA / (GAMMA - 1))
    return func1(m) ** exponent
def m2rho(m):
    """Isentropic density ratio as a function of Mach number ``m``."""
    x = -(1/(GAMMA-1))
    return func1(m) ** x
def m2t(m):
    """Isentropic temperature ratio as a function of Mach number ``m``."""
    return func1(m) ** (-1)
def m2a(m):
    """Isentropic area ratio as a function of Mach number ``m``."""
    exponent = (GAMMA + 1) / (GAMMA - 1)
    ratio_squared = ((2 / (GAMMA + 1)) * func1(m)) ** exponent / m ** 2
    return sqrt(ratio_squared)
def p2m(p):
    """Invert :func:`m2p`: Mach number giving pressure ratio ``p``."""
    return brentq(lambda x: m2p(x)-p, MIN_MACH, MAX_MACH)
def rho2m(rho):
    """Invert :func:`m2rho`: Mach number giving density ratio ``rho``."""
    return brentq(lambda x: m2rho(x)-rho, MIN_MACH, MAX_MACH)
def t2m(t):
    """Invert :func:`m2t`: Mach number giving temperature ratio ``t``."""
    return brentq(lambda x: m2t(x)-t, MIN_MACH, MAX_MACH)
def a2m(a, supersonic=1):
    """Invert :func:`m2a`: Mach number giving area ratio ``a``.

    :param a: area ratio.
    :param supersonic: 1 to search the supersonic branch (M > 1),
        0 to search the subsonic branch (M < 1).
    :raises ValueError: if ``supersonic`` is neither 0 nor 1.  Previously
        this fell through and raised an opaque UnboundLocalError.
    """
    if supersonic == 1:
        m = brentq(lambda x: m2a(x)-a, 1, MAX_MACH)
    elif supersonic == 0:
        m = brentq(lambda x: m2a(x)-a, MIN_MACH, 1)
    else:
        raise ValueError("supersonic must be 0 or 1, got %r" % (supersonic,))
    return m
def ap2m(ap):
    """Invert the product ``m2a(m) * m2p(m)``: Mach number for ratio ``ap``."""
    return brentq(lambda x: m2a(x)*m2p(x)-ap, MIN_MACH, MAX_MACH)
| 586 | 0 | 225 |
bd2210232a57c21704c1368e578ab1906b3a9250 | 3,022 | py | Python | test.py | edward-zhu/dialog | dc84725dfc55b945ad8d435a56bc606f3708d4c8 | [
"Apache-2.0"
] | 38 | 2018-05-24T06:36:05.000Z | 2021-09-09T19:11:33.000Z | test.py | edward-zhu/dialog | dc84725dfc55b945ad8d435a56bc606f3708d4c8 | [
"Apache-2.0"
] | 5 | 2018-10-13T12:16:11.000Z | 2022-01-13T00:45:54.000Z | test.py | edward-zhu/dialog | dc84725dfc55b945ad8d435a56bc606f3708d4c8 | [
"Apache-2.0"
] | 12 | 2018-05-24T06:36:06.000Z | 2020-09-30T01:33:17.000Z | import json
import sys
import random
from argparse import ArgumentParser
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torch.optim import RMSprop
from torch.optim.lr_scheduler import StepLR
from loader import load_data, load_ontology, load_embed, load_sys_vocab, load_kb
from kb import load_kb
from model import load_tracker_model
from decoder import load_generator_model
from codec import Codec
from sentence_generator import SentenceGenerator
CONFIG_FN = 'config.json'
sent_groups = {}
if __name__ == '__main__':
main() | 29.057692 | 109 | 0.684646 | import json
import sys
import random
from argparse import ArgumentParser
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torch.optim import RMSprop
from torch.optim.lr_scheduler import StepLR
from loader import load_data, load_ontology, load_embed, load_sys_vocab, load_kb
from kb import load_kb
from model import load_tracker_model
from decoder import load_generator_model
from codec import Codec
from sentence_generator import SentenceGenerator
CONFIG_FN = 'config.json'
sent_groups = {}
def print_ret(states_pred, sent_groups_pred, onto, sentence_generator):
    """Print the per-slot value predictions and the generated reply.

    :param states_pred: dict of slot name -> score tensor (scores at [0][0]).
    :param sent_groups_pred: sentence-group score tensor (scores at [0]).
    :param onto: ontology dict slot name -> list of values; predictions are
        looked up at ``argmax - 1`` (index 0 apparently means "unset" --
        see ``to_search_criteria``).
    :param sentence_generator: realises the reply from the best group id.
    """
    states_pred_dict = {}
    for slot in states_pred:
        _, argmax = states_pred[slot].data[0][0].max(0)
        states_pred_dict[slot] = argmax
        print '%s pred: %d (%s)' % (slot, int(argmax), onto[slot][int(argmax) - 1], )
    # Take only the single best-scoring sentence group.
    maxs, argmaxs = sent_groups_pred.data[0].topk(1)
    for i, argmax in enumerate(argmaxs):
        # print 'sys utt pred: (%d, %.2f)' % (argmax, maxs[i]) + random.choice(sent_groups[str(int(argmax))])
        print sentence_generator.generate(states_pred_dict, str(int(argmax)))
def to_search_criteria(states_pred, onto):
    """Turn slot predictions into (slot, value) knowledge-base criteria.

    A predicted index of 0 means the slot is unset, and slots whose name
    contains '_req' are request slots; both are skipped.
    """
    criteria = []
    for slot, scores in states_pred.items():
        _, best = scores.data[0][0].max(0)
        best = int(best)
        if best == 0 or '_req' in slot:
            continue
        criteria.append((slot, onto[slot][best - 1]))
    return criteria
def get_kb_result(kb, criteria, indicator_len):
    """Query the KB and build a one-hot "result count" indicator vector.

    :param kb: knowledge base exposing ``search_multi(criteria)``.
    :param criteria: list of (slot, value) pairs.
    :param indicator_len: length of the indicator vector; the count is
        capped at ``indicator_len - 1``, so the last position means
        "that many results or more".
    :returns: (matching entries, 1 x indicator_len one-hot Variable).
    """
    ret = kb.search_multi(criteria)
    nret = min(len(ret), indicator_len - 1)
    vec = torch.zeros(1, indicator_len).long()
    vec[0][nret] = 1
    return ret, Variable(vec)
def main():
    """Interactive dialog loop: read user turns from stdin, print replies.

    Loads config, KB, vocabularies and the tracker model, then for every
    stdin line encodes it, runs the tracker, queries the KB with the
    predicted slot values and prints the predicted system utterance.
    """
    with open(CONFIG_FN) as f:
        conf = json.load(f)
    global sent_groups
    with open(conf["sent_groups"]) as f:
        sent_groups = json.load(f)["groups"]
    kb = load_kb(conf["kb"], 'name')
    sys_vocab, sys_word2idx = load_sys_vocab(conf["sys_vocab"])
    sys_codec = Codec(sys_vocab, sys_word2idx)
    onto, onto_idx = load_ontology(conf["ontology"])
    word2idx, embed = load_embed(**conf)
    usr_codec = Codec([], word2idx)
    trk_model, slot_len_sum = load_tracker_model(onto, embed, conf, kb)
    # Inference only: disable dropout/batch-norm training behaviour.
    trk_model.eval()
    hidden = trk_model.state_tracker.init_hidden()
    kb_vec = Variable(torch.zeros(1, conf["kb_indicator_len"]))
    sentence_generator = SentenceGenerator(kb, onto, sent_groups)
    # One dialog turn per stdin line; loop ends on EOF (empty read).
    for line in iter(sys.stdin.readline, ''):
        inp = usr_codec.encode(line.strip())
        inp = Variable(torch.LongTensor([ inp, ]))
        sentvecs, states_reps, states_preds, hidden, sent_grp_preds = trk_model(inp, None, hidden)
        criteria = to_search_criteria(states_preds, onto)
        ret, kb_vec = get_kb_result(kb, criteria, conf["kb_indicator_len"])
        # print criteria, kb_vec
        sentvecs = sentvecs.view(1, -1)
        states_reps = states_reps.view(1, -1)
        print_ret(states_preds, sent_grp_preds, onto, sentence_generator)
if __name__ == '__main__':
main() | 2,357 | 0 | 92 |
3ec64fc89f82d869b585285cf650864ca71a00f5 | 91 | py | Python | math/__init__.py | skrolikowski/PyBox | d79c5229df69f21767a4db15ebe05b91bba3dc8d | [
"Unlicense",
"MIT"
] | null | null | null | math/__init__.py | skrolikowski/PyBox | d79c5229df69f21767a4db15ebe05b91bba3dc8d | [
"Unlicense",
"MIT"
] | null | null | null | math/__init__.py | skrolikowski/PyBox | d79c5229df69f21767a4db15ebe05b91bba3dc8d | [
"Unlicense",
"MIT"
] | null | null | null | from pybox.math import vec2d
from pybox.math import matrix
from pybox.math import transform | 30.333333 | 32 | 0.846154 | from pybox.math import vec2d
from pybox.math import matrix
from pybox.math import transform | 0 | 0 | 0 |
7e9eab7ddbce3f30e22daf4213ed37499e57972c | 341 | py | Python | src/pi/buzzer.py | mrzzy/memento | a83db7dd769c949d9924f5ef29930d818b105ef4 | [
"MIT"
] | 1 | 2019-11-18T04:30:32.000Z | 2019-11-18T04:30:32.000Z | src/pi/buzzer.py | mrzzy/NP-Portfolio-2 | a83db7dd769c949d9924f5ef29930d818b105ef4 | [
"MIT"
] | 1 | 2021-03-10T06:04:20.000Z | 2021-03-10T06:04:20.000Z | src/pi/buzzer.py | mrzzy/NP-Portfolio-2 | a83db7dd769c949d9924f5ef29930d818b105ef4 | [
"MIT"
] | null | null | null | #
# Memento Pi
# Utilities for dsiplaying notification on the raspberry pi
#
import time
import threading
from gpiozero import TonalBuzzer
buzzer = TonalBuzzer(23)
# play a tone on the buzzer
| 17.947368 | 59 | 0.686217 | #
# Memento Pi
# Utilities for dsiplaying notification on the raspberry pi
#
import time
import threading
from gpiozero import TonalBuzzer
buzzer = TonalBuzzer(23)
# play a tone on the buzzer
def play():
    """Sound an A4 tone on the buzzer for one second without blocking.

    The tone is driven from a background thread, so the caller returns
    immediately while the note is still playing.
    """
    def _beep():
        buzzer.play("A4")
        time.sleep(1)
        buzzer.stop()

    worker = threading.Thread(target=_beep)
    worker.start()
| 124 | 0 | 22 |
3857fa817c8e67b0670f3091842785de15ed6634 | 1,201 | py | Python | layers/attention.py | HHeracles/SEGR | bd16511fa6d3b4afd79251f707f3a6544abe2baa | [
"BSD-2-Clause"
] | null | null | null | layers/attention.py | HHeracles/SEGR | bd16511fa6d3b4afd79251f707f3a6544abe2baa | [
"BSD-2-Clause"
] | null | null | null | layers/attention.py | HHeracles/SEGR | bd16511fa6d3b4afd79251f707f3a6544abe2baa | [
"BSD-2-Clause"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
#from lib.config import cfg
#import lib.utils as utils
# h -- batch_size * cfg.MODEL.RNN_SIZE
# att_feats -- batch_size * att_num * att_feats_dim
# p_att_feats -- batch_size * att_num * cfg.ATT_HIDDEN_SIZE | 34.314286 | 83 | 0.621982 | import torch
import torch.nn as nn
import torch.nn.functional as F
#from lib.config import cfg
#import lib.utils as utils
class Attention(nn.Module):
    """Additive attention over a set of pre-projected feature vectors."""

    def __init__(self):
        super(Attention, self).__init__()
        # NOTE(review): the "NAME: value" lines below are bare variable
        # *annotations*, not assignments -- they never bind the names, so
        # the nn.Linear/nn.Dropout calls below raise NameError when the
        # module is constructed.  They look hastily inlined from a config
        # object (see the commented-out "from lib.config import cfg" import
        # above); presumably they should be "NAME = value" -- TODO confirm.
        RNN_SIZE: 1024
        ATT_HIDDEN_SIZE: -1
        ATT_HIDDEN_DROP: 0.0
        self.Wah = nn.Linear(RNN_SIZE, ATT_HIDDEN_SIZE, bias=False)
        self.alpha = nn.Linear(ATT_HIDDEN_SIZE, 1, bias=False)
        # Dropout is optional: disabled entirely when the rate is <= 0.
        self.dropout = nn.Dropout(ATT_HIDDEN_DROP) if ATT_HIDDEN_DROP > 0 else None
        ATT_ACT: 'TANH'
        if ATT_ACT == 'RELU':
            self.act = nn.ReLU()
        else:
            self.act = nn.Tanh()
    # h -- batch_size * cfg.MODEL.RNN_SIZE
    # att_feats -- batch_size * att_num * att_feats_dim
    # p_att_feats -- batch_size * att_num * cfg.ATT_HIDDEN_SIZE
    def forward(self, h, att_feats, p_att_feats):
        # Project the query h and broadcast-add to the pre-projected feats.
        Wah = self.Wah(h).unsqueeze(1)
        alpha = self.act(Wah + p_att_feats)
        if self.dropout is not None:
            alpha = self.dropout(alpha)
        # Collapse hidden dim to one score per feature, then normalise.
        alpha = self.alpha(alpha).squeeze(-1)
        alpha = F.softmax(alpha, dim=-1)
        # Attention-weighted sum of the feature vectors.
        att = torch.bmm(alpha.unsqueeze(1), att_feats).squeeze(1)
return att | 835 | 6 | 75 |
d9f6fb21174c5c62646ae99726a551208fe8d98c | 1,638 | py | Python | unix/users.py | fmenabe/python-unix | 738907eddcdeece4be8c82f1d5604c296c94e49f | [
"MIT"
] | 7 | 2015-09-17T13:33:10.000Z | 2021-09-30T15:07:59.000Z | unix/users.py | fmenabe/python-unix | 738907eddcdeece4be8c82f1d5604c296c94e49f | [
"MIT"
] | 3 | 2015-09-17T12:53:17.000Z | 2016-09-27T21:10:48.000Z | unix/users.py | fmenabe/python-unix | 738907eddcdeece4be8c82f1d5604c296c94e49f | [
"MIT"
] | 4 | 2017-01-16T15:29:22.000Z | 2019-08-28T21:11:25.000Z | # /etc/passwd fields.
_PASSWD_FIELDS = ('login', 'password', 'uid', 'gid', 'name', 'home', 'shell')#
| 34.851064 | 80 | 0.598291 | # /etc/passwd fields.
_PASSWD_FIELDS = ('login', 'password', 'uid', 'gid', 'name', 'home', 'shell')#
class Users(object):
    """Inspect and manage the Unix user accounts of ``host``.

    All commands run through ``self._host.execute``; on failure a
    ``UnixError`` (imported elsewhere in this module -- TODO confirm) is
    raised carrying the command's stderr.
    """
    def __init__(self, host):
        self._host = host
    def list(self, verbose=False):
        """Return all login names from ``getent passwd``.

        ``verbose`` is currently unused.
        """
        with self._host.set_controls(decode='utf-8'):
            status, stdout, stderr = self._host.execute('getent', 'passwd')
        if not status:
            raise UnixError(stderr)
        return [dict(zip(_PASSWD_FIELDS, user.split(':')))['login']
                for user in stdout.splitlines()]
    def get(self, uid):
        """Return the passwd entry of ``uid`` (login or numeric id) as a dict."""
        with self._host.set_controls(decode='utf-8'):
            status, stdout, stderr = self._host.execute('getent', 'passwd', uid)
        if not status:
            raise UnixError(stderr)
        return dict(zip(_PASSWD_FIELDS, stdout.splitlines()[0].split(':')))
    def uid(self, username):
        """Return the uid field (a string) of ``username``."""
        return self.get(username)['uid']
    def username(self, uid):
        """Return the login name of ``uid``."""
        return self.get(uid)['login']
    def groups(self, username):
        """Return the gids of the groups ``username`` belongs to."""
        # 'id' is invoked with the G option -- presumably 'id -G'; confirm
        # against the host.execute option-mapping convention.
        with self._host.set_controls(decode='utf-8'):
            status, stdout, stderr = self._host.execute('id', G=username)
        if not status:
            raise UnixError(stderr)
        return [int(gid) for gid in stdout.split()]
    def add(self, user, **kwargs):
        """Create ``user`` via ``useradd``; kwargs become command options."""
        # self._host.isroot('useradd')
        return self._host.execute('useradd', user, **kwargs)
    def delete(self, user, **kwargs):
        """Remove ``user`` via ``userdel``; kwargs become command options."""
        # self._host.isroot('userdel')
        return self._host.execute('userdel', user, **kwargs)
    def update(self, user, **kwargs):
        """Modify ``user`` via ``usermod``; kwargs become command options."""
        # self._host.isroot('usermod')
        return self._host.execute('usermod', user, **kwargs)
| 1,273 | -1 | 265 |
98e4368d0e9f536c07331e64baef3efb401ac8e3 | 756 | py | Python | pydrm/app/window_help.py | MalloryWittwer/pydrm_GUI | 67a418da4ba7e45862cf315abcb19be2e0fedc55 | [
"MIT"
] | null | null | null | pydrm/app/window_help.py | MalloryWittwer/pydrm_GUI | 67a418da4ba7e45862cf315abcb19be2e0fedc55 | [
"MIT"
] | null | null | null | pydrm/app/window_help.py | MalloryWittwer/pydrm_GUI | 67a418da4ba7e45862cf315abcb19be2e0fedc55 | [
"MIT"
] | 1 | 2021-07-28T03:52:39.000Z | 2021-07-28T03:52:39.000Z | import os
import pathlib
import tkinter as tk
class HelpWindow(tk.Toplevel):
'''
Toplevel window for displaying the help pannel
'''
| 27 | 57 | 0.472222 | import os
import pathlib
import tkinter as tk
class HelpWindow(tk.Toplevel):
    '''
    Toplevel window for displaying the help panel.

    Reads the bundled documentation text from static/doc.txt (relative to
    this module) and shows it in a fixed-size window.
    '''
    def __init__(self, master):
        tk.Toplevel.__init__(self,
                             master,
                             # width=width,
                             # height=height,
                             )
        self.title('Documentation')
        self.resizable(False, False)
        # Documentation file ships alongside this module.
        doc_path = os.path.join(
            pathlib.Path(__file__).parent.absolute(),
            'static/doc.txt'
        )
        with open(doc_path, 'r') as f:
            doc_text = f.read()
        msg = tk.Message(self, text=doc_text, bg='white')
        msg.pack()
| 566 | 0 | 28 |
a72128720d6fb6ac06e8346b112638b4f7a7b14c | 3,064 | py | Python | search/save_similarities.py | cotsog/pathways-backend | 9231731359fc97833dbdbca33ac23eebeac4f715 | [
"BSD-3-Clause"
] | null | null | null | search/save_similarities.py | cotsog/pathways-backend | 9231731359fc97833dbdbca33ac23eebeac4f715 | [
"BSD-3-Clause"
] | null | null | null | search/save_similarities.py | cotsog/pathways-backend | 9231731359fc97833dbdbca33ac23eebeac4f715 | [
"BSD-3-Clause"
] | null | null | null | import logging
from search.models import Task, TaskSimilarityScore, TaskServiceSimilarityScore
from human_services.services.models import Service
LOGGER = logging.getLogger(__name__)
| 38.3 | 91 | 0.629896 | import logging
from search.models import Task, TaskSimilarityScore, TaskServiceSimilarityScore
from human_services.services.models import Service
LOGGER = logging.getLogger(__name__)
def save_task_similarities(ids, similarities, count):
    """Rebuild all TaskSimilarityScore rows from a similarity matrix.

    For each task only the ``count`` highest-scoring *other* tasks are
    kept (ties at the cutoff score are all included).

    :param ids: task ids; ``similarities[i, j]`` scores ids[i] vs ids[j].
    :param similarities: square pairwise score matrix.
    :param count: number of similar tasks to keep per task.
    """
    TaskSimilarityScore.objects.all().delete()
    for i in range(len(ids)):
        # Scores against every other task (self-similarity excluded).
        similarities_for_task = [similarities[i, j] for j in range(len(ids)) if i != j]
        cutoff = compute_cutoff(similarities_for_task, count)
        for j in range(len(ids)):
            score = similarities[i, j]
            if i != j and score >= cutoff:
                record = TaskSimilarityScore(first_task_id=ids[i],
                                             second_task_id=ids[j],
                                             similarity_score=score)
                record.save()
def compute_cutoff(scores, count):
    """Return the ``count``-th largest value in ``scores``.

    If ``count`` exceeds the number of scores, the smallest score is
    returned.  Note: ``scores`` is sorted in place (descending).
    """
    scores.sort(reverse=True)
    index = min(count, len(scores)) - 1
    return scores[index]
def save_task_service_similarity_scores(task_ids, service_ids, similarities, count):
    """Rebuild all TaskServiceSimilarityScore rows from a similarity matrix.

    For each task only the ``count`` most similar services are kept
    (ties at the cutoff score are all included).

    :param task_ids: ids occupying the first ``len(task_ids)`` matrix rows.
    :param service_ids: ids occupying the following rows/columns.
    :param similarities: combined similarity matrix over tasks + services.
    :param count: number of similar services to keep per task.
    """
    TaskServiceSimilarityScore.objects.all().delete()
    task_count = len(task_ids)
    service_count = len(service_ids)
    # Assuming that the similarities are computed from a document vector
    # containing task descriptions *followed by* service descriptions
    def to_service_similarity_offset(service_index):
        return task_count + service_index
    for i in range(task_count):
        similarities_for_task = [similarities[i, to_service_similarity_offset(j)]
                                 for j in range(service_count)]
        cutoff = compute_cutoff(similarities_for_task, count)
        for j in range(service_count):
            score = similarities[i, to_service_similarity_offset(j)]
            if score >= cutoff:
                record = TaskServiceSimilarityScore(task_id=task_ids[i],
                                                    service_id=service_ids[j],
                                                    similarity_score=score)
                record.save()
def save_manual_similarities(manual_similarities):
    """Upsert hand-curated task/service links with the maximum score (1.0).

    :param manual_similarities: dict of task_id -> iterable of service ids;
        pairs referencing unknown tasks or services are skipped (the
        validators log a warning).
    """
    manual_similarity_score = 1.0
    for task_id, service_ids in manual_similarities.items():
        for service_id in service_ids:
            if is_task_id_valid(task_id) and is_service_id_valid(service_id):
                TaskServiceSimilarityScore.objects.update_or_create(
                    task_id=task_id,
                    service_id=service_id,
                    defaults={
                        'similarity_score': manual_similarity_score
                    }
                )
def is_task_id_valid(task_id):
    """Return True if a Task with ``task_id`` exists; warn and return False otherwise."""
    try:
        Task.objects.get(id=task_id)
        return True
    except Task.DoesNotExist:
        LOGGER.warning('%s: Failed to save manual similarity, no such task', task_id)
        return False
def is_service_id_valid(service_id):
    """Return True if a Service with ``service_id`` exists; warn and return False otherwise."""
    try:
        Service.objects.get(id=service_id)
        return True
    except Service.DoesNotExist:
        LOGGER.warning('%s: Failed to save manual similarity, no such service', service_id)
        return False
| 2,736 | 0 | 138 |
0274bef6940448267f887f270571dc5e94a02dc6 | 1,754 | py | Python | mystarwar/cmd.py | cizixs/mystarwar | 70e8fc2cccc27a094e4d3ea43b0709d78f79a3fe | [
"Apache-2.0"
] | null | null | null | mystarwar/cmd.py | cizixs/mystarwar | 70e8fc2cccc27a094e4d3ea43b0709d78f79a3fe | [
"Apache-2.0"
] | null | null | null | mystarwar/cmd.py | cizixs/mystarwar | 70e8fc2cccc27a094e4d3ea43b0709d78f79a3fe | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
import argparse
import os
import starwar
SUPPORTED_BACKEND = ('json', 'mongodb', 'mysql', 'sqlite')
def expand_path(path):
"""Return the absolute path for a given path.
Expand `~` and `.` characters, transform relative path to absolute one.
"""
if path is None:
path = 'data'
path = os.path.abspath(os.path.expanduser(path))
try:
if not os.path.isdir(path):
os.mkdir(path)
except OSError as err:
print("Can not create directory: %s" % err.message)
return None
return path
if __name__ == '__main__':
main() | 30.241379 | 106 | 0.581528 | # coding: utf-8
import argparse
import os
import starwar
SUPPORTED_BACKEND = ('json', 'mongodb', 'mysql', 'sqlite')
def expand_path(path):
    """Return the absolute path for a given path, creating the directory.

    Expand `~` and `.` characters, transform relative path to absolute one.
    Missing parent directories are created as well.

    :param path: directory path, or None for the default ``data`` directory.
    :returns: the absolute directory path, or None if it cannot be created.
    """
    if path is None:
        path = 'data'
    path = os.path.abspath(os.path.expanduser(path))
    try:
        if not os.path.isdir(path):
            # makedirs also creates missing parents (os.mkdir would fail).
            os.makedirs(path)
    except OSError as err:
        # Format the exception itself: OSError.message does not exist on
        # Python 3 and raised AttributeError in the original code.
        print("Can not create directory: %s" % err)
        return None
    return path
def main():
    """Parse command-line arguments and run the starwar dataset crawler."""
    parser = argparse.ArgumentParser(description="Mystarwar Cli: Own your own starwar dataset in secs.")
    parser.add_argument('-b', '--backend', dest='backend', default='json',
                        help='Backend to store the dataset, current support: json, mysql, mongo, sqlite.')
    parser.add_argument('-l', '--location', dest='location',
                        help='The backend location: directory for json backend, '
                             'connection url for mysql or mongo backend, filename for sqlite backend')
    args = parser.parse_args()
    args.backend = args.backend.lower()
    if args.backend not in SUPPORTED_BACKEND:
        raise ValueError(
            "Support %s to save data, but get %s" % (SUPPORTED_BACKEND,
                                                     args.backend))
    # Only the json backend stores into a directory that must exist.
    if args.backend == 'json':
        args.location = expand_path(args.location)
    # NOTE(review): this guard looks like dead code -- backend defaults to
    # 'json' and an empty value would already fail the membership check.
    if not args.backend:
        return
    c = starwar.StarwarCrawler(['http://swapi.co/api/'],
                               backend=args.backend,
                               location=args.location)
    c.run()
if __name__ == '__main__':
main() | 1,120 | 0 | 23 |
5d5dd02029a0eabbbf42dbfcca6767a8a68fc1a0 | 351 | py | Python | students/K33401/Nikitin_michael/lab2/lab2/tours/migrations/0007_remove_userscomments_created.py | mexannik1998/ITMO_ICT_WebDevelopment_2021-2022 | 0894edd7d49a73abba31f72266fdeb35fc3f6367 | [
"MIT"
] | null | null | null | students/K33401/Nikitin_michael/lab2/lab2/tours/migrations/0007_remove_userscomments_created.py | mexannik1998/ITMO_ICT_WebDevelopment_2021-2022 | 0894edd7d49a73abba31f72266fdeb35fc3f6367 | [
"MIT"
] | null | null | null | students/K33401/Nikitin_michael/lab2/lab2/tours/migrations/0007_remove_userscomments_created.py | mexannik1998/ITMO_ICT_WebDevelopment_2021-2022 | 0894edd7d49a73abba31f72266fdeb35fc3f6367 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.5 on 2021-01-29 10:34
from django.db import migrations
| 19.5 | 48 | 0.569801 | # Generated by Django 3.1.5 on 2021-01-29 10:34
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the ``created`` field from the ``userscomments`` model."""
    dependencies = [
        ('tours', '0006_auto_20210129_1334'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='userscomments',
            name='created',
        ),
    ]
| 0 | 239 | 25 |
404bc0dc24b4e5d46c95b30442b597b6ec72eb95 | 5,707 | py | Python | dict/engines/wordhippo.py | bubblesub/dict | 0f8c92d0f36c122435a39b15292612137c41c023 | [
"MIT"
] | 1 | 2021-09-06T01:39:27.000Z | 2021-09-06T01:39:27.000Z | dict/engines/wordhippo.py | bubblesub/dict | 0f8c92d0f36c122435a39b15292612137c41c023 | [
"MIT"
] | null | null | null | dict/engines/wordhippo.py | bubblesub/dict | 0f8c92d0f36c122435a39b15292612137c41c023 | [
"MIT"
] | null | null | null | """Definition of the WordHippoEngine."""
import argparse
from collections.abc import Callable, Iterable
from dataclasses import dataclass
from enum import IntEnum
from typing import IO, Optional
from urllib.parse import quote
import lxml.etree
import requests
from dict.colors import COLOR_HIGHLIGHT, COLOR_RESET
from dict.engines.base import BaseEngine
from dict.pager import print_in_columns
MEANINGS_URL = (
"https://www.wordhippo.com/what-is/the-meaning-of-the-word/{}.html"
)
SYNONYMS_URL = "https://www.wordhippo.com/what-is/another-word-for/{}.html"
USER_AGENT = (
"Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20100101 Firefox/10.0"
)
class WordHippoLookupMode(IntEnum):
"""WordHippo engine lookup target."""
SYNONYMS = 1
MEANINGS = 2
@dataclass
class BaseWordHippoResult:
"""Base WordHippo engine result."""
word_type: str
@property
def column_size(self) -> int:
"""Maximum item length to align the results in a table.
:return: max item length
"""
raise NotImplementedError("not implemented") # pragma: no cover
def print_to_stream(self, file: IO[str], column_size: int) -> None:
"""Print self to the given stream.
:param file: output stream
:param column_size: column size for aligning the results in a table
"""
raise NotImplementedError("not implemented") # pragma: no cover
TLookupFunc = Callable[[str], Iterable[BaseWordHippoResult]]
@dataclass
class WordHippoMeaningResult(BaseWordHippoResult):
"""WordHippo engine meaning result."""
meanings: list[str]
@property
@dataclass
class WordHippoSynonymResult(BaseWordHippoResult):
"""WordHippo engine synonym result."""
word_desc: str
synonyms: list[str]
@property
class WordHippoEngine(BaseEngine[BaseWordHippoResult]):
"""WordHippo engine."""
names = ["wordhippo"]
@staticmethod
@staticmethod
def get_synonyms(phrase: str) -> Iterable[WordHippoSynonymResult]:
"""Get synonyms for the given phrase.
:param phrase: phrase to look up
:return: a generator of synonyms
"""
url = SYNONYMS_URL.format(quote(phrase))
response = requests.get(url, headers={"User-Agent": USER_AGENT})
response.raise_for_status()
doc = lxml.etree.HTML(response.text)
for word_desc_node in doc.cssselect("div.tabdesc"):
word_type_node = word_desc_node.getprevious()
related_word_nodes = word_desc_node.getnext().cssselect("div.wb a")
yield WordHippoSynonymResult(
word_type=(word_type_node.text or "").strip(),
word_desc=_get_text_from_node(word_desc_node),
synonyms=list(map(_get_text_from_node, related_word_nodes)),
)
@staticmethod
def get_meanings(phrase: str) -> Iterable[WordHippoMeaningResult]:
"""Get meanings for the given phrase.
:param phrase: phrase to look up
:return: a generator of meanings
"""
url = MEANINGS_URL.format(quote(phrase))
response = requests.get(url, headers={"User-Agent": USER_AGENT})
response.raise_for_status()
doc = lxml.etree.HTML(response.text)
for word_type_node in doc.cssselect("div.defv2wordtype"):
meaning_word_nodes = word_type_node.getnext().cssselect(
".topleveldefinition li"
)
yield WordHippoMeaningResult(
word_type=_get_text_from_node(word_type_node),
meanings=list(map(_get_text_from_node, meaning_word_nodes)),
)
| 31.185792 | 79 | 0.642544 | """Definition of the WordHippoEngine."""
import argparse
from collections.abc import Callable, Iterable
from dataclasses import dataclass
from enum import IntEnum
from typing import IO, Optional
from urllib.parse import quote
import lxml.etree
import requests
from dict.colors import COLOR_HIGHLIGHT, COLOR_RESET
from dict.engines.base import BaseEngine
from dict.pager import print_in_columns
MEANINGS_URL = (
"https://www.wordhippo.com/what-is/the-meaning-of-the-word/{}.html"
)
SYNONYMS_URL = "https://www.wordhippo.com/what-is/another-word-for/{}.html"
USER_AGENT = (
"Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20100101 Firefox/10.0"
)
class WordHippoLookupMode(IntEnum):
    """WordHippo engine lookup target."""
    SYNONYMS = 1  # look up synonyms (the default mode)
    MEANINGS = 2  # look up meanings/definitions
def _get_text_from_node(node: lxml.etree.Element) -> str:
    """Concatenate every text fragment under ``node`` into one string."""
    fragments = node.itertext()
    return "".join(fragments)
@dataclass
class BaseWordHippoResult:
"""Base WordHippo engine result."""
word_type: str
@property
def column_size(self) -> int:
"""Maximum item length to align the results in a table.
:return: max item length
"""
raise NotImplementedError("not implemented") # pragma: no cover
def print_to_stream(self, file: IO[str], column_size: int) -> None:
"""Print self to the given stream.
:param file: output stream
:param column_size: column size for aligning the results in a table
"""
raise NotImplementedError("not implemented") # pragma: no cover
TLookupFunc = Callable[[str], Iterable[BaseWordHippoResult]]
@dataclass
class WordHippoMeaningResult(BaseWordHippoResult):
    """WordHippo engine meaning result."""

    meanings: list[str]

    @property
    def column_size(self) -> int:
        """Length of the longest meaning string."""
        return max(len(meaning) for meaning in self.meanings)

    def print_to_stream(self, file: IO[str], column_size: int) -> None:
        """Print the highlighted word type, then one bullet per meaning."""
        header = f"{COLOR_HIGHLIGHT}{self.word_type}{COLOR_RESET}"
        print(header, file=file)
        for meaning in self.meanings:
            print(f"- {meaning}", file=file)
        print(file=file)
@dataclass
class WordHippoSynonymResult(BaseWordHippoResult):
    """WordHippo engine synonym result."""
    word_desc: str
    synonyms: list[str]
    @property
    def column_size(self) -> int:
        """Length of the longest synonym string."""
        return max(map(len, self.synonyms))
    def print_to_stream(self, file: IO[str], column_size: int) -> None:
        """Print a highlighted "type (description)" header, then the
        synonyms laid out in columns of width ``column_size``."""
        print(
            COLOR_HIGHLIGHT
            + f"{self.word_type} ({self.word_desc})"
            + COLOR_RESET,
            file=file,
        )
        print_in_columns(
            (synonym for synonym in self.synonyms),
            column_size=column_size,
            file=file,
        )
class WordHippoEngine(BaseEngine[BaseWordHippoResult]):
    """WordHippo engine."""

    names = ["wordhippo"]

    @staticmethod
    def decorate_arg_parser(parser: argparse.ArgumentParser) -> None:
        """Register the -s/-m lookup-mode switches on ``parser``."""
        parser.add_argument(
            "-s",
            action="store_const",
            dest="lookup_mode",
            const=WordHippoLookupMode.SYNONYMS,
            help="look for synonyms (default)",
        )
        parser.add_argument(
            "-m",
            action="store_const",
            dest="lookup_mode",
            const=WordHippoLookupMode.MEANINGS,
            help="look for meanings",
        )

    def lookup_phrase(
        self, args: argparse.Namespace, phrase: str
    ) -> Iterable[BaseWordHippoResult]:
        """Look up ``phrase`` according to ``args.lookup_mode``.

        Defaults to the synonym lookup when no mode flag was given.
        """
        func_map: dict[Optional[int], TLookupFunc] = {
            WordHippoLookupMode.SYNONYMS: self.get_synonyms,
            WordHippoLookupMode.MEANINGS: self.get_meanings,
            None: self.get_synonyms,
        }
        func = func_map[args.lookup_mode]
        yield from func(phrase)

    @staticmethod
    def get_synonyms(phrase: str) -> Iterable[WordHippoSynonymResult]:
        """Get synonyms for the given phrase.

        :param phrase: phrase to look up
        :return: a generator of synonyms
        """
        url = SYNONYMS_URL.format(quote(phrase))
        response = requests.get(url, headers={"User-Agent": USER_AGENT})
        response.raise_for_status()
        doc = lxml.etree.HTML(response.text)
        for word_desc_node in doc.cssselect("div.tabdesc"):
            word_type_node = word_desc_node.getprevious()
            related_word_nodes = word_desc_node.getnext().cssselect("div.wb a")
            yield WordHippoSynonymResult(
                word_type=(word_type_node.text or "").strip(),
                word_desc=_get_text_from_node(word_desc_node),
                synonyms=list(map(_get_text_from_node, related_word_nodes)),
            )

    @staticmethod
    def get_meanings(phrase: str) -> Iterable[WordHippoMeaningResult]:
        """Get meanings for the given phrase.

        :param phrase: phrase to look up
        :return: a generator of meanings
        """
        url = MEANINGS_URL.format(quote(phrase))
        response = requests.get(url, headers={"User-Agent": USER_AGENT})
        response.raise_for_status()
        doc = lxml.etree.HTML(response.text)
        for word_type_node in doc.cssselect("div.defv2wordtype"):
            meaning_word_nodes = word_type_node.getnext().cssselect(
                ".topleveldefinition li"
            )
            yield WordHippoMeaningResult(
                word_type=_get_text_from_node(word_type_node),
                meanings=list(map(_get_text_from_node, meaning_word_nodes)),
            )

    def print_results(
        self, results: Iterable[BaseWordHippoResult], file: IO[str]
    ) -> None:
        """Print all results, aligned to the widest item.

        ``results`` may be a one-shot generator (``lookup_phrase`` returns
        one), so it is materialized first: the previous implementation
        exhausted the iterable while computing the column width and then
        printed nothing.  An empty result set prints nothing instead of
        raising ValueError from ``max()``.
        """
        materialized = list(results)
        if not materialized:
            return
        column_size = max(result.column_size for result in materialized)
        for result in materialized:
            result.print_to_stream(file=file, column_size=column_size)
| 1,862 | 0 | 209 |
f5a03ad20eeda501a202d9a0e2603e684f2246a7 | 2,114 | py | Python | dla/tests/test_arnoldi.py | mp4096/dla | a3a1ae1c86abe62e321ca2d2fba89d30c39ba6ef | [
"BSD-3-Clause"
] | null | null | null | dla/tests/test_arnoldi.py | mp4096/dla | a3a1ae1c86abe62e321ca2d2fba89d30c39ba6ef | [
"BSD-3-Clause"
] | null | null | null | dla/tests/test_arnoldi.py | mp4096/dla | a3a1ae1c86abe62e321ca2d2fba89d30c39ba6ef | [
"BSD-3-Clause"
] | null | null | null | """Test the Arnoldi implementation."""
from dla.linalg import arnoldi
import numpy as np
import numpy.testing as npt
from scipy.linalg import subspace_angles
a = np.array([
[9.0, 1.0, 0.0, 0.0, 0.0, 0.0],
[1.0, 8.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 5.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 6.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 8.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 1.0, 5.0],
])
b = np.array([
[1.0],
[0.0],
[3.0],
[1.0],
[0.0],
[0.0],
])
def test_arnoldi_simple():
"""Test the Arnoldi algorithm for a simple system."""
num_directions = 3
x = np.empty((a.shape[0], num_directions))
x[:, 0] = b.squeeze()
x[:, 0] /= np.linalg.norm(x[:, 0])
for i in range(1, num_directions):
x[:, i] = np.linalg.solve(a, x[:, i - 1])
x[:, i] /= np.linalg.norm(x[:, i])
rks = arnoldi(a, b, num_directions)
npt.assert_almost_equal(np.abs(subspace_angles(x, rks)).max(), 0.0)
def test_arnoldi_xxl():
"""Test the Arnoldi algorithm for a larger system."""
np.random.seed(777)
a_xxl = np.random.rand(100, 100)
b_xxl = np.random.rand(100)
num_directions = 10
x = np.empty((a_xxl.shape[0], num_directions))
x[:, 0] = b_xxl
x[:, 0] /= np.linalg.norm(x[:, 0])
for i in range(1, num_directions):
x[:, i] = np.linalg.solve(a_xxl, x[:, i - 1])
x[:, i] /= np.linalg.norm(x[:, i])
rks = arnoldi(a_xxl, b_xxl, num_directions)
npt.assert_almost_equal(np.abs(subspace_angles(x, rks)).max(), 0.0)
def test_arnoldi_orthogonality():
"""Test if the Arnoldi implementation produces an orthogonal basis."""
num_directions = 4
rks = arnoldi(a, b, num_directions)
for i in range(num_directions):
for j in range(i):
npt.assert_almost_equal(np.dot(rks[:, i], rks[:, j]), 0.0)
def test_arnoldi_normalisation():
"""Test if the Arnoldi implementation produces an normalised basis."""
num_directions = 4
rks = arnoldi(a, b, num_directions)
npt.assert_almost_equal(
np.linalg.norm(rks, axis=0),
np.ones((num_directions,)),
)
| 28.958904 | 74 | 0.577578 | """Test the Arnoldi implementation."""
from dla.linalg import arnoldi
import numpy as np
import numpy.testing as npt
from scipy.linalg import subspace_angles
a = np.array([
[9.0, 1.0, 0.0, 0.0, 0.0, 0.0],
[1.0, 8.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 5.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 6.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 8.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 1.0, 5.0],
])
b = np.array([
[1.0],
[0.0],
[3.0],
[1.0],
[0.0],
[0.0],
])
def test_arnoldi_simple():
    """Test the Arnoldi algorithm for a simple system."""
    num_directions = 3
    # Reference basis built naively: b, A^-1 b, A^-2 b (each normalised).
    x = np.empty((a.shape[0], num_directions))
    x[:, 0] = b.squeeze()
    x[:, 0] /= np.linalg.norm(x[:, 0])
    for i in range(1, num_directions):
        x[:, i] = np.linalg.solve(a, x[:, i - 1])
        x[:, i] /= np.linalg.norm(x[:, i])
    rks = arnoldi(a, b, num_directions)
    # All principal angles zero <=> the two bases span the same subspace.
    npt.assert_almost_equal(np.abs(subspace_angles(x, rks)).max(), 0.0)
def test_arnoldi_xxl():
    """Test the Arnoldi algorithm for a larger system."""
    np.random.seed(777)  # deterministic test data
    a_xxl = np.random.rand(100, 100)
    b_xxl = np.random.rand(100)
    num_directions = 10
    # Reference basis built naively: b, A^-1 b, ... (each normalised).
    x = np.empty((a_xxl.shape[0], num_directions))
    x[:, 0] = b_xxl
    x[:, 0] /= np.linalg.norm(x[:, 0])
    for i in range(1, num_directions):
        x[:, i] = np.linalg.solve(a_xxl, x[:, i - 1])
        x[:, i] /= np.linalg.norm(x[:, i])
    rks = arnoldi(a_xxl, b_xxl, num_directions)
    # All principal angles zero <=> the two bases span the same subspace.
    npt.assert_almost_equal(np.abs(subspace_angles(x, rks)).max(), 0.0)
def test_arnoldi_orthogonality():
    """Test if the Arnoldi implementation produces an orthogonal basis."""
    num_directions = 4
    rks = arnoldi(a, b, num_directions)
    # Every distinct pair of basis vectors must have a zero dot product.
    for i in range(num_directions):
        for j in range(i):
            npt.assert_almost_equal(np.dot(rks[:, i], rks[:, j]), 0.0)
def test_arnoldi_normalisation():
    """Verify each column of the arnoldi basis is scaled to unit length."""
    n_dirs = 4
    basis = arnoldi(a, b, n_dirs)
    expected = np.ones((n_dirs,))
    npt.assert_almost_equal(np.linalg.norm(basis, axis=0), expected)
| 0 | 0 | 0 |
8e7b99b3286e2086dc64ba2272a4da8ef40cb9cf | 2,573 | py | Python | CKC102_python_example.py | sagenew/scc-ckc-api-examples | fd86e435877cf68f35d01b8314a47a08b83eb391 | [
"MIT"
] | null | null | null | CKC102_python_example.py | sagenew/scc-ckc-api-examples | fd86e435877cf68f35d01b8314a47a08b83eb391 | [
"MIT"
] | null | null | null | CKC102_python_example.py | sagenew/scc-ckc-api-examples | fd86e435877cf68f35d01b8314a47a08b83eb391 | [
"MIT"
] | null | null | null | import urllib.parse, urllib.request, json, ssl
# Authentication and API Requests
# LEARNING LAB 2 Cisco Kinetic for Cities
# The Initial login steps are the same as Learning Lab 1.
# You can skip ahead to 'LEARNING LAB 2 CODE BEGINS HERE'
# Ignore invalid certificates (the sandbox presents a self-signed cert).
ssl._create_default_https_context = ssl._create_unverified_context
# Bugfix: `encoding` is used below in response.read().decode(encoding) but was
# never defined, so the script crashed with a NameError on the first response.
# The CKC API returns UTF-8 encoded JSON.
encoding = 'utf-8'
############################### LEARNING LAB 2 CODE BEGINS HERE ############################
#
# In this example, we will exercise the CKC API: {{Platform Instance URL}}/cdp/v1/locations/user/{userId}/info
# In the case of the Sandbox lab, this resolves to https://ckcsandbox.cisco.com/t/devnet.com/cdp/v1/locations/user/{userId}/info
# The access_token and user_id from Learning Lab 1 will be used to obtain the current Users Location Information
print('Learning Lab 2 Starts Here:')
# Credentials obtained from the Learning Lab 1 login flow.
user_id = '86847897-ab35-489c-af17-6fbf301a6016'
access_token = '0f493c98-9689-37c4-ad76-b957020d0d6c'
# Define the required GET headers needed by the CKC API.
headers = {
    'authorization': "Bearer " + access_token,
    'Content-Type': "application/json"
}
# The URL with the user id path parameter to request user location details.
requestUrl = 'https://ckcsandbox.cisco.com/t/devnet.com/cdp/v1/locations/user/' + user_id + '/info'
print('\nGetting User Location Info: (' + requestUrl + ')\n')
# Create and perform the request, then decode the JSON body.
request = urllib.request.Request(requestUrl, headers = headers)
response = urllib.request.urlopen(request)
results = response.read().decode(encoding)
responseDictionary = json.loads(results)
print('User Location Info:', results, '\n')
############################### LEARNING LAB 2 PART-2 ############################
#
# In this example, we will exercise the CKC API: {{Platform Instance URL}}/cdp/v1/capabilities/customer
# In the case of the Sandbox lab, this resolves to https://ckcsandbox.cisco.com/t/devnet.com/cdp/v1/capabilities/customer
# The access_token obtained as explained in Learning Lab 1 is used for authorization
# Only the bearer token is required for this endpoint.
headers = {'authorization': "Bearer " + access_token }
# The URL to request the customer's capabilities.
requestUrl = 'https://ckcsandbox.cisco.com/t/devnet.com/cdp/v1/capabilities/customer'
print('\nGetting User capabilities: (' + requestUrl + ')\n')
# Create and perform the request, then decode the JSON body.
request = urllib.request.Request(requestUrl, headers = headers)
response = urllib.request.urlopen(request)
results = response.read().decode(encoding)
responseDictionary = json.loads(results)
print('User Capabilities:', results, '\n')
| 37.289855 | 128 | 0.724835 | import urllib.parse, urllib.request, json, ssl
# Authentication and API Requests
# LEARNING LAB 2 Cisco Kinetic for Cities
# The Initial login steps are the same as Learning Lab 1.
# You can skip ahead to 'LEARNING LAB 2 CODE BEGINS HERE'
# Ignore invalid certificates (the sandbox presents a self-signed cert).
ssl._create_default_https_context = ssl._create_unverified_context
# Bugfix: `encoding` is used below in response.read().decode(encoding) but was
# never defined, so the script crashed with a NameError on the first response.
# The CKC API returns UTF-8 encoded JSON.
encoding = 'utf-8'
############################### LEARNING LAB 2 CODE BEGINS HERE ############################
#
# In this example, we will exercise the CKC API: {{Platform Instance URL}}/cdp/v1/locations/user/{userId}/info
# In the case of the Sandbox lab, this resolves to https://ckcsandbox.cisco.com/t/devnet.com/cdp/v1/locations/user/{userId}/info
# The access_token and user_id from Learning Lab 1 will be used to obtain the current Users Location Information
print('Learning Lab 2 Starts Here:')
# Credentials obtained from the Learning Lab 1 login flow.
user_id = '86847897-ab35-489c-af17-6fbf301a6016'
access_token = '0f493c98-9689-37c4-ad76-b957020d0d6c'
# Define the required GET headers needed by the CKC API.
headers = {
    'authorization': "Bearer " + access_token,
    'Content-Type': "application/json"
}
# The URL with the user id path parameter to request user location details.
requestUrl = 'https://ckcsandbox.cisco.com/t/devnet.com/cdp/v1/locations/user/' + user_id + '/info'
print('\nGetting User Location Info: (' + requestUrl + ')\n')
# Create and perform the request, then decode the JSON body.
request = urllib.request.Request(requestUrl, headers = headers)
response = urllib.request.urlopen(request)
results = response.read().decode(encoding)
responseDictionary = json.loads(results)
print('User Location Info:', results, '\n')
############################### LEARNING LAB 2 PART-2 ############################
#
# In this example, we will exercise the CKC API: {{Platform Instance URL}}/cdp/v1/capabilities/customer
# In the case of the Sandbox lab, this resolves to https://ckcsandbox.cisco.com/t/devnet.com/cdp/v1/capabilities/customer
# The access_token obtained as explained in Learning Lab 1 is used for authorization
# Only the bearer token is required for this endpoint.
headers = {'authorization': "Bearer " + access_token }
# The URL to request the customer's capabilities.
requestUrl = 'https://ckcsandbox.cisco.com/t/devnet.com/cdp/v1/capabilities/customer'
print('\nGetting User capabilities: (' + requestUrl + ')\n')
# Create and perform the request, then decode the JSON body.
request = urllib.request.Request(requestUrl, headers = headers)
response = urllib.request.urlopen(request)
results = response.read().decode(encoding)
responseDictionary = json.loads(results)
print('User Capabilities:', results, '\n')
| 0 | 0 | 0 |
9023b23619aa93ca93a974d8970bf8835be08e4b | 756 | py | Python | gitcd/package.py | pchr-srf/gitcd | 8b4f08a8221c6fd09ba83e055c1dddabcde80b01 | [
"Apache-2.0"
] | null | null | null | gitcd/package.py | pchr-srf/gitcd | 8b4f08a8221c6fd09ba83e055c1dddabcde80b01 | [
"Apache-2.0"
] | 1 | 2019-03-11T19:44:33.000Z | 2019-03-11T19:44:33.000Z | gitcd/package.py | pchr-srf/gitcd | 8b4f08a8221c6fd09ba83e055c1dddabcde80b01 | [
"Apache-2.0"
] | null | null | null | import pkg_resources
import requests
import pip
from gitcd.exceptions import GitcdPyPiApiException
| 25.2 | 62 | 0.617725 | import pkg_resources
import requests
import pip
from gitcd.exceptions import GitcdPyPiApiException
class Package(object):
    """Query and upgrade the locally installed ``gitcd`` distribution."""
    # JSON metadata endpoint for the gitcd project on PyPI.
    packageUrl = 'https://pypi.org/pypi/gitcd/json'
    def upgrade(self):
        """Upgrade gitcd through pip into the user's site-packages."""
        pip.main(['install', '--user', '--upgrade', 'gitcd'])
    def getLocalVersion(self):
        """Return the version string of the locally installed gitcd."""
        return pkg_resources.get_distribution("gitcd").version
    def getPypiVersion(self):
        """Return the latest gitcd version published on PyPI.

        Raises:
            GitcdPyPiApiException: if the PyPI API does not answer with 200.
        """
        response = requests.get(
            self.packageUrl
        )
        if response.status_code != 200:
            raise GitcdPyPiApiException(
                "Could not fetch version info on PyPi site." +
                "You need to check manually, sorry for that."
            )
        result = response.json()
        return result['info']['version']
| 498 | 135 | 23 |
f6ee7ab6de4138d80cc0d69f23edffd1e8ec6327 | 1,309 | py | Python | Charlatan/charlatan/data/business.py | NCPlayz/Charlatan | 1867486f0b06d399310a674d00e5e362140caf1e | [
"MIT"
] | null | null | null | Charlatan/charlatan/data/business.py | NCPlayz/Charlatan | 1867486f0b06d399310a674d00e5e362140caf1e | [
"MIT"
] | null | null | null | Charlatan/charlatan/data/business.py | NCPlayz/Charlatan | 1867486f0b06d399310a674d00e5e362140caf1e | [
"MIT"
] | null | null | null | from charlatan.helper import fetch
from random import choice, uniform
from charlatan.misc.business import CURRENCIES, CURRENCY_SYMBOLS
| 27.270833 | 67 | 0.607334 | from charlatan.helper import fetch
from random import choice, uniform
from charlatan.misc.business import CURRENCIES, CURRENCY_SYMBOLS
class Business:
    """Random business-related fake data (companies, prices) for a locale."""
    def __init__(self, locale):
        # Locale code selecting the data file and the currency symbol.
        self.locale = locale
        self.data = 'business'
        # Locale-specific data set, loaded once at construction time.
        self.fetch = fetch(self.data, self.locale)
    @property
    def company_type(self):
        """Return a random company type title."""
        company_type = self.fetch['company']['type']['title']
        return choice(company_type)
    @property
    def company_type_abbr(self):
        """Return a random company type abbreviation.

        NOTE(review): this reads the same 'title' list as company_type —
        presumably it should read an abbreviation list instead; confirm
        against the business data files.
        """
        company_type_abbr = self.fetch['company']['type']['title']
        return choice(company_type_abbr)
    @property
    def company(self):
        """Return a random company name."""
        companies = self.fetch['company']['name']
        return choice(companies)
    @property
    def copyright(self):
        """Return a copyright line built from random company data."""
        return f'© {self.company}, {self.company_type_abbr}'
    @property
    def currency_iso(self):
        """Return a random ISO currency code."""
        return choice(CURRENCIES)
    def price(self, minimum=10.00, maximum=1000.00):
        """Return a random price in [minimum, maximum] with a locale symbol."""
        currencies = CURRENCY_SYMBOLS
        price = uniform(
            float(minimum),
            float(maximum),
        )
        # Two decimals plus the currency symbol for the locale (or default).
        fmt = '{0:.2f} {1}'
        if self.locale in currencies:
            return fmt.format(price, currencies[self.locale])
return fmt.format(price, currencies['default']) | 877 | 270 | 24 |
5a563c670f6a0996124760432a25ca5058b92169 | 726 | py | Python | tests/sentry/interfaces/query/tests.py | NickPresta/sentry | ed3e6034ef560e18f392ba30071de860557b7b43 | [
"BSD-3-Clause"
] | 2 | 2015-10-14T12:45:32.000Z | 2016-01-27T03:24:43.000Z | tests/sentry/interfaces/query/tests.py | NickPresta/sentry | ed3e6034ef560e18f392ba30071de860557b7b43 | [
"BSD-3-Clause"
] | null | null | null | tests/sentry/interfaces/query/tests.py | NickPresta/sentry | ed3e6034ef560e18f392ba30071de860557b7b43 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from sentry.testutils import TestCase, fixture
from sentry.interfaces import Query
| 26.888889 | 66 | 0.658402 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from sentry.testutils import TestCase, fixture
from sentry.interfaces import Query
class QueryTest(TestCase):
    """Unit tests for the Query interface (serialization, hashing, search)."""
    @fixture
    def interface(self):
        # Shared Query instance under test.
        return Query(query='SELECT 1', engine='psycopg2')
    def test_serialize_behavior(self):
        # serialize() must round-trip both the query text and the engine name.
        assert self.interface.serialize() == {
            'query': self.interface.query,
            'engine': self.interface.engine,
        }
    def test_get_hash_uses_query(self):
        # The grouping hash is derived from the query text only.
        assert self.interface.get_hash() == [self.interface.query]
    def test_get_search_context(self):
        # The full-text search context exposes the raw query string.
        assert self.interface.get_search_context(self.event) == {
            'text': [self.interface.query],
        }
| 429 | 125 | 23 |
c30b399122553d0af5d0c8b7276f4597bcbd18da | 834 | py | Python | Lista-Exercicios/Exercicio3.py | m4g1CS/Target-Test | 79de28cc231a4695a2847fcc18216dc8924d8309 | [
"MIT"
] | null | null | null | Lista-Exercicios/Exercicio3.py | m4g1CS/Target-Test | 79de28cc231a4695a2847fcc18216dc8924d8309 | [
"MIT"
] | null | null | null | Lista-Exercicios/Exercicio3.py | m4g1CS/Target-Test | 79de28cc231a4695a2847fcc18216dc8924d8309 | [
"MIT"
] | null | null | null | #Exercício 3
#A - 1,3,5,7,
#B 2,4,8,16,32,64
#C 0,1,4,9,16,32,49
#D 4,16,36,64
#E 1,1,2,3,5,8
#F 2,10,12,16,17,18,19
print(f'O proximo termo: 200. Nao achei uma forma logica de calcular o valor, apenas supondo que todos comecem com a leta D')
exA(5)
print('\n')
exB(7)
print('\n')
exC(8)
print('\n')
exD(64)
print('\n')
print(f'O proximo termo: {exE(8)}') | 17.744681 | 125 | 0.517986 | #Exercício 3
#A - 1,3,5,7,
def exA(a):
r = 3 - 1
x = 1 + (a-1)*r
return print(f'O proximo termo: {x}')
#B 2,4,8,16,32,64
def exB(b):
r = 4 / 2
x = 2*(r**(b-1))
print(f'O proximo termo: {x:.0f}')
#C 0,1,4,9,16,32,49
def exC(c):
x = (c-1)**2
return print(f'O proximo termo: {x}')
#D 4,16,36,64
def exD(d):
r = 28 + 8
x = d + r
return print(f'O proximo termo: {x}')
#E 1,1,2,3,5,8
def exE(e):
if e == 1:
return 0
elif e == 2:
return 1
else:
return exE(e-1) + exE(e-2)
#F 2,10,12,16,17,18,19
print(f'O proximo termo: 200. Nao achei uma forma logica de calcular o valor, apenas supondo que todos comecem com a leta D')
exA(5)
print('\n')
exB(7)
print('\n')
exC(8)
print('\n')
exD(64)
print('\n')
print(f'O proximo termo: {exE(8)}') | 341 | 0 | 110 |
765c249c4db56a7524ae1d5aaab74ef2f25bccfa | 5,113 | py | Python | qcm/SetUpMcbSubtree.py | golfit/work-archive | bdd37d46fda3fde15ec2164d3335d6b4ed576bd7 | [
"MIT"
] | null | null | null | qcm/SetUpMcbSubtree.py | golfit/work-archive | bdd37d46fda3fde15ec2164d3335d6b4ed576bd7 | [
"MIT"
] | null | null | null | qcm/SetUpMcbSubtree.py | golfit/work-archive | bdd37d46fda3fde15ec2164d3335d6b4ed576bd7 | [
"MIT"
] | null | null | null | # Create nodes associated with Master Controller Board of Shoelace Antenna matching network.
# 23 Jan 2012, Ted Golfinopoulos
from MDSplus import *
tree=Tree("MAGNETICS", -1) #Open model tree
#For now, do work on a test shot until design is in steady state.
s=1120125998
tree.createPulse(s) #Create a test shot
tree = Tree("MAGNETICS", s, "EDIT")
# Set the default directory to SHOELACE location.
tree.setDefault(tree.getNode("SHOELACE"))
# Add a child node (subtree, "STRUCTURE") for the Master Controller Board (MCB) -
# this will store the associated nodes for the board outputs.
tree.addNode("MCB_OUT", "STRUCTURE")
tree.setDefault(tree.getNode("MCB_OUT")) #Make this child node default for further relative path references
tree.addNode("COMMENT", "TEXT")
tree.getNode("COMMENT").putData("Nodes pertaining to processing of outputs from MASTER CONTROLLER BOARD (MCB). ENCODER_CLK=clock frequency used by number encoder - divided down from MCB clock (F_CLK).")
tree.addNode("ENCODER_CLK", "NUMERIC")
tree.getNode("ENCODER_CLK").putData(4.0E6/16.0)
#Add nodes for logic states (codes) for series and parallel capacitor banks
### CONVENIENCE FUNCTIONS
#Function to print string for building a raw voltage signal given path references.
#Function setting up nodes for series and parallel codes (stores time-encoded signal). Identical topology for series and parallel caps - only names change.
# tree.setDefault(tree.getNode(".-.")) #Reset default to parent level.
###
#Write in changes
#tree.write()
#Stamp in serial code nodes
nodeName="SER_CODE"
datPath="\\MAGNETICS::TOP.ACTIVE_MHD.DATA_ACQ.CPCI:ACQ_216_3:INPUT_09"
comment=nodeName+"=number encoding which boards should turn on which capacitors in associated series bank. (Number which specifies tuning configuration for caps.); NBITS=number of bits in binary number encoded in signal"
buildCodeNode(tree,nodeName,datPath, comment)
#Stamp in parallel code nodes
nodeName="PAR_CODE"
datPath="\\MAGNETICS::TOP.ACTIVE_MHD.DATA_ACQ.CPCI:ACQ_216_3:INPUT_10"
comment=nodeName+"=number encoding which boards should turn on which capacitors in associated parallel bank. (Number which specifies tuning configuration for caps.); NBITS=number of bits in binary number encoded in signal"
buildCodeNode(tree,nodeName,datPath, comment)
#Add node for interpreting frequency determined from period counter on MCB.
nodeName="N_PER"
datPath="\\MAGNETICS::TOP.ACTIVE_MHD.DATA_ACQ.CPCI:ACQ_216_3:INPUT_11"
comment="N_PER=number of clock cycles counted in M periods of sync signal; M=Number of signal (sync) cycles over which sync period is counted; F_CLK=clock frequency of Master Controller Board (MCB); FREQ_OUT = F_CLK*M/N_PER [Hz]; NBITS=number of bits in binary number encoded in signals"
#Set up basic node template.
buildCodeNode(tree, nodeName, datPath, comment)
#Add additional nodes associated with converting clock counts per period into frequency.
tree.setDefault(tree.getNode("N_PER"))
tree.addNode("M", "NUMERIC")
tree.getNode("M").putData(50.) #Number of sync counts in accumulation period
tree.addNode("F_CLK", "NUMERIC")
tree.getNode("F_CLK").putData(4000000.) # Clock frequency on MCB [Hz]
tree.addNode("FREQ_OUT", "SIGNAL") #This node will hold the calculated frequency from the MCB.
tree.getNode("NBITS").putData(14) #Overwrite previous nbits number with correct amount for period counter.
datPath="\\MAGNETICS::TOP.SHOELACE.MCB_OUT:N_PER"
freqCalcTdi="GETNCI("+datPath+", \"ON\") ? Build_Signal(Build_With_Units(F_CLK*M/("+datPath+"), \"Hz\"), *, DIM_OF("+datPath+") ) : ABORT()" #Ternary operator determining whether parent node is on; calculate frequency using clock counts.
print(freqCalcTdi)
tree.getNode("FREQ_OUT").putData(Data.compile(freqCalcTdi))
#Write changes to tree.
tree.write()
#GETNCI(\MAGNETICS::TOP.SHOELACE.MCB_OUT:SER_CODE, "ON") ? Build_Signal(Build_With_Units(\MAGNETICS::TOP.ACTIVE_MHD.DATA_ACQ.CPCI:ACQ_216_3:INPUT_09, "V"), *, DIM_OF(\MAGNETICS::TOP.ACTIVE_MHD.DATA_ACQ.CPCI:ACQ_216_3:INPUT_09) : ABORT()
#GETNCI(BP1T_GHK, "ON") ? Build_Signal(Build_With_Units(.-.DATA_ACQ.CPCI:ACQ_216_1:INPUT_07 * 1 / (\MAG_RF_COILS:CALIB[59] * 1), "Tesla/s"), *, DIM_OF(.-.DATA_ACQ.CPCI:ACQ_216_1:INPUT_07)) : ABORT()
#\MAGNETICS::TOP.ACTIVE_MHD.DATA_ACQ.CPCI:ACQ_216_3:INPUT_09
| 56.186813 | 287 | 0.766282 | # Create nodes associated with Master Controller Board of Shoelace Antenna matching network.
# 23 Jan 2012, Ted Golfinopoulos
from MDSplus import *
tree=Tree("MAGNETICS", -1) #Open model tree
#For now, do work on a test shot until design is in steady state.
s=1120125998
tree.createPulse(s) #Create a test shot
tree = Tree("MAGNETICS", s, "EDIT")
# Set the default directory to SHOELACE location.
tree.setDefault(tree.getNode("SHOELACE"))
# Add a child node (subtree, "STRUCTURE") for the Master Controller Board (MCB) -
# this will store the associated nodes for the board outputs.
tree.addNode("MCB_OUT", "STRUCTURE")
tree.setDefault(tree.getNode("MCB_OUT")) #Make this child node default for further relative path references
tree.addNode("COMMENT", "TEXT")
tree.getNode("COMMENT").putData("Nodes pertaining to processing of outputs from MASTER CONTROLLER BOARD (MCB). ENCODER_CLK=clock frequency used by number encoder - divided down from MCB clock (F_CLK).")
tree.addNode("ENCODER_CLK", "NUMERIC")
tree.getNode("ENCODER_CLK").putData(4.0E6/16.0)
#Add nodes for logic states (codes) for series and parallel capacitor banks
### CONVENIENCE FUNCTIONS
#Function to print string for building a raw voltage signal given path references.
def buildSig(nodePath, datPath):
    """Return the TDI expression string for a raw signal node.

    The current behavior simply echoes and returns ``datPath`` unchanged;
    earlier revisions wrapped it in GETNCI/Build_Signal (kept in the original
    as commented-out code).  ``nodePath`` is accepted but unused.
    """
    expr = datPath
    print(expr)
    return expr
#Function setting up nodes for series and parallel codes (stores time-encoded signal). Identical topology for series and parallel caps - only names change.
def buildCodeNode(tree, nodeName, datPath, comment):
    """Create the standard sub-node set for one time-encoded MCB code signal.

    Adds under the tree's current default node:
      nodeName          (SIGNAL)  - top node for the encoded signal
      nodeName:RAW      (SIGNAL)  - expression pointing at the digitizer input
      nodeName:NBITS    (NUMERIC) - bit width of the encoded number (7)
      nodeName:COMMENT  (TEXT)    - human-readable description
    """
    tree.addNode(nodeName, "SIGNAL")
    node_path = "\\MAGNETICS::TOP.SHOELACE.MCB_OUT:" + nodeName
    tree.addNode(nodeName + ":RAW", "SIGNAL")
    # Store the expression specifying where to read the encoded raw voltage.
    raw_expr = Data.compile(buildSig(node_path, datPath))
    tree.getNode(nodeName + ":RAW").putData(raw_expr)
    tree.addNode(nodeName + ":NBITS", "NUMERIC")
    tree.getNode(nodeName + ":NBITS").putData(7)
    tree.addNode(nodeName + ":COMMENT", "TEXT")
    tree.getNode(nodeName + ":COMMENT").putData(comment)
# tree.setDefault(tree.getNode(".-.")) #Reset default to parent level.
###
#Write in changes
#tree.write()
#Stamp in serial code nodes
nodeName="SER_CODE"
datPath="\\MAGNETICS::TOP.ACTIVE_MHD.DATA_ACQ.CPCI:ACQ_216_3:INPUT_09"
comment=nodeName+"=number encoding which boards should turn on which capacitors in associated series bank. (Number which specifies tuning configuration for caps.); NBITS=number of bits in binary number encoded in signal"
buildCodeNode(tree,nodeName,datPath, comment)
#Stamp in parallel code nodes
nodeName="PAR_CODE"
datPath="\\MAGNETICS::TOP.ACTIVE_MHD.DATA_ACQ.CPCI:ACQ_216_3:INPUT_10"
comment=nodeName+"=number encoding which boards should turn on which capacitors in associated parallel bank. (Number which specifies tuning configuration for caps.); NBITS=number of bits in binary number encoded in signal"
buildCodeNode(tree,nodeName,datPath, comment)
#Add node for interpreting frequency determined from period counter on MCB.
nodeName="N_PER"
datPath="\\MAGNETICS::TOP.ACTIVE_MHD.DATA_ACQ.CPCI:ACQ_216_3:INPUT_11"
comment="N_PER=number of clock cycles counted in M periods of sync signal; M=Number of signal (sync) cycles over which sync period is counted; F_CLK=clock frequency of Master Controller Board (MCB); FREQ_OUT = F_CLK*M/N_PER [Hz]; NBITS=number of bits in binary number encoded in signals"
#Set up basic node template.
buildCodeNode(tree, nodeName, datPath, comment)
#Add additional nodes associated with converting clock counts per period into frequency.
tree.setDefault(tree.getNode("N_PER"))
tree.addNode("M", "NUMERIC")
tree.getNode("M").putData(50.) #Number of sync counts in accumulation period
tree.addNode("F_CLK", "NUMERIC")
tree.getNode("F_CLK").putData(4000000.) # Clock frequency on MCB [Hz]
tree.addNode("FREQ_OUT", "SIGNAL") #This node will hold the calculated frequency from the MCB.
tree.getNode("NBITS").putData(14) #Overwrite previous nbits number with correct amount for period counter.
datPath="\\MAGNETICS::TOP.SHOELACE.MCB_OUT:N_PER"
freqCalcTdi="GETNCI("+datPath+", \"ON\") ? Build_Signal(Build_With_Units(F_CLK*M/("+datPath+"), \"Hz\"), *, DIM_OF("+datPath+") ) : ABORT()" #Ternary operator determining whether parent node is on; calculate frequency using clock counts.
print(freqCalcTdi)
tree.getNode("FREQ_OUT").putData(Data.compile(freqCalcTdi))
#Write changes to tree.
tree.write()
#GETNCI(\MAGNETICS::TOP.SHOELACE.MCB_OUT:SER_CODE, "ON") ? Build_Signal(Build_With_Units(\MAGNETICS::TOP.ACTIVE_MHD.DATA_ACQ.CPCI:ACQ_216_3:INPUT_09, "V"), *, DIM_OF(\MAGNETICS::TOP.ACTIVE_MHD.DATA_ACQ.CPCI:ACQ_216_3:INPUT_09) : ABORT()
#GETNCI(BP1T_GHK, "ON") ? Build_Signal(Build_With_Units(.-.DATA_ACQ.CPCI:ACQ_216_1:INPUT_07 * 1 / (\MAG_RF_COILS:CALIB[59] * 1), "Tesla/s"), *, DIM_OF(.-.DATA_ACQ.CPCI:ACQ_216_1:INPUT_07)) : ABORT()
#\MAGNETICS::TOP.ACTIVE_MHD.DATA_ACQ.CPCI:ACQ_216_3:INPUT_09
| 807 | 0 | 44 |
dcec2838a26c5666106c209ffca5015a40b8de57 | 2,282 | py | Python | pidal/logging.py | pi-plan/pidal | bfd1b9c4de87bc92565acbcff108270265757e39 | [
"BSD-3-Clause"
] | 6 | 2021-02-05T04:21:00.000Z | 2021-11-29T06:46:21.000Z | pidal/logging.py | pi-plan/pidal | bfd1b9c4de87bc92565acbcff108270265757e39 | [
"BSD-3-Clause"
] | 1 | 2021-11-30T06:08:53.000Z | 2021-11-30T06:08:53.000Z | pidal/logging.py | pi-plan/pidal | bfd1b9c4de87bc92565acbcff108270265757e39 | [
"BSD-3-Clause"
] | null | null | null | import logging
import logging.handlers
import time
from typing import Literal, List, Any
from pidal import NAME
from pidal.config import LoggingConfig
logger = logging.getLogger(NAME)
handler_map = {
"NullHandler": logging.NullHandler,
"StreamHandler": logging.StreamHandler,
"FileHandler": logging.FileHandler,
"RotatingFileHandler": logging.handlers.RotatingFileHandler,
"TimedRotatingFileHandler": logging.handlers.TimedRotatingFileHandler,
"SysLogHandler": logging.handlers.SysLogHandler,
"HTTPHandler": logging.handlers.HTTPHandler,
"QueueHandler": logging.handlers.QueueHandler,
}
| 30.426667 | 79 | 0.672217 | import logging
import logging.handlers
import time
from typing import Literal, List, Any
from pidal import NAME
from pidal.config import LoggingConfig
logger = logging.getLogger(NAME)
handler_map = {
"NullHandler": logging.NullHandler,
"StreamHandler": logging.StreamHandler,
"FileHandler": logging.FileHandler,
"RotatingFileHandler": logging.handlers.RotatingFileHandler,
"TimedRotatingFileHandler": logging.handlers.TimedRotatingFileHandler,
"SysLogHandler": logging.handlers.SysLogHandler,
"HTTPHandler": logging.handlers.HTTPHandler,
"QueueHandler": logging.handlers.QueueHandler,
}
def init_logging(debug: bool):
    """Configure the pidal and tornado loggers from LoggingConfig.

    Installs the configured handler and formatter at the configured level on
    every relevant logger; when *debug* is true, additionally mirrors all
    DEBUG output to the console via a StreamHandler.
    """
    config = LoggingConfig.get_instance()
    ch = _create_handler(config.handler.class_name, config.handler.args)
    formatter = logging.Formatter(fmt=config.format, datefmt=config.datefmt)
    ch.setLevel(_name2level(config.level))
    ch.setFormatter(formatter)
    for logger in _get_logger():
        logger.setLevel(_name2level(config.level))
        logger.addHandler(ch)
    if debug:
        # Extra console handler so debug runs always show output locally.
        th = logging.StreamHandler()
        th.setLevel(logging.DEBUG)
        th.setFormatter(formatter)
        for logger in _get_logger():
            logger.addHandler(th)
def _get_logger() -> List[logging.Logger]:
    """Collect the pidal logger plus every available tornado logger."""
    loggers = [logger]
    tornado_names = ["tornado", "tornado.access", "tornado.application",
                     "tornado.general"]
    for name in tornado_names:
        candidate = logging.getLogger(name)
        if candidate:
            loggers.append(candidate)
    return loggers
def _create_handler(class_name: str, args: List[List[Any]]) -> logging.Handler:
    """Instantiate the logging handler class named in the config.

    *args* is a list of single-element lists holding positional constructor
    arguments; string arguments are passed through time.strftime so file
    names may embed date patterns (e.g. "%Y-%m-%d.log").

    Raises:
        Exception: if *class_name* is not a key of handler_map.
    """
    handler_cls = handler_map.get(class_name)
    if not handler_cls:
        raise Exception("logging config handler is unknown.")
    if args:
        # Unwrap each [value] entry, expanding strftime patterns in strings.
        args = [time.strftime(str(i[0])) if isinstance(i[0], str) else i[0] for
                i in args if i]  # type: ignore
        handler = handler_cls(*args)
    else:
        handler = handler_cls()
    return handler
def _name2level(name: str) -> Literal[10, 20, 30, 40, 50]:
numeric_level = getattr(logging, name.upper(), None)
if not numeric_level:
raise Exception("logging config level [{}]is unknown.".format(name))
return numeric_level
| 1,522 | 0 | 92 |
3c45b83badb19faa6c0d9b7178228e44605213f6 | 6,686 | py | Python | tests/integration/test_tox_gh_matrix_int.py | medmunds/tox-gh-matrix | 2055f89bd939c0d0e167a2749b97343ff3e0bcce | [
"MIT"
] | null | null | null | tests/integration/test_tox_gh_matrix_int.py | medmunds/tox-gh-matrix | 2055f89bd939c0d0e167a2749b97343ff3e0bcce | [
"MIT"
] | null | null | null | tests/integration/test_tox_gh_matrix_int.py | medmunds/tox-gh-matrix | 2055f89bd939c0d0e167a2749b97343ff3e0bcce | [
"MIT"
] | null | null | null | import json
import re
from textwrap import dedent
def parse_gh_output(result):
    """Extract a dict of GitHub Workflow set-output variables from result's output.

    Scans result.out for ``::set-output name=<var>::<value>`` lines and maps
    each variable name to its (string) value; returns an empty dict when no
    set-output lines are present.
    """
    # re.findall always returns a list (possibly empty), never None, so the
    # previous `if matches is None` guard was dead code; dict([]) is just {}.
    return dict(re.findall(r"::set-output\s+name=(\w+)::(.*)\n", result.out))
def test_custom_var(tox_ini, cmd):
    """--gh-matrix takes optional output variable name"""
    tox_ini(
        """
        [tox]
        envlist = lint,test
        """
    )
    # Pass an explicit variable name instead of relying on the default.
    result = cmd("--gh-matrix=myvarname")
    result.assert_success(is_run_test_env=False)
    gh_output = parse_gh_output(result)
    assert "myvarname" in gh_output
    assert "envlist" not in gh_output  # default not set
    envlist = json.loads(gh_output["myvarname"])
    # Each entry carries the env name and its factor decomposition.
    assert envlist == [
        {"name": "lint", "factors": ["lint"]},
        {"name": "test", "factors": ["test"]},
    ]
def test_installed_python(tox_ini, cmd, mock_interpreter):
"""--gh-matrix provides 'python_installed' versions for available interpreters"""
mock_interpreter("python3.5", version_info=(3, 5, 6, "final", 0))
mock_interpreter("python3.10")
mock_interpreter("pypy3.8")
tox_ini(
"""
[tox]
envlist = py{27,35,310},pypy38
"""
)
result = cmd("--gh-matrix")
result.assert_success(is_run_test_env=False)
gh_output = parse_gh_output(result)
envlist = json.loads(gh_output["envlist"])
assert envlist == [
{
"name": "py27",
"factors": ["py27"],
"python": {"version": "2.7", "spec": "2.7.0-alpha - 2.7"},
},
{
"name": "py35",
"factors": ["py35"],
"python": {"version": "3.5", "spec": "3.5.0-alpha - 3.5", "installed": "3.5.6"},
},
{
"name": "py310",
"factors": ["py310"],
"python": {
"version": "3.10",
"spec": "3.10.0-alpha - 3.10",
"installed": "3.10.0",
},
},
{
"name": "pypy38",
"factors": ["pypy38"],
"python": {
"version": "pypy-3.8",
"spec": "pypy-3.8",
"installed": "pypy-3.8.0-3.7.0",
},
},
]
def test_base_python(tox_ini, cmd, mock_interpreter):
"""Python version can come from an env's basepython"""
tox_ini(
"""
[tox]
envlist = check,build
[testenv:build]
basepython = python3.9
"""
)
result = cmd("--gh-matrix")
result.assert_success(is_run_test_env=False)
gh_output = parse_gh_output(result)
envlist = json.loads(gh_output["envlist"])
assert envlist == [
{"name": "check", "factors": ["check"]},
{
"name": "build",
"factors": ["build"],
"python": {"version": "3.9", "spec": "3.9.0-alpha - 3.9"},
},
]
def test_ignore_outcome(tox_ini, cmd):
"""--gh-matrix identifies tox envs with ignore_outcome set"""
tox_ini(
"""
[tox]
envlist = release,dev
[testenv:dev]
ignore_outcome = true
"""
)
result = cmd("--gh-matrix")
result.assert_success(is_run_test_env=False)
gh_output = parse_gh_output(result)
envlist = json.loads(gh_output["envlist"])
assert envlist == [
{"name": "release", "factors": ["release"]},
{"name": "dev", "factors": ["dev"], "ignore_outcome": True},
]
def test_limited_envlist(tox_ini, cmd):
"""Explicit -e envlist limits --gh-matrix output"""
tox_ini(
"""
[tox]
envlist = py{27,35,36,37,38,39,310}
"""
)
result = cmd("--gh-matrix", "-e", "py35,py39,unknown-env")
result.assert_success(is_run_test_env=False)
gh_output = parse_gh_output(result)
assert "envlist" in gh_output
envlist = json.loads(gh_output["envlist"])
envnames = [env["name"] for env in envlist]
assert envnames == ["py35", "py39"]
assert "unknown-env" not in envnames
def test_skip_env(tox_ini, cmd, monkeypatch):
    """--gh-matrix filters out matches for TOX_SKIP_ENV"""
    tox_ini(
        """
        [tox]
        envlist = py{38,39}-{unix,win,mac}
        """
    )
    # TOX_SKIP_ENV is a Python regular expression that must match
    # the _entire_ envname to remove that env.
    monkeypatch.setenv("TOX_SKIP_ENV", ".*-(unix|mac)")
    result = cmd("--gh-matrix")
    result.assert_success(is_run_test_env=False)
    gh_output = parse_gh_output(result)
    envlist = json.loads(gh_output["envlist"])
    # Only the -win variants survive the skip pattern.
    envnames = [env["name"] for env in envlist]
    assert envnames == ["py38-win", "py39-win"]
| 28.818966 | 92 | 0.504786 | import json
import re
from textwrap import dedent
def parse_gh_output(result):
    """Extract a dict of GitHub Workflow set-output variables from result's output.

    Scans result.out for ``::set-output name=<var>::<value>`` lines and maps
    each variable name to its (string) value; returns an empty dict when no
    set-output lines are present.
    """
    # re.findall always returns a list (possibly empty), never None, so the
    # previous `if matches is None` guard was dead code; dict([]) is just {}.
    return dict(re.findall(r"::set-output\s+name=(\w+)::(.*)\n", result.out))
def test_gh_matrix(tox_ini, cmd, mock_interpreter):
tox_ini(
"""
[tox]
envlist = django{32,40}-py{38,39},docs
"""
)
result = cmd("--gh-matrix")
result.assert_success(is_run_test_env=False)
gh_output = parse_gh_output(result)
assert "envlist" in gh_output # default output name
envlist = json.loads(gh_output["envlist"])
assert envlist == [
{
"name": "django32-py38",
"factors": ["django32", "py38"],
"python": {"version": "3.8", "spec": "3.8.0-alpha - 3.8"},
},
{
"name": "django32-py39",
"factors": ["django32", "py39"],
"python": {"version": "3.9", "spec": "3.9.0-alpha - 3.9"},
},
{
"name": "django40-py38",
"factors": ["django40", "py38"],
"python": {"version": "3.8", "spec": "3.8.0-alpha - 3.8"},
},
{
"name": "django40-py39",
"factors": ["django40", "py39"],
"python": {"version": "3.9", "spec": "3.9.0-alpha - 3.9"},
},
{
"name": "docs",
"factors": ["docs"],
# no python version specified
},
]
def test_custom_var(tox_ini, cmd):
"""--gh-matrix takes optional output variable name"""
tox_ini(
"""
[tox]
envlist = lint,test
"""
)
result = cmd("--gh-matrix=myvarname")
result.assert_success(is_run_test_env=False)
gh_output = parse_gh_output(result)
assert "myvarname" in gh_output
assert "envlist" not in gh_output # default not set
envlist = json.loads(gh_output["myvarname"])
assert envlist == [
{"name": "lint", "factors": ["lint"]},
{"name": "test", "factors": ["test"]},
]
def test_installed_python(tox_ini, cmd, mock_interpreter):
"""--gh-matrix provides 'python_installed' versions for available interpreters"""
mock_interpreter("python3.5", version_info=(3, 5, 6, "final", 0))
mock_interpreter("python3.10")
mock_interpreter("pypy3.8")
tox_ini(
"""
[tox]
envlist = py{27,35,310},pypy38
"""
)
result = cmd("--gh-matrix")
result.assert_success(is_run_test_env=False)
gh_output = parse_gh_output(result)
envlist = json.loads(gh_output["envlist"])
assert envlist == [
{
"name": "py27",
"factors": ["py27"],
"python": {"version": "2.7", "spec": "2.7.0-alpha - 2.7"},
},
{
"name": "py35",
"factors": ["py35"],
"python": {"version": "3.5", "spec": "3.5.0-alpha - 3.5", "installed": "3.5.6"},
},
{
"name": "py310",
"factors": ["py310"],
"python": {
"version": "3.10",
"spec": "3.10.0-alpha - 3.10",
"installed": "3.10.0",
},
},
{
"name": "pypy38",
"factors": ["pypy38"],
"python": {
"version": "pypy-3.8",
"spec": "pypy-3.8",
"installed": "pypy-3.8.0-3.7.0",
},
},
]
def test_base_python(tox_ini, cmd, mock_interpreter):
"""Python version can come from an env's basepython"""
tox_ini(
"""
[tox]
envlist = check,build
[testenv:build]
basepython = python3.9
"""
)
result = cmd("--gh-matrix")
result.assert_success(is_run_test_env=False)
gh_output = parse_gh_output(result)
envlist = json.loads(gh_output["envlist"])
assert envlist == [
{"name": "check", "factors": ["check"]},
{
"name": "build",
"factors": ["build"],
"python": {"version": "3.9", "spec": "3.9.0-alpha - 3.9"},
},
]
def test_ignore_outcome(tox_ini, cmd):
"""--gh-matrix identifies tox envs with ignore_outcome set"""
tox_ini(
"""
[tox]
envlist = release,dev
[testenv:dev]
ignore_outcome = true
"""
)
result = cmd("--gh-matrix")
result.assert_success(is_run_test_env=False)
gh_output = parse_gh_output(result)
envlist = json.loads(gh_output["envlist"])
assert envlist == [
{"name": "release", "factors": ["release"]},
{"name": "dev", "factors": ["dev"], "ignore_outcome": True},
]
def test_limited_envlist(tox_ini, cmd):
"""Explicit -e envlist limits --gh-matrix output"""
tox_ini(
"""
[tox]
envlist = py{27,35,36,37,38,39,310}
"""
)
result = cmd("--gh-matrix", "-e", "py35,py39,unknown-env")
result.assert_success(is_run_test_env=False)
gh_output = parse_gh_output(result)
assert "envlist" in gh_output
envlist = json.loads(gh_output["envlist"])
envnames = [env["name"] for env in envlist]
assert envnames == ["py35", "py39"]
assert "unknown-env" not in envnames
def test_skip_env(tox_ini, cmd, monkeypatch):
"""--gh-matrix filters out matches for TOX_SKIPENV"""
tox_ini(
"""
[tox]
envlist = py{38,39}-{unix,win,mac}
"""
)
# TOX_SKIPENV is a Python regular expression that must match
# the _entire_ envname to remove that env.
monkeypatch.setenv("TOX_SKIP_ENV", ".*-(unix|mac)")
result = cmd("--gh-matrix")
result.assert_success(is_run_test_env=False)
gh_output = parse_gh_output(result)
envlist = json.loads(gh_output["envlist"])
envnames = [env["name"] for env in envlist]
assert envnames == ["py38-win", "py39-win"]
def test_matrix_dump(tox_ini, cmd, mock_interpreter):
tox_ini(
"""
[tox]
envlist = lint,test
"""
)
result = cmd("--gh-matrix-dump")
result.assert_success(is_run_test_env=False)
# Formatted JSON output:
expected = dedent(
"""
[
{
"name": "lint",
"factors": [
"lint"
]
},
{
"name": "test",
"factors": [
"test"
]
}
]
"""
)
assert result.out.strip() == expected.strip()
| 1,861 | 0 | 46 |
59de7a19af5732fab44b71dd4fbffaf2fe1339e3 | 1,188 | py | Python | CodeFights/chessBoardCellColor.py | HKuz/Test_Code | 798efc9fc668ef021736a6d9699ef4713cf8b718 | [
"MIT"
] | 1 | 2020-06-14T20:10:04.000Z | 2020-06-14T20:10:04.000Z | CodeFights/chessBoardCellColor.py | makramjandar/Test_Code | 798efc9fc668ef021736a6d9699ef4713cf8b718 | [
"MIT"
] | null | null | null | CodeFights/chessBoardCellColor.py | makramjandar/Test_Code | 798efc9fc668ef021736a6d9699ef4713cf8b718 | [
"MIT"
] | 1 | 2019-12-09T12:48:05.000Z | 2019-12-09T12:48:05.000Z | #!/usr/local/bin/python
# Code Fights Chess Board Cell Color Problem
def chessBoardCellColor(cell1, cell2):
'''
Determine if the two given cells on chess board are same color
A, C, E, G odd cells are same color as B, D, F, H even cells
'''
return get_color(cell1) == get_color(cell2)
if __name__ == '__main__':
main()
| 28.285714 | 79 | 0.483165 | #!/usr/local/bin/python
# Code Fights Chess Board Cell Color Problem
def chessBoardCellColor(cell1, cell2):
'''
Determine if the two given cells on chess board are same color
A, C, E, G odd cells are same color as B, D, F, H even cells
'''
def get_color(cell):
return ("DARK" if (cell[0] in "ACEG" and int(cell[1]) % 2 == 1) or
(cell[0] in "BDFH" and int(cell[1]) % 2 == 0) else "LIGHT")
return get_color(cell1) == get_color(cell2)
def main():
tests = [
["A1", "C3", True],
["A1", "H3", False],
["A1", "A2", False],
["A1", "B2", True],
["B3", "H8", False],
["C3", "B5", False],
["G5", "E7", True],
["C8", "H8", False],
["D2", "D2", True],
["A2", "A5", False]
]
for t in tests:
res = chessBoardCellColor(t[0], t[1])
if t[2] == res:
print("PASSED: chessBoardCellColor({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print("FAILED: chessBoardCellColor({}, {}) returned {}, answer: {}"
.format(t[0], t[1], res, t[2]))
if __name__ == '__main__':
main()
| 792 | 0 | 49 |
dcdb4b0b6d4e17758462d42e88bda480217cb957 | 154 | py | Python | stringreplace.py | KT12/DailyProgrammer | 127aaa4ab27a42706af01be80f7aae3b83f44fbc | [
"MIT"
] | null | null | null | stringreplace.py | KT12/DailyProgrammer | 127aaa4ab27a42706af01be80f7aae3b83f44fbc | [
"MIT"
] | null | null | null | stringreplace.py | KT12/DailyProgrammer | 127aaa4ab27a42706af01be80f7aae3b83f44fbc | [
"MIT"
] | null | null | null | # Challenge 16 Easy
| 19.25 | 44 | 0.642857 | # Challenge 16 Easy
def stringreplace(fstring, sstring):
for char in sstring:
fstring = fstring.replace(char, '')
return fstring
| 106 | 0 | 25 |
b654704a4d9c0877043127de9805ed4fbb8c9a7e | 1,969 | py | Python | 002-bloco-33-02/004-manipulando-json.py | clebertonf/python | 8c01029166bf5d568d7e325db18e277cd705b987 | [
"MIT"
] | null | null | null | 002-bloco-33-02/004-manipulando-json.py | clebertonf/python | 8c01029166bf5d568d7e325db18e277cd705b987 | [
"MIT"
] | null | null | null | 002-bloco-33-02/004-manipulando-json.py | clebertonf/python | 8c01029166bf5d568d7e325db18e277cd705b987 | [
"MIT"
] | null | null | null | import json
with open('pokemons.json') as file:
content = file.read() # leitura do arquivo
pokemons = json.loads(content)['results'] # o conteúdo é transformado em estrutura python equivalente, dicionário neste caso.
# acessamos a chave results que é onde contém nossa lista de pokemons
print(pokemons[0]) # imprime o primeiro pokemon da lista
# A leitura pode ser feita diretamente do arquivo, utilizando o método load ao invés de loads.
# O loads carrega o JSON a partir de um texto e o load carrega o JSON a partir de um arquivo.
# load ja ler o arquivo e retorna
with open("pokemons.json") as file:
pokemons = json.load(file)["results"]
print(pokemons[0]) # imprime o primeiro pokemon da lista
# Escrita de dados
# A escrita de arquivos no formato JSON é similar a escrita
# de arquivos comum, porém primeiro temos de transformar os dados.
# Lendo
with open("pokemons.json") as file:
pokemons = json.load(file)["results"]
# Filtarndo
grass_type_pokemons = [
pokemon for pokemon in pokemons if "Grass" in pokemon["type"]
]
# Escrevendo
# Abre o arquivo para escrevermos apenas o pokemons do tipo grama
with open("pokemons_file.json", "w") as file:
json_poke = json.dumps(grass_type_pokemons) # conversão de Python para o formato json (str)
file.write(json_poke)
# Assim como a desserialização, que faz a transformação de texto em formato JSON para Python ,
# a serialização, que é o caminho inverso, também possui um método equivalente para escrever em
# arquivos de forma direta.
# leitura de todos os pokemons
with open("pokemons.json") as file:
pokemons = json.load(file)["results"]
# separamos somente os do tipo grama
grass_type_pokemons = [
pokemon for pokemon in pokemons if "Grass" in pokemon["type"]
]
# abre o arquivo para escrita
with open("grass_pokemons.json", "w") as file:
# escreve no arquivo já transformando em formato json a estrutura
json.dump(grass_type_pokemons, file) | 32.816667 | 130 | 0.737938 | import json
with open('pokemons.json') as file:
content = file.read() # leitura do arquivo
pokemons = json.loads(content)['results'] # o conteúdo é transformado em estrutura python equivalente, dicionário neste caso.
# acessamos a chave results que é onde contém nossa lista de pokemons
print(pokemons[0]) # imprime o primeiro pokemon da lista
# A leitura pode ser feita diretamente do arquivo, utilizando o método load ao invés de loads.
# O loads carrega o JSON a partir de um texto e o load carrega o JSON a partir de um arquivo.
# load ja ler o arquivo e retorna
with open("pokemons.json") as file:
pokemons = json.load(file)["results"]
print(pokemons[0]) # imprime o primeiro pokemon da lista
# Escrita de dados
# A escrita de arquivos no formato JSON é similar a escrita
# de arquivos comum, porém primeiro temos de transformar os dados.
# Lendo
with open("pokemons.json") as file:
pokemons = json.load(file)["results"]
# Filtarndo
grass_type_pokemons = [
pokemon for pokemon in pokemons if "Grass" in pokemon["type"]
]
# Escrevendo
# Abre o arquivo para escrevermos apenas o pokemons do tipo grama
with open("pokemons_file.json", "w") as file:
json_poke = json.dumps(grass_type_pokemons) # conversão de Python para o formato json (str)
file.write(json_poke)
# Assim como a desserialização, que faz a transformação de texto em formato JSON para Python ,
# a serialização, que é o caminho inverso, também possui um método equivalente para escrever em
# arquivos de forma direta.
# leitura de todos os pokemons
with open("pokemons.json") as file:
pokemons = json.load(file)["results"]
# separamos somente os do tipo grama
grass_type_pokemons = [
pokemon for pokemon in pokemons if "Grass" in pokemon["type"]
]
# abre o arquivo para escrita
with open("grass_pokemons.json", "w") as file:
# escreve no arquivo já transformando em formato json a estrutura
json.dump(grass_type_pokemons, file) | 0 | 0 | 0 |
68cbd1af047b74ae82091d30d6d4537ad2f20850 | 2,726 | py | Python | ZyExpander/ZyExpander.py | MichaelScript/ZyExpander | 29a88edffd8df9bc17bac8aee02140cee45d070a | [
"MIT"
] | null | null | null | ZyExpander/ZyExpander.py | MichaelScript/ZyExpander | 29a88edffd8df9bc17bac8aee02140cee45d070a | [
"MIT"
] | null | null | null | ZyExpander/ZyExpander.py | MichaelScript/ZyExpander | 29a88edffd8df9bc17bac8aee02140cee45d070a | [
"MIT"
] | null | null | null | #!/usr/local/bin/python3
# ZyExpander: A Nested ZipFile Expander For ZyBooks.
# By: Michael Green
# Expands and organizes by student nested zip files that ZyBook's grading
# system uses for some reason.
import zipfile
import os
import argparse
# Goes through our argument list and pulls out the files to process along with any
# optional inputs specified.
# Verifying that input arguments supplied are existing zip files
init()
| 37.342466 | 153 | 0.730007 | #!/usr/local/bin/python3
# ZyExpander: A Nested ZipFile Expander For ZyBooks.
# By: Michael Green
# Expands and organizes by student nested zip files that ZyBook's grading
# system uses for some reason.
import zipfile
import os
import argparse
# Goes through our argument list and pulls out the files to process along with any
# optional inputs specified.
def get_args():
# argparse makes our life easier by looking for flags and creating descriptions
# for the help flag automatically.
parser = argparse.ArgumentParser(description='Takes input zipfiles and expands them.')
parser.add_argument('zips', nargs='+',help="zipfiles you want to process")
parser.add_argument('-o',default=".",help="option to specify an output directory (default is current directory)")
parser.add_argument('-s', default="/Students/",help="option to specify the name of the students directory (default is Students)")
parser.add_argument('-p',default="/Processed/",help="option to specify where to place processed zip files (default is Processed)")
args = parser.parse_args()
return [args.zips,args.o,args.o + args.s,args.o + args.p]
def check_path(path):
if not os.path.exists(path):
print("Creating path: " + path + "...")
os.mkdir(path)
# Verifying that input arguments supplied are existing zip files
def type_check(args):
try:
for arg in args:
if ".zip" not in arg[-4:] and not os.path.isfile(arg):
raise ValueError(arg)
except ValueError as error:
print("Either the argument \"" + str(error.args[0]) + "\" isn't a zip file or it's not a file at all! Or maybe something else went horribly wrong....")
def expand(zip,output_paths):
lab = zipfile.ZipFile( open(zip, "rb") )
for student in lab.namelist():
#Replace of leading zip path with output dir and extra information on file names
student_dir = output_paths[0] + student.split("-")[0].lower() + "/"
check_path(student_dir)
# We have to extract the nested zip files, because you can't
# extract nested zips' contgents
lab.extract(student,student_dir)
# Now we can extract the nested zip files' contents
student_files = zipfile.ZipFile( open(student_dir + student, "rb") )
for student_file in student_files.namelist():
student_files.extract(student_file,student_dir)
# Removing the extra zip directory we had to extract
os.remove(student_dir + student)
student_files.close()
lab.close()
# Finishing flag for individual zip files
print("Processed: " + zip)
#Moving processed file into appropriate directory
os.rename(zip, output_paths[1] + zip)
def init():
args = get_args()
type_check(args[0])
for path in args[1:]:
check_path(path)
for zip in args[0]:
expand(zip,args[2:])
print("Extracted: " + zip )
init()
| 2,177 | 0 | 113 |
d5deb3b621d2fe7d21ed97442cce82aebdfe55b4 | 3,855 | py | Python | MapImageToDatConverter.py | justinbeetle/pyDragonWarrior | cfaf57161ab4950da537de9937d688bc7d24bf4a | [
"MIT"
] | 3 | 2021-04-07T14:43:20.000Z | 2021-04-17T21:26:08.000Z | MapImageToDatConverter.py | justinbeetle/pyDragonWarrior | cfaf57161ab4950da537de9937d688bc7d24bf4a | [
"MIT"
] | 1 | 2022-01-02T15:52:23.000Z | 2022-01-12T01:51:50.000Z | MapImageToDatConverter.py | justinbeetle/pyDragonWarrior | cfaf57161ab4950da537de9937d688bc7d24bf4a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import pygame
# Run like this: MapImageToDatConverter.py ..\unusedAssets\maps\brecconary.png data\maps\brecconary.dat
if __name__ == '__main__':
try:
main()
except Exception as e:
import sys
import traceback
print(traceback.format_exception(None, # <- type(e) by docs, but ignored
e,
e.__traceback__),
file=sys.stderr, flush=True)
| 39.336735 | 116 | 0.601038 | #!/usr/bin/env python
import os
import pygame
# Run like this: MapImageToDatConverter.py ..\unusedAssets\maps\brecconary.png data\maps\brecconary.dat
def main():
# Initialize pygame
pygame.init()
# Setup to draw maps
tile_size_pixels = 16
screen = pygame.display.set_mode((160, 160), pygame.SRCALPHA | pygame.HWSURFACE)
clock = pygame.time.Clock()
# Load the map image to convert to dat file
base_path = os.path.split(os.path.abspath(__file__))[0]
map_image_file_name = os.path.join(base_path, sys.argv[1])
print('map_image_file_name =', map_image_file_name, flush=True)
map_dat_file_name = os.path.join(base_path, sys.argv[2])
print('mapDatFileName =', map_dat_file_name, flush=True)
map_image = pygame.image.load(map_image_file_name).convert()
print('mapImage.get_width() =', map_image.get_width(), flush=True)
print('mapImage.get_width() / tileSize_pixels =', map_image.get_width() / tile_size_pixels, flush=True)
print('mapImage.get_height() =', map_image.get_height(), flush=True)
print('mapImage.get_height() / tileSize_pixels =', map_image.get_height() / tile_size_pixels, flush=True)
print('Enter symbol for border:', flush=True)
border_symbol = '\n'
while border_symbol == '\n':
border_symbol = sys.stdin.read(1)
# Convert the image to dat file
tile_image_to_symbol_map = {}
map_dat_file = open(map_dat_file_name, 'w')
for map_y in range(map_image.get_height() // tile_size_pixels + 2):
map_dat_file.write(border_symbol)
map_dat_file.write('\n')
for map_y in range(map_image.get_height() // tile_size_pixels):
map_y_px = map_y * tile_size_pixels
map_dat_file.write(border_symbol)
for map_x in range(map_image.get_width() // tile_size_pixels):
map_x_px = map_x * tile_size_pixels
current_tile = map_image.subsurface(pygame.Rect(map_x_px, map_y_px, tile_size_pixels, tile_size_pixels))
screen.blit(current_tile, (0, 0))
# Determine if the tile has previously been seen
is_new_tile = True
for tile in tile_image_to_symbol_map:
is_tile_match = True
for tile_x in range(tile_size_pixels):
for tile_y in range(tile_size_pixels):
if tile.get_at((tile_x, tile_y)) != current_tile.get_at((tile_x, tile_y)):
is_tile_match = False
break
if not is_tile_match:
break
if is_tile_match:
symbol = tile_image_to_symbol_map[tile]
is_new_tile = False
break
if is_new_tile:
pygame.display.flip()
pygame.event.pump()
clock.tick(5)
# Prompt user for tile symbol
print('Enter symbol for this tile ' + str(map_x) + ',' + str(map_y) + ':', flush=True)
symbol = '\n'
while symbol == '\n':
symbol = sys.stdin.read(1)
tile_image_to_symbol_map[current_tile] = symbol
map_dat_file.write(symbol)
map_dat_file.write(border_symbol)
map_dat_file.write('\n')
for map_y in range(map_image.get_height() // tile_size_pixels + 2):
map_dat_file.write(border_symbol)
map_dat_file.close()
# Terminate pygame
pygame.quit()
if __name__ == '__main__':
try:
main()
except Exception as e:
import sys
import traceback
print(traceback.format_exception(None, # <- type(e) by docs, but ignored
e,
e.__traceback__),
file=sys.stderr, flush=True)
| 3,328 | 0 | 22 |
9a0062a25c2809ae39a74546b2c9f4f334f8c2dd | 604 | py | Python | app/tests/data/__init__.py | LIhDi/python-atendimento-agendamento-back-end | affb722440678415d1d6293e84be3f1743c915b7 | [
"MIT"
] | null | null | null | app/tests/data/__init__.py | LIhDi/python-atendimento-agendamento-back-end | affb722440678415d1d6293e84be3f1743c915b7 | [
"MIT"
] | null | null | null | app/tests/data/__init__.py | LIhDi/python-atendimento-agendamento-back-end | affb722440678415d1d6293e84be3f1743c915b7 | [
"MIT"
] | null | null | null | from faker import Faker
fake = Faker() | 22.37037 | 73 | 0.544702 | from faker import Faker
fake = Faker()
def fake_unit(id):
created_at = fake.date_time_between(start_date="-1y", end_date="now")
name = fake.word()
unit = {
"id_int": id,
"name": name,
"code": f"{name}_code_{fake.word()}",
"phone": fake.phone_number(),
"description": fake.name(),
"email": fake.email(),
"dflag": False,
"active": True,
"created_at": created_at,
}
return unit
def units_list():
units = []
for i in range(1, 3):
unit = fake_unit(id=i)
units.append(unit)
return units | 519 | 0 | 46 |
058139bc616e1e51cb399d2b0f7a525b5425d780 | 274 | py | Python | pygmalion/datasets/_titanic.py | BFavier/pygmalion | 76391431e55fa1c28dc7a1822f2917bf8487b94b | [
"MIT"
] | null | null | null | pygmalion/datasets/_titanic.py | BFavier/pygmalion | 76391431e55fa1c28dc7a1822f2917bf8487b94b | [
"MIT"
] | null | null | null | pygmalion/datasets/_titanic.py | BFavier/pygmalion | 76391431e55fa1c28dc7a1822f2917bf8487b94b | [
"MIT"
] | null | null | null | from ._download import download
def titanic(directory: str):
"""downloads 'titanic.csv' in the given directory"""
download(directory, "titanic.csv",
"https://drive.google.com/file/d/"
"1LYjbHW3wyJSMzGMMCmaOFNA_RIKqxRoI/view?usp=sharing")
| 30.444444 | 66 | 0.675182 | from ._download import download
def titanic(directory: str):
"""downloads 'titanic.csv' in the given directory"""
download(directory, "titanic.csv",
"https://drive.google.com/file/d/"
"1LYjbHW3wyJSMzGMMCmaOFNA_RIKqxRoI/view?usp=sharing")
| 0 | 0 | 0 |
76ac42adc6704f109a41c9d97c9253bd76355f66 | 353 | py | Python | py/fibonacci.py | bharath-acchu/oss-dummies | 30bbd433155c7d5473a25491aac3e4c937abaed0 | [
"MIT"
] | 1 | 2021-01-04T14:49:03.000Z | 2021-01-04T14:49:03.000Z | py/fibonacci.py | bharath-acchu/oss-dummies | 30bbd433155c7d5473a25491aac3e4c937abaed0 | [
"MIT"
] | 3 | 2018-09-01T07:02:23.000Z | 2018-10-27T21:29:36.000Z | py/fibonacci.py | bharath-acchu/oss-dummies | 30bbd433155c7d5473a25491aac3e4c937abaed0 | [
"MIT"
] | 15 | 2018-09-01T06:07:50.000Z | 2018-10-13T05:06:16.000Z | from array import *
fibo()
| 18.578947 | 48 | 0.410765 | from array import *
def fibo():
f = array('i',[0,1])
n = int(input('Enter the number of terms:'))
if n == 1:
print(f[0])
elif n == 2:
for i in range(2):
print(f[i])
else:
for i in range(3,n+1):
f.append(f[i-3]+f[i-2])
for i in f:
print(i)
fibo()
| 289 | 0 | 23 |
4c4c577c6cda2d8596194f3c4a2b8ec79bf8ca37 | 37,656 | py | Python | openstack-congress-9.0.0/congress/tests/datasources/test_murano_driver.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 50 | 2015-04-21T14:12:01.000Z | 2020-06-01T06:23:13.000Z | congress/tests/datasources/test_murano_driver.py | openstack-archive/congress | 85243abf63dfc7c086e28e9bdb3fb0b7c9d2ad94 | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | congress/tests/datasources/test_murano_driver.py | openstack-archive/congress | 85243abf63dfc7c086e28e9bdb3fb0b7c9d2ad94 | [
"Apache-2.0"
] | 25 | 2015-05-22T04:02:33.000Z | 2020-01-14T12:15:12.000Z | # Copyright (c) 2015 Hewlett-Packard. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import mock
# mocking muranoclient so that python-muranoclient
# doesn't need to be included in requirements.txt.
# (Including python-muranoclient in requirements.txt will
# cause failures in Jenkins because python-muranoclient is not
# included in global_requirements.txt at this point)
import sys
sys.modules['muranoclient'] = mock.Mock()
sys.modules['muranoclient.client'] = mock.Mock()
sys.modules['muranoclient.common'] = mock.Mock()
sys.modules['muranoclient.common.exceptions'] = mock.Mock()
# NOTE: these congress imports must stay AFTER the sys.modules mocking
# above — presumably murano_driver imports muranoclient at module load
# time, and the mocks make that import resolve without the real package
# being installed (TODO confirm against murano_driver's imports).
from congress.datasources import murano_driver
from congress.tests import base
from congress.tests.datasources import util
from congress.tests import helper
# Sample responses from murano-client
# env_response: a single murano environment object (wrapped in
# ResponseObj so attributes are accessible like client results);
# note its status is 'deploy failure', which the expected_* tables
# below reference by the environment id.
env_response = [
    util.ResponseObj({
        u'created': u'2015-03-24T18:35:14',
        u'id': u'ad9762b2d82f44ca8b8a6ce4a19dd1cc',
        u'name': u'quick-env-2',
        u'networking': {},
        u'status': u'deploy failure',
        u'tenant_id': u'610c6afc1fc54d23a58d316bf76e5f42',
        u'updated': u'2015-03-24T18:46:56',
        u'version': 1})]
# service_response: the services deployed in the environment above —
# a MySQL database, an Apache Tomcat server, and a PetClinic app.
# The first two carry a nested 'instance' dict describing their VM
# (its '?'->'id' is the object id used in expected_service_properties);
# PetClinic references the other two services by their '?'->'id' values
# via its 'database' and 'tomcat' keys.
service_response = [
    util.ResponseObj({
        u'?': {u'_26411a1861294160833743e45d0eaad9': {u'name': u'MySQL'},
               u'_actions': {u'74f5b2d2-1f8d-4b1a-8238-4155ce2cadb2_restartVM':
                             {u'enabled': True, u'name': u'restartVM'}},
               u'id': u'769af50c-9629-4694-b623-e9b392941279',
               u'status': u'deploy failure',
               u'type': u'io.murano.databases.MySql'},
        u'database': u'',
        u'instance': {u'?': {u'_actions': {},
                             u'id': u'76b9ca88-c668-4e37-a830-5845adc10b0e',
                             u'type':
                             u'io.murano.resources.LinuxMuranoInstance'},
                      u'assignFloatingIp': True,
                      u'availabilityZone': u'nova',
                      u'flavor': u'm1.small',
                      u'floatingIpAddress': u'172.24.4.4',
                      u'image': u'66e015aa-33c5-41ff-9b81-d8d17f9b02c3',
                      u'ipAddresses': [u'10.0.11.3', u'172.24.4.4'],
                      u'keyname': u'',
                      u'name': u'bcnfli7nn738y1',
                      u'networks': {u'customNetworks': [],
                                    u'primaryNetwork': None,
                                    u'useEnvironmentNetwork': True,
                                    u'useFlatNetwork': False},
                      u'securityGroupName': None,
                      u'sharedIps': []},
        u'name': u'MySqlDB',
        u'password': u'Passw0rd.',
        u'username': u''}),
    util.ResponseObj({
        u'?': {u'_26411a1861294160833743e45d0eaad9':
               {u'name': u'Apache Tomcat'},
               u'_actions': {},
               u'id': u'ea6a7d9b-7799-4d00-9db3-4573cb94daec',
               u'status': u'deploy failure',
               u'type': u'io.murano.apps.apache.Tomcat'},
        u'instance': {u'?': {u'_actions': {},
                             u'id': u'c52dda24-38d6-4f2f-9184-abca0beaa6e9',
                             u'type':
                             u'io.murano.resources.LinuxMuranoInstance'},
                      u'assignFloatingIp': True,
                      u'availabilityZone': u'nova',
                      u'flavor': u'm1.small',
                      u'floatingIpAddress': u'172.24.4.4',
                      u'image': u'66e015aa-33c5-41ff-9b81-d8d17f9b02c3',
                      u'ipAddresses': [u'10.0.11.4', u'172.24.4.4'],
                      u'keyname': u'',
                      u'name': u'woydqi7nn7ipc2',
                      u'networks': {u'customNetworks': [],
                                    u'primaryNetwork': None,
                                    u'useEnvironmentNetwork': True,
                                    u'useFlatNetwork': False},
                      u'securityGroupName': None,
                      u'sharedIps': []},
        u'name': u'Tomcat'}),
    util.ResponseObj({
        u'?': {u'_26411a1861294160833743e45d0eaad9': {u'name': u'PetClinic'},
               u'_actions': {},
               u'id': u'fda74653-8b66-42e2-be16-12ebc87d7570',
               u'status': u'deploy failure',
               u'type': u'io.murano.apps.java.PetClinic'},
        u'database': u'769af50c-9629-4694-b623-e9b392941279',
        u'dbName': u'pet_db',
        u'dbPassword': u'Passw0rd.',
        u'dbUser': u'pet_user',
        u'name': u'PetClinic',
        u'tomcat': u'ea6a7d9b-7799-4d00-9db3-4573cb94daec',
        u'warLocation':
        u'https://dl.dropboxusercontent.com/u/1684617/petclinic.war'})]
# deployment_response: one deployment record for the environment above.
# The 'description' mirrors the environment model at deploy time (note
# passwords appear as '*** SANITIZED ***' here, unlike service_response),
# and 'result'/'state' record the failed outcome (agent timeout,
# state 'completed_w_errors').
deployment_response = [
    util.ResponseObj({
        u'action': {u'args': {},
                    u'method': u'deploy',
                    u'object_id': u'ad9762b2d82f44ca8b8a6ce4a19dd1cc'},
        u'created': u'2015-03-24T18:36:23',
        u'description':
        {u'?': {u'id': u'ad9762b2d82f44ca8b8a6ce4a19dd1cc',
                u'type': u'io.murano.Environment'},
         u'defaultNetworks':
         {u'environment':
          {u'?': {u'id':
                  u'a2be8265b01743c0bdf645772d632bf0',
                  u'type': u'io.murano.resources.NeutronNetwork'},
           u'name': u'quick-env-2-network'},
          u'flat': None},
         u'name': u'quick-env-2',
         u'services':
         [{u'?':
           {u'_26411a1861294160833743e45d0eaad9':
            {u'name': u'MySQL'},
            u'id': u'769af50c-9629-4694-b623-e9b392941279',
            u'type': u'io.murano.databases.MySql'},
           u'database': u'',
           u'instance':
           {u'?': {u'id': u'76b9ca88-c668-4e37-a830-5845adc10b0e',
                   u'type': u'io.murano.resources.LinuxMuranoInstance'},
            u'assignFloatingIp': True,
            u'availabilityZone': u'nova',
            u'flavor': u'm1.small',
            u'image': u'66e015aa-33c5-41ff-9b81-d8d17f9b02c3',
            u'keyname': u'',
            u'name': u'bcnfli7nn738y1'},
           u'name': u'MySqlDB',
           u'password': u'*** SANITIZED ***',
           u'username': u''},
          {u'?':
           {u'_26411a1861294160833743e45d0eaad9': {u'name': u'Apache Tomcat'},
            u'id': u'ea6a7d9b-7799-4d00-9db3-4573cb94daec',
            u'type': u'io.murano.apps.apache.Tomcat'},
           u'instance':
           {u'?': {u'id': u'c52dda24-38d6-4f2f-9184-abca0beaa6e9',
                   u'type': u'io.murano.resources.LinuxMuranoInstance'},
            u'assignFloatingIp': True,
            u'availabilityZone': u'nova',
            u'flavor': u'm1.small',
            u'image': u'66e015aa-33c5-41ff-9b81-d8d17f9b02c3',
            u'keyname': u'',
            u'name': u'woydqi7nn7ipc2'},
           u'name': u'Tomcat'},
          {u'?': {u'_26411a1861294160833743e45d0eaad9':
                  {u'name': u'PetClinic'},
                  u'id': u'fda74653-8b66-42e2-be16-12ebc87d7570',
                  u'type': u'io.murano.apps.java.PetClinic'},
           u'database': u'769af50c-9629-4694-b623-e9b392941279',
           u'dbName': u'pet_db',
           u'dbPassword': u'*** SANITIZED ***',
           u'dbUser': u'pet_user',
           u'name': u'PetClinic',
           u'tomcat': u'ea6a7d9b-7799-4d00-9db3-4573cb94daec',
           u'warLocation':
           u'https://dl.dropboxusercontent.com/u/1684617/petclinic.war'}]},
        u'environment_id': u'ad9762b2d82f44ca8b8a6ce4a19dd1cc',
        u'finished': u'2015-03-24T18:46:56',
        u'id': u'4aa60b31d8ce434284e03aa13c6e11e0',
        u'result': {u'isException': True,
                    u'result':
                    {u'details': u'murano.common.exceptions.TimeoutException:'
                     ' The Agent does not respondwithin 600 seconds',
                     u'message': u'[murano.common.exceptions.TimeoutException]'
                     ': The Agent does not respondwithin 600 seconds'}},
        u'started': u'2015-03-24T18:36:23',
        u'state': u'completed_w_errors',
        u'updated': u'2015-03-24T18:46:56'})]
# package_response: six murano catalog packages (five Applications and
# one Library). Each entry carries the package metadata — id, name,
# fully_qualified_name, class_definitions, tags, timestamps — that the
# expected_package_properties rows below are derived from.
package_response = [
    util.ResponseObj({
        u'author': u'Mirantis, Inc',
        u'categories': [],
        u'class_definitions': [u'io.murano.apps.apache.Tomcat'],
        u'created': u'2015-03-23T21:28:11',
        u'description': u'Apache Tomcat is an open source software '
        'implementation of the Java Servlet and JavaServer Pages '
        'technologies.\n',
        u'enabled': True,
        u'fully_qualified_name': u'io.murano.apps.apache.Tomcat',
        u'id': u'a7d64980999948dc96401cdce5ae2141',
        u'is_public': False,
        u'name': u'Apache Tomcat',
        u'owner_id': u'610c6afc1fc54d23a58d316bf76e5f42',
        u'supplier': {},
        u'tags': [u'Servlets', u'Server', u'Pages', u'Java'],
        u'type': u'Application',
        u'updated': u'2015-03-23T21:28:11'}),
    util.ResponseObj({
        u'author': u'Mirantis, Inc',
        u'categories': [],
        u'class_definitions': [u'io.murano.apps.linux.Git'],
        u'created': u'2015-03-23T21:26:56',
        u'description': u'Simple Git repo hosted on Linux VM.\n',
        u'enabled': True,
        u'fully_qualified_name': u'io.murano.apps.linux.Git',
        u'id': u'3ff58cdfeb27487fb3127fb8fd45109c',
        u'is_public': False,
        u'name': u'Git',
        u'owner_id': u'610c6afc1fc54d23a58d316bf76e5f42',
        u'supplier': {},
        u'tags': [u'Linux', u'connection'],
        u'type': u'Application',
        u'updated': u'2015-03-23T21:26:56'}),
    util.ResponseObj({
        u'author': u'Mirantis, Inc',
        u'categories': [],
        u'class_definitions': [u'io.murano.databases.MySql'],
        u'created': u'2015-03-23T21:28:58',
        u'description': u'MySql is a relational database management system '
        '(RDBMS), and ships with\nno GUI tools to administer MySQL databases '
        'or manage data contained within\nthe databases.\n',
        u'enabled': True,
        u'fully_qualified_name': u'io.murano.databases.MySql',
        u'id': u'884b764c0ce6439d8566b3b2da967687',
        u'is_public': False,
        u'name': u'MySQL',
        u'owner_id': u'610c6afc1fc54d23a58d316bf76e5f42',
        u'supplier': {},
        u'tags': [u'Database', u'MySql', u'SQL', u'RDBMS'],
        u'type': u'Application',
        u'updated': u'2015-03-23T21:28:58'}),
    util.ResponseObj({
        u'author': u'Mirantis, Inc',
        u'categories': [],
        u'class_definitions': [u'io.murano.apps.java.PetClinic'],
        u'created': u'2015-03-24T18:25:24',
        u'description': u'An example of a Java app running on a '
        'Apache Tomcat Servlet container and using the either Postgre SQL, '
        'or MySql database\n',
        u'enabled': True,
        u'fully_qualified_name': u'io.murano.apps.java.PetClinic',
        u'id': u'9f7c9e2ed8f9462a8f9037032ab64755',
        u'is_public': False,
        u'name': u'PetClinic',
        u'owner_id': u'610c6afc1fc54d23a58d316bf76e5f42',
        u'supplier': {},
        u'tags': [u'Servlets', u'Server', u'Pages', u'Java'],
        u'type': u'Application',
        u'updated': u'2015-03-24T18:25:24'}),
    util.ResponseObj({
        u'author': u'Mirantis, Inc',
        u'categories': [],
        u'class_definitions': [u'io.murano.databases.PostgreSql'],
        u'created': u'2015-03-23T21:29:10',
        u'description': u'PostgreSQL is a powerful, open source '
        'object-relational database system.\nIt has more than 15 years '
        'of active development and a proven architecture\nthat has earned '
        'it a strong reputation for reliability, data integrity,\nand '
        'correctness.\n',
        u'enabled': True,
        u'fully_qualified_name': u'io.murano.databases.PostgreSql',
        u'id': u'4b9c6a24c2e64f928156e0c87324c394',
        u'is_public': False,
        u'name': u'PostgreSQL',
        u'owner_id': u'610c6afc1fc54d23a58d316bf76e5f42',
        u'supplier': {},
        u'tags': [u'Database', u'Postgre', u'SQL', u'RDBMS'],
        u'type': u'Application',
        u'updated': u'2015-03-23T21:29:10'}),
    util.ResponseObj({
        u'author': u'Mirantis, Inc',
        u'categories': [],
        u'class_definitions': [u'io.murano.databases.SqlDatabase'],
        u'created': u'2015-03-24T18:26:32',
        u'description': u'This is the interface defining API for different '
        'SQL - RDBMS databases\n',
        u'enabled': True,
        u'fully_qualified_name': u'io.murano.databases',
        u'id': u'5add5a561da341c4875495c5887957a8',
        u'is_public': False,
        u'name': u'SQL Library',
        u'owner_id': u'610c6afc1fc54d23a58d316bf76e5f42',
        u'supplier': {},
        u'tags': [u'SQL', u'RDBMS'],
        u'type': u'Library',
        u'updated': u'2015-03-24T18:26:32'})]
action_response = 'c79eb72600024fa1995345a2b2eb3acd'
# Expected datasource table content
# expected_states: (environment_id, status) rows — matches the single
# 'deploy failure' environment in env_response.
expected_states = [
    (u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', u'deploy failure'),
]
# expected_environment_parent_types: (environment_id, parent_type) rows —
# one row per type in the environment's MuranoPL ancestry
# (io.murano.Object and io.murano.Environment).
expected_environment_parent_types = [
    (u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', 'io.murano.Object'),
    (u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', 'io.murano.Environment'),
]
# expected_env_properties: (environment_id, property_name, value) triples
# flattened from env_response (scalar fields only; 'networking' and ids
# are not represented here).
expected_env_properties = [
    (u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', u'created', '2015-03-24T18:35:14'),
    (u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', u'version', 1),
    (u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', u'status', 'deploy failure'),
    (u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', u'name', 'quick-env-2'),
    (u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', u'updated', '2015-03-24T18:46:56'),
]
# expected_service_properties: (object_id, property_name, value) triples
# flattened from service_response. Nested dict keys are joined with '.'
# (e.g. 'networks.useFlatNetwork') and list values expand to one row per
# element (e.g. two 'ipAddresses' rows per instance). The object_id is
# either a service's '?'->'id' or its instance's '?'->'id'.
expected_service_properties = [
    (u'76b9ca88-c668-4e37-a830-5845adc10b0e', u'ipAddresses', '10.0.11.3'),
    (u'76b9ca88-c668-4e37-a830-5845adc10b0e', u'ipAddresses', '172.24.4.4'),
    (u'76b9ca88-c668-4e37-a830-5845adc10b0e',
     u'networks.useFlatNetwork', False),
    (u'769af50c-9629-4694-b623-e9b392941279', u'name', 'MySqlDB'),
    (u'c52dda24-38d6-4f2f-9184-abca0beaa6e9',
     u'networks.useEnvironmentNetwork', True),
    (u'c52dda24-38d6-4f2f-9184-abca0beaa6e9',
     u'floatingIpAddress', '172.24.4.4'),
    (u'fda74653-8b66-42e2-be16-12ebc87d7570', u'dbPassword', 'Passw0rd.'),
    (u'fda74653-8b66-42e2-be16-12ebc87d7570',
     u'database', '769af50c-9629-4694-b623-e9b392941279'),
    (u'fda74653-8b66-42e2-be16-12ebc87d7570',
     u'tomcat', 'ea6a7d9b-7799-4d00-9db3-4573cb94daec'),
    (u'fda74653-8b66-42e2-be16-12ebc87d7570', u'warLocation',
     'https://dl.dropboxusercontent.com/u/1684617/petclinic.war'),
    (u'c52dda24-38d6-4f2f-9184-abca0beaa6e9', u'availabilityZone', 'nova'),
    (u'76b9ca88-c668-4e37-a830-5845adc10b0e', u'name', 'bcnfli7nn738y1'),
    (u'fda74653-8b66-42e2-be16-12ebc87d7570', u'dbUser', 'pet_user'),
    (u'c52dda24-38d6-4f2f-9184-abca0beaa6e9',
     u'image', '66e015aa-33c5-41ff-9b81-d8d17f9b02c3'),
    (u'76b9ca88-c668-4e37-a830-5845adc10b0e', u'flavor', 'm1.small'),
    (u'c52dda24-38d6-4f2f-9184-abca0beaa6e9', u'ipAddresses', '10.0.11.4'),
    (u'c52dda24-38d6-4f2f-9184-abca0beaa6e9', u'name', 'woydqi7nn7ipc2'),
    (u'fda74653-8b66-42e2-be16-12ebc87d7570', u'name', 'PetClinic'),
    (u'c52dda24-38d6-4f2f-9184-abca0beaa6e9', u'assignFloatingIp', True),
    (u'76b9ca88-c668-4e37-a830-5845adc10b0e', u'assignFloatingIp', True),
    (u'769af50c-9629-4694-b623-e9b392941279', u'password', 'Passw0rd.'),
    (u'c52dda24-38d6-4f2f-9184-abca0beaa6e9', u'flavor', 'm1.small'),
    (u'fda74653-8b66-42e2-be16-12ebc87d7570', u'dbName', 'pet_db'),
    (u'c52dda24-38d6-4f2f-9184-abca0beaa6e9',
     u'networks.useFlatNetwork', False),
    (u'76b9ca88-c668-4e37-a830-5845adc10b0e',
     u'networks.useEnvironmentNetwork', True),
    (u'76b9ca88-c668-4e37-a830-5845adc10b0e', u'availabilityZone', 'nova'),
    (u'76b9ca88-c668-4e37-a830-5845adc10b0e',
     u'floatingIpAddress', '172.24.4.4'),
    (u'c52dda24-38d6-4f2f-9184-abca0beaa6e9', u'ipAddresses', '172.24.4.4'),
    (u'ea6a7d9b-7799-4d00-9db3-4573cb94daec', u'name', 'Tomcat'),
    (u'76b9ca88-c668-4e37-a830-5845adc10b0e',
     u'image', '66e015aa-33c5-41ff-9b81-d8d17f9b02c3'),
]
expected_package_properties = [
(u'4b9c6a24c2e64f928156e0c87324c394', u'is_public', False),
(u'3ff58cdfeb27487fb3127fb8fd45109c', u'tags', 'connection'),
(u'884b764c0ce6439d8566b3b2da967687', u'created', '2015-03-23T21:28:58'),
(u'884b764c0ce6439d8566b3b2da967687', u'tags', 'SQL'),
(u'9f7c9e2ed8f9462a8f9037032ab64755', u'tags', 'Servlets'),
(u'a7d64980999948dc96401cdce5ae2141', u'tags', 'Servlets'),
(u'4b9c6a24c2e64f928156e0c87324c394', u'created', '2015-03-23T21:29:10'),
(u'9f7c9e2ed8f9462a8f9037032ab64755', u'fully_qualified_name',
'io.murano.apps.java.PetClinic'),
(u'884b764c0ce6439d8566b3b2da967687', u'type', 'Application'),
(u'5add5a561da341c4875495c5887957a8', u'created', '2015-03-24T18:26:32'),
(u'884b764c0ce6439d8566b3b2da967687', u'name', 'MySQL'),
(u'884b764c0ce6439d8566b3b2da967687', u'tags', 'Database'),
(u'5add5a561da341c4875495c5887957a8', u'enabled', True),
(u'9f7c9e2ed8f9462a8f9037032ab64755', u'tags', 'Pages'),
(u'4b9c6a24c2e64f928156e0c87324c394', u'tags', 'Database'),
(u'3ff58cdfeb27487fb3127fb8fd45109c', u'type', 'Application'),
(u'5add5a561da341c4875495c5887957a8', u'type', 'Library'),
(u'4b9c6a24c2e64f928156e0c87324c394', u'type', 'Application'),
(u'884b764c0ce6439d8566b3b2da967687', u'tags', 'MySql'),
(u'5add5a561da341c4875495c5887957a8', u'fully_qualified_name',
'io.murano.databases'),
(u'3ff58cdfeb27487fb3127fb8fd45109c', u'author', 'Mirantis, Inc'),
(u'3ff58cdfeb27487fb3127fb8fd45109c', u'is_public', False),
(u'4b9c6a24c2e64f928156e0c87324c394', u'tags', 'SQL'),
(u'884b764c0ce6439d8566b3b2da967687', u'enabled', True),
(u'4b9c6a24c2e64f928156e0c87324c394', u'updated', '2015-03-23T21:29:10'),
(u'884b764c0ce6439d8566b3b2da967687', u'fully_qualified_name',
'io.murano.databases.MySql'),
(u'9f7c9e2ed8f9462a8f9037032ab64755', u'name', 'PetClinic'),
(u'4b9c6a24c2e64f928156e0c87324c394', u'fully_qualified_name',
'io.murano.databases.PostgreSql'),
(u'9f7c9e2ed8f9462a8f9037032ab64755', u'tags', 'Java'),
(u'4b9c6a24c2e64f928156e0c87324c394', u'tags', 'Postgre'),
(u'a7d64980999948dc96401cdce5ae2141', u'is_public', False),
(u'a7d64980999948dc96401cdce5ae2141', u'type', 'Application'),
(u'4b9c6a24c2e64f928156e0c87324c394', u'name', 'PostgreSQL'),
(u'3ff58cdfeb27487fb3127fb8fd45109c', u'tags', 'Linux'),
(u'9f7c9e2ed8f9462a8f9037032ab64755', u'author', 'Mirantis, Inc'),
(u'5add5a561da341c4875495c5887957a8', u'is_public', False),
(u'5add5a561da341c4875495c5887957a8', u'tags', 'SQL'),
(u'4b9c6a24c2e64f928156e0c87324c394', u'author', 'Mirantis, Inc'),
(u'5add5a561da341c4875495c5887957a8', u'class_definitions',
'io.murano.databases.SqlDatabase'),
(u'3ff58cdfeb27487fb3127fb8fd45109c', u'updated', '2015-03-23T21:26:56'),
(u'5add5a561da341c4875495c5887957a8', u'tags', 'RDBMS'),
(u'a7d64980999948dc96401cdce5ae2141', u'enabled', True),
(u'5add5a561da341c4875495c5887957a8', u'updated', '2015-03-24T18:26:32'),
(u'9f7c9e2ed8f9462a8f9037032ab64755', u'class_definitions',
'io.murano.apps.java.PetClinic'),
(u'3ff58cdfeb27487fb3127fb8fd45109c', u'enabled', True),
(u'a7d64980999948dc96401cdce5ae2141', u'class_definitions',
'io.murano.apps.apache.Tomcat'),
(u'9f7c9e2ed8f9462a8f9037032ab64755', u'created', '2015-03-24T18:25:24'),
(u'5add5a561da341c4875495c5887957a8', u'author', 'Mirantis, Inc'),
(u'9f7c9e2ed8f9462a8f9037032ab64755', u'is_public', False),
(u'884b764c0ce6439d8566b3b2da967687', u'class_definitions',
'io.murano.databases.MySql'),
(u'884b764c0ce6439d8566b3b2da967687', u'is_public', False),
(u'884b764c0ce6439d8566b3b2da967687', u'tags', 'RDBMS'),
(u'a7d64980999948dc96401cdce5ae2141', u'author', 'Mirantis, Inc'),
(u'3ff58cdfeb27487fb3127fb8fd45109c', u'name', 'Git'),
(u'a7d64980999948dc96401cdce5ae2141', u'fully_qualified_name',
'io.murano.apps.apache.Tomcat'),
(u'9f7c9e2ed8f9462a8f9037032ab64755', u'tags', 'Server'),
(u'4b9c6a24c2e64f928156e0c87324c394', u'tags', 'RDBMS'),
(u'4b9c6a24c2e64f928156e0c87324c394', u'class_definitions',
'io.murano.databases.PostgreSql'),
(u'a7d64980999948dc96401cdce5ae2141', u'tags', 'Pages'),
(u'4b9c6a24c2e64f928156e0c87324c394', u'enabled', True),
(u'a7d64980999948dc96401cdce5ae2141', u'tags', 'Server'),
(u'a7d64980999948dc96401cdce5ae2141', u'updated', '2015-03-23T21:28:11'),
(u'884b764c0ce6439d8566b3b2da967687', u'updated', '2015-03-23T21:28:58'),
(u'a7d64980999948dc96401cdce5ae2141', u'name', 'Apache Tomcat'),
(u'884b764c0ce6439d8566b3b2da967687', u'author', 'Mirantis, Inc'),
(u'9f7c9e2ed8f9462a8f9037032ab64755', u'enabled', True),
(u'a7d64980999948dc96401cdce5ae2141', u'created', '2015-03-23T21:28:11'),
(u'3ff58cdfeb27487fb3127fb8fd45109c', u'created', '2015-03-23T21:26:56'),
(u'5add5a561da341c4875495c5887957a8', u'name', 'SQL Library'),
(u'9f7c9e2ed8f9462a8f9037032ab64755', u'type', 'Application'),
(u'3ff58cdfeb27487fb3127fb8fd45109c', u'fully_qualified_name',
'io.murano.apps.linux.Git'),
(u'a7d64980999948dc96401cdce5ae2141', u'tags', 'Java'),
(u'9f7c9e2ed8f9462a8f9037032ab64755', u'updated', '2015-03-24T18:25:24'),
(u'3ff58cdfeb27487fb3127fb8fd45109c', u'class_definitions',
'io.murano.apps.linux.Git'),
]
# "objects" rows for services: (object_id, parent_object_id, type).
# Services are parented by the environment; instances by their service.
expected_service_objects = [
    ('769af50c-9629-4694-b623-e9b392941279',
     'ad9762b2d82f44ca8b8a6ce4a19dd1cc',
     'io.murano.databases.MySql'),
    ('fda74653-8b66-42e2-be16-12ebc87d7570',
     'ad9762b2d82f44ca8b8a6ce4a19dd1cc',
     'io.murano.apps.java.PetClinic'),
    ('76b9ca88-c668-4e37-a830-5845adc10b0e',
     '769af50c-9629-4694-b623-e9b392941279',
     'io.murano.resources.LinuxMuranoInstance'),
    ('ea6a7d9b-7799-4d00-9db3-4573cb94daec',
     'ad9762b2d82f44ca8b8a6ce4a19dd1cc',
     'io.murano.apps.apache.Tomcat'),
    ('c52dda24-38d6-4f2f-9184-abca0beaa6e9',
     'ea6a7d9b-7799-4d00-9db3-4573cb94daec',
     'io.murano.resources.LinuxMuranoInstance'),
]
# "objects" rows for packages: (package_id, tenant_id, type).  All six
# packages belong to the same tenant; only the SQL Library has type
# 'Library', the rest are 'io.murano.Application'.
expected_package_objects = [
    ('5add5a561da341c4875495c5887957a8',
     '610c6afc1fc54d23a58d316bf76e5f42', 'Library'),
    ('4b9c6a24c2e64f928156e0c87324c394',
     '610c6afc1fc54d23a58d316bf76e5f42', 'io.murano.Application'),
    ('3ff58cdfeb27487fb3127fb8fd45109c',
     '610c6afc1fc54d23a58d316bf76e5f42', 'io.murano.Application'),
    ('a7d64980999948dc96401cdce5ae2141',
     '610c6afc1fc54d23a58d316bf76e5f42', 'io.murano.Application'),
    ('9f7c9e2ed8f9462a8f9037032ab64755',
     '610c6afc1fc54d23a58d316bf76e5f42', 'io.murano.Application'),
    ('884b764c0ce6439d8566b3b2da967687',
     '610c6afc1fc54d23a58d316bf76e5f42', 'io.murano.Application'),
]
# "parent_types" rows for the two LinuxMuranoInstance objects:
# (instance_id, ancestor_type), one row per type in the class hierarchy.
expected_service_parent_types = [
    ('76b9ca88-c668-4e37-a830-5845adc10b0e',
     'io.murano.resources.Instance'),
    ('76b9ca88-c668-4e37-a830-5845adc10b0e',
     'io.murano.resources.LinuxInstance'),
    ('76b9ca88-c668-4e37-a830-5845adc10b0e',
     'io.murano.Object'),
    ('76b9ca88-c668-4e37-a830-5845adc10b0e',
     'io.murano.resources.LinuxMuranoInstance'),
    ('c52dda24-38d6-4f2f-9184-abca0beaa6e9',
     'io.murano.resources.LinuxInstance'),
    ('c52dda24-38d6-4f2f-9184-abca0beaa6e9',
     'io.murano.resources.LinuxMuranoInstance'),
    ('c52dda24-38d6-4f2f-9184-abca0beaa6e9',
     'io.murano.Object'),
    ('c52dda24-38d6-4f2f-9184-abca0beaa6e9',
     'io.murano.resources.Instance'),
]
# "relationships" rows: (source_id, target_id, relationship_name).
# 'services' links the environment to each service; 'instance',
# 'database' and 'tomcat' come from the service property graph.
expected_service_relationships = [
    ('fda74653-8b66-42e2-be16-12ebc87d7570',
     '769af50c-9629-4694-b623-e9b392941279',
     'database'),
    ('ad9762b2d82f44ca8b8a6ce4a19dd1cc',
     'ea6a7d9b-7799-4d00-9db3-4573cb94daec',
     'services'),
    ('fda74653-8b66-42e2-be16-12ebc87d7570',
     'ea6a7d9b-7799-4d00-9db3-4573cb94daec',
     'tomcat'),
    ('ad9762b2d82f44ca8b8a6ce4a19dd1cc',
     '769af50c-9629-4694-b623-e9b392941279',
     'services'),
    ('ad9762b2d82f44ca8b8a6ce4a19dd1cc',
     'fda74653-8b66-42e2-be16-12ebc87d7570',
     'services'),
    ('769af50c-9629-4694-b623-e9b392941279',
     '76b9ca88-c668-4e37-a830-5845adc10b0e',
     'instance'),
    ('ea6a7d9b-7799-4d00-9db3-4573cb94daec',
     'c52dda24-38d6-4f2f-9184-abca0beaa6e9',
     'instance'),
]
expected_connected = [
(u'fda74653-8b66-42e2-be16-12ebc87d7570',
u'ea6a7d9b-7799-4d00-9db3-4573cb94daec'),
(u'fda74653-8b66-42e2-be16-12ebc87d7570',
u'c52dda24-38d6-4f2f-9184-abca0beaa6e9'),
(u'ad9762b2d82f44ca8b8a6ce4a19dd1cc',
u'769af50c-9629-4694-b623-e9b392941279'),
(u'ad9762b2d82f44ca8b8a6ce4a19dd1cc',
u'ea6a7d9b-7799-4d00-9db3-4573cb94daec'),
(u'769af50c-9629-4694-b623-e9b392941279',
u'76b9ca88-c668-4e37-a830-5845adc10b0e'),
(u'ad9762b2d82f44ca8b8a6ce4a19dd1cc',
u'fda74653-8b66-42e2-be16-12ebc87d7570'),
(u'fda74653-8b66-42e2-be16-12ebc87d7570',
u'769af50c-9629-4694-b623-e9b392941279'),
(u'fda74653-8b66-42e2-be16-12ebc87d7570',
u'76b9ca88-c668-4e37-a830-5845adc10b0e'),
(u'ad9762b2d82f44ca8b8a6ce4a19dd1cc',
u'76b9ca88-c668-4e37-a830-5845adc10b0e'),
(u'ea6a7d9b-7799-4d00-9db3-4573cb94daec',
u'c52dda24-38d6-4f2f-9184-abca0beaa6e9'),
(u'ad9762b2d82f44ca8b8a6ce4a19dd1cc',
u'c52dda24-38d6-4f2f-9184-abca0beaa6e9'),
]
# "objects" row contributed by the deployment description: the network
# object is parented by the environment.
expected_deployment_objects = [
    ('a2be8265b01743c0bdf645772d632bf0',
     'ad9762b2d82f44ca8b8a6ce4a19dd1cc',
     'io.murano.resources.NeutronNetwork')
]
# "properties" row for the deployment's network object.
expected_deployment_properties = [
    ('a2be8265b01743c0bdf645772d632bf0', 'name', 'quick-env-2-network')
]
# "parent_types" rows for the network object, one per ancestor class.
expected_deployment_parent_types = [
    ('a2be8265b01743c0bdf645772d632bf0', 'io.murano.Object'),
    ('a2be8265b01743c0bdf645772d632bf0', 'io.murano.resources.Network'),
    ('a2be8265b01743c0bdf645772d632bf0',
     'io.murano.resources.NeutronNetwork')
]
| 47.306533 | 79 | 0.644359 | # Copyright (c) 2015 Hewlett-Packard. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import mock
# mocking muranoclient so that python-muranoclient
# doesn't need to be included in requirements.txt.
# (Including python-muranoclient in requirements.txt will
# cause failures in Jenkins because python-muranoclient is not
# included in global_requirements.txt at this point)
import sys
sys.modules['muranoclient'] = mock.Mock()
sys.modules['muranoclient.client'] = mock.Mock()
sys.modules['muranoclient.common'] = mock.Mock()
sys.modules['muranoclient.common.exceptions'] = mock.Mock()
from congress.datasources import murano_driver
from congress.tests import base
from congress.tests.datasources import util
from congress.tests import helper
class TestMuranoDriver(base.TestCase):
def setUp(self):
super(TestMuranoDriver, self).setUp()
self.keystone_client_p = mock.patch(
"keystoneclient.v2_0.client.Client")
self.keystone_client_p.start()
self.murano_client = mock.MagicMock()
self.murano_client.environments.list.return_value = env_response
self.murano_client.services.list.return_value = service_response
self.murano_client.deployments.list.return_value = deployment_response
self.murano_client.packages.list.return_value = package_response
self.murano_client.actions.call.return_value = action_response
args = helper.datasource_openstack_args()
self.driver = murano_driver.MuranoDriver(args=args)
self.driver.murano_client = self.murano_client
def test_list_environments(self):
"""Test conversion of environments objects to tables."""
self.driver.state[self.driver.STATES] = set()
self.driver.state[self.driver.PROPERTIES] = set()
self.driver.state[self.driver.PARENT_TYPES] = set()
envs = self.driver.murano_client.environments.list()
self.driver._translate_environments(envs)
# datasource tables
states = list(self.driver.state[self.driver.STATES])
properties = list(self.driver.state[self.driver.PROPERTIES])
parent_types = list(self.driver.state[self.driver.PARENT_TYPES])
# verify tables
self.assertIsNotNone(states)
self.assertIsNotNone(properties)
for row in expected_states:
self.assertIn(row, states, ("%s not in states" % str(row)))
for row in expected_env_properties:
self.assertIn(row, properties,
("%s not in properties" % str(row)))
for row in expected_environment_parent_types:
self.assertIn(row, parent_types,
("%s not in parent_types" % str(row)))
def test_translate_services(self):
"""Test conversion of environment services to tables."""
self.driver.state[self.driver.OBJECTS] = set()
self.driver.state[self.driver.PROPERTIES] = set()
self.driver.state[self.driver.PARENT_TYPES] = set()
self.driver.state[self.driver.RELATIONSHIPS] = set()
envs = self.driver.murano_client.environments.list()
pkgs = self.driver.murano_client.packages.list()
# package properties are needed for mapping parent_types
self.driver._translate_packages(pkgs)
self.driver._translate_services(envs)
# datasource tables
objects = list(self.driver.state[self.driver.OBJECTS])
properties = list(self.driver.state[self.driver.PROPERTIES])
parent_types = list(self.driver.state[self.driver.PARENT_TYPES])
relationships = list(self.driver.state[self.driver.RELATIONSHIPS])
# verify tables
self.assertIsNotNone(objects)
self.assertIsNotNone(properties)
self.assertIsNotNone(parent_types)
self.assertIsNotNone(relationships)
for row in expected_service_objects:
self.assertIn(row, objects, ("%s not in objects" % str(row)))
for row in expected_service_properties:
self.assertIn(row, properties,
("%s not in properties" % str(row)))
for row in expected_service_parent_types:
self.assertIn(row, parent_types,
("%s not in parent_types" % str(row)))
for row in expected_service_relationships:
self.assertIn(row, relationships,
("%s not in relationships" % str(row)))
def test_translate_environment_services(self):
"""Test conversion of environment services to tables."""
self.driver.state[self.driver.OBJECTS] = set()
self.driver.state[self.driver.PROPERTIES] = set()
self.driver.state[self.driver.PARENT_TYPES] = set()
self.driver.state[self.driver.RELATIONSHIPS] = set()
envs = self.driver.murano_client.environments.list()
pkgs = self.driver.murano_client.packages.list()
# package properties are needed for mapping parent_types
self.driver._translate_packages(pkgs)
for env in envs:
services = self.murano_client.services.list(env.id)
self.driver._translate_environment_services(services, env.id)
# datasource tables
objects = list(self.driver.state[self.driver.OBJECTS])
properties = list(self.driver.state[self.driver.PROPERTIES])
parent_types = list(self.driver.state[self.driver.PARENT_TYPES])
relationships = list(self.driver.state[self.driver.RELATIONSHIPS])
# verify tables
self.assertIsNotNone(objects)
self.assertIsNotNone(properties)
self.assertIsNotNone(parent_types)
self.assertIsNotNone(relationships)
for row in expected_service_objects:
self.assertIn(row, objects, ("%s not in objects" % str(row)))
for row in expected_service_properties:
self.assertIn(row, properties,
("%s not in properties" % str(row)))
for row in expected_service_parent_types:
self.assertIn(row, parent_types,
("%s not in parent_types" % str(row)))
for row in expected_service_relationships:
self.assertIn(row, relationships,
("%s not in relationships" % str(row)))
def test_translate_packages(self):
"""Test conversion of application packages to tables."""
self.driver.state[self.driver.OBJECTS] = set()
self.driver.state[self.driver.PROPERTIES] = set()
pkgs = self.driver.murano_client.packages.list()
self.driver._translate_packages(pkgs)
# datasource tables
objects = list(self.driver.state[self.driver.OBJECTS])
properties = list(self.driver.state[self.driver.PROPERTIES])
# verify tables
self.assertIsNotNone(objects)
self.assertIsNotNone(properties)
for row in expected_package_objects:
self.assertIn(row, objects, ("%s not in objects" % str(row)))
for row in expected_package_properties:
self.assertIn(row, properties,
("%s not in properties" % str(row)))
def test_translate_deployments(self):
"""Test conversion of deployments to tables."""
self.driver.state[self.driver.OBJECTS] = set()
self.driver.state[self.driver.PROPERTIES] = set()
self.driver.state[self.driver.PARENT_TYPES] = set()
self.driver.state[self.driver.RELATIONSHIPS] = set()
envs = self.driver.murano_client.environments.list()
pkgs = self.driver.murano_client.packages.list()
# package properties are needed for mapping parent_types
self.driver._translate_packages(pkgs)
self.driver._translate_deployments(envs)
# datasource tables
objects = list(self.driver.state[self.driver.OBJECTS])
properties = list(self.driver.state[self.driver.PROPERTIES])
parent_types = list(self.driver.state[self.driver.PARENT_TYPES])
# verify tables
self.assertIsNotNone(objects)
self.assertIsNotNone(properties)
self.assertIsNotNone(parent_types)
for row in expected_deployment_objects:
self.assertIn(row, objects, ("%s not in objects" % str(row)))
for row in expected_deployment_properties:
self.assertIn(row, properties,
("%s not in properties" % str(row)))
for row in expected_deployment_parent_types:
self.assertIn(row, parent_types,
("%s not in parent_types" % str(row)))
def test_translate_environment_deployments(self):
"""Test conversion of deployments to tables."""
self.driver.state[self.driver.OBJECTS] = set()
self.driver.state[self.driver.PROPERTIES] = set()
self.driver.state[self.driver.PARENT_TYPES] = set()
self.driver.state[self.driver.RELATIONSHIPS] = set()
envs = self.driver.murano_client.environments.list()
pkgs = self.driver.murano_client.packages.list()
# package properties are needed for mapping parent_types
self.driver._translate_packages(pkgs)
for env in envs:
deps = self.murano_client.deployments.list(env.id)
self.driver._translate_environment_deployments(deps, env.id)
# datasource tables
objects = list(self.driver.state[self.driver.OBJECTS])
properties = list(self.driver.state[self.driver.PROPERTIES])
parent_types = list(self.driver.state[self.driver.PARENT_TYPES])
# verify tables
self.assertIsNotNone(objects)
self.assertIsNotNone(properties)
self.assertIsNotNone(parent_types)
for row in expected_deployment_objects:
self.assertIn(row, objects, ("%s not in objects" % str(row)))
for row in expected_deployment_properties:
self.assertIn(row, properties,
("%s not in properties" % str(row)))
for row in expected_deployment_parent_types:
self.assertIn(row, parent_types,
("%s not in parent_types" % str(row)))
def test_translate_connected(self):
"""Test translation of relationships to connected table."""
self.driver.state[self.driver.OBJECTS] = set()
self.driver.state[self.driver.PROPERTIES] = set()
self.driver.state[self.driver.PARENT_TYPES] = set()
self.driver.state[self.driver.RELATIONSHIPS] = set()
self.driver.state[self.driver.CONNECTED] = set()
envs = self.driver.murano_client.environments.list()
self.driver._translate_services(envs) # to populate relationships
self.driver._translate_connected()
# datasource tables
connected = list(self.driver.state[self.driver.CONNECTED])
# verify tables
self.assertIsNotNone(connected)
for row in expected_connected:
self.assertIn(row, connected, ("%s not in connected" % str(row)))
def test_execute(self):
"""Test action execution."""
self.driver.state[self.driver.OBJECTS] = set()
self.driver.state[self.driver.PROPERTIES] = set()
self.driver.state[self.driver.PARENT_TYPES] = set()
self.driver.state[self.driver.RELATIONSHIPS] = set()
envs = self.driver.murano_client.environments.list()
pkgs = self.driver.murano_client.packages.list()
# package properties are needed for mapping parent_types
self.driver._translate_packages(pkgs)
self.driver._translate_services(envs)
action = 'muranoaction'
action_args = {'positional': ['ad9762b2d82f44ca8b8a6ce4a19dd1cc',
'769af50c-9629-4694-b623-e9b392941279',
'restartVM']}
self.driver.execute(action, action_args)
self.assertIn(action_response, self.driver.action_call_returns)
# Sample responses from murano-client
env_response = [
util.ResponseObj({
u'created': u'2015-03-24T18:35:14',
u'id': u'ad9762b2d82f44ca8b8a6ce4a19dd1cc',
u'name': u'quick-env-2',
u'networking': {},
u'status': u'deploy failure',
u'tenant_id': u'610c6afc1fc54d23a58d316bf76e5f42',
u'updated': u'2015-03-24T18:46:56',
u'version': 1})]
service_response = [
util.ResponseObj({
u'?': {u'_26411a1861294160833743e45d0eaad9': {u'name': u'MySQL'},
u'_actions': {u'74f5b2d2-1f8d-4b1a-8238-4155ce2cadb2_restartVM':
{u'enabled': True, u'name': u'restartVM'}},
u'id': u'769af50c-9629-4694-b623-e9b392941279',
u'status': u'deploy failure',
u'type': u'io.murano.databases.MySql'},
u'database': u'',
u'instance': {u'?': {u'_actions': {},
u'id': u'76b9ca88-c668-4e37-a830-5845adc10b0e',
u'type':
u'io.murano.resources.LinuxMuranoInstance'},
u'assignFloatingIp': True,
u'availabilityZone': u'nova',
u'flavor': u'm1.small',
u'floatingIpAddress': u'172.24.4.4',
u'image': u'66e015aa-33c5-41ff-9b81-d8d17f9b02c3',
u'ipAddresses': [u'10.0.11.3', u'172.24.4.4'],
u'keyname': u'',
u'name': u'bcnfli7nn738y1',
u'networks': {u'customNetworks': [],
u'primaryNetwork': None,
u'useEnvironmentNetwork': True,
u'useFlatNetwork': False},
u'securityGroupName': None,
u'sharedIps': []},
u'name': u'MySqlDB',
u'password': u'Passw0rd.',
u'username': u''}),
util.ResponseObj({
u'?': {u'_26411a1861294160833743e45d0eaad9':
{u'name': u'Apache Tomcat'},
u'_actions': {},
u'id': u'ea6a7d9b-7799-4d00-9db3-4573cb94daec',
u'status': u'deploy failure',
u'type': u'io.murano.apps.apache.Tomcat'},
u'instance': {u'?': {u'_actions': {},
u'id': u'c52dda24-38d6-4f2f-9184-abca0beaa6e9',
u'type':
u'io.murano.resources.LinuxMuranoInstance'},
u'assignFloatingIp': True,
u'availabilityZone': u'nova',
u'flavor': u'm1.small',
u'floatingIpAddress': u'172.24.4.4',
u'image': u'66e015aa-33c5-41ff-9b81-d8d17f9b02c3',
u'ipAddresses': [u'10.0.11.4', u'172.24.4.4'],
u'keyname': u'',
u'name': u'woydqi7nn7ipc2',
u'networks': {u'customNetworks': [],
u'primaryNetwork': None,
u'useEnvironmentNetwork': True,
u'useFlatNetwork': False},
u'securityGroupName': None,
u'sharedIps': []},
u'name': u'Tomcat'}),
util.ResponseObj({
u'?': {u'_26411a1861294160833743e45d0eaad9': {u'name': u'PetClinic'},
u'_actions': {},
u'id': u'fda74653-8b66-42e2-be16-12ebc87d7570',
u'status': u'deploy failure',
u'type': u'io.murano.apps.java.PetClinic'},
u'database': u'769af50c-9629-4694-b623-e9b392941279',
u'dbName': u'pet_db',
u'dbPassword': u'Passw0rd.',
u'dbUser': u'pet_user',
u'name': u'PetClinic',
u'tomcat': u'ea6a7d9b-7799-4d00-9db3-4573cb94daec',
u'warLocation':
u'https://dl.dropboxusercontent.com/u/1684617/petclinic.war'})]
deployment_response = [
util.ResponseObj({
u'action': {u'args': {},
u'method': u'deploy',
u'object_id': u'ad9762b2d82f44ca8b8a6ce4a19dd1cc'},
u'created': u'2015-03-24T18:36:23',
u'description':
{u'?': {u'id': u'ad9762b2d82f44ca8b8a6ce4a19dd1cc',
u'type': u'io.murano.Environment'},
u'defaultNetworks':
{u'environment':
{u'?': {u'id':
u'a2be8265b01743c0bdf645772d632bf0',
u'type': u'io.murano.resources.NeutronNetwork'},
u'name': u'quick-env-2-network'},
u'flat': None},
u'name': u'quick-env-2',
u'services':
[{u'?':
{u'_26411a1861294160833743e45d0eaad9':
{u'name': u'MySQL'},
u'id': u'769af50c-9629-4694-b623-e9b392941279',
u'type': u'io.murano.databases.MySql'},
u'database': u'',
u'instance':
{u'?': {u'id': u'76b9ca88-c668-4e37-a830-5845adc10b0e',
u'type': u'io.murano.resources.LinuxMuranoInstance'},
u'assignFloatingIp': True,
u'availabilityZone': u'nova',
u'flavor': u'm1.small',
u'image': u'66e015aa-33c5-41ff-9b81-d8d17f9b02c3',
u'keyname': u'',
u'name': u'bcnfli7nn738y1'},
u'name': u'MySqlDB',
u'password': u'*** SANITIZED ***',
u'username': u''},
{u'?':
{u'_26411a1861294160833743e45d0eaad9': {u'name': u'Apache Tomcat'},
u'id': u'ea6a7d9b-7799-4d00-9db3-4573cb94daec',
u'type': u'io.murano.apps.apache.Tomcat'},
u'instance':
{u'?': {u'id': u'c52dda24-38d6-4f2f-9184-abca0beaa6e9',
u'type': u'io.murano.resources.LinuxMuranoInstance'},
u'assignFloatingIp': True,
u'availabilityZone': u'nova',
u'flavor': u'm1.small',
u'image': u'66e015aa-33c5-41ff-9b81-d8d17f9b02c3',
u'keyname': u'',
u'name': u'woydqi7nn7ipc2'},
u'name': u'Tomcat'},
{u'?': {u'_26411a1861294160833743e45d0eaad9':
{u'name': u'PetClinic'},
u'id': u'fda74653-8b66-42e2-be16-12ebc87d7570',
u'type': u'io.murano.apps.java.PetClinic'},
u'database': u'769af50c-9629-4694-b623-e9b392941279',
u'dbName': u'pet_db',
u'dbPassword': u'*** SANITIZED ***',
u'dbUser': u'pet_user',
u'name': u'PetClinic',
u'tomcat': u'ea6a7d9b-7799-4d00-9db3-4573cb94daec',
u'warLocation':
u'https://dl.dropboxusercontent.com/u/1684617/petclinic.war'}]},
u'environment_id': u'ad9762b2d82f44ca8b8a6ce4a19dd1cc',
u'finished': u'2015-03-24T18:46:56',
u'id': u'4aa60b31d8ce434284e03aa13c6e11e0',
u'result': {u'isException': True,
u'result':
{u'details': u'murano.common.exceptions.TimeoutException:'
' The Agent does not respondwithin 600 seconds',
u'message': u'[murano.common.exceptions.TimeoutException]'
': The Agent does not respondwithin 600 seconds'}},
u'started': u'2015-03-24T18:36:23',
u'state': u'completed_w_errors',
u'updated': u'2015-03-24T18:46:56'})]
package_response = [
util.ResponseObj({
u'author': u'Mirantis, Inc',
u'categories': [],
u'class_definitions': [u'io.murano.apps.apache.Tomcat'],
u'created': u'2015-03-23T21:28:11',
u'description': u'Apache Tomcat is an open source software '
'implementation of the Java Servlet and JavaServer Pages '
'technologies.\n',
u'enabled': True,
u'fully_qualified_name': u'io.murano.apps.apache.Tomcat',
u'id': u'a7d64980999948dc96401cdce5ae2141',
u'is_public': False,
u'name': u'Apache Tomcat',
u'owner_id': u'610c6afc1fc54d23a58d316bf76e5f42',
u'supplier': {},
u'tags': [u'Servlets', u'Server', u'Pages', u'Java'],
u'type': u'Application',
u'updated': u'2015-03-23T21:28:11'}),
util.ResponseObj({
u'author': u'Mirantis, Inc',
u'categories': [],
u'class_definitions': [u'io.murano.apps.linux.Git'],
u'created': u'2015-03-23T21:26:56',
u'description': u'Simple Git repo hosted on Linux VM.\n',
u'enabled': True,
u'fully_qualified_name': u'io.murano.apps.linux.Git',
u'id': u'3ff58cdfeb27487fb3127fb8fd45109c',
u'is_public': False,
u'name': u'Git',
u'owner_id': u'610c6afc1fc54d23a58d316bf76e5f42',
u'supplier': {},
u'tags': [u'Linux', u'connection'],
u'type': u'Application',
u'updated': u'2015-03-23T21:26:56'}),
util.ResponseObj({
u'author': u'Mirantis, Inc',
u'categories': [],
u'class_definitions': [u'io.murano.databases.MySql'],
u'created': u'2015-03-23T21:28:58',
u'description': u'MySql is a relational database management system '
'(RDBMS), and ships with\nno GUI tools to administer MySQL databases '
'or manage data contained within\nthe databases.\n',
u'enabled': True,
u'fully_qualified_name': u'io.murano.databases.MySql',
u'id': u'884b764c0ce6439d8566b3b2da967687',
u'is_public': False,
u'name': u'MySQL',
u'owner_id': u'610c6afc1fc54d23a58d316bf76e5f42',
u'supplier': {},
u'tags': [u'Database', u'MySql', u'SQL', u'RDBMS'],
u'type': u'Application',
u'updated': u'2015-03-23T21:28:58'}),
util.ResponseObj({
u'author': u'Mirantis, Inc',
u'categories': [],
u'class_definitions': [u'io.murano.apps.java.PetClinic'],
u'created': u'2015-03-24T18:25:24',
u'description': u'An example of a Java app running on a '
'Apache Tomcat Servlet container and using the either Postgre SQL, '
'or MySql database\n',
u'enabled': True,
u'fully_qualified_name': u'io.murano.apps.java.PetClinic',
u'id': u'9f7c9e2ed8f9462a8f9037032ab64755',
u'is_public': False,
u'name': u'PetClinic',
u'owner_id': u'610c6afc1fc54d23a58d316bf76e5f42',
u'supplier': {},
u'tags': [u'Servlets', u'Server', u'Pages', u'Java'],
u'type': u'Application',
u'updated': u'2015-03-24T18:25:24'}),
util.ResponseObj({
u'author': u'Mirantis, Inc',
u'categories': [],
u'class_definitions': [u'io.murano.databases.PostgreSql'],
u'created': u'2015-03-23T21:29:10',
u'description': u'PostgreSQL is a powerful, open source '
'object-relational database system.\nIt has more than 15 years '
'of active development and a proven architecture\nthat has earned '
'it a strong reputation for reliability, data integrity,\nand '
'correctness.\n',
u'enabled': True,
u'fully_qualified_name': u'io.murano.databases.PostgreSql',
u'id': u'4b9c6a24c2e64f928156e0c87324c394',
u'is_public': False,
u'name': u'PostgreSQL',
u'owner_id': u'610c6afc1fc54d23a58d316bf76e5f42',
u'supplier': {},
u'tags': [u'Database', u'Postgre', u'SQL', u'RDBMS'],
u'type': u'Application',
u'updated': u'2015-03-23T21:29:10'}),
util.ResponseObj({
u'author': u'Mirantis, Inc',
u'categories': [],
u'class_definitions': [u'io.murano.databases.SqlDatabase'],
u'created': u'2015-03-24T18:26:32',
u'description': u'This is the interface defining API for different '
'SQL - RDBMS databases\n',
u'enabled': True,
u'fully_qualified_name': u'io.murano.databases',
u'id': u'5add5a561da341c4875495c5887957a8',
u'is_public': False,
u'name': u'SQL Library',
u'owner_id': u'610c6afc1fc54d23a58d316bf76e5f42',
u'supplier': {},
u'tags': [u'SQL', u'RDBMS'],
u'type': u'Library',
u'updated': u'2015-03-24T18:26:32'})]
action_response = 'c79eb72600024fa1995345a2b2eb3acd'
# Expected datasource table content
expected_states = [
(u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', u'deploy failure'),
]
expected_environment_parent_types = [
(u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', 'io.murano.Object'),
(u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', 'io.murano.Environment'),
]
expected_env_properties = [
(u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', u'created', '2015-03-24T18:35:14'),
(u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', u'version', 1),
(u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', u'status', 'deploy failure'),
(u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', u'name', 'quick-env-2'),
(u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', u'updated', '2015-03-24T18:46:56'),
]
expected_service_properties = [
(u'76b9ca88-c668-4e37-a830-5845adc10b0e', u'ipAddresses', '10.0.11.3'),
(u'76b9ca88-c668-4e37-a830-5845adc10b0e', u'ipAddresses', '172.24.4.4'),
(u'76b9ca88-c668-4e37-a830-5845adc10b0e',
u'networks.useFlatNetwork', False),
(u'769af50c-9629-4694-b623-e9b392941279', u'name', 'MySqlDB'),
(u'c52dda24-38d6-4f2f-9184-abca0beaa6e9',
u'networks.useEnvironmentNetwork', True),
(u'c52dda24-38d6-4f2f-9184-abca0beaa6e9',
u'floatingIpAddress', '172.24.4.4'),
(u'fda74653-8b66-42e2-be16-12ebc87d7570', u'dbPassword', 'Passw0rd.'),
(u'fda74653-8b66-42e2-be16-12ebc87d7570',
u'database', '769af50c-9629-4694-b623-e9b392941279'),
(u'fda74653-8b66-42e2-be16-12ebc87d7570',
u'tomcat', 'ea6a7d9b-7799-4d00-9db3-4573cb94daec'),
(u'fda74653-8b66-42e2-be16-12ebc87d7570', u'warLocation',
'https://dl.dropboxusercontent.com/u/1684617/petclinic.war'),
(u'c52dda24-38d6-4f2f-9184-abca0beaa6e9', u'availabilityZone', 'nova'),
(u'76b9ca88-c668-4e37-a830-5845adc10b0e', u'name', 'bcnfli7nn738y1'),
(u'fda74653-8b66-42e2-be16-12ebc87d7570', u'dbUser', 'pet_user'),
(u'c52dda24-38d6-4f2f-9184-abca0beaa6e9',
u'image', '66e015aa-33c5-41ff-9b81-d8d17f9b02c3'),
(u'76b9ca88-c668-4e37-a830-5845adc10b0e', u'flavor', 'm1.small'),
(u'c52dda24-38d6-4f2f-9184-abca0beaa6e9', u'ipAddresses', '10.0.11.4'),
(u'c52dda24-38d6-4f2f-9184-abca0beaa6e9', u'name', 'woydqi7nn7ipc2'),
(u'fda74653-8b66-42e2-be16-12ebc87d7570', u'name', 'PetClinic'),
(u'c52dda24-38d6-4f2f-9184-abca0beaa6e9', u'assignFloatingIp', True),
(u'76b9ca88-c668-4e37-a830-5845adc10b0e', u'assignFloatingIp', True),
(u'769af50c-9629-4694-b623-e9b392941279', u'password', 'Passw0rd.'),
(u'c52dda24-38d6-4f2f-9184-abca0beaa6e9', u'flavor', 'm1.small'),
(u'fda74653-8b66-42e2-be16-12ebc87d7570', u'dbName', 'pet_db'),
(u'c52dda24-38d6-4f2f-9184-abca0beaa6e9',
u'networks.useFlatNetwork', False),
(u'76b9ca88-c668-4e37-a830-5845adc10b0e',
u'networks.useEnvironmentNetwork', True),
(u'76b9ca88-c668-4e37-a830-5845adc10b0e', u'availabilityZone', 'nova'),
(u'76b9ca88-c668-4e37-a830-5845adc10b0e',
u'floatingIpAddress', '172.24.4.4'),
(u'c52dda24-38d6-4f2f-9184-abca0beaa6e9', u'ipAddresses', '172.24.4.4'),
(u'ea6a7d9b-7799-4d00-9db3-4573cb94daec', u'name', 'Tomcat'),
(u'76b9ca88-c668-4e37-a830-5845adc10b0e',
u'image', '66e015aa-33c5-41ff-9b81-d8d17f9b02c3'),
]
expected_package_properties = [
(u'4b9c6a24c2e64f928156e0c87324c394', u'is_public', False),
(u'3ff58cdfeb27487fb3127fb8fd45109c', u'tags', 'connection'),
(u'884b764c0ce6439d8566b3b2da967687', u'created', '2015-03-23T21:28:58'),
(u'884b764c0ce6439d8566b3b2da967687', u'tags', 'SQL'),
(u'9f7c9e2ed8f9462a8f9037032ab64755', u'tags', 'Servlets'),
(u'a7d64980999948dc96401cdce5ae2141', u'tags', 'Servlets'),
(u'4b9c6a24c2e64f928156e0c87324c394', u'created', '2015-03-23T21:29:10'),
(u'9f7c9e2ed8f9462a8f9037032ab64755', u'fully_qualified_name',
'io.murano.apps.java.PetClinic'),
(u'884b764c0ce6439d8566b3b2da967687', u'type', 'Application'),
(u'5add5a561da341c4875495c5887957a8', u'created', '2015-03-24T18:26:32'),
(u'884b764c0ce6439d8566b3b2da967687', u'name', 'MySQL'),
(u'884b764c0ce6439d8566b3b2da967687', u'tags', 'Database'),
(u'5add5a561da341c4875495c5887957a8', u'enabled', True),
(u'9f7c9e2ed8f9462a8f9037032ab64755', u'tags', 'Pages'),
(u'4b9c6a24c2e64f928156e0c87324c394', u'tags', 'Database'),
(u'3ff58cdfeb27487fb3127fb8fd45109c', u'type', 'Application'),
(u'5add5a561da341c4875495c5887957a8', u'type', 'Library'),
(u'4b9c6a24c2e64f928156e0c87324c394', u'type', 'Application'),
(u'884b764c0ce6439d8566b3b2da967687', u'tags', 'MySql'),
(u'5add5a561da341c4875495c5887957a8', u'fully_qualified_name',
'io.murano.databases'),
(u'3ff58cdfeb27487fb3127fb8fd45109c', u'author', 'Mirantis, Inc'),
(u'3ff58cdfeb27487fb3127fb8fd45109c', u'is_public', False),
(u'4b9c6a24c2e64f928156e0c87324c394', u'tags', 'SQL'),
(u'884b764c0ce6439d8566b3b2da967687', u'enabled', True),
(u'4b9c6a24c2e64f928156e0c87324c394', u'updated', '2015-03-23T21:29:10'),
(u'884b764c0ce6439d8566b3b2da967687', u'fully_qualified_name',
'io.murano.databases.MySql'),
(u'9f7c9e2ed8f9462a8f9037032ab64755', u'name', 'PetClinic'),
(u'4b9c6a24c2e64f928156e0c87324c394', u'fully_qualified_name',
'io.murano.databases.PostgreSql'),
(u'9f7c9e2ed8f9462a8f9037032ab64755', u'tags', 'Java'),
(u'4b9c6a24c2e64f928156e0c87324c394', u'tags', 'Postgre'),
(u'a7d64980999948dc96401cdce5ae2141', u'is_public', False),
(u'a7d64980999948dc96401cdce5ae2141', u'type', 'Application'),
(u'4b9c6a24c2e64f928156e0c87324c394', u'name', 'PostgreSQL'),
(u'3ff58cdfeb27487fb3127fb8fd45109c', u'tags', 'Linux'),
(u'9f7c9e2ed8f9462a8f9037032ab64755', u'author', 'Mirantis, Inc'),
(u'5add5a561da341c4875495c5887957a8', u'is_public', False),
(u'5add5a561da341c4875495c5887957a8', u'tags', 'SQL'),
(u'4b9c6a24c2e64f928156e0c87324c394', u'author', 'Mirantis, Inc'),
(u'5add5a561da341c4875495c5887957a8', u'class_definitions',
'io.murano.databases.SqlDatabase'),
(u'3ff58cdfeb27487fb3127fb8fd45109c', u'updated', '2015-03-23T21:26:56'),
(u'5add5a561da341c4875495c5887957a8', u'tags', 'RDBMS'),
(u'a7d64980999948dc96401cdce5ae2141', u'enabled', True),
(u'5add5a561da341c4875495c5887957a8', u'updated', '2015-03-24T18:26:32'),
(u'9f7c9e2ed8f9462a8f9037032ab64755', u'class_definitions',
'io.murano.apps.java.PetClinic'),
(u'3ff58cdfeb27487fb3127fb8fd45109c', u'enabled', True),
(u'a7d64980999948dc96401cdce5ae2141', u'class_definitions',
'io.murano.apps.apache.Tomcat'),
(u'9f7c9e2ed8f9462a8f9037032ab64755', u'created', '2015-03-24T18:25:24'),
(u'5add5a561da341c4875495c5887957a8', u'author', 'Mirantis, Inc'),
(u'9f7c9e2ed8f9462a8f9037032ab64755', u'is_public', False),
(u'884b764c0ce6439d8566b3b2da967687', u'class_definitions',
'io.murano.databases.MySql'),
(u'884b764c0ce6439d8566b3b2da967687', u'is_public', False),
(u'884b764c0ce6439d8566b3b2da967687', u'tags', 'RDBMS'),
(u'a7d64980999948dc96401cdce5ae2141', u'author', 'Mirantis, Inc'),
(u'3ff58cdfeb27487fb3127fb8fd45109c', u'name', 'Git'),
(u'a7d64980999948dc96401cdce5ae2141', u'fully_qualified_name',
'io.murano.apps.apache.Tomcat'),
(u'9f7c9e2ed8f9462a8f9037032ab64755', u'tags', 'Server'),
(u'4b9c6a24c2e64f928156e0c87324c394', u'tags', 'RDBMS'),
(u'4b9c6a24c2e64f928156e0c87324c394', u'class_definitions',
'io.murano.databases.PostgreSql'),
(u'a7d64980999948dc96401cdce5ae2141', u'tags', 'Pages'),
(u'4b9c6a24c2e64f928156e0c87324c394', u'enabled', True),
(u'a7d64980999948dc96401cdce5ae2141', u'tags', 'Server'),
(u'a7d64980999948dc96401cdce5ae2141', u'updated', '2015-03-23T21:28:11'),
(u'884b764c0ce6439d8566b3b2da967687', u'updated', '2015-03-23T21:28:58'),
(u'a7d64980999948dc96401cdce5ae2141', u'name', 'Apache Tomcat'),
(u'884b764c0ce6439d8566b3b2da967687', u'author', 'Mirantis, Inc'),
(u'9f7c9e2ed8f9462a8f9037032ab64755', u'enabled', True),
(u'a7d64980999948dc96401cdce5ae2141', u'created', '2015-03-23T21:28:11'),
(u'3ff58cdfeb27487fb3127fb8fd45109c', u'created', '2015-03-23T21:26:56'),
(u'5add5a561da341c4875495c5887957a8', u'name', 'SQL Library'),
(u'9f7c9e2ed8f9462a8f9037032ab64755', u'type', 'Application'),
(u'3ff58cdfeb27487fb3127fb8fd45109c', u'fully_qualified_name',
'io.murano.apps.linux.Git'),
(u'a7d64980999948dc96401cdce5ae2141', u'tags', 'Java'),
(u'9f7c9e2ed8f9462a8f9037032ab64755', u'updated', '2015-03-24T18:25:24'),
(u'3ff58cdfeb27487fb3127fb8fd45109c', u'class_definitions',
'io.murano.apps.linux.Git'),
]
expected_service_objects = [
(u'769af50c-9629-4694-b623-e9b392941279',
u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', u'io.murano.databases.MySql'),
(u'fda74653-8b66-42e2-be16-12ebc87d7570',
u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', u'io.murano.apps.java.PetClinic'),
(u'76b9ca88-c668-4e37-a830-5845adc10b0e',
u'769af50c-9629-4694-b623-e9b392941279',
u'io.murano.resources.LinuxMuranoInstance'),
(u'ea6a7d9b-7799-4d00-9db3-4573cb94daec',
u'ad9762b2d82f44ca8b8a6ce4a19dd1cc', u'io.murano.apps.apache.Tomcat'),
(u'c52dda24-38d6-4f2f-9184-abca0beaa6e9',
u'ea6a7d9b-7799-4d00-9db3-4573cb94daec',
u'io.murano.resources.LinuxMuranoInstance'),
]
expected_package_objects = [
(u'5add5a561da341c4875495c5887957a8',
u'610c6afc1fc54d23a58d316bf76e5f42', u'Library'),
(u'4b9c6a24c2e64f928156e0c87324c394',
u'610c6afc1fc54d23a58d316bf76e5f42', 'io.murano.Application'),
(u'3ff58cdfeb27487fb3127fb8fd45109c',
u'610c6afc1fc54d23a58d316bf76e5f42', 'io.murano.Application'),
(u'a7d64980999948dc96401cdce5ae2141',
u'610c6afc1fc54d23a58d316bf76e5f42', 'io.murano.Application'),
(u'9f7c9e2ed8f9462a8f9037032ab64755',
u'610c6afc1fc54d23a58d316bf76e5f42', 'io.murano.Application'),
(u'884b764c0ce6439d8566b3b2da967687',
u'610c6afc1fc54d23a58d316bf76e5f42', 'io.murano.Application'),
]
expected_service_parent_types = [
(u'76b9ca88-c668-4e37-a830-5845adc10b0e', 'io.murano.resources.Instance'),
(u'76b9ca88-c668-4e37-a830-5845adc10b0e',
'io.murano.resources.LinuxInstance'),
(u'76b9ca88-c668-4e37-a830-5845adc10b0e', 'io.murano.Object'),
(u'76b9ca88-c668-4e37-a830-5845adc10b0e',
'io.murano.resources.LinuxMuranoInstance'),
(u'c52dda24-38d6-4f2f-9184-abca0beaa6e9',
'io.murano.resources.LinuxInstance'),
(u'c52dda24-38d6-4f2f-9184-abca0beaa6e9',
'io.murano.resources.LinuxMuranoInstance'),
(u'c52dda24-38d6-4f2f-9184-abca0beaa6e9', 'io.murano.Object'),
(u'c52dda24-38d6-4f2f-9184-abca0beaa6e9', 'io.murano.resources.Instance'),
]
expected_service_relationships = [
(u'fda74653-8b66-42e2-be16-12ebc87d7570',
u'769af50c-9629-4694-b623-e9b392941279', u'database'),
(u'ad9762b2d82f44ca8b8a6ce4a19dd1cc',
u'ea6a7d9b-7799-4d00-9db3-4573cb94daec', 'services'),
(u'fda74653-8b66-42e2-be16-12ebc87d7570',
u'ea6a7d9b-7799-4d00-9db3-4573cb94daec', u'tomcat'),
(u'ad9762b2d82f44ca8b8a6ce4a19dd1cc',
u'769af50c-9629-4694-b623-e9b392941279', 'services'),
(u'ad9762b2d82f44ca8b8a6ce4a19dd1cc',
u'fda74653-8b66-42e2-be16-12ebc87d7570', 'services'),
(u'769af50c-9629-4694-b623-e9b392941279',
u'76b9ca88-c668-4e37-a830-5845adc10b0e', 'instance'),
(u'ea6a7d9b-7799-4d00-9db3-4573cb94daec',
u'c52dda24-38d6-4f2f-9184-abca0beaa6e9', 'instance'),
]
expected_connected = [
(u'fda74653-8b66-42e2-be16-12ebc87d7570',
u'ea6a7d9b-7799-4d00-9db3-4573cb94daec'),
(u'fda74653-8b66-42e2-be16-12ebc87d7570',
u'c52dda24-38d6-4f2f-9184-abca0beaa6e9'),
(u'ad9762b2d82f44ca8b8a6ce4a19dd1cc',
u'769af50c-9629-4694-b623-e9b392941279'),
(u'ad9762b2d82f44ca8b8a6ce4a19dd1cc',
u'ea6a7d9b-7799-4d00-9db3-4573cb94daec'),
(u'769af50c-9629-4694-b623-e9b392941279',
u'76b9ca88-c668-4e37-a830-5845adc10b0e'),
(u'ad9762b2d82f44ca8b8a6ce4a19dd1cc',
u'fda74653-8b66-42e2-be16-12ebc87d7570'),
(u'fda74653-8b66-42e2-be16-12ebc87d7570',
u'769af50c-9629-4694-b623-e9b392941279'),
(u'fda74653-8b66-42e2-be16-12ebc87d7570',
u'76b9ca88-c668-4e37-a830-5845adc10b0e'),
(u'ad9762b2d82f44ca8b8a6ce4a19dd1cc',
u'76b9ca88-c668-4e37-a830-5845adc10b0e'),
(u'ea6a7d9b-7799-4d00-9db3-4573cb94daec',
u'c52dda24-38d6-4f2f-9184-abca0beaa6e9'),
(u'ad9762b2d82f44ca8b8a6ce4a19dd1cc',
u'c52dda24-38d6-4f2f-9184-abca0beaa6e9'),
]
expected_deployment_objects = [
(u'a2be8265b01743c0bdf645772d632bf0', u'ad9762b2d82f44ca8b8a6ce4a19dd1cc',
u'io.murano.resources.NeutronNetwork')
]
expected_deployment_properties = [
(u'a2be8265b01743c0bdf645772d632bf0', u'name', 'quick-env-2-network')
]
expected_deployment_parent_types = [
(u'a2be8265b01743c0bdf645772d632bf0', 'io.murano.Object'),
(u'a2be8265b01743c0bdf645772d632bf0', 'io.murano.resources.Network'),
(u'a2be8265b01743c0bdf645772d632bf0', 'io.murano.resources.NeutronNetwork')
]
| 754 | 10,379 | 23 |
8f4460076ef2873e960a811dd0517945cbe48b83 | 185 | py | Python | adamacs/__init__.py | datajoint-company/adamacs | 95b1d3bd928f321ad63d6e25ac4b16a0a0771c6b | [
"MIT"
] | 1 | 2021-11-09T12:15:52.000Z | 2021-11-09T12:15:52.000Z | adamacs/__init__.py | datajoint-company/adamacs | 95b1d3bd928f321ad63d6e25ac4b16a0a0771c6b | [
"MIT"
] | 14 | 2021-10-06T10:07:04.000Z | 2022-03-07T15:42:10.000Z | adamacs/__init__.py | datajoint-company/adamacs | 95b1d3bd928f321ad63d6e25ac4b16a0a0771c6b | [
"MIT"
] | 1 | 2022-02-08T23:01:59.000Z | 2022-02-08T23:01:59.000Z | import datajoint as dj
# Default schema-name prefix applied when the DataJoint config has no override.
default_prefix = 'adamacs_'
# Ensure the 'custom' section exists in the DataJoint config before reading it.
if 'custom' not in dj.config:
    dj.config['custom'] = {}
# Resolve the database prefix: dj.config['custom']['database.prefix'] wins,
# otherwise fall back to the 'adamacs_' default.
db_prefix = dj.config['custom'].get('database.prefix', default_prefix)
| 18.5 | 70 | 0.702703 | import datajoint as dj
# Fallback prefix for schema names when no user setting is present.
default_prefix = 'adamacs_'
# Create the 'custom' config section if this is the first time it is accessed.
if 'custom' not in dj.config:
    dj.config['custom'] = {}
# Read the configured prefix, defaulting to 'adamacs_' when unset.
db_prefix = dj.config['custom'].get('database.prefix', default_prefix)
| 0 | 0 | 0 |
18e39a6e00add946d0d9b67a23bb2109b0b685d3 | 381 | py | Python | tests/test_version.py | siddhpant/pypi-mobans | 5219c14255c3c4350408ffea64af1ae6849f3e82 | [
"BSD-3-Clause"
] | null | null | null | tests/test_version.py | siddhpant/pypi-mobans | 5219c14255c3c4350408ffea64af1ae6849f3e82 | [
"BSD-3-Clause"
] | null | null | null | tests/test_version.py | siddhpant/pypi-mobans | 5219c14255c3c4350408ffea64af1ae6849f3e82 | [
"BSD-3-Clause"
] | null | null | null | from test_utils import get_rendered_file
| 25.4 | 51 | 0.645669 | from test_utils import get_rendered_file
def test_version():
    """Render _version.py.jj2 and check that used context keys appear
    in the output while unused keys ('something_else') do not."""
    template_context = {
        'author': 'author_name',
        'version': '0.0.1',
        'something_else': 'hello world',
    }
    output = get_rendered_file('_version.py.jj2', template_context)
    for expected_fragment in ('author_name', '0.0.1'):
        assert expected_fragment in output
    assert 'hello world' not in output
294ad5f2f15fda3e84177e992cf1266917250719 | 5,873 | py | Python | lib/python3.4/site-packages/anymail/backends/base_requests.py | levabd/smart4-portal | 740cae69ab16733fc26a81034640c2956edae014 | [
"MIT"
] | null | null | null | lib/python3.4/site-packages/anymail/backends/base_requests.py | levabd/smart4-portal | 740cae69ab16733fc26a81034640c2956edae014 | [
"MIT"
] | 6 | 2020-06-05T18:44:19.000Z | 2022-01-13T00:48:56.000Z | lib/python3.4/site-packages/anymail/backends/base_requests.py | levabd/smart4-portal | 740cae69ab16733fc26a81034640c2956edae014 | [
"MIT"
] | null | null | null | import json
import requests
# noinspection PyUnresolvedReferences
from six.moves.urllib.parse import urljoin
from .base import AnymailBaseBackend, BasePayload
from ..exceptions import AnymailRequestsAPIError, AnymailSerializationError
from .._version import __version__
class AnymailRequestsBackend(AnymailBaseBackend):
    """
    Base Anymail email backend for ESPs that use an HTTP API via requests
    """

    def __init__(self, api_url, **kwargs):
        """Init options from Django settings"""
        # Base URL of the ESP's HTTP API; payloads may join an endpoint onto it.
        self.api_url = api_url
        super(AnymailRequestsBackend, self).__init__(**kwargs)
        # Shared requests.Session for API calls; None until opened
        # (session management is not shown in this excerpt).
        self.session = None

    def post_to_esp(self, payload, message):
        """Post payload to ESP send API endpoint, and return the raw response.

        payload is the result of build_message_payload
        message is the original EmailMessage
        return should be a requests.Response

        Can raise AnymailRequestsAPIError for HTTP errors in the post
        """
        params = payload.get_request_params(self.api_url)
        try:
            response = self.session.request(**params)
        except requests.RequestException as err:
            # raise an exception that is both AnymailRequestsAPIError
            # and the original requests exception type
            # (built dynamically via type() so callers can catch either)
            exc_class = type('AnymailRequestsAPIError', (AnymailRequestsAPIError, type(err)), {})
            raise exc_class(
                "Error posting to %s:" % params.get('url', '<missing url>'),
                raised_from=err, email_message=message, payload=payload)
        self.raise_for_status(response, payload, message)
        return response

    def raise_for_status(self, response, payload, message):
        """Raise AnymailRequestsAPIError if response is an HTTP error

        Subclasses can override for custom error checking
        (though should defer parsing/deserialization of the body to
        parse_recipient_status)
        """
        # NOTE: anything other than exactly 200 (including other 2xx codes)
        # is treated as an error here; subclasses override when their ESP
        # returns different success codes.
        if response.status_code != 200:
            raise AnymailRequestsAPIError(email_message=message, payload=payload, response=response)

    def deserialize_json_response(self, response, payload, message):
        """Deserialize an ESP API response that's in json.

        Useful for implementing deserialize_response
        """
        try:
            return response.json()
        except ValueError:
            # requests raises ValueError when the body is not valid JSON
            raise AnymailRequestsAPIError("Invalid JSON in %s API response" % self.esp_name,
                                          email_message=message, payload=payload, response=response)
class RequestsPayload(BasePayload):
    """Abstract Payload for AnymailRequestsBackend"""
    # self.method, self.params, self.data, self.headers, self.files and
    # self.auth are instance attributes set by the constructor (the
    # constructor is not visible in this excerpt) -- TODO confirm against
    # the full source.

    def get_request_params(self, api_url):
        """Returns a dict of requests.request params that will send payload to the ESP.

        :param api_url: the base api_url for the backend
        :return: dict
        """
        api_endpoint = self.get_api_endpoint()
        if api_endpoint is not None:
            # Join the payload-specific endpoint onto the backend's base URL
            url = urljoin(api_url, api_endpoint)
        else:
            url = api_url

        return dict(
            method=self.method,
            url=url,
            params=self.params,
            data=self.serialize_data(),
            headers=self.headers,
            files=self.files,
            auth=self.auth,
            # json= is not here, because we prefer to do our own serialization
            # to provide extra context in error messages
        )

    def get_api_endpoint(self):
        """Returns a str that should be joined to the backend's api_url for sending this payload."""
        # Default: post directly to the backend's base api_url.
        return None

    def serialize_data(self):
        """Performs any necessary serialization on self.data, and returns the result."""
        # Default: no transformation.
        return self.data

    def serialize_json(self, data):
        """Returns data serialized to json, raising appropriate errors.

        Useful for implementing serialize_data in a subclass,
        """
        try:
            return json.dumps(data)
        except TypeError as err:
            # Add some context to the "not JSON serializable" message
            raise AnymailSerializationError(orig_err=err, email_message=self.message,
                                            backend=self.backend, payload=self)
| 36.937107 | 100 | 0.626596 | import json
import requests
# noinspection PyUnresolvedReferences
from six.moves.urllib.parse import urljoin
from .base import AnymailBaseBackend, BasePayload
from ..exceptions import AnymailRequestsAPIError, AnymailSerializationError
from .._version import __version__
class AnymailRequestsBackend(AnymailBaseBackend):
    """
    Base Anymail email backend for ESPs that use an HTTP API via requests
    """

    def __init__(self, api_url, **kwargs):
        """Init options from Django settings"""
        # Base URL of the ESP's HTTP API; RequestsPayload.get_request_params
        # may join a per-payload endpoint onto it.
        self.api_url = api_url
        super(AnymailRequestsBackend, self).__init__(**kwargs)
        # Shared requests.Session, created in open() and destroyed in close().
        self.session = None

    def open(self):
        # Django email-backend API: establish the "connection".
        # Returns False when a session already exists, True after creating one.
        if self.session:
            return False  # already exists

        try:
            self.session = requests.Session()
        except requests.RequestException:
            # Honor Django's fail_silently contract: swallow the error
            # unless the caller asked to see failures.
            if not self.fail_silently:
                raise
        else:
            # Prefix the default User-Agent with an Anymail identifier,
            # e.g. "django-anymail/<version>-<esp> <original UA>"
            self.session.headers["User-Agent"] = "django-anymail/{version}-{esp} {orig}".format(
                esp=self.esp_name.lower(), version=__version__,
                orig=self.session.headers.get("User-Agent", ""))
            return True

    def close(self):
        # Django email-backend API: tear down the "connection".
        if self.session is None:
            return
        try:
            self.session.close()
        except requests.RequestException:
            if not self.fail_silently:
                raise
        finally:
            # Always drop the reference so a later open() starts a fresh session.
            self.session = None

    def _send(self, message):
        # Guard against subclass/caller errors that bypass open().
        if self.session is None:
            class_name = self.__class__.__name__
            raise RuntimeError(
                "Session has not been opened in {class_name}._send. "
                "(This is either an implementation error in {class_name}, "
                "or you are incorrectly calling _send directly.)".format(class_name=class_name))
        return super(AnymailRequestsBackend, self)._send(message)

    def post_to_esp(self, payload, message):
        """Post payload to ESP send API endpoint, and return the raw response.

        payload is the result of build_message_payload
        message is the original EmailMessage
        return should be a requests.Response

        Can raise AnymailRequestsAPIError for HTTP errors in the post
        """
        params = payload.get_request_params(self.api_url)
        try:
            response = self.session.request(**params)
        except requests.RequestException as err:
            # raise an exception that is both AnymailRequestsAPIError
            # and the original requests exception type
            # (built dynamically via type() so callers can catch either)
            exc_class = type('AnymailRequestsAPIError', (AnymailRequestsAPIError, type(err)), {})
            raise exc_class(
                "Error posting to %s:" % params.get('url', '<missing url>'),
                raised_from=err, email_message=message, payload=payload)
        self.raise_for_status(response, payload, message)
        return response

    def raise_for_status(self, response, payload, message):
        """Raise AnymailRequestsAPIError if response is an HTTP error

        Subclasses can override for custom error checking
        (though should defer parsing/deserialization of the body to
        parse_recipient_status)
        """
        # NOTE: anything other than exactly 200 (including other 2xx codes)
        # is treated as an error; ESP-specific subclasses override as needed.
        if response.status_code != 200:
            raise AnymailRequestsAPIError(email_message=message, payload=payload, response=response)

    def deserialize_json_response(self, response, payload, message):
        """Deserialize an ESP API response that's in json.

        Useful for implementing deserialize_response
        """
        try:
            return response.json()
        except ValueError:
            # requests raises ValueError when the body is not valid JSON
            raise AnymailRequestsAPIError("Invalid JSON in %s API response" % self.esp_name,
                                          email_message=message, payload=payload, response=response)
class RequestsPayload(BasePayload):
    """Abstract Payload for AnymailRequestsBackend"""

    def __init__(self, message, defaults, backend,
                 method="POST", params=None, data=None,
                 headers=None, files=None, auth=None):
        # Capture the raw HTTP-request pieces before BasePayload's own
        # initialization runs (it may populate them further).
        for attr_name, attr_value in (("method", method), ("params", params),
                                      ("data", data), ("headers", headers),
                                      ("files", files), ("auth", auth)):
            setattr(self, attr_name, attr_value)
        super(RequestsPayload, self).__init__(message, defaults, backend)

    def get_request_params(self, api_url):
        """Returns a dict of requests.request params that will send payload to the ESP.

        :param api_url: the base api_url for the backend
        :return: dict
        """
        endpoint = self.get_api_endpoint()
        target_url = api_url if endpoint is None else urljoin(api_url, endpoint)
        # Deliberately no json= entry: serialization is done by
        # serialize_data so errors can carry extra context.
        return {
            "method": self.method,
            "url": target_url,
            "params": self.params,
            "data": self.serialize_data(),
            "headers": self.headers,
            "files": self.files,
            "auth": self.auth,
        }

    def get_api_endpoint(self):
        """Returns a str that should be joined to the backend's api_url for sending this payload."""
        return None  # default: post straight to the backend's api_url

    def serialize_data(self):
        """Performs any necessary serialization on self.data, and returns the result."""
        return self.data  # default: pass through unchanged

    def serialize_json(self, data):
        """Returns data serialized to json, raising appropriate errors.

        Useful for implementing serialize_data in a subclass,
        """
        try:
            serialized = json.dumps(data)
        except TypeError as err:
            # Wrap the bare "not JSON serializable" TypeError with
            # message/backend/payload context for better diagnostics.
            raise AnymailSerializationError(orig_err=err, email_message=self.message,
                                            backend=self.backend, payload=self)
        return serialized
| 1,537 | 0 | 108 |
bbc9da079caef5a83fd53beb7fa6223f16b3a411 | 11,453 | py | Python | com/code/lxb/example/MysqlUtil.py | albert-bing/quantitativeTrading | c3a96d895aad3e1c728692200a68384682632f64 | [
"MIT"
] | null | null | null | com/code/lxb/example/MysqlUtil.py | albert-bing/quantitativeTrading | c3a96d895aad3e1c728692200a68384682632f64 | [
"MIT"
] | null | null | null | com/code/lxb/example/MysqlUtil.py | albert-bing/quantitativeTrading | c3a96d895aad3e1c728692200a68384682632f64 | [
"MIT"
] | null | null | null | # @Team:Big Data Group
# @Time:2020/7/6 16:10
# @Author:albert·bing
# @File:MysqlUtil.py
# @Software:PyCharm
# start your code
import pymysql
# Test-environment database host
host = '81.70.166.101'
# Production host (switch by uncommenting):
# host='172.21.0.49'
# NOTE(review): database credentials are hard-coded in source; consider
# loading them from environment variables or a config file instead.
password = 'r1kJzB'
port = 3306
# Insert almanac (yellow calendar) data
# Query dates
# Insert daily horoscope data
# Insert weekly horoscope data
# Insert monthly horoscope data
# Insert yearly horoscope data
# Insert horoscope detail lookup-table data
# Insert calendar
# Insert today's epidemic status --- domestic
# Insert today's epidemic status --- overseas
# Insert domestic epidemic history data
# Insert overseas epidemic history data
# Insert current-day epidemic data for domestic provinces/cities
# Prepend an area_id to each day's data
# Insert epidemic residential-community data
# Insert imported-from-abroad case data
# Get county/district information
| 42.106618 | 163 | 0.660962 | # @Team:Big Data Group
# @Time:2020/7/6 16:10
# @Author:albert·bing
# @File:MysqlUtil.py
# @Software:PyCharm
# start your code
import pymysql

# Test-environment database host
host = '81.70.166.101'
# Production host (switch by uncommenting):
# host='172.21.0.49'
# NOTE(review): credentials are hard-coded in source; consider env/config.
password = 'r1kJzB'
port = 3306


def _connect(database):
    """Open a pymysql connection to *database* using the module credentials."""
    return pymysql.connect(host=host, user='root', password=password,
                           port=port, db=database)


def _run_write(database, sql, data=None, many=False):
    """Execute a write statement, commit, and always close cursor/connection.

    data=None  -> plain execute(sql)
    many=False -> execute(sql, data) for a single row
    many=True  -> executemany(sql, data) for a sequence of rows

    The original functions leaked the connection when execute raised;
    try/finally guarantees cleanup here.
    """
    db = _connect(database)
    try:
        cursor = db.cursor()
        try:
            if data is None:
                cursor.execute(sql)
            elif many:
                cursor.executemany(sql, data)
            else:
                cursor.execute(sql, data)
        finally:
            cursor.close()
        db.commit()
    finally:
        db.close()


def _run_query(database, sql, args=None):
    """Execute a SELECT and return cursor.fetchall(); always closes the connection."""
    db = _connect(database)
    try:
        cursor = db.cursor()
        try:
            cursor.execute(sql, args)
            return cursor.fetchall()
        finally:
            cursor.close()
    finally:
        db.close()


def insert_data_yellow_calendar(data):
    """Bulk-insert almanac (yellow calendar) rows into traffic.date_yellow_calendar."""
    sql = ('insert into date_yellow_calendar(`y_day`,`gregorian_calendar`,`lunar_calendar`,`dao`,`start`,`yi`,`ji`,'
           '`chong`,`suici`,`tai`,`wuxing`,`cai`,`xi`,`fu`,`constellation`,`chinese_zodiac`,`xiongshen`,`jishen`) '
           'values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)')
    _run_write('traffic', sql, data, many=True)
    print("mysql-插入成功!\n")


def select_data_date(start_date, end_date):
    """Return all y_date values in [start_date, end_date], ordered ascending.

    Security fix: the original concatenated caller input straight into the
    SQL string (SQL injection); this version uses parameterized placeholders.
    """
    sql = ("SELECT y_date from date_calendar where y_date <= %s "
           "and y_date >= %s ORDER BY y_date")
    return _run_query('traffic', sql, (end_date, start_date))


def insert_data_cons_day(data):
    """Insert one daily-horoscope row into traffic.date_constellation_info_day."""
    sql = ('insert into date_constellation_info_day(`constellation`,`con_date`,`com_fortune_index`,'
           '`love_fortune_index`,`career_index`,`wealth_index`,`health_index`,`negotiation_index`,'
           '`lucky_color`,`lucky_number`,`speed_dating_constellation`,`short_comment`,`com_fortune`,'
           '`love_fortune`,`career_fortune`,`wealth_fortune`,`health_fortune`) '
           'values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);')
    _run_write('traffic', sql, data)
    print("mysql-插入成功!\n")


def insert_data_cons_week(data):
    """Insert one weekly-horoscope row into traffic.date_constellation_info_wmy."""
    sql = ('insert into date_constellation_info_wmy(`constellation`,`con_date`,`com_fortune_index`,'
           '`love_fortune_index`,`career_index`,`wealth_index`,`health_index`,`lucky_color`,'
           '`lucky_constellation`,`beware_constellation`,`short_comment`,`com_fortune`,`love_fortune`,'
           '`career_fortune`,`wealth_fortune`,`health_fortune`,`date_level`) '
           'values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);')
    _run_write('traffic', sql, data)
    print("mysql-插入成功!\n")


def insert_data_cons_month(data):
    """Insert one monthly-horoscope row into traffic.date_constellation_info_wmy."""
    sql = ('insert into date_constellation_info_wmy(`constellation`,`con_date`,`com_fortune_index`,'
           '`love_fortune_index`,`career_index`,`wealth_index`,`health_index`,`short_comment`,'
           '`com_fortune`,`love_fortune`,`career_fortune`,`wealth_fortune`,`health_fortune`,'
           '`reduced_pressure`,`get_luck_way`,`date_level`) '
           'values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);')
    _run_write('traffic', sql, data)
    print("mysql-插入成功!\n")


def insert_data_cons_year(data):
    """Insert one yearly-horoscope row into traffic.date_constellation_info_wmy."""
    sql = ('insert into date_constellation_info_wmy(`constellation`,`con_date`,`com_fortune_index`,'
           '`love_fortune_index`,`career_index`,`wealth_index`,`health_index`,`short_comment`,'
           '`com_fortune`,`love_fortune`,`career_fortune`,`wealth_fortune`,`health_fortune`,'
           '`get_luck_way`,`date_level`) '
           'values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);')
    _run_write('traffic', sql, data)
    print("mysql-插入成功!\n")


def insert_data_constellation_detail_info(data):
    """Insert one constellation lookup row into traffic.date_constellation_detail_info."""
    sql = ('insert into date_constellation_detail_info(`constellation`,`date_range`,`cons_features`,'
           '`four_image_attributes`,`palace`,`yin_yang_attributes`,`biggest_features`,`supervisor_plant`,'
           '`lucky_color`,`auspicious_items`,`lucky_number`,`lucky_metal`,`performance`,`advantage`,'
           '`disadvantage`,`basic_traits`,`specific_traits`,`acting_style`,`blind_spot`,`summary`) '
           'values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);')
    _run_write('traffic', sql, data)
    print("mysql-插入成功!\n")


def insert_data_calendar(data):
    """Bulk-insert calendar rows into traffic.date_calendar."""
    sql = ('insert into date_calendar(`y_date`,`lunar`,`week`,`solar_terms`,`gregorian_calendar`) '
           'values (%s,%s,%s,%s,%s);')
    _run_write('traffic', sql, data, many=True)
    print("mysql-插入成功!\n")


def insert_current_epidemic_internal(data):
    """Upsert today's domestic epidemic snapshot into epidemic.epi_current_detail."""
    sql = ('REPLACE INTO epi_current_detail(`date_today`,`curr_time`,`existing_diagnosis`,'
           '`ed_compare_yesterday`,`asymptomatic`,`at_compare_yesterday`,`suspected`,'
           '`se_compare_yesterday`,`existing_critical_illness`, `eci_compare_yesterday`,'
           '`cumulative_diagnosis`,`cdi_compare_yesterday`,`import_abroadz`,`ia_compare_yesterday`,'
           '`cumulative_cure`,`cc_compare_yesterday`,`cumulative_deaths`,`cde_compare_yesterday`,'
           '`foreign_or_internal`,`create_time`,`update_time`)'
           'VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);')
    _run_write('epidemic', sql, data)
    print("mysql-插入成功!\n")


def insert_current_epidemic_foreign(data):
    """Upsert today's overseas epidemic snapshot into epidemic.epi_current_detail."""
    sql = ('REPLACE INTO epi_current_detail(`date_today`,`curr_time`,`existing_diagnosis`,'
           '`ed_compare_yesterday`,`cumulative_diagnosis`,`cdi_compare_yesterday`,'
           '`cumulative_cure`,`cc_compare_yesterday`,`cumulative_deaths`,`cde_compare_yesterday`,'
           '`foreign_or_internal`,`create_time`,`update_time`)'
           'VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);')
    _run_write('epidemic', sql, data)
    print("mysql-插入成功!\n")


def insert_internal_province_data(data):
    """Bulk-upsert historical domestic province/city rows into epidemic.epi_internal."""
    sql = ('REPLACE INTO epi_internal(`date_today`,`province_name`,`city_name`,`cumulative_diagnosis`,'
           '`cumulative_cure`,`cumulative_deaths`,`new_add`,`existing_diagnosis`,`create_time`,`update_time`) '
           'VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);')
    _run_write('epidemic', sql, data, many=True)
    print("mysql-插入成功!\n")


def insert_foreign_data(data):
    """Bulk-upsert historical per-country rows into epidemic.epi_foreign."""
    sql = ('REPLACE INTO epi_foreign(`date_today`,`country_name`,`cumulative_diagnosis`,'
           '`cumulative_cure`,`cumulative_deaths`,`new_add`,`existing_diagnosis`,`create_time`,`update_time`) '
           'VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s);')
    _run_write('epidemic', sql, data, many=True)
    print("mysql-插入成功!\n")


def insert_internal_cur_day_data(data):
    """Bulk-upsert today's domestic province/city rows into epidemic.epi_internal."""
    sql = ('REPLACE INTO epi_internal(`date_today`,`province_name`,`city_name`,`new_add`,'
           '`existing_diagnosis`,`cumulative_diagnosis`,`cumulative_cure`,`cumulative_deaths`,'
           '`create_time`,`update_time`) '
           'VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);')
    _run_write('epidemic', sql, data, many=True)
    print("mysql-插入成功--国内省数据!\n")


def insert_internal_cur_day_data_add_areaId():
    """Rebuild epi_internal_dim by joining epi_internal with pro_city_area_dim
    to attach an area_id to each daily row."""
    sql = ('REPLACE INTO epi_internal_dim ( `area_id`, `date_today`, `province_name`, `city_name`, `new_add`, '
           '`existing_diagnosis`, `cumulative_diagnosis`, `cumulative_cure`, `cumulative_deaths`, `create_time`,'
           ' `update_time` ) SELECT dim.area_id, epi.date_today, epi.province_name, epi.city_name, epi.new_add,'
           ' epi.existing_diagnosis, epi.cumulative_diagnosis, epi.cumulative_cure, epi.cumulative_deaths, epi.create_time,'
           ' epi.update_time'
           ' FROM epi_internal epi LEFT JOIN pro_city_area_dim dim '
           'ON epi.province_name = dim.province AND epi.city_name = dim.area')
    _run_write('epidemic', sql)
    print("mysql-插入成功!\n")


def insert_community_data(data):
    """Bulk-upsert residential-community case rows into epidemic.epi_community
    (location column is built from WKT via ST_GEOMFROMTEXT)."""
    sql = ('REPLACE INTO epi_community(`date_today`,`province`,`city`,`district`,`street`,`middle_address`,'
           '`community`,`show_address`,`full_address`,`lng`,`lat`,`cnt_sum_certain`,`release_date`,'
           '`create_time`,`update_time`,`location`) '
           'VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,ST_GEOMFROMTEXT (%s));')
    _run_write('epidemic', sql, data, many=True)
    print("mysql-插入成功!\n")


def insert_import_abroad(data):
    """Bulk-upsert imported-from-abroad rows into epidemic.epi_import_abroad."""
    sql = ('REPLACE INTO epi_import_abroad(`date_today`,`province_name`,`class_name`,`new_add`,'
           '`existing_diagnosis`,`cumulative_diagnosis`,`cumulative_cure`,`cumulative_deaths`,'
           '`create_time`,`update_time`) '
           'VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);')
    _run_write('epidemic', sql, data, many=True)
    print("mysql-插入成功--境外输入数据!\n")


def select_area():
    """Return all county/district names from epidemic.pro_city_area_dim."""
    return _run_query('epidemic', 'SELECT area from epidemic.pro_city_area_dim;')
| 11,487 | 0 | 374 |
19d688ebe4e57b3a6fa4d49d7606c0c9844d3728 | 1,349 | py | Python | ThirdParty/pybluez2-macos_fix/examples/advanced/l2-unreliable-client.py | zhaocy14/SmartWalker | b025a7b4a2b305838a22fe4e6116ddb951c4d7bf | [
"MIT"
] | 2 | 2021-11-13T14:16:06.000Z | 2022-01-12T06:07:32.000Z | ThirdParty/pybluez-master/examples/advanced/l2-unreliable-client.py | zhaocy14/SmartWalker | b025a7b4a2b305838a22fe4e6116ddb951c4d7bf | [
"MIT"
] | null | null | null | ThirdParty/pybluez-master/examples/advanced/l2-unreliable-client.py | zhaocy14/SmartWalker | b025a7b4a2b305838a22fe4e6116ddb951c4d7bf | [
"MIT"
] | 3 | 2021-08-30T04:40:39.000Z | 2022-01-09T11:34:04.000Z | #!/usr/bin/env python3
"""PyBluez advanced example l2-unreliable-client.py"""
import sys
import bluetooth
import bluetooth._bluetooth as bluez # low level bluetooth wrappers
# Create the client socket
sock = bluetooth.BluetoothSocket(bluetooth.L2CAP)
if len(sys.argv) < 4:
print("Usage: l2-unreliable-client.py <addr> <timeout> <num_packets>")
print(" address - device that l2-unreliable-server is running on")
print(" timeout - wait timeout * 0.625ms before dropping unACK'd packets")
print(" num_packets - number of 627-byte packets to send on connect")
sys.exit(2)
bt_addr = sys.argv[1]
timeout = int(sys.argv[2])
num_packets = int(sys.argv[3])
print("Trying to connect to {}:1001...".format(bt_addr))
port = 0x1001
sock.connect((bt_addr, port))
print("Connected. Adjusting link parameters.")
print("Current flush timeout is {} ms.".format(
bluetooth.read_flush_timeout(bt_addr)))
try:
bluetooth.write_flush_timeout(bt_addr, timeout)
except bluez.error as e:
print("Error setting flush timeout. Are you sure you're superuser?")
print(e)
sys.exit(1)
print("New flush timeout is {} ms.".format(
bluetooth.read_flush_timeout(bt_addr)))
totalsent = 0
for i in range(num_packets):
pkt = "0" * 672
totalsent += sock.send(pkt)
print("Sent {} bytes total.".format(totalsent))
sock.close()
| 29.326087 | 79 | 0.713862 | #!/usr/bin/env python3
"""PyBluez advanced example l2-unreliable-client.py"""
import sys
import bluetooth
import bluetooth._bluetooth as bluez # low level bluetooth wrappers
# Create the client socket
sock = bluetooth.BluetoothSocket(bluetooth.L2CAP)
if len(sys.argv) < 4:
print("Usage: l2-unreliable-client.py <addr> <timeout> <num_packets>")
print(" address - device that l2-unreliable-server is running on")
print(" timeout - wait timeout * 0.625ms before dropping unACK'd packets")
print(" num_packets - number of 627-byte packets to send on connect")
sys.exit(2)
bt_addr = sys.argv[1]
timeout = int(sys.argv[2])
num_packets = int(sys.argv[3])
print("Trying to connect to {}:1001...".format(bt_addr))
port = 0x1001
sock.connect((bt_addr, port))
print("Connected. Adjusting link parameters.")
print("Current flush timeout is {} ms.".format(
bluetooth.read_flush_timeout(bt_addr)))
try:
bluetooth.write_flush_timeout(bt_addr, timeout)
except bluez.error as e:
print("Error setting flush timeout. Are you sure you're superuser?")
print(e)
sys.exit(1)
print("New flush timeout is {} ms.".format(
bluetooth.read_flush_timeout(bt_addr)))
totalsent = 0
for i in range(num_packets):
pkt = "0" * 672
totalsent += sock.send(pkt)
print("Sent {} bytes total.".format(totalsent))
sock.close()
| 0 | 0 | 0 |
13a84437be2b59b0a0cbf1ccc83f4d2497dd1e50 | 14,328 | py | Python | src/gluonts/model/npts/_predictor.py | lucienwerner/gluon-ts | fd6ed9478242bb8519ea3eb92c7043a67935fbaf | [
"Apache-2.0"
] | 1 | 2020-11-30T18:05:24.000Z | 2020-11-30T18:05:24.000Z | src/gluonts/model/npts/_predictor.py | emited/gluon-ts | ee403b0d488df5dd54de6e1b15837b6f9517b9e2 | [
"Apache-2.0"
] | null | null | null | src/gluonts/model/npts/_predictor.py | emited/gluon-ts | ee403b0d488df5dd54de6e1b15837b6f9517b9e2 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from enum import Enum
from typing import Iterator, List, Optional, Tuple, Union, Any
# Third-party imports
import numpy as np
import pandas as pd
# First-party imports
from gluonts.core.component import validated
from gluonts.core.exception import GluonTSDataError
from gluonts.dataset.common import Dataset
from gluonts.model.forecast import SampleForecast
from gluonts.model.predictor import RepresentablePredictor
from gluonts.time_feature import time_features_from_frequency_str
# Relative imports
from ._model import NPTS
class NPTSPredictor(RepresentablePredictor):
r"""
Implementation of Non-Parametric Time Series Forecaster.
Forecasts of NPTS for time step :math:`T` are one of the previous values
of the time series (these could be known values or predictions), sampled
according to the (un-normalized) distribution :math:`q_T(t) > 0`, where
:math:`0 <= t < T`.
The distribution :math:`q_T` is expressed in terms of a feature map
:math:`f(t)` which associates a time step :math:`t` with a
:math:`D`-dimensional feature map :math:`[f_1(t), ..., f_D(t)]`. More
details on the feature map can be found below.
We offer two types of distribution kernels.
**Exponential Kernel (NPTS Forecaster)**
The sampling distribution :math:`q_T` for the `exponential` kernel
can be `weighted` or `unweighted` and is defined as follows.
.. math::
q_T(t) =
\begin{cases}
\exp( - \sum_{i=1}^D \alpha \left| f_i(t) - f_i(T) \right| )
& \text{unweighted}\\
\exp( - \sum_{i=1}^D \alpha_i \left| f_i(t) - f_i(T) \right| )
& \text{weighted}
\end{cases}
In the above definition :math:`\alpha > 0` and :math:`\alpha_i > 0` are
user-defined sampling weights.
**Uniform Kernel (Climatological Forecaster)**
The sampling distribution :math:`q_T` for the `uniform` kernel can be
`seasonal` or not. The `seasonal` version is defined as follows.
.. math::
q_T(t) =
\begin{cases}
1.0
& \text{if }f(t) = f(T) \\
0.0
& \text{otherwise}
\end{cases}
The `not seasonal` version is defined as the constant map.
.. math::
q_T(t) = 1.0
**Feature Map**
The feature map :math:`f` is configurable. The special case
:math:`f(t) = [t]` results in the so-called `naive NPTS`. For
non-seasonal models, by default we have :math:`f(t) = [t]` for the NPTS
Forecaster (i.e., with the `exponential` kernel) and no features for the
Climatological Forecaster (i.e., the `uniform` kernel).
For seasonal NPTS and seasonal Climatological, time features determined
based on the frequency of the time series are added to the default
feature map.
The default time features for various frequencies are
.. math::
f(t) =
\begin{cases}
[\mathit{MINUTE\_OF\_HOUR}(t)] & \text{for minutely frequency}\\
[\mathit{HOUR\_OF\_DAY}(t)] & \text{for hourly frequency}\\
[\mathit{DAY\_OF\_WEEK}(t)] & \text{for daily frequency}\\
[\mathit{DAY\_OF\_MONTH}(t)] & \text{for weekly frequency}\\
[\mathit{MONTH\_OF\_YEAR}(t)] & \text{for monthly frequency}
\end{cases}
During prediction, one can provide custom features in `feat_dynamic_real`
(these have to be defined in both the training and the prediction range).
If the model is seasonal, these custom features are added to the default
feature map, otherwise they are ignored. If `feat_dynamic_real` is not
empty, one can disable default time features by setting the flag
`use_default_time_features` to `False`.
Parameters
----------
freq
time frequency string
prediction_length
number of time steps to predict
context_length
number of time-steps that are considered before making predictions
(the default value of None corresponds to the case where all time steps
in the history are considered)
kernel_type
the type of kernel to use (either "exponential" or "uniform")
exp_kernel_weights
single weight :math:`\alpha` or the weights for the features to use
in the exponential kernel; currently, we use the single weight version
and for seasonal NPTS we just rescale :math:`\alpha` by `feature_scale`
for seasonal features.
use_seasonal_model
whether to use seasonal variant
use_default_time_features
time features derived based on the frequency of the time series
num_default_time_features
this is not exposed; this parameter is for having more control on the
number of default time features, as the date_feature_set adds too
many per default.
feature_scale
scale for time (seasonal) features in order to sample past seasons
with higher probability
"""
@validated()
def predict_time_series(
self,
ts: pd.Series,
num_samples: int,
custom_features: np.ndarray = None,
item_id: Optional[Any] = None,
) -> SampleForecast:
"""
Given a training time series, this method generates `Forecast` object
containing prediction samples for `prediction_length` time points.
The predictions are generated via weighted sampling where the weights
are determined by the `NPTSPredictor` kernel type and feature map.
Parameters
----------
ts
training time series object
custom_features
custom features (covariates) to use
num_samples
number of samples to draw
item_id
item_id to identify the time series
Returns
-------
Forecast
A prediction for the supplied `ts` and `custom_features`.
"""
if np.all(np.isnan(ts.values[-self.context_length :])):
raise GluonTSDataError(
f"The last {self.context_length} positions of the target time "
f"series are all NaN. Please increase the `context_length` "
f"parameter of your NPTS model so the last "
f"{self.context_length} positions of each target contain at "
f"least one non-NaN value."
)
# Get the features for both training and prediction ranges
train_features, predict_features = self._get_features(
ts.index, self.prediction_length, custom_features
)
# Compute weights for sampling for each time step `t` in the
# prediction range
sampling_weights_iterator = NPTS.compute_weights(
train_features=train_features,
pred_features=predict_features,
target_isnan_positions=np.argwhere(np.isnan(ts.values)),
kernel=self.kernel,
do_exp=self._is_exp_kernel(),
)
# Generate forecasts
forecast = NPTS.predict(
targets=ts,
prediction_length=self.prediction_length,
sampling_weights_iterator=sampling_weights_iterator,
num_samples=num_samples,
item_id=item_id,
)
return forecast
def _get_features(
self,
train_index: pd.DatetimeIndex,
prediction_length: int,
custom_features: np.ndarray = None,
) -> Tuple[np.ndarray, np.ndarray]:
"""
Internal method for computing default, (optional) seasonal features
for the training and prediction ranges given time index for the
training range and the prediction length.
Appends `custom_features` if provided.
Parameters
----------
train_index
Pandas DatetimeIndex
prediction_length
prediction length
custom_features
shape: (num_custom_features, train_length + pred_length)
Returns
-------
a tuple of (training, prediction) feature tensors
shape: (num_features, train_length/pred_length)
"""
train_length = len(train_index)
full_time_index = pd.date_range(
train_index.min(),
periods=train_length + prediction_length,
freq=train_index.freq,
)
# Default feature map for both seasonal and non-seasonal models.
if self._is_exp_kernel():
# Default time index features: index of the time point
# [0, train_length + pred_length - 1]
features = np.expand_dims(
np.array(range(len(full_time_index))), axis=0
)
# Rescale time index features into the range: [-0.5, 0.5]
# similar to the seasonal features
# (see gluonts.time_feature)
features = features / (train_length + prediction_length - 1) - 0.5
else:
# For uniform seasonal model we do not add time index features
features = np.empty((0, len(full_time_index)))
# Add more features for seasonal variant
if self.use_seasonal_model:
if custom_features is not None:
total_length = train_length + prediction_length
assert len(custom_features.shape) == 2, (
"Custom features should be 2D-array where the rows "
"represent features and columns the time points."
)
assert custom_features.shape[1] == total_length, (
f"For a seasonal model, feat_dynamic_real must be defined "
f"for both training and prediction ranges. They are only "
f"provided for {custom_features.shape[1]} time steps "
f"instead of {train_length + prediction_length} steps."
)
features = np.vstack(
[features, self.feature_scale * custom_features]
)
if self.use_default_time_features or custom_features is None:
# construct seasonal features
seasonal_features_gen = time_features_from_frequency_str(
full_time_index.freqstr
)
seasonal_features = [
self.feature_scale * gen(full_time_index)
for gen in seasonal_features_gen[
: self.num_default_time_features
]
]
features = np.vstack([features, *seasonal_features])
train_features = features[:, :train_length]
pred_features = features[:, train_length:]
return train_features, pred_features
| 37.409922 | 79 | 0.620184 | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from enum import Enum
from typing import Iterator, List, Optional, Tuple, Union, Any
# Third-party imports
import numpy as np
import pandas as pd
# First-party imports
from gluonts.core.component import validated
from gluonts.core.exception import GluonTSDataError
from gluonts.dataset.common import Dataset
from gluonts.model.forecast import SampleForecast
from gluonts.model.predictor import RepresentablePredictor
from gluonts.time_feature import time_features_from_frequency_str
# Relative imports
from ._model import NPTS
class KernelType(str, Enum):
exponential = "exponential"
uniform = "uniform"
class NPTSPredictor(RepresentablePredictor):
r"""
Implementation of Non-Parametric Time Series Forecaster.
Forecasts of NPTS for time step :math:`T` are one of the previous values
of the time series (these could be known values or predictions), sampled
according to the (un-normalized) distribution :math:`q_T(t) > 0`, where
:math:`0 <= t < T`.
The distribution :math:`q_T` is expressed in terms of a feature map
:math:`f(t)` which associates a time step :math:`t` with a
:math:`D`-dimensional feature map :math:`[f_1(t), ..., f_D(t)]`. More
details on the feature map can be found below.
We offer two types of distribution kernels.
**Exponential Kernel (NPTS Forecaster)**
The sampling distribution :math:`q_T` for the `exponential` kernel
can be `weighted` or `unweighted` and is defined as follows.
.. math::
q_T(t) =
\begin{cases}
\exp( - \sum_{i=1}^D \alpha \left| f_i(t) - f_i(T) \right| )
& \text{unweighted}\\
\exp( - \sum_{i=1}^D \alpha_i \left| f_i(t) - f_i(T) \right| )
& \text{weighted}
\end{cases}
In the above definition :math:`\alpha > 0` and :math:`\alpha_i > 0` are
user-defined sampling weights.
**Uniform Kernel (Climatological Forecaster)**
The sampling distribution :math:`q_T` for the `uniform` kernel can be
`seasonal` or not. The `seasonal` version is defined as follows.
.. math::
q_T(t) =
\begin{cases}
1.0
& \text{if }f(t) = f(T) \\
0.0
& \text{otherwise}
\end{cases}
The `not seasonal` version is defined as the constant map.
.. math::
q_T(t) = 1.0
**Feature Map**
The feature map :math:`f` is configurable. The special case
:math:`f(t) = [t]` results in the so-called `naive NPTS`. For
non-seasonal models, by default we have :math:`f(t) = [t]` for the NPTS
Forecaster (i.e., with the `exponential` kernel) and no features for the
Climatological Forecaster (i.e., the `uniform` kernel).
For seasonal NPTS and seasonal Climatological, time features determined
based on the frequency of the time series are added to the default
feature map.
The default time features for various frequencies are
.. math::
f(t) =
\begin{cases}
[\mathit{MINUTE\_OF\_HOUR}(t)] & \text{for minutely frequency}\\
[\mathit{HOUR\_OF\_DAY}(t)] & \text{for hourly frequency}\\
[\mathit{DAY\_OF\_WEEK}(t)] & \text{for daily frequency}\\
[\mathit{DAY\_OF\_MONTH}(t)] & \text{for weekly frequency}\\
[\mathit{MONTH\_OF\_YEAR}(t)] & \text{for monthly frequency}
\end{cases}
During prediction, one can provide custom features in `feat_dynamic_real`
(these have to be defined in both the training and the prediction range).
If the model is seasonal, these custom features are added to the default
feature map, otherwise they are ignored. If `feat_dynamic_real` is not
empty, one can disable default time features by setting the flag
`use_default_time_features` to `False`.
Parameters
----------
freq
time frequency string
prediction_length
number of time steps to predict
context_length
number of time-steps that are considered before making predictions
(the default value of None corresponds to the case where all time steps
in the history are considered)
kernel_type
the type of kernel to use (either "exponential" or "uniform")
exp_kernel_weights
single weight :math:`\alpha` or the weights for the features to use
in the exponential kernel; currently, we use the single weight version
and for seasonal NPTS we just rescale :math:`\alpha` by `feature_scale`
for seasonal features.
use_seasonal_model
whether to use seasonal variant
use_default_time_features
time features derived based on the frequency of the time series
num_default_time_features
this is not exposed; this parameter is for having more control on the
number of default time features, as the date_feature_set adds too
many per default.
feature_scale
scale for time (seasonal) features in order to sample past seasons
with higher probability
"""
@validated()
def __init__(
self,
freq: str,
prediction_length: int,
context_length: Optional[int] = None,
kernel_type: KernelType = KernelType.exponential,
exp_kernel_weights: Union[float, List[float]] = 1.0,
use_seasonal_model: bool = True,
use_default_time_features: bool = True,
num_default_time_features: int = 1,
feature_scale: float = 1000.0,
) -> None:
super().__init__(freq=freq, prediction_length=prediction_length)
# We limit the context length to some maximum value instead of
# looking at the whole history which might be too large.
self.context_length = (
context_length if context_length is not None else 1100
)
self.kernel_type = kernel_type
self.num_default_time_features = num_default_time_features
self.use_seasonal_model = use_seasonal_model
self.use_default_time_features = use_default_time_features
self.feature_scale = feature_scale
if not self._is_exp_kernel():
self.kernel = NPTS.uniform_kernel()
elif isinstance(exp_kernel_weights, float):
self.kernel = NPTS.log_distance_kernel(exp_kernel_weights)
elif isinstance(exp_kernel_weights, list):
self.kernel = NPTS.log_weighted_distance_kernel(exp_kernel_weights)
else:
raise RuntimeError(
'Unexpected "exp_kernel_weights" type - should be either'
"a float or a list of floats"
)
def _is_exp_kernel(self) -> bool:
return self.kernel_type == KernelType.exponential
def predict(
self, dataset: Dataset, num_samples: int = 100, **kwargs
) -> Iterator[SampleForecast]:
for data in dataset:
start = pd.Timestamp(data["start"])
target = np.asarray(data["target"], np.float32)
index = pd.date_range(
start=start, freq=self.freq, periods=len(target)
)
item_id = data.get("item_id", None)
# Slice the time series until context_length or history length
# depending on which ever is minimum
train_length = min(len(target), self.context_length)
ts = pd.Series(index=index, data=target)[-train_length:]
if "feat_dynamic_real" in data.keys():
custom_features = np.array(
[
dynamic_feature[
-train_length - self.prediction_length :
]
for dynamic_feature in data["feat_dynamic_real"]
]
)
else:
custom_features = None
yield self.predict_time_series(
ts, num_samples, custom_features, item_id=item_id
)
def predict_time_series(
self,
ts: pd.Series,
num_samples: int,
custom_features: np.ndarray = None,
item_id: Optional[Any] = None,
) -> SampleForecast:
"""
Given a training time series, this method generates `Forecast` object
containing prediction samples for `prediction_length` time points.
The predictions are generated via weighted sampling where the weights
are determined by the `NPTSPredictor` kernel type and feature map.
Parameters
----------
ts
training time series object
custom_features
custom features (covariates) to use
num_samples
number of samples to draw
item_id
item_id to identify the time series
Returns
-------
Forecast
A prediction for the supplied `ts` and `custom_features`.
"""
if np.all(np.isnan(ts.values[-self.context_length :])):
raise GluonTSDataError(
f"The last {self.context_length} positions of the target time "
f"series are all NaN. Please increase the `context_length` "
f"parameter of your NPTS model so the last "
f"{self.context_length} positions of each target contain at "
f"least one non-NaN value."
)
# Get the features for both training and prediction ranges
train_features, predict_features = self._get_features(
ts.index, self.prediction_length, custom_features
)
# Compute weights for sampling for each time step `t` in the
# prediction range
sampling_weights_iterator = NPTS.compute_weights(
train_features=train_features,
pred_features=predict_features,
target_isnan_positions=np.argwhere(np.isnan(ts.values)),
kernel=self.kernel,
do_exp=self._is_exp_kernel(),
)
# Generate forecasts
forecast = NPTS.predict(
targets=ts,
prediction_length=self.prediction_length,
sampling_weights_iterator=sampling_weights_iterator,
num_samples=num_samples,
item_id=item_id,
)
return forecast
def _get_features(
self,
train_index: pd.DatetimeIndex,
prediction_length: int,
custom_features: np.ndarray = None,
) -> Tuple[np.ndarray, np.ndarray]:
"""
Internal method for computing default, (optional) seasonal features
for the training and prediction ranges given time index for the
training range and the prediction length.
Appends `custom_features` if provided.
Parameters
----------
train_index
Pandas DatetimeIndex
prediction_length
prediction length
custom_features
shape: (num_custom_features, train_length + pred_length)
Returns
-------
a tuple of (training, prediction) feature tensors
shape: (num_features, train_length/pred_length)
"""
train_length = len(train_index)
full_time_index = pd.date_range(
train_index.min(),
periods=train_length + prediction_length,
freq=train_index.freq,
)
# Default feature map for both seasonal and non-seasonal models.
if self._is_exp_kernel():
# Default time index features: index of the time point
# [0, train_length + pred_length - 1]
features = np.expand_dims(
np.array(range(len(full_time_index))), axis=0
)
# Rescale time index features into the range: [-0.5, 0.5]
# similar to the seasonal features
# (see gluonts.time_feature)
features = features / (train_length + prediction_length - 1) - 0.5
else:
# For uniform seasonal model we do not add time index features
features = np.empty((0, len(full_time_index)))
# Add more features for seasonal variant
if self.use_seasonal_model:
if custom_features is not None:
total_length = train_length + prediction_length
assert len(custom_features.shape) == 2, (
"Custom features should be 2D-array where the rows "
"represent features and columns the time points."
)
assert custom_features.shape[1] == total_length, (
f"For a seasonal model, feat_dynamic_real must be defined "
f"for both training and prediction ranges. They are only "
f"provided for {custom_features.shape[1]} time steps "
f"instead of {train_length + prediction_length} steps."
)
features = np.vstack(
[features, self.feature_scale * custom_features]
)
if self.use_default_time_features or custom_features is None:
# construct seasonal features
seasonal_features_gen = time_features_from_frequency_str(
full_time_index.freqstr
)
seasonal_features = [
self.feature_scale * gen(full_time_index)
for gen in seasonal_features_gen[
: self.num_default_time_features
]
]
features = np.vstack([features, *seasonal_features])
train_features = features[:, :train_length]
pred_features = features[:, train_length:]
return train_features, pred_features
| 2,784 | 63 | 103 |
d253ac5f196af0d289d79174343fd2acec6dfaa2 | 9,556 | py | Python | tests/test_authorization.py | jframos/sdklib | 0cc1126e94b823fad6cc47e6a00549cad6d2f771 | [
"BSD-2-Clause"
] | null | null | null | tests/test_authorization.py | jframos/sdklib | 0cc1126e94b823fad6cc47e6a00549cad6d2f771 | [
"BSD-2-Clause"
] | null | null | null | tests/test_authorization.py | jframos/sdklib | 0cc1126e94b823fad6cc47e6a00549cad6d2f771 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import unittest
from sdklib.http import HttpRequestContext
from sdklib.http.authorization import (
basic_authorization, x_11paths_authorization, X11PathsAuthentication, BasicAuthentication,
_get_11paths_serialized_headers
)
from sdklib.http.renderers import FormRenderer, JSONRenderer, MultiPartRenderer
from sdklib.http.headers import AUTHORIZATION_HEADER_NAME, X_11PATHS_BODY_HASH_HEADER_NAME
| 65.903448 | 138 | 0.657493 | # -*- coding: utf-8 -*-
import unittest
from sdklib.http import HttpRequestContext
from sdklib.http.authorization import (
basic_authorization, x_11paths_authorization, X11PathsAuthentication, BasicAuthentication,
_get_11paths_serialized_headers
)
from sdklib.http.renderers import FormRenderer, JSONRenderer, MultiPartRenderer
from sdklib.http.headers import AUTHORIZATION_HEADER_NAME, X_11PATHS_BODY_HASH_HEADER_NAME
class TestAuthorization(unittest.TestCase):
def test_basic_authentication(self):
value = basic_authorization(username="Aladdin", password="OpenSesame")
self.assertEqual("Basic QWxhZGRpbjpPcGVuU2VzYW1l", value)
def test_basic_authentication_class(self):
a = BasicAuthentication("Aladdin", "OpenSesame")
ctx = HttpRequestContext(headers={})
auth_ctx = a.apply_authentication(context=ctx)
self.assertEqual("Basic QWxhZGRpbjpPcGVuU2VzYW1l", auth_ctx.headers[AUTHORIZATION_HEADER_NAME])
def test_11paths_authentication(self):
context = HttpRequestContext(method="GET", url_path="/path/")
header_value = x_11paths_authorization(app_id="123456", secret="654321", context=context,
utc="2016-01-01 00:00:00")
self.assertEqual("11PATHS 123456 t0cS2yvvlcSqiKVK/v6tjG8pP4s=", header_value)
def test_11paths_authentication_with_query_params(self):
context = HttpRequestContext(method="GET", url_path="/path/",
query_params={"param": "value"})
header_value = x_11paths_authorization(app_id="123456", secret="654321", context=context,
utc="2016-01-01 00:00:00")
self.assertEqual("11PATHS 123456 kVXKo1ug8GRm0kyAjvruvtNDetU=", header_value)
def test_11paths_authentication_with_multiples_query_params(self):
context = HttpRequestContext(method="GET", url_path="/path/",
query_params={"param1": "value1", "param2": "value2"})
header_value1 = x_11paths_authorization(app_id="123456", secret="654321", context=context,
utc="2016-01-01 00:00:00")
self.assertEqual("11PATHS 123456 pof/ZVaAmmrbSOCJXiRWuQ5vrco=", header_value1)
context = HttpRequestContext(method="GET", url_path="/path/",
query_params={"param2": "value2", "param1": "value1"})
header_value2 = x_11paths_authorization(app_id="123456", secret="654321", context=context,
utc="2016-01-01 00:00:00")
self.assertEqual(header_value1, header_value2)
def test_11paths_authentication_with_body(self):
context = HttpRequestContext(method="POST", url_path="/path/",
body_params={"param": "value"}, renderer=FormRenderer())
header_value = x_11paths_authorization(app_id="123456", secret="654321", context=context,
utc="2016-01-01 00:00:00")
self.assertEqual("11PATHS 123456 8Ok3S1xUFLtjRxRkWVoZAKXZc1A=", header_value)
def test_11paths_authentication_class_with_static_time(self):
auth = X11PathsAuthentication(app_id="123456", secret="654321", utc="2016-01-01 00:00:00")
context = HttpRequestContext(method="POST", url_path="/path/",
body_params={"param": "value"}, renderer=FormRenderer())
res_context = auth.apply_authentication(context=context)
self.assertEqual("11PATHS 123456 8Ok3S1xUFLtjRxRkWVoZAKXZc1A=", res_context.headers["Authorization"])
def test_11paths_authentication_class_with_dynamic_time(self):
auth = X11PathsAuthentication(app_id="123456", secret="654321")
context = HttpRequestContext(method="POST", url_path="/path/",
body_params={"param": "value"}, renderer=FormRenderer())
res_context = auth.apply_authentication(context=context)
self.assertNotEqual("11PATHS 123456 8Ok3S1xUFLtjRxRkWVoZAKXZc1A=", res_context.headers["Authorization"])
def test_11paths_authentication_form_multi(self):
auth = X11PathsAuthentication(app_id="QRKJw6qX4fykZ3G3yqkQ", secret="eHkAXTebECWBs4TtNbNMBYC99AzMrmaydUWcUFEM",
utc="2016-12-12 11:18:45")
context = HttpRequestContext(method="POST", url_path="/api/0.1/vulnerabilities/15fc104c-dc55-41d4-8d4e-4d76eda7a029/consequences",
body_params={"consequence.scopes[]": "1",
"consequence.impact[]": "1",
"consequence.description[es]": "test",
"consequence.description[en]": "test"},
renderer=FormRenderer())
res_context = auth.apply_authentication(context=context)
self.assertEqual("11PATHS QRKJw6qX4fykZ3G3yqkQ CMf3royzdD4l/P0RVKyr2uOXZ4Y=", res_context.headers[AUTHORIZATION_HEADER_NAME])
def test_11paths_authentication_class_json(self):
auth = X11PathsAuthentication(app_id="123456", secret="654321", utc="2016-01-01 00:00:00")
context = HttpRequestContext(method="POST", url_path="/path/", headers={"Content-Type": "application/json"},
body_params={"param": "value"}, renderer=JSONRenderer())
res_context = auth.apply_authentication(context=context)
self.assertEqual("11PATHS 123456 6CFsVmrRxEz3Icz6U8SSHZ4RukE=", res_context.headers[AUTHORIZATION_HEADER_NAME])
self.assertEqual("f247c7579b452d08f38eec23c2d1a4a23daee0d2", res_context.headers[X_11PATHS_BODY_HASH_HEADER_NAME])
def test_11paths_authentication_class_json_ignorecase_header_name(self):
auth = X11PathsAuthentication(app_id="123456", secret="654321", utc="2016-01-01 00:00:00")
context = HttpRequestContext(method="POST", url_path="/path/", headers={"Content-type": "application/json"},
body_params={"param": "value"}, renderer=JSONRenderer())
res_context = auth.apply_authentication(context=context)
self.assertEqual("11PATHS 123456 6CFsVmrRxEz3Icz6U8SSHZ4RukE=", res_context.headers[AUTHORIZATION_HEADER_NAME])
self.assertEqual("f247c7579b452d08f38eec23c2d1a4a23daee0d2", res_context.headers[X_11PATHS_BODY_HASH_HEADER_NAME])
def test_11paths_authentication_get_serialized_headers(self):
serializer_headers = _get_11paths_serialized_headers(
{
"X-11Paths-profile-id": "77ed609a-1a9b-4c16-97c2-ba32f72f5499",
"X-11paths-file-hash": "a30d2aef3f9da7f3273100bb7d412ccedb4c481f"
}
)
self.assertEqual(
"x-11paths-file-hash:a30d2aef3f9da7f3273100bb7d412ccedb4c481f x-11paths-profile-id:77ed609a-1a9b-4c16-97c2-ba32f72f5499",
serializer_headers
)
def test_11paths_authentication_class_multiples_headers(self):
auth = X11PathsAuthentication(app_id="2kNhWLEETQ46KWLnAg48", secret="lBc4BSeqCGkidJZXictc3yiHbKBS87hjE05YrswJ",
utc="2017-01-27 08:27:44")
context = HttpRequestContext(method="POST", url_path="/ExternalApi/CleanFile",
renderer=MultiPartRenderer(),
headers={"X-11paths-profile-id": "77ed609a-1a9b-4c16-97c2-ba32f72f5499",
"Content-Type": "multipart/form-data"},
files={"file": "tests/resources/file.png"})
res_context = auth.apply_authentication(context=context)
self.assertEqual("11PATHS 2kNhWLEETQ46KWLnAg48 8/fuEv9NLn41ikh96hRHMFGs1ww=", res_context.headers["Authorization"])
def test_11paths_authentication_post_empty_body_params(self):
auth = X11PathsAuthentication(app_id="2kNhWLEETQ46KWLnAg48", secret="lBc4BSeqCGkidJZXictc3yiHbKBS87hjE05YrswJ",
utc="2017-01-27 08:27:44")
context = HttpRequestContext(method="POST", url_path="/ExternalApi/CleanFile",
renderer=JSONRenderer())
res_context = auth.apply_authentication(context=context)
self.assertEqual("11PATHS 2kNhWLEETQ46KWLnAg48 atYkLRYJ3b+CXU+GdklyALAr9NE=",
res_context.headers["Authorization"])
self.assertNotIn(X_11PATHS_BODY_HASH_HEADER_NAME, res_context.headers)
self.assertEqual("application/x-www-form-urlencoded", res_context.headers["Content-Type"])
def test_11paths_authentication_post_json_empty_body_params(self):
    """Empty JSON POST keeps its content type and hashes the empty body."""
    authenticator = X11PathsAuthentication(
        app_id="2kNhWLEETQ46KWLnAg48",
        secret="lBc4BSeqCGkidJZXictc3yiHbKBS87hjE05YrswJ",
        utc="2017-01-27 08:27:44",
    )
    request = HttpRequestContext(
        method="POST",
        url_path="/ExternalApi/CleanFile",
        headers={"Content-Type": "application/json"},
    )
    signed = authenticator.apply_authentication(context=request)
    self.assertEqual("11PATHS 2kNhWLEETQ46KWLnAg48 u/91oWtEs2qkco5v6JXcfWx+FJ0=",
                     signed.headers["Authorization"])
    self.assertEqual("application/json", signed.headers["Content-Type"])
    # Expected value is the SHA-1 digest of the empty string.
    self.assertEqual("da39a3ee5e6b4b0d3255bfef95601890afd80709",
                     signed.headers["X-11paths-body-hash"])
| 8,676 | 22 | 428 |
d21772b27dff2b87a5479b3616be305201f252f1 | 746 | py | Python | screens/titlescreen.py | JOSBEAK/HangMan-Project | 07233d4a44b3bdaedb1615f0b92d48e5fef50f5b | [
"MIT"
] | 16 | 2021-08-31T04:00:51.000Z | 2022-02-11T00:35:35.000Z | screens/titlescreen.py | JOSBEAK/HangMan-Project | 07233d4a44b3bdaedb1615f0b92d48e5fef50f5b | [
"MIT"
] | null | null | null | screens/titlescreen.py | JOSBEAK/HangMan-Project | 07233d4a44b3bdaedb1615f0b92d48e5fef50f5b | [
"MIT"
] | 1 | 2021-09-25T07:05:07.000Z | 2021-09-25T07:05:07.000Z | from kivy.uix.screenmanager import Screen
from kivy.lang import Builder
from kivy.properties import NumericProperty
from kivy.animation import Animation
from kivymd.app import MDApp
Builder.load_file('screens/titlescreen.kv')
| 24.064516 | 53 | 0.675603 | from kivy.uix.screenmanager import Screen
from kivy.lang import Builder
from kivy.properties import NumericProperty
from kivy.animation import Animation
from kivymd.app import MDApp
Builder.load_file('screens/titlescreen.kv')
class TitleScreen(Screen):
    """Title screen that pulses ``col_offset`` and advances on a double tap."""

    # Animated value oscillating between 0 and 1; consumed by the kv rules
    # loaded from screens/titlescreen.kv.
    col_offset = NumericProperty(0)

    def __init__(self, **kwargs):
        """Start the endlessly repeating colour-pulse animation."""
        super().__init__(**kwargs)
        pulse = Animation(col_offset=1) + Animation(col_offset=0)
        pulse.repeat = True
        pulse.start(self)

    def on_touch_down(self, touch):
        # Only a double tap advances past the title screen; single taps
        # are ignored. NOTE(review): the event is not forwarded to
        # super().on_touch_down — confirm this is intentional.
        if touch.is_double_tap:
            self.change_screen()

    def change_screen(self):
        """Switch the app's root ScreenManager to the main screen."""
        root = MDApp.get_running_app().root
        setattr(root, 'current', '_main_screen_')
| 355 | 134 | 23 |
b302e5672149a086c52e8d8846803dd6e1acf46d | 2,555 | py | Python | src/analysis.py | MaikelVeen/abstract-analysis | ae1739bdd2f28abd76a24867a0abe1f4c1fe1c61 | [
"MIT"
] | null | null | null | src/analysis.py | MaikelVeen/abstract-analysis | ae1739bdd2f28abd76a24867a0abe1f4c1fe1c61 | [
"MIT"
] | null | null | null | src/analysis.py | MaikelVeen/abstract-analysis | ae1739bdd2f28abd76a24867a0abe1f4c1fe1c61 | [
"MIT"
] | null | null | null | import abstract_parser
import os
import nltk
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
import argparse
import pandas as pd
from datetime import datetime
# TODO: read these from a file
# Section headings and filler words stripped from abstracts in addition to
# the NLTK stop word list.
# Fixed: 'objetctive' typo, and the missing comma in "'aims' 'i'" which
# implicitly concatenated to the bogus entry 'aimsi' (so neither 'aims'
# nor 'i' was ever filtered).
COMMON = ['background', 'objective', 'objectives', 'introduction',
          'methods', 'method', 'importance', 'results', 'result', 'aim',
          'aims', 'i', 'comment', 'on', 'in']
if __name__ == "__main__":
nltk.download('stopwords')
folder_path = _parse_argument()
file_paths = []
for (dirpath, _, filenames) in os.walk(folder_path):
for f in filenames:
file_paths.append(os.path.abspath(os.path.join(dirpath, f)))
for file_path in file_paths:
data = abstract_parser.parse_file(file_path)
_process_abstracts(data)
| 28.707865 | 107 | 0.678278 | import abstract_parser
import os
import nltk
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
import argparse
import pandas as pd
from datetime import datetime
# TODO: read these from a file
# Section headings and filler words stripped from abstracts in addition to
# the NLTK stop word list.
# Fixed: 'objetctive' typo, and the missing comma in "'aims' 'i'" which
# implicitly concatenated to the bogus entry 'aimsi' (so neither 'aims'
# nor 'i' was ever filtered).
COMMON = ['background', 'objective', 'objectives', 'introduction',
          'methods', 'method', 'importance', 'results', 'result', 'aim',
          'aims', 'i', 'comment', 'on', 'in']
def _parse_argument():
parser = argparse.ArgumentParser(description='Abstract Text Analysis')
parser.add_argument("--f", "--folder", required=True)
args = parser.parse_args()
return args.f
def _preprocess_abstract(abstract):
    """Tokenize an abstract into cleaned, lowercase alphabetic tokens.

    Removes NLTK English stop words and the section-heading words in COMMON.

    :param abstract: raw abstract text
    :return: list of cleaned tokens (may contain duplicates)
    """
    tokens = nltk.word_tokenize(abstract)
    stop_words = set(stopwords.words('english'))
    # Bug fix: lowercase BEFORE the stop-word filter. The NLTK stop word
    # list is all-lowercase, so the original order let capitalized stop
    # words (e.g. "The", "And") slip through.
    tokens = [w.lower() for w in tokens if w.isalpha()]  # only words, lowercase
    tokens = [w for w in tokens if w not in stop_words]  # remove stop words
    tokens = [w for w in tokens if w not in COMMON]      # remove common words
    return tokens
def _get_wordbags(abstracts):
    """Return one preprocessed token bag (list) per abstract, preserving order."""
    return [_preprocess_abstract(abstract) for abstract in abstracts]
def _process_abstracts(dataframe):
    """Compute per-word document frequencies and export them to CSV.

    For every distinct word across all abstracts, counts in how many
    abstracts it appears and what percentage of abstracts that is, then
    writes a ';'-separated, header-less CSV named
    ``word-export-<HHMMSS>.csv`` with columns word;count;percentage.

    :param dataframe: pandas DataFrame with an 'Abstract' column
    """
    from collections import Counter

    abstracts = dataframe['Abstract']
    total_count = abstracts.size
    ab_wordbags = _get_wordbags(abstracts)
    # Count each word at most once per abstract (document frequency).
    # A single Counter pass over the de-duplicated bags is O(total tokens),
    # replacing the original quadratic scan of every (word, bag) pair.
    dictionary = Counter()
    for bag in ab_wordbags:
        dictionary.update(set(bag))

    percentages = [(count / total_count) * 100
                   for count in dictionary.values()]

    # Create dataframe and export
    d = {'Word': list(dictionary.keys()), 'Count': list(dictionary.values()),
         'Representation': percentages}
    result_df = pd.DataFrame.from_dict(d)
    result_df.to_csv(
        f'word-export-{datetime.now().strftime("%H%M%S")}.csv', sep=';', index=False, header=False)
def _get_uniquewords(wordbags):
unique_words = set()
for bag in wordbags:
unique_words = unique_words.union(set(bag))
return unique_words
if __name__ == "__main__":
    # Ensure the English stop word corpus is available before processing.
    nltk.download('stopwords')
    folder_path = _parse_argument()

    # Collect the absolute path of every file under the target folder.
    file_paths = []
    for (dirpath, _, filenames) in os.walk(folder_path):
        for filename in filenames:
            file_paths.append(os.path.abspath(os.path.join(dirpath, filename)))

    # Parse each file and export its word-frequency CSV.
    for file_path in file_paths:
        _process_abstracts(abstract_parser.parse_file(file_path))
| 1,644 | 0 | 115 |
87738c69fde8d7df5d43cc853b36f2231d54619d | 34,713 | py | Python | venv/lib/python3.6/site-packages/ansible_collections/vyos/vyos/plugins/modules/vyos_interfaces.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 1 | 2020-01-22T13:11:23.000Z | 2020-01-22T13:11:23.000Z | venv/lib/python3.6/site-packages/ansible_collections/vyos/vyos/plugins/modules/vyos_interfaces.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 12 | 2020-02-21T07:24:52.000Z | 2020-04-14T09:54:32.000Z | venv/lib/python3.6/site-packages/ansible_collections/vyos/vyos/plugins/modules/vyos_interfaces.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#############################################
# WARNING #
#############################################
#
# This file is auto generated by the resource
# module builder playbook.
#
# Do not edit this file manually.
#
# Changes to this file will be over written
# by the resource module builder.
#
# Changes should be made in the model used to
# generate this file or in the resource module
# builder template.
#
#############################################
"""
The module file for vyos_interfaces
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
module: vyos_interfaces
short_description: Interfaces resource module
description:
- This module manages the interface attributes on VyOS network devices.
- This module supports managing base attributes of Ethernet, Bonding, VXLAN, Loopback
and Virtual Tunnel Interfaces.
version_added: 1.0.0
notes:
- Tested against VyOS 1.1.8 (helium).
- This module works with connection C(network_cli). See L(the VyOS OS Platform Options,../network/user_guide/platform_vyos.html).
author:
- Nilashish Chakraborty (@nilashishc)
- Rohit Thakur (@rohitthakur2590)
options:
config:
description: The provided interfaces configuration.
type: list
elements: dict
suboptions:
name:
description:
- Full name of the interface, e.g. eth0, eth1, bond0, vti1, vxlan2.
type: str
required: true
description:
description:
- Interface description.
type: str
duplex:
description:
- Interface duplex mode.
- Applicable for Ethernet interfaces only.
choices:
- full
- half
- auto
type: str
enabled:
default: true
description:
- Administrative state of the interface.
- Set the value to C(true) to administratively enable the interface or C(false)
to disable it.
type: bool
mtu:
description:
- MTU for a specific interface. Refer to vendor documentation for valid values.
- Applicable for Ethernet, Bonding, VXLAN and Virtual Tunnel interfaces.
type: int
speed:
description:
- Interface link speed.
- Applicable for Ethernet interfaces only.
type: str
choices:
- auto
- '10'
- '100'
- '1000'
- '2500'
- '10000'
vifs:
description:
- Virtual sub-interfaces related configuration.
- 802.1Q VLAN interfaces are represented as virtual sub-interfaces in VyOS.
type: list
elements: dict
suboptions:
vlan_id:
description:
- Identifier for the virtual sub-interface.
type: int
description:
description:
- Virtual sub-interface description.
type: str
enabled:
description:
- Administrative state of the virtual sub-interface.
- Set the value to C(true) to administratively enable the interface or
C(false) to disable it.
type: bool
default: true
mtu:
description:
- MTU for the virtual sub-interface.
- Refer to vendor documentation for valid values.
type: int
running_config:
description:
- This option is used only with state I(parsed).
- The value of this option should be the output received from the VyOS device
by executing the command B(show configuration commands | grep interfaces).
- The state I(parsed) reads the configuration from C(running_config) option and
transforms it into Ansible structured data as per the resource module's argspec
and the value is then returned in the I(parsed) key within the result.
type: str
state:
description:
- The state of the configuration after module completion.
type: str
choices:
- merged
- replaced
- overridden
- deleted
- rendered
- gathered
- parsed
default: merged
"""
EXAMPLES = """
# Using merged
#
# -------------
# Before state:
# -------------
#
# vyos@vyos:~$ show configuration commands | grep interfaces
# set interfaces ethernet eth0 address 'dhcp'
# set interfaces ethernet eth0 address 'dhcpv6'
# set interfaces ethernet eth0 duplex 'auto'
# set interfaces ethernet eth0 hw-id '08:00:27:30:f0:22'
# set interfaces ethernet eth0 smp-affinity 'auto'
# set interfaces ethernet eth0 speed 'auto'
# set interfaces ethernet eth1 hw-id '08:00:27:ea:0f:b9'
# set interfaces ethernet eth1 smp-affinity 'auto'
# set interfaces ethernet eth2 hw-id '08:00:27:c2:98:23'
# set interfaces ethernet eth2 smp-affinity 'auto'
# set interfaces ethernet eth3 hw-id '08:00:27:43:70:8c'
# set interfaces loopback lo
- name: Merge provided configuration with device configuration
vyos.vyos.vyos_interfaces:
config:
- name: eth2
description: Configured by Ansible
enabled: true
vifs:
- vlan_id: 200
description: VIF 200 - ETH2
- name: eth3
description: Configured by Ansible
mtu: 1500
- name: bond1
description: Bond - 1
mtu: 1200
- name: vti2
description: VTI - 2
enabled: false
state: merged
#
#
# -------------------------
# Module Execution Result
# -------------------------
#
# "before": [
# {
# "enabled": true,
# "name": "lo"
# },
# {
# "enabled": true,
# "name": "eth3"
# },
# {
# "enabled": true,
# "name": "eth2"
# },
# {
# "enabled": true,
# "name": "eth1"
# },
# {
# "duplex": "auto",
# "enabled": true,
# "name": "eth0",
# "speed": "auto"
# }
# ]
#
# "commands": [
# "set interfaces ethernet eth2 description 'Configured by Ansible'",
# "set interfaces ethernet eth2 vif 200",
# "set interfaces ethernet eth2 vif 200 description 'VIF 200 - ETH2'",
# "set interfaces ethernet eth3 description 'Configured by Ansible'",
# "set interfaces ethernet eth3 mtu '1500'",
# "set interfaces bonding bond1",
# "set interfaces bonding bond1 description 'Bond - 1'",
# "set interfaces bonding bond1 mtu '1200'",
# "set interfaces vti vti2",
# "set interfaces vti vti2 description 'VTI - 2'",
# "set interfaces vti vti2 disable"
# ]
#
# "after": [
# {
# "description": "Bond - 1",
# "enabled": true,
# "mtu": 1200,
# "name": "bond1"
# },
# {
# "enabled": true,
# "name": "lo"
# },
# {
# "description": "VTI - 2",
# "enabled": false,
# "name": "vti2"
# },
# {
# "description": "Configured by Ansible",
# "enabled": true,
# "mtu": 1500,
# "name": "eth3"
# },
# {
# "description": "Configured by Ansible",
# "enabled": true,
# "name": "eth2",
# "vifs": [
# {
# "description": "VIF 200 - ETH2",
# "enabled": true,
# "vlan_id": "200"
# }
# ]
# },
# {
# "enabled": true,
# "name": "eth1"
# },
# {
# "duplex": "auto",
# "enabled": true,
# "name": "eth0",
# "speed": "auto"
# }
# ]
#
#
# -------------
# After state:
# -------------
#
# vyos@vyos:~$ show configuration commands | grep interfaces
# set interfaces bonding bond1 description 'Bond - 1'
# set interfaces bonding bond1 mtu '1200'
# set interfaces ethernet eth0 address 'dhcp'
# set interfaces ethernet eth0 address 'dhcpv6'
# set interfaces ethernet eth0 duplex 'auto'
# set interfaces ethernet eth0 hw-id '08:00:27:30:f0:22'
# set interfaces ethernet eth0 smp-affinity 'auto'
# set interfaces ethernet eth0 speed 'auto'
# set interfaces ethernet eth1 hw-id '08:00:27:ea:0f:b9'
# set interfaces ethernet eth1 smp-affinity 'auto'
# set interfaces ethernet eth2 description 'Configured by Ansible'
# set interfaces ethernet eth2 hw-id '08:00:27:c2:98:23'
# set interfaces ethernet eth2 smp-affinity 'auto'
# set interfaces ethernet eth2 vif 200 description 'VIF 200 - ETH2'
# set interfaces ethernet eth3 description 'Configured by Ansible'
# set interfaces ethernet eth3 hw-id '08:00:27:43:70:8c'
# set interfaces ethernet eth3 mtu '1500'
# set interfaces loopback lo
# set interfaces vti vti2 description 'VTI - 2'
# set interfaces vti vti2 disable
#
# Using replaced
#
# -------------
# Before state:
# -------------
#
# vyos:~$ show configuration commands | grep eth
# set interfaces bonding bond1 description 'Bond - 1'
# set interfaces bonding bond1 mtu '1400'
# set interfaces ethernet eth0 address 'dhcp'
# set interfaces ethernet eth0 description 'Management Interface for the Appliance'
# set interfaces ethernet eth0 duplex 'auto'
# set interfaces ethernet eth0 hw-id '08:00:27:f3:6c:b5'
# set interfaces ethernet eth0 smp_affinity 'auto'
# set interfaces ethernet eth0 speed 'auto'
# set interfaces ethernet eth1 description 'Configured by Ansible Eng Team'
# set interfaces ethernet eth1 duplex 'full'
# set interfaces ethernet eth1 hw-id '08:00:27:ad:ef:65'
# set interfaces ethernet eth1 smp_affinity 'auto'
# set interfaces ethernet eth1 speed '100'
# set interfaces ethernet eth2 description 'Configured by Ansible'
# set interfaces ethernet eth2 duplex 'full'
# set interfaces ethernet eth2 hw-id '08:00:27:ab:4e:79'
# set interfaces ethernet eth2 mtu '500'
# set interfaces ethernet eth2 smp_affinity 'auto'
# set interfaces ethernet eth2 speed '100'
# set interfaces ethernet eth2 vif 200 description 'Configured by Ansible'
# set interfaces ethernet eth3 description 'Configured by Ansible'
# set interfaces ethernet eth3 duplex 'full'
# set interfaces ethernet eth3 hw-id '08:00:27:17:3c:85'
# set interfaces ethernet eth3 mtu '1500'
# set interfaces ethernet eth3 smp_affinity 'auto'
# set interfaces ethernet eth3 speed '100'
# set interfaces loopback lo
#
#
- name: Replace device configurations of listed interfaces with provided configurations
vyos.vyos.vyos_interfaces:
config:
- name: eth2
description: Replaced by Ansible
- name: eth3
description: Replaced by Ansible
- name: eth1
description: Replaced by Ansible
state: replaced
#
#
# -----------------------
# Module Execution Result
# -----------------------
#
# "before": [
# {
# "description": "Bond - 1",
# "enabled": true,
# "mtu": 1400,
# "name": "bond1"
# },
# {
# "enabled": true,
# "name": "lo"
# },
# {
# "description": "Configured by Ansible",
# "duplex": "full",
# "enabled": true,
# "mtu": 1500,
# "name": "eth3",
# "speed": "100"
# },
# {
# "description": "Configured by Ansible",
# "duplex": "full",
# "enabled": true,
# "mtu": 500,
# "name": "eth2",
# "speed": "100",
# "vifs": [
# {
# "description": "VIF 200 - ETH2",
# "enabled": true,
# "vlan_id": "200"
# }
# ]
# },
# {
# "description": "Configured by Ansible Eng Team",
# "duplex": "full",
# "enabled": true,
# "name": "eth1",
# "speed": "100"
# },
# {
# "description": "Management Interface for the Appliance",
# "duplex": "auto",
# "enabled": true,
# "name": "eth0",
# "speed": "auto"
# }
# ]
#
# "commands": [
# "delete interfaces ethernet eth2 speed",
# "delete interfaces ethernet eth2 duplex",
# "delete interfaces ethernet eth2 mtu",
# "delete interfaces ethernet eth2 vif 200 description",
# "set interfaces ethernet eth2 description 'Replaced by Ansible'",
# "delete interfaces ethernet eth3 speed",
# "delete interfaces ethernet eth3 duplex",
# "delete interfaces ethernet eth3 mtu",
# "set interfaces ethernet eth3 description 'Replaced by Ansible'",
# "delete interfaces ethernet eth1 speed",
# "delete interfaces ethernet eth1 duplex",
# "set interfaces ethernet eth1 description 'Replaced by Ansible'"
# ]
#
# "after": [
# {
# "description": "Bond - 1",
# "enabled": true,
# "mtu": 1400,
# "name": "bond1"
# },
# {
# "enabled": true,
# "name": "lo"
# },
# {
# "description": "Replaced by Ansible",
# "enabled": true,
# "name": "eth3"
# },
# {
# "description": "Replaced by Ansible",
# "enabled": true,
# "name": "eth2",
# "vifs": [
# {
# "enabled": true,
# "vlan_id": "200"
# }
# ]
# },
# {
# "description": "Replaced by Ansible",
# "enabled": true,
# "name": "eth1"
# },
# {
# "description": "Management Interface for the Appliance",
# "duplex": "auto",
# "enabled": true,
# "name": "eth0",
# "speed": "auto"
# }
# ]
#
#
# -------------
# After state:
# -------------
#
# vyos@vyos:~$ show configuration commands | grep interfaces
# set interfaces bonding bond1 description 'Bond - 1'
# set interfaces bonding bond1 mtu '1400'
# set interfaces ethernet eth0 address 'dhcp'
# set interfaces ethernet eth0 address 'dhcpv6'
# set interfaces ethernet eth0 description 'Management Interface for the Appliance'
# set interfaces ethernet eth0 duplex 'auto'
# set interfaces ethernet eth0 hw-id '08:00:27:30:f0:22'
# set interfaces ethernet eth0 smp-affinity 'auto'
# set interfaces ethernet eth0 speed 'auto'
# set interfaces ethernet eth1 description 'Replaced by Ansible'
# set interfaces ethernet eth1 hw-id '08:00:27:ea:0f:b9'
# set interfaces ethernet eth1 smp-affinity 'auto'
# set interfaces ethernet eth2 description 'Replaced by Ansible'
# set interfaces ethernet eth2 hw-id '08:00:27:c2:98:23'
# set interfaces ethernet eth2 smp-affinity 'auto'
# set interfaces ethernet eth2 vif 200
# set interfaces ethernet eth3 description 'Replaced by Ansible'
# set interfaces ethernet eth3 hw-id '08:00:27:43:70:8c'
# set interfaces loopback lo
#
#
# Using overridden
#
#
# --------------
# Before state
# --------------
#
# vyos@vyos:~$ show configuration commands | grep interfaces
# set interfaces ethernet eth0 address 'dhcp'
# set interfaces ethernet eth0 address 'dhcpv6'
# set interfaces ethernet eth0 description 'Ethernet Interface - 0'
# set interfaces ethernet eth0 duplex 'auto'
# set interfaces ethernet eth0 hw-id '08:00:27:30:f0:22'
# set interfaces ethernet eth0 mtu '1200'
# set interfaces ethernet eth0 smp-affinity 'auto'
# set interfaces ethernet eth0 speed 'auto'
# set interfaces ethernet eth1 description 'Configured by Ansible Eng Team'
# set interfaces ethernet eth1 hw-id '08:00:27:ea:0f:b9'
# set interfaces ethernet eth1 mtu '100'
# set interfaces ethernet eth1 smp-affinity 'auto'
# set interfaces ethernet eth1 vif 100 description 'VIF 100 - ETH1'
# set interfaces ethernet eth1 vif 100 disable
# set interfaces ethernet eth2 description 'Configured by Ansible Team (Admin Down)'
# set interfaces ethernet eth2 disable
# set interfaces ethernet eth2 hw-id '08:00:27:c2:98:23'
# set interfaces ethernet eth2 mtu '600'
# set interfaces ethernet eth2 smp-affinity 'auto'
# set interfaces ethernet eth3 description 'Configured by Ansible Network'
# set interfaces ethernet eth3 hw-id '08:00:27:43:70:8c'
# set interfaces loopback lo
# set interfaces vti vti1 description 'Virtual Tunnel Interface - 1'
# set interfaces vti vti1 mtu '68'
#
#
- name: Overrides all device configuration with provided configuration
vyos.vyos.vyos_interfaces:
config:
- name: eth0
description: Outbound Interface For The Appliance
speed: auto
duplex: auto
- name: eth2
speed: auto
duplex: auto
- name: eth3
mtu: 1200
state: overridden
#
#
# ------------------------
# Module Execution Result
# ------------------------
#
# "before": [
# {
# "enabled": true,
# "name": "lo"
# },
# {
# "description": "Virtual Tunnel Interface - 1",
# "enabled": true,
# "mtu": 68,
# "name": "vti1"
# },
# {
# "description": "Configured by Ansible Network",
# "enabled": true,
# "name": "eth3"
# },
# {
# "description": "Configured by Ansible Team (Admin Down)",
# "enabled": false,
# "mtu": 600,
# "name": "eth2"
# },
# {
# "description": "Configured by Ansible Eng Team",
# "enabled": true,
# "mtu": 100,
# "name": "eth1",
# "vifs": [
# {
# "description": "VIF 100 - ETH1",
# "enabled": false,
# "vlan_id": "100"
# }
# ]
# },
# {
# "description": "Ethernet Interface - 0",
# "duplex": "auto",
# "enabled": true,
# "mtu": 1200,
# "name": "eth0",
# "speed": "auto"
# }
# ]
#
# "commands": [
# "delete interfaces vti vti1 description",
# "delete interfaces vti vti1 mtu",
# "delete interfaces ethernet eth1 description",
# "delete interfaces ethernet eth1 mtu",
# "delete interfaces ethernet eth1 vif 100 description",
# "delete interfaces ethernet eth1 vif 100 disable",
# "delete interfaces ethernet eth0 mtu",
# "set interfaces ethernet eth0 description 'Outbound Interface For The Appliance'",
# "delete interfaces ethernet eth2 description",
# "delete interfaces ethernet eth2 mtu",
# "set interfaces ethernet eth2 duplex 'auto'",
# "delete interfaces ethernet eth2 disable",
# "set interfaces ethernet eth2 speed 'auto'",
# "delete interfaces ethernet eth3 description",
# "set interfaces ethernet eth3 mtu '1200'"
# ],
#
# "after": [
# {
# "enabled": true,
# "name": "lo"
# },
# {
# "enabled": true,
# "name": "vti1"
# },
# {
# "enabled": true,
# "mtu": 1200,
# "name": "eth3"
# },
# {
# "duplex": "auto",
# "enabled": true,
# "name": "eth2",
# "speed": "auto"
# },
# {
# "enabled": true,
# "name": "eth1",
# "vifs": [
# {
# "enabled": true,
# "vlan_id": "100"
# }
# ]
# },
# {
# "description": "Outbound Interface For The Appliance",
# "duplex": "auto",
# "enabled": true,
# "name": "eth0",
# "speed": "auto"
# }
# ]
#
#
# ------------
# After state
# ------------
#
# vyos@vyos:~$ show configuration commands | grep interfaces
# set interfaces ethernet eth0 address 'dhcp'
# set interfaces ethernet eth0 address 'dhcpv6'
# set interfaces ethernet eth0 description 'Outbound Interface For The Appliance'
# set interfaces ethernet eth0 duplex 'auto'
# set interfaces ethernet eth0 hw-id '08:00:27:30:f0:22'
# set interfaces ethernet eth0 smp-affinity 'auto'
# set interfaces ethernet eth0 speed 'auto'
# set interfaces ethernet eth1 hw-id '08:00:27:ea:0f:b9'
# set interfaces ethernet eth1 smp-affinity 'auto'
# set interfaces ethernet eth1 vif 100
# set interfaces ethernet eth2 duplex 'auto'
# set interfaces ethernet eth2 hw-id '08:00:27:c2:98:23'
# set interfaces ethernet eth2 smp-affinity 'auto'
# set interfaces ethernet eth2 speed 'auto'
# set interfaces ethernet eth3 hw-id '08:00:27:43:70:8c'
# set interfaces ethernet eth3 mtu '1200'
# set interfaces loopback lo
# set interfaces vti vti1
#
#
# Using deleted
#
#
# -------------
# Before state
# -------------
#
# vyos@vyos:~$ show configuration commands | grep interfaces
# set interfaces bonding bond0 mtu '1300'
# set interfaces bonding bond1 description 'LAG - 1'
# set interfaces ethernet eth0 address 'dhcp'
# set interfaces ethernet eth0 address 'dhcpv6'
# set interfaces ethernet eth0 description 'Outbound Interface for this appliance'
# set interfaces ethernet eth0 duplex 'auto'
# set interfaces ethernet eth0 hw-id '08:00:27:30:f0:22'
# set interfaces ethernet eth0 smp-affinity 'auto'
# set interfaces ethernet eth0 speed 'auto'
# set interfaces ethernet eth1 description 'Configured by Ansible Network'
# set interfaces ethernet eth1 duplex 'full'
# set interfaces ethernet eth1 hw-id '08:00:27:ea:0f:b9'
# set interfaces ethernet eth1 smp-affinity 'auto'
# set interfaces ethernet eth1 speed '100'
# set interfaces ethernet eth2 description 'Configured by Ansible'
# set interfaces ethernet eth2 disable
# set interfaces ethernet eth2 duplex 'full'
# set interfaces ethernet eth2 hw-id '08:00:27:c2:98:23'
# set interfaces ethernet eth2 mtu '600'
# set interfaces ethernet eth2 smp-affinity 'auto'
# set interfaces ethernet eth2 speed '100'
# set interfaces ethernet eth3 description 'Configured by Ansible Network'
# set interfaces ethernet eth3 duplex 'full'
# set interfaces ethernet eth3 hw-id '08:00:27:43:70:8c'
# set interfaces ethernet eth3 speed '100'
# set interfaces loopback lo
#
#
- name: Delete attributes of given interfaces (Note - This won't delete the interfaces
themselves)
vyos.vyos.vyos_interfaces:
config:
- name: bond1
- name: eth1
- name: eth2
- name: eth3
state: deleted
#
#
# ------------------------
# Module Execution Results
# ------------------------
#
# "before": [
# {
# "enabled": true,
# "mtu": 1300,
# "name": "bond0"
# },
# {
# "description": "LAG - 1",
# "enabled": true,
# "name": "bond1"
# },
# {
# "enabled": true,
# "name": "lo"
# },
# {
# "description": "Configured by Ansible Network",
# "duplex": "full",
# "enabled": true,
# "name": "eth3",
# "speed": "100"
# },
# {
# "description": "Configured by Ansible",
# "duplex": "full",
# "enabled": false,
# "mtu": 600,
# "name": "eth2",
# "speed": "100"
# },
# {
# "description": "Configured by Ansible Network",
# "duplex": "full",
# "enabled": true,
# "name": "eth1",
# "speed": "100"
# },
# {
# "description": "Outbound Interface for this appliance",
# "duplex": "auto",
# "enabled": true,
# "name": "eth0",
# "speed": "auto"
# }
# ]
#
# "commands": [
# "delete interfaces bonding bond1 description",
# "delete interfaces ethernet eth1 speed",
# "delete interfaces ethernet eth1 duplex",
# "delete interfaces ethernet eth1 description",
# "delete interfaces ethernet eth2 speed",
# "delete interfaces ethernet eth2 disable",
# "delete interfaces ethernet eth2 duplex",
# "delete interfaces ethernet eth2 disable",
# "delete interfaces ethernet eth2 description",
# "delete interfaces ethernet eth2 disable",
# "delete interfaces ethernet eth2 mtu",
# "delete interfaces ethernet eth2 disable",
# "delete interfaces ethernet eth3 speed",
# "delete interfaces ethernet eth3 duplex",
# "delete interfaces ethernet eth3 description"
# ]
#
# "after": [
# {
# "enabled": true,
# "mtu": 1300,
# "name": "bond0"
# },
# {
# "enabled": true,
# "name": "bond1"
# },
# {
# "enabled": true,
# "name": "lo"
# },
# {
# "enabled": true,
# "name": "eth3"
# },
# {
# "enabled": true,
# "name": "eth2"
# },
# {
# "enabled": true,
# "name": "eth1"
# },
# {
# "description": "Outbound Interface for this appliance",
# "duplex": "auto",
# "enabled": true,
# "name": "eth0",
# "speed": "auto"
# }
# ]
#
#
# ------------
# After state
# ------------
#
# vyos@vyos:~$ show configuration commands | grep interfaces
# set interfaces bonding bond0 mtu '1300'
# set interfaces bonding bond1
# set interfaces ethernet eth0 address 'dhcp'
# set interfaces ethernet eth0 address 'dhcpv6'
# set interfaces ethernet eth0 description 'Outbound Interface for this appliance'
# set interfaces ethernet eth0 duplex 'auto'
# set interfaces ethernet eth0 hw-id '08:00:27:30:f0:22'
# set interfaces ethernet eth0 smp-affinity 'auto'
# set interfaces ethernet eth0 speed 'auto'
# set interfaces ethernet eth1 hw-id '08:00:27:ea:0f:b9'
# set interfaces ethernet eth1 smp-affinity 'auto'
# set interfaces ethernet eth2 hw-id '08:00:27:c2:98:23'
# set interfaces ethernet eth2 smp-affinity 'auto'
# set interfaces ethernet eth3 hw-id '08:00:27:43:70:8c'
# set interfaces loopback lo
#
#
# Using gathered
#
# Before state:
# -------------
#
# vyos@192# run show configuration commands | grep interfaces
# set interfaces ethernet eth0 address 'dhcp'
# set interfaces ethernet eth0 duplex 'auto'
# set interfaces ethernet eth0 hw-id '08:00:27:50:5e:19'
# set interfaces ethernet eth0 smp_affinity 'auto'
# set interfaces ethernet eth0 speed 'auto'
# set interfaces ethernet eth1 description 'Configured by Ansible'
# set interfaces ethernet eth1 duplex 'auto'
# set interfaces ethernet eth1 mtu '1500'
# set interfaces ethernet eth1 speed 'auto'
# set interfaces ethernet eth1 vif 200 description 'VIF - 200'
# set interfaces ethernet eth2 description 'Configured by Ansible'
# set interfaces ethernet eth2 duplex 'auto'
# set interfaces ethernet eth2 mtu '1500'
# set interfaces ethernet eth2 speed 'auto'
# set interfaces ethernet eth2 vif 200 description 'VIF - 200'
#
- name: Gather listed interfaces with provided configurations
vyos.vyos.vyos_interfaces:
config:
state: gathered
#
#
# -------------------------
# Module Execution Result
# -------------------------
#
# "gathered": [
# {
# "description": "Configured by Ansible",
# "duplex": "auto",
# "enabled": true,
# "mtu": 1500,
# "name": "eth2",
# "speed": "auto",
# "vifs": [
# {
# "description": "VIF - 200",
# "enabled": true,
# "vlan_id": 200
# }
# ]
# },
# {
# "description": "Configured by Ansible",
# "duplex": "auto",
# "enabled": true,
# "mtu": 1500,
# "name": "eth1",
# "speed": "auto",
# "vifs": [
# {
# "description": "VIF - 200",
# "enabled": true,
# "vlan_id": 200
# }
# ]
# },
# {
# "duplex": "auto",
# "enabled": true,
# "name": "eth0",
# "speed": "auto"
# }
# ]
#
#
# After state:
# -------------
#
# vyos@192# run show configuration commands | grep interfaces
# set interfaces ethernet eth0 address 'dhcp'
# set interfaces ethernet eth0 duplex 'auto'
# set interfaces ethernet eth0 hw-id '08:00:27:50:5e:19'
# set interfaces ethernet eth0 smp_affinity 'auto'
# set interfaces ethernet eth0 speed 'auto'
# set interfaces ethernet eth1 description 'Configured by Ansible'
# set interfaces ethernet eth1 duplex 'auto'
# set interfaces ethernet eth1 mtu '1500'
# set interfaces ethernet eth1 speed 'auto'
# set interfaces ethernet eth1 vif 200 description 'VIF - 200'
# set interfaces ethernet eth2 description 'Configured by Ansible'
# set interfaces ethernet eth2 duplex 'auto'
# set interfaces ethernet eth2 mtu '1500'
# set interfaces ethernet eth2 speed 'auto'
# set interfaces ethernet eth2 vif 200 description 'VIF - 200'
# Using rendered
#
#
- name: Render the commands for provided configuration
vyos.vyos.vyos_interfaces:
config:
- name: eth0
enabled: true
duplex: auto
speed: auto
- name: eth1
description: Configured by Ansible - Interface 1
mtu: 1500
speed: auto
duplex: auto
enabled: true
vifs:
- vlan_id: 100
description: Eth1 - VIF 100
mtu: 400
enabled: true
- vlan_id: 101
description: Eth1 - VIF 101
enabled: true
- name: eth2
description: Configured by Ansible - Interface 2 (ADMIN DOWN)
mtu: 600
enabled: false
state: rendered
#
#
# -------------------------
# Module Execution Result
# -------------------------
#
#
# "rendered": [
# "set interfaces ethernet eth0 duplex 'auto'",
# "set interfaces ethernet eth0 speed 'auto'",
# "delete interfaces ethernet eth0 disable",
# "set interfaces ethernet eth1 duplex 'auto'",
# "delete interfaces ethernet eth1 disable",
# "set interfaces ethernet eth1 speed 'auto'",
# "set interfaces ethernet eth1 description 'Configured by Ansible - Interface 1'",
# "set interfaces ethernet eth1 mtu '1500'",
# "set interfaces ethernet eth1 vif 100 description 'Eth1 - VIF 100'",
# "set interfaces ethernet eth1 vif 100 mtu '400'",
# "set interfaces ethernet eth1 vif 101 description 'Eth1 - VIF 101'",
# "set interfaces ethernet eth2 disable",
# "set interfaces ethernet eth2 description 'Configured by Ansible - Interface 2 (ADMIN DOWN)'",
# "set interfaces ethernet eth2 mtu '600'"
# ]
# Using parsed
#
#
- name: Parse the configuration.
vyos.vyos.vyos_interfaces:
running_config:
"set interfaces ethernet eth0 address 'dhcp'
set interfaces ethernet eth0 duplex 'auto'
set interfaces ethernet eth0 hw-id '08:00:27:50:5e:19'
set interfaces ethernet eth0 smp_affinity 'auto'
set interfaces ethernet eth0 speed 'auto'
set interfaces ethernet eth1 description 'Configured by Ansible'
set interfaces ethernet eth1 duplex 'auto'
set interfaces ethernet eth1 mtu '1500'
set interfaces ethernet eth1 speed 'auto'
set interfaces ethernet eth1 vif 200 description 'VIF - 200'
set interfaces ethernet eth2 description 'Configured by Ansible'
set interfaces ethernet eth2 duplex 'auto'
set interfaces ethernet eth2 mtu '1500'
set interfaces ethernet eth2 speed 'auto'
set interfaces ethernet eth2 vif 200 description 'VIF - 200'"
state: parsed
#
#
# -------------------------
# Module Execution Result
# -------------------------
#
#
# "parsed": [
# {
# "description": "Configured by Ansible",
# "duplex": "auto",
# "enabled": true,
# "mtu": 1500,
# "name": "eth2",
# "speed": "auto",
# "vifs": [
# {
# "description": "VIF - 200",
# "enabled": true,
# "vlan_id": 200
# }
# ]
# },
# {
# "description": "Configured by Ansible",
# "duplex": "auto",
# "enabled": true,
# "mtu": 1500,
# "name": "eth1",
# "speed": "auto",
# "vifs": [
# {
# "description": "VIF - 200",
# "enabled": true,
# "vlan_id": 200
# }
# ]
# },
# {
# "duplex": "auto",
# "enabled": true,
# "name": "eth0",
# "speed": "auto"
# }
# ]
"""
RETURN = """
before:
description: The configuration as structured data prior to module invocation.
returned: always
sample: >
The configuration returned will always be in the same format
of the parameters above.
type: list
after:
description: The configuration as structured data after module completion.
returned: when changed
sample: >
The configuration returned will always be in the same format
of the parameters above.
type: list
commands:
description: The set of commands pushed to the remote device.
returned: always
type: list
sample:
- 'set interfaces ethernet eth1 mtu 1200'
- 'set interfaces ethernet eth2 vif 100 description VIF 100'
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.argspec.interfaces.interfaces import (
InterfacesArgs,
)
from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.config.interfaces.interfaces import (
Interfaces,
)
def main():
    """Entry point for module execution.

    Builds the AnsibleModule using the interfaces argument spec and
    delegates the actual work to the Interfaces config class.

    :returns: the result from module invocation
    """
    # States that act on a desired configuration require the ``config``
    # option; ``parsed`` instead consumes device output via ``running_config``.
    required_if = [
        ("state", state, ("config",))
        for state in ("merged", "replaced", "rendered", "overridden")
    ]
    required_if.append(("state", "parsed", ("running_config",)))
    module = AnsibleModule(
        argument_spec=InterfacesArgs.argument_spec,
        required_if=required_if,
        supports_check_mode=True,
        # ``config`` and ``running_config`` are alternative inputs.
        mutually_exclusive=[("config", "running_config")],
    )
    module.exit_json(**Interfaces(module).execute_module())
# Run the module only when executed directly (not when imported).
if __name__ == "__main__":
    main()
| 30.584141 | 129 | 0.569758 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#############################################
# WARNING #
#############################################
#
# This file is auto generated by the resource
# module builder playbook.
#
# Do not edit this file manually.
#
# Changes to this file will be overwritten
# by the resource module builder.
#
# Changes should be made in the model used to
# generate this file or in the resource module
# builder template.
#
#############################################
"""
The module file for vyos_interfaces
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
module: vyos_interfaces
short_description: Interfaces resource module
description:
- This module manages the interface attributes on VyOS network devices.
- This module supports managing base attributes of Ethernet, Bonding, VXLAN, Loopback
and Virtual Tunnel Interfaces.
version_added: 1.0.0
notes:
- Tested against VyOS 1.1.8 (helium).
- This module works with connection C(network_cli). See L(the VyOS OS Platform Options,../network/user_guide/platform_vyos.html).
author:
- Nilashish Chakraborty (@nilashishc)
- Rohit Thakur (@rohitthakur2590)
options:
config:
description: The provided interfaces configuration.
type: list
elements: dict
suboptions:
name:
description:
- Full name of the interface, e.g. eth0, eth1, bond0, vti1, vxlan2.
type: str
required: true
description:
description:
- Interface description.
type: str
duplex:
description:
- Interface duplex mode.
- Applicable for Ethernet interfaces only.
choices:
- full
- half
- auto
type: str
enabled:
default: true
description:
- Administrative state of the interface.
- Set the value to C(true) to administratively enable the interface or C(false)
to disable it.
type: bool
mtu:
description:
- MTU for a specific interface. Refer to vendor documentation for valid values.
- Applicable for Ethernet, Bonding, VXLAN and Virtual Tunnel interfaces.
type: int
speed:
description:
- Interface link speed.
- Applicable for Ethernet interfaces only.
type: str
choices:
- auto
- '10'
- '100'
- '1000'
- '2500'
- '10000'
vifs:
description:
- Virtual sub-interfaces related configuration.
- 802.1Q VLAN interfaces are represented as virtual sub-interfaces in VyOS.
type: list
elements: dict
suboptions:
vlan_id:
description:
- Identifier for the virtual sub-interface.
type: int
description:
description:
- Virtual sub-interface description.
type: str
enabled:
description:
- Administrative state of the virtual sub-interface.
- Set the value to C(true) to administratively enable the interface or
C(false) to disable it.
type: bool
default: true
mtu:
description:
- MTU for the virtual sub-interface.
- Refer to vendor documentation for valid values.
type: int
running_config:
description:
- This option is used only with state I(parsed).
- The value of this option should be the output received from the VyOS device
by executing the command B(show configuration commands | grep interfaces).
- The state I(parsed) reads the configuration from C(running_config) option and
transforms it into Ansible structured data as per the resource module's argspec
and the value is then returned in the I(parsed) key within the result.
type: str
state:
description:
- The state of the configuration after module completion.
type: str
choices:
- merged
- replaced
- overridden
- deleted
- rendered
- gathered
- parsed
default: merged
"""
EXAMPLES = """
# Using merged
#
# -------------
# Before state:
# -------------
#
# vyos@vyos:~$ show configuration commands | grep interfaces
# set interfaces ethernet eth0 address 'dhcp'
# set interfaces ethernet eth0 address 'dhcpv6'
# set interfaces ethernet eth0 duplex 'auto'
# set interfaces ethernet eth0 hw-id '08:00:27:30:f0:22'
# set interfaces ethernet eth0 smp-affinity 'auto'
# set interfaces ethernet eth0 speed 'auto'
# set interfaces ethernet eth1 hw-id '08:00:27:ea:0f:b9'
# set interfaces ethernet eth1 smp-affinity 'auto'
# set interfaces ethernet eth2 hw-id '08:00:27:c2:98:23'
# set interfaces ethernet eth2 smp-affinity 'auto'
# set interfaces ethernet eth3 hw-id '08:00:27:43:70:8c'
# set interfaces loopback lo
- name: Merge provided configuration with device configuration
vyos.vyos.vyos_interfaces:
config:
- name: eth2
description: Configured by Ansible
enabled: true
vifs:
- vlan_id: 200
description: VIF 200 - ETH2
- name: eth3
description: Configured by Ansible
mtu: 1500
- name: bond1
description: Bond - 1
mtu: 1200
- name: vti2
description: VTI - 2
enabled: false
state: merged
#
#
# -------------------------
# Module Execution Result
# -------------------------
#
# "before": [
# {
# "enabled": true,
# "name": "lo"
# },
# {
# "enabled": true,
# "name": "eth3"
# },
# {
# "enabled": true,
# "name": "eth2"
# },
# {
# "enabled": true,
# "name": "eth1"
# },
# {
# "duplex": "auto",
# "enabled": true,
# "name": "eth0",
# "speed": "auto"
# }
# ]
#
# "commands": [
# "set interfaces ethernet eth2 description 'Configured by Ansible'",
# "set interfaces ethernet eth2 vif 200",
# "set interfaces ethernet eth2 vif 200 description 'VIF 200 - ETH2'",
# "set interfaces ethernet eth3 description 'Configured by Ansible'",
# "set interfaces ethernet eth3 mtu '1500'",
# "set interfaces bonding bond1",
# "set interfaces bonding bond1 description 'Bond - 1'",
# "set interfaces bonding bond1 mtu '1200'",
# "set interfaces vti vti2",
# "set interfaces vti vti2 description 'VTI - 2'",
# "set interfaces vti vti2 disable"
# ]
#
# "after": [
# {
# "description": "Bond - 1",
# "enabled": true,
# "mtu": 1200,
# "name": "bond1"
# },
# {
# "enabled": true,
# "name": "lo"
# },
# {
# "description": "VTI - 2",
# "enabled": false,
# "name": "vti2"
# },
# {
# "description": "Configured by Ansible",
# "enabled": true,
# "mtu": 1500,
# "name": "eth3"
# },
# {
# "description": "Configured by Ansible",
# "enabled": true,
# "name": "eth2",
# "vifs": [
# {
# "description": "VIF 200 - ETH2",
# "enabled": true,
# "vlan_id": "200"
# }
# ]
# },
# {
# "enabled": true,
# "name": "eth1"
# },
# {
# "duplex": "auto",
# "enabled": true,
# "name": "eth0",
# "speed": "auto"
# }
# ]
#
#
# -------------
# After state:
# -------------
#
# vyos@vyos:~$ show configuration commands | grep interfaces
# set interfaces bonding bond1 description 'Bond - 1'
# set interfaces bonding bond1 mtu '1200'
# set interfaces ethernet eth0 address 'dhcp'
# set interfaces ethernet eth0 address 'dhcpv6'
# set interfaces ethernet eth0 duplex 'auto'
# set interfaces ethernet eth0 hw-id '08:00:27:30:f0:22'
# set interfaces ethernet eth0 smp-affinity 'auto'
# set interfaces ethernet eth0 speed 'auto'
# set interfaces ethernet eth1 hw-id '08:00:27:ea:0f:b9'
# set interfaces ethernet eth1 smp-affinity 'auto'
# set interfaces ethernet eth2 description 'Configured by Ansible'
# set interfaces ethernet eth2 hw-id '08:00:27:c2:98:23'
# set interfaces ethernet eth2 smp-affinity 'auto'
# set interfaces ethernet eth2 vif 200 description 'VIF 200 - ETH2'
# set interfaces ethernet eth3 description 'Configured by Ansible'
# set interfaces ethernet eth3 hw-id '08:00:27:43:70:8c'
# set interfaces ethernet eth3 mtu '1500'
# set interfaces loopback lo
# set interfaces vti vti2 description 'VTI - 2'
# set interfaces vti vti2 disable
#
# Using replaced
#
# -------------
# Before state:
# -------------
#
# vyos:~$ show configuration commands | grep eth
# set interfaces bonding bond1 description 'Bond - 1'
# set interfaces bonding bond1 mtu '1400'
# set interfaces ethernet eth0 address 'dhcp'
# set interfaces ethernet eth0 description 'Management Interface for the Appliance'
# set interfaces ethernet eth0 duplex 'auto'
# set interfaces ethernet eth0 hw-id '08:00:27:f3:6c:b5'
# set interfaces ethernet eth0 smp_affinity 'auto'
# set interfaces ethernet eth0 speed 'auto'
# set interfaces ethernet eth1 description 'Configured by Ansible Eng Team'
# set interfaces ethernet eth1 duplex 'full'
# set interfaces ethernet eth1 hw-id '08:00:27:ad:ef:65'
# set interfaces ethernet eth1 smp_affinity 'auto'
# set interfaces ethernet eth1 speed '100'
# set interfaces ethernet eth2 description 'Configured by Ansible'
# set interfaces ethernet eth2 duplex 'full'
# set interfaces ethernet eth2 hw-id '08:00:27:ab:4e:79'
# set interfaces ethernet eth2 mtu '500'
# set interfaces ethernet eth2 smp_affinity 'auto'
# set interfaces ethernet eth2 speed '100'
# set interfaces ethernet eth2 vif 200 description 'Configured by Ansible'
# set interfaces ethernet eth3 description 'Configured by Ansible'
# set interfaces ethernet eth3 duplex 'full'
# set interfaces ethernet eth3 hw-id '08:00:27:17:3c:85'
# set interfaces ethernet eth3 mtu '1500'
# set interfaces ethernet eth3 smp_affinity 'auto'
# set interfaces ethernet eth3 speed '100'
# set interfaces loopback lo
#
#
- name: Replace device configurations of listed interfaces with provided configurations
vyos.vyos.vyos_interfaces:
config:
- name: eth2
description: Replaced by Ansible
- name: eth3
description: Replaced by Ansible
- name: eth1
description: Replaced by Ansible
state: replaced
#
#
# -----------------------
# Module Execution Result
# -----------------------
#
# "before": [
# {
# "description": "Bond - 1",
# "enabled": true,
# "mtu": 1400,
# "name": "bond1"
# },
# {
# "enabled": true,
# "name": "lo"
# },
# {
# "description": "Configured by Ansible",
# "duplex": "full",
# "enabled": true,
# "mtu": 1500,
# "name": "eth3",
# "speed": "100"
# },
# {
# "description": "Configured by Ansible",
# "duplex": "full",
# "enabled": true,
# "mtu": 500,
# "name": "eth2",
# "speed": "100",
# "vifs": [
# {
# "description": "VIF 200 - ETH2",
# "enabled": true,
# "vlan_id": "200"
# }
# ]
# },
# {
# "description": "Configured by Ansible Eng Team",
# "duplex": "full",
# "enabled": true,
# "name": "eth1",
# "speed": "100"
# },
# {
# "description": "Management Interface for the Appliance",
# "duplex": "auto",
# "enabled": true,
# "name": "eth0",
# "speed": "auto"
# }
# ]
#
# "commands": [
# "delete interfaces ethernet eth2 speed",
# "delete interfaces ethernet eth2 duplex",
# "delete interfaces ethernet eth2 mtu",
# "delete interfaces ethernet eth2 vif 200 description",
# "set interfaces ethernet eth2 description 'Replaced by Ansible'",
# "delete interfaces ethernet eth3 speed",
# "delete interfaces ethernet eth3 duplex",
# "delete interfaces ethernet eth3 mtu",
# "set interfaces ethernet eth3 description 'Replaced by Ansible'",
# "delete interfaces ethernet eth1 speed",
# "delete interfaces ethernet eth1 duplex",
# "set interfaces ethernet eth1 description 'Replaced by Ansible'"
# ]
#
# "after": [
# {
# "description": "Bond - 1",
# "enabled": true,
# "mtu": 1400,
# "name": "bond1"
# },
# {
# "enabled": true,
# "name": "lo"
# },
# {
# "description": "Replaced by Ansible",
# "enabled": true,
# "name": "eth3"
# },
# {
# "description": "Replaced by Ansible",
# "enabled": true,
# "name": "eth2",
# "vifs": [
# {
# "enabled": true,
# "vlan_id": "200"
# }
# ]
# },
# {
# "description": "Replaced by Ansible",
# "enabled": true,
# "name": "eth1"
# },
# {
# "description": "Management Interface for the Appliance",
# "duplex": "auto",
# "enabled": true,
# "name": "eth0",
# "speed": "auto"
# }
# ]
#
#
# -------------
# After state:
# -------------
#
# vyos@vyos:~$ show configuration commands | grep interfaces
# set interfaces bonding bond1 description 'Bond - 1'
# set interfaces bonding bond1 mtu '1400'
# set interfaces ethernet eth0 address 'dhcp'
# set interfaces ethernet eth0 address 'dhcpv6'
# set interfaces ethernet eth0 description 'Management Interface for the Appliance'
# set interfaces ethernet eth0 duplex 'auto'
# set interfaces ethernet eth0 hw-id '08:00:27:30:f0:22'
# set interfaces ethernet eth0 smp-affinity 'auto'
# set interfaces ethernet eth0 speed 'auto'
# set interfaces ethernet eth1 description 'Replaced by Ansible'
# set interfaces ethernet eth1 hw-id '08:00:27:ea:0f:b9'
# set interfaces ethernet eth1 smp-affinity 'auto'
# set interfaces ethernet eth2 description 'Replaced by Ansible'
# set interfaces ethernet eth2 hw-id '08:00:27:c2:98:23'
# set interfaces ethernet eth2 smp-affinity 'auto'
# set interfaces ethernet eth2 vif 200
# set interfaces ethernet eth3 description 'Replaced by Ansible'
# set interfaces ethernet eth3 hw-id '08:00:27:43:70:8c'
# set interfaces loopback lo
#
#
# Using overridden
#
#
# --------------
# Before state
# --------------
#
# vyos@vyos:~$ show configuration commands | grep interfaces
# set interfaces ethernet eth0 address 'dhcp'
# set interfaces ethernet eth0 address 'dhcpv6'
# set interfaces ethernet eth0 description 'Ethernet Interface - 0'
# set interfaces ethernet eth0 duplex 'auto'
# set interfaces ethernet eth0 hw-id '08:00:27:30:f0:22'
# set interfaces ethernet eth0 mtu '1200'
# set interfaces ethernet eth0 smp-affinity 'auto'
# set interfaces ethernet eth0 speed 'auto'
# set interfaces ethernet eth1 description 'Configured by Ansible Eng Team'
# set interfaces ethernet eth1 hw-id '08:00:27:ea:0f:b9'
# set interfaces ethernet eth1 mtu '100'
# set interfaces ethernet eth1 smp-affinity 'auto'
# set interfaces ethernet eth1 vif 100 description 'VIF 100 - ETH1'
# set interfaces ethernet eth1 vif 100 disable
# set interfaces ethernet eth2 description 'Configured by Ansible Team (Admin Down)'
# set interfaces ethernet eth2 disable
# set interfaces ethernet eth2 hw-id '08:00:27:c2:98:23'
# set interfaces ethernet eth2 mtu '600'
# set interfaces ethernet eth2 smp-affinity 'auto'
# set interfaces ethernet eth3 description 'Configured by Ansible Network'
# set interfaces ethernet eth3 hw-id '08:00:27:43:70:8c'
# set interfaces loopback lo
# set interfaces vti vti1 description 'Virtual Tunnel Interface - 1'
# set interfaces vti vti1 mtu '68'
#
#
- name: Overrides all device configuration with provided configuration
vyos.vyos.vyos_interfaces:
config:
- name: eth0
description: Outbound Interface For The Appliance
speed: auto
duplex: auto
- name: eth2
speed: auto
duplex: auto
- name: eth3
mtu: 1200
state: overridden
#
#
# ------------------------
# Module Execution Result
# ------------------------
#
# "before": [
# {
# "enabled": true,
# "name": "lo"
# },
# {
# "description": "Virtual Tunnel Interface - 1",
# "enabled": true,
# "mtu": 68,
# "name": "vti1"
# },
# {
# "description": "Configured by Ansible Network",
# "enabled": true,
# "name": "eth3"
# },
# {
# "description": "Configured by Ansible Team (Admin Down)",
# "enabled": false,
# "mtu": 600,
# "name": "eth2"
# },
# {
# "description": "Configured by Ansible Eng Team",
# "enabled": true,
# "mtu": 100,
# "name": "eth1",
# "vifs": [
# {
# "description": "VIF 100 - ETH1",
# "enabled": false,
# "vlan_id": "100"
# }
# ]
# },
# {
# "description": "Ethernet Interface - 0",
# "duplex": "auto",
# "enabled": true,
# "mtu": 1200,
# "name": "eth0",
# "speed": "auto"
# }
# ]
#
# "commands": [
# "delete interfaces vti vti1 description",
# "delete interfaces vti vti1 mtu",
# "delete interfaces ethernet eth1 description",
# "delete interfaces ethernet eth1 mtu",
# "delete interfaces ethernet eth1 vif 100 description",
# "delete interfaces ethernet eth1 vif 100 disable",
# "delete interfaces ethernet eth0 mtu",
# "set interfaces ethernet eth0 description 'Outbound Interface For The Appliance'",
# "delete interfaces ethernet eth2 description",
# "delete interfaces ethernet eth2 mtu",
# "set interfaces ethernet eth2 duplex 'auto'",
# "delete interfaces ethernet eth2 disable",
# "set interfaces ethernet eth2 speed 'auto'",
# "delete interfaces ethernet eth3 description",
# "set interfaces ethernet eth3 mtu '1200'"
# ],
#
# "after": [
# {
# "enabled": true,
# "name": "lo"
# },
# {
# "enabled": true,
# "name": "vti1"
# },
# {
# "enabled": true,
# "mtu": 1200,
# "name": "eth3"
# },
# {
# "duplex": "auto",
# "enabled": true,
# "name": "eth2",
# "speed": "auto"
# },
# {
# "enabled": true,
# "name": "eth1",
# "vifs": [
# {
# "enabled": true,
# "vlan_id": "100"
# }
# ]
# },
# {
# "description": "Outbound Interface For The Appliance",
# "duplex": "auto",
# "enabled": true,
# "name": "eth0",
# "speed": "auto"
# }
# ]
#
#
# ------------
# After state
# ------------
#
# vyos@vyos:~$ show configuration commands | grep interfaces
# set interfaces ethernet eth0 address 'dhcp'
# set interfaces ethernet eth0 address 'dhcpv6'
# set interfaces ethernet eth0 description 'Outbound Interface For The Appliance'
# set interfaces ethernet eth0 duplex 'auto'
# set interfaces ethernet eth0 hw-id '08:00:27:30:f0:22'
# set interfaces ethernet eth0 smp-affinity 'auto'
# set interfaces ethernet eth0 speed 'auto'
# set interfaces ethernet eth1 hw-id '08:00:27:ea:0f:b9'
# set interfaces ethernet eth1 smp-affinity 'auto'
# set interfaces ethernet eth1 vif 100
# set interfaces ethernet eth2 duplex 'auto'
# set interfaces ethernet eth2 hw-id '08:00:27:c2:98:23'
# set interfaces ethernet eth2 smp-affinity 'auto'
# set interfaces ethernet eth2 speed 'auto'
# set interfaces ethernet eth3 hw-id '08:00:27:43:70:8c'
# set interfaces ethernet eth3 mtu '1200'
# set interfaces loopback lo
# set interfaces vti vti1
#
#
# Using deleted
#
#
# -------------
# Before state
# -------------
#
# vyos@vyos:~$ show configuration commands | grep interfaces
# set interfaces bonding bond0 mtu '1300'
# set interfaces bonding bond1 description 'LAG - 1'
# set interfaces ethernet eth0 address 'dhcp'
# set interfaces ethernet eth0 address 'dhcpv6'
# set interfaces ethernet eth0 description 'Outbound Interface for this appliance'
# set interfaces ethernet eth0 duplex 'auto'
# set interfaces ethernet eth0 hw-id '08:00:27:30:f0:22'
# set interfaces ethernet eth0 smp-affinity 'auto'
# set interfaces ethernet eth0 speed 'auto'
# set interfaces ethernet eth1 description 'Configured by Ansible Network'
# set interfaces ethernet eth1 duplex 'full'
# set interfaces ethernet eth1 hw-id '08:00:27:ea:0f:b9'
# set interfaces ethernet eth1 smp-affinity 'auto'
# set interfaces ethernet eth1 speed '100'
# set interfaces ethernet eth2 description 'Configured by Ansible'
# set interfaces ethernet eth2 disable
# set interfaces ethernet eth2 duplex 'full'
# set interfaces ethernet eth2 hw-id '08:00:27:c2:98:23'
# set interfaces ethernet eth2 mtu '600'
# set interfaces ethernet eth2 smp-affinity 'auto'
# set interfaces ethernet eth2 speed '100'
# set interfaces ethernet eth3 description 'Configured by Ansible Network'
# set interfaces ethernet eth3 duplex 'full'
# set interfaces ethernet eth3 hw-id '08:00:27:43:70:8c'
# set interfaces ethernet eth3 speed '100'
# set interfaces loopback lo
#
#
- name: Delete attributes of given interfaces (Note - This won't delete the interfaces
themselves)
vyos.vyos.vyos_interfaces:
config:
- name: bond1
- name: eth1
- name: eth2
- name: eth3
state: deleted
#
#
# ------------------------
# Module Execution Results
# ------------------------
#
# "before": [
# {
# "enabled": true,
# "mtu": 1300,
# "name": "bond0"
# },
# {
# "description": "LAG - 1",
# "enabled": true,
# "name": "bond1"
# },
# {
# "enabled": true,
# "name": "lo"
# },
# {
# "description": "Configured by Ansible Network",
# "duplex": "full",
# "enabled": true,
# "name": "eth3",
# "speed": "100"
# },
# {
# "description": "Configured by Ansible",
# "duplex": "full",
# "enabled": false,
# "mtu": 600,
# "name": "eth2",
# "speed": "100"
# },
# {
# "description": "Configured by Ansible Network",
# "duplex": "full",
# "enabled": true,
# "name": "eth1",
# "speed": "100"
# },
# {
# "description": "Outbound Interface for this appliance",
# "duplex": "auto",
# "enabled": true,
# "name": "eth0",
# "speed": "auto"
# }
# ]
#
# "commands": [
# "delete interfaces bonding bond1 description",
# "delete interfaces ethernet eth1 speed",
# "delete interfaces ethernet eth1 duplex",
# "delete interfaces ethernet eth1 description",
# "delete interfaces ethernet eth2 speed",
# "delete interfaces ethernet eth2 disable",
# "delete interfaces ethernet eth2 duplex",
# "delete interfaces ethernet eth2 disable",
# "delete interfaces ethernet eth2 description",
# "delete interfaces ethernet eth2 disable",
# "delete interfaces ethernet eth2 mtu",
# "delete interfaces ethernet eth2 disable",
# "delete interfaces ethernet eth3 speed",
# "delete interfaces ethernet eth3 duplex",
# "delete interfaces ethernet eth3 description"
# ]
#
# "after": [
# {
# "enabled": true,
# "mtu": 1300,
# "name": "bond0"
# },
# {
# "enabled": true,
# "name": "bond1"
# },
# {
# "enabled": true,
# "name": "lo"
# },
# {
# "enabled": true,
# "name": "eth3"
# },
# {
# "enabled": true,
# "name": "eth2"
# },
# {
# "enabled": true,
# "name": "eth1"
# },
# {
# "description": "Outbound Interface for this appliance",
# "duplex": "auto",
# "enabled": true,
# "name": "eth0",
# "speed": "auto"
# }
# ]
#
#
# ------------
# After state
# ------------
#
# vyos@vyos:~$ show configuration commands | grep interfaces
# set interfaces bonding bond0 mtu '1300'
# set interfaces bonding bond1
# set interfaces ethernet eth0 address 'dhcp'
# set interfaces ethernet eth0 address 'dhcpv6'
# set interfaces ethernet eth0 description 'Outbound Interface for this appliance'
# set interfaces ethernet eth0 duplex 'auto'
# set interfaces ethernet eth0 hw-id '08:00:27:30:f0:22'
# set interfaces ethernet eth0 smp-affinity 'auto'
# set interfaces ethernet eth0 speed 'auto'
# set interfaces ethernet eth1 hw-id '08:00:27:ea:0f:b9'
# set interfaces ethernet eth1 smp-affinity 'auto'
# set interfaces ethernet eth2 hw-id '08:00:27:c2:98:23'
# set interfaces ethernet eth2 smp-affinity 'auto'
# set interfaces ethernet eth3 hw-id '08:00:27:43:70:8c'
# set interfaces loopback lo
#
#
# Using gathered
#
# Before state:
# -------------
#
# vyos@192# run show configuration commands | grep interfaces
# set interfaces ethernet eth0 address 'dhcp'
# set interfaces ethernet eth0 duplex 'auto'
# set interfaces ethernet eth0 hw-id '08:00:27:50:5e:19'
# set interfaces ethernet eth0 smp_affinity 'auto'
# set interfaces ethernet eth0 speed 'auto'
# set interfaces ethernet eth1 description 'Configured by Ansible'
# set interfaces ethernet eth1 duplex 'auto'
# set interfaces ethernet eth1 mtu '1500'
# set interfaces ethernet eth1 speed 'auto'
# set interfaces ethernet eth1 vif 200 description 'VIF - 200'
# set interfaces ethernet eth2 description 'Configured by Ansible'
# set interfaces ethernet eth2 duplex 'auto'
# set interfaces ethernet eth2 mtu '1500'
# set interfaces ethernet eth2 speed 'auto'
# set interfaces ethernet eth2 vif 200 description 'VIF - 200'
#
- name: Gather listed interfaces with provided configurations
vyos.vyos.vyos_interfaces:
config:
state: gathered
#
#
# -------------------------
# Module Execution Result
# -------------------------
#
# "gathered": [
# {
# "description": "Configured by Ansible",
# "duplex": "auto",
# "enabled": true,
# "mtu": 1500,
# "name": "eth2",
# "speed": "auto",
# "vifs": [
# {
# "description": "VIF - 200",
# "enabled": true,
# "vlan_id": 200
# }
# ]
# },
# {
# "description": "Configured by Ansible",
# "duplex": "auto",
# "enabled": true,
# "mtu": 1500,
# "name": "eth1",
# "speed": "auto",
# "vifs": [
# {
# "description": "VIF - 200",
# "enabled": true,
# "vlan_id": 200
# }
# ]
# },
# {
# "duplex": "auto",
# "enabled": true,
# "name": "eth0",
# "speed": "auto"
# }
# ]
#
#
# After state:
# -------------
#
# vyos@192# run show configuration commands | grep interfaces
# set interfaces ethernet eth0 address 'dhcp'
# set interfaces ethernet eth0 duplex 'auto'
# set interfaces ethernet eth0 hw-id '08:00:27:50:5e:19'
# set interfaces ethernet eth0 smp_affinity 'auto'
# set interfaces ethernet eth0 speed 'auto'
# set interfaces ethernet eth1 description 'Configured by Ansible'
# set interfaces ethernet eth1 duplex 'auto'
# set interfaces ethernet eth1 mtu '1500'
# set interfaces ethernet eth1 speed 'auto'
# set interfaces ethernet eth1 vif 200 description 'VIF - 200'
# set interfaces ethernet eth2 description 'Configured by Ansible'
# set interfaces ethernet eth2 duplex 'auto'
# set interfaces ethernet eth2 mtu '1500'
# set interfaces ethernet eth2 speed 'auto'
# set interfaces ethernet eth2 vif 200 description 'VIF - 200'
# Using rendered
#
#
- name: Render the commands for provided configuration
vyos.vyos.vyos_interfaces:
config:
- name: eth0
enabled: true
duplex: auto
speed: auto
- name: eth1
description: Configured by Ansible - Interface 1
mtu: 1500
speed: auto
duplex: auto
enabled: true
vifs:
- vlan_id: 100
description: Eth1 - VIF 100
mtu: 400
enabled: true
- vlan_id: 101
description: Eth1 - VIF 101
enabled: true
- name: eth2
description: Configured by Ansible - Interface 2 (ADMIN DOWN)
mtu: 600
enabled: false
state: rendered
#
#
# -------------------------
# Module Execution Result
# -------------------------
#
#
# "rendered": [
# "set interfaces ethernet eth0 duplex 'auto'",
# "set interfaces ethernet eth0 speed 'auto'",
# "delete interfaces ethernet eth0 disable",
# "set interfaces ethernet eth1 duplex 'auto'",
# "delete interfaces ethernet eth1 disable",
# "set interfaces ethernet eth1 speed 'auto'",
# "set interfaces ethernet eth1 description 'Configured by Ansible - Interface 1'",
# "set interfaces ethernet eth1 mtu '1500'",
# "set interfaces ethernet eth1 vif 100 description 'Eth1 - VIF 100'",
# "set interfaces ethernet eth1 vif 100 mtu '400'",
# "set interfaces ethernet eth1 vif 101 description 'Eth1 - VIF 101'",
# "set interfaces ethernet eth2 disable",
# "set interfaces ethernet eth2 description 'Configured by Ansible - Interface 2 (ADMIN DOWN)'",
# "set interfaces ethernet eth2 mtu '600'"
# ]
# Using parsed
#
#
- name: Parse the configuration.
vyos.vyos.vyos_interfaces:
running_config:
"set interfaces ethernet eth0 address 'dhcp'
set interfaces ethernet eth0 duplex 'auto'
set interfaces ethernet eth0 hw-id '08:00:27:50:5e:19'
set interfaces ethernet eth0 smp_affinity 'auto'
set interfaces ethernet eth0 speed 'auto'
set interfaces ethernet eth1 description 'Configured by Ansible'
set interfaces ethernet eth1 duplex 'auto'
set interfaces ethernet eth1 mtu '1500'
set interfaces ethernet eth1 speed 'auto'
set interfaces ethernet eth1 vif 200 description 'VIF - 200'
set interfaces ethernet eth2 description 'Configured by Ansible'
set interfaces ethernet eth2 duplex 'auto'
set interfaces ethernet eth2 mtu '1500'
set interfaces ethernet eth2 speed 'auto'
set interfaces ethernet eth2 vif 200 description 'VIF - 200'"
state: parsed
#
#
# -------------------------
# Module Execution Result
# -------------------------
#
#
# "parsed": [
# {
# "description": "Configured by Ansible",
# "duplex": "auto",
# "enabled": true,
# "mtu": 1500,
# "name": "eth2",
# "speed": "auto",
# "vifs": [
# {
# "description": "VIF - 200",
# "enabled": true,
# "vlan_id": 200
# }
# ]
# },
# {
# "description": "Configured by Ansible",
# "duplex": "auto",
# "enabled": true,
# "mtu": 1500,
# "name": "eth1",
# "speed": "auto",
# "vifs": [
# {
# "description": "VIF - 200",
# "enabled": true,
# "vlan_id": 200
# }
# ]
# },
# {
# "duplex": "auto",
# "enabled": true,
# "name": "eth0",
# "speed": "auto"
# }
# ]
"""
RETURN = """
before:
description: The configuration as structured data prior to module invocation.
returned: always
sample: >
The configuration returned will always be in the same format
    as the parameters above.
type: list
after:
description: The configuration as structured data after module completion.
returned: when changed
sample: >
The configuration returned will always be in the same format
    as the parameters above.
type: list
commands:
description: The set of commands pushed to the remote device.
returned: always
type: list
sample:
- 'set interfaces ethernet eth1 mtu 1200'
- 'set interfaces ethernet eth2 vif 100 description VIF 100'
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.argspec.interfaces.interfaces import (
InterfacesArgs,
)
from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.config.interfaces.interfaces import (
Interfaces,
)
def main():
    """Main entry point for module execution.

    :returns: the result from module invocation
    """
    # Every desired-config state needs the ``config`` option; the
    # ``parsed`` state works on raw device output (``running_config``).
    conditional_requirements = [
        ("state", "merged", ("config",)),
        ("state", "replaced", ("config",)),
        ("state", "rendered", ("config",)),
        ("state", "overridden", ("config",)),
        ("state", "parsed", ("running_config",)),
    ]
    # A play may supply either the structured config or the raw device
    # output, never both.
    exclusive_options = [("config", "running_config")]
    ansible_module = AnsibleModule(
        argument_spec=InterfacesArgs.argument_spec,
        required_if=conditional_requirements,
        supports_check_mode=True,
        mutually_exclusive=exclusive_options,
    )
    outcome = Interfaces(ansible_module).execute_module()
    ansible_module.exit_json(**outcome)
# Run the module only when executed directly (not when imported).
if __name__ == "__main__":
    main()
| 0 | 0 | 0 |