Dataset columns:

| Column | Type | Values |
|---|---|---|
| hexsha | string | lengths 40 to 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 4 to 209 |
| max_stars_repo_name | string | lengths 5 to 121 |
| max_stars_repo_head_hexsha | string | lengths 40 to 40 |
| max_stars_repo_licenses | list | lengths 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | lengths 24 to 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | lengths 24 to 24, nullable |
| max_issues_repo_path | string | lengths 4 to 209 |
| max_issues_repo_name | string | lengths 5 to 121 |
| max_issues_repo_head_hexsha | string | lengths 40 to 40 |
| max_issues_repo_licenses | list | lengths 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | lengths 24 to 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | lengths 24 to 24, nullable |
| max_forks_repo_path | string | lengths 4 to 209 |
| max_forks_repo_name | string | lengths 5 to 121 |
| max_forks_repo_head_hexsha | string | lengths 40 to 40 |
| max_forks_repo_licenses | list | lengths 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | lengths 24 to 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | lengths 24 to 24, nullable |
| content | string | lengths 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
1914ea214453a028d5c02ba5c0227b60cbbf920c
| 1,432
|
py
|
Python
|
Scripts4Orthofinder/renaming_cds_headers_TA.py
|
macmanes-lab/GeosmithiaComparativeGenomics
|
61dfa4d50dece25e9420d8b4cf2a7343489056fd
|
[
"CC0-1.0"
] | 1
|
2017-07-10T16:49:45.000Z
|
2017-07-10T16:49:45.000Z
|
Scripts4Orthofinder/renaming_cds_headers_TA.py
|
yuzhenpeng/GeosmithiaComparativeGenomics
|
61dfa4d50dece25e9420d8b4cf2a7343489056fd
|
[
"CC0-1.0"
] | 1
|
2018-09-30T00:17:31.000Z
|
2018-09-30T00:17:31.000Z
|
Scripts4Orthofinder/renaming_cds_headers_TA.py
|
yuzhenpeng/GeosmithiaComparativeGenomics
|
61dfa4d50dece25e9420d8b4cf2a7343489056fd
|
[
"CC0-1.0"
] | 3
|
2016-01-23T13:18:37.000Z
|
2021-06-17T13:48:59.000Z
|
#!/usr/bin/python3
# A program for renaming fasta headers
# USAGE: ./renaming_headers_TA.py --input path_to_input_directory
# Author: Taruna Aggarwal
# Affiliation: University of New Hampshire, Durham, NH, USA
# Date: 1/27/2016
# Purpose is to replace headers with their corresponding file names
# and add consecutive numbers to the new headers
# This script assumes there is a 1:1 relationship between your cds and pep files
# and that your cds files are named in a specific manner - SpeciesInitial_genusName.cds and an example is F_solani.cds
# The script will generate new files in the same directory as itself.
import argparse
import os
parser = argparse.ArgumentParser(description="This script renames files and their headers in a directory.")
parser.add_argument('--input', help="PATH to the directory with input files.", required=True)
args = parser.parse_args()
for file in os.listdir(args.input):
if file.endswith(".cds"):
working_file = open(args.input + '/' + file, "r")
new_file = open(file[:-4] + "_renamed.cds", "w")
print("Renaming {0}".format(file))
counter = 1
for currentLine in working_file:
currentLine = currentLine.rstrip()
if currentLine.startswith(">"):
                new_file.write(">{0}_{1}\n".format(file[:-4], counter))  # FASTA headers must start with ">"
counter += 1
else:
new_file.write("{0}\n".format(currentLine))
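To illustrate the renaming scheme described in the header comments, here is a hypothetical before/after pair (header and sequence text are made up; only header lines are rewritten, sequence lines pass through unchanged):

```text
# F_solani.cds (input)        # F_solani_renamed.cds (output)
>scaffold_12.g45.t1           >F_solani_1
ATGGCTAAA...                  ATGGCTAAA...
>scaffold_12.g46.t1           >F_solani_2
ATGCCGTTT...                  ATGCCGTTT...
```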
avg_line_length: 39.777778 | max_line_length: 118 | alphanum_fraction: 0.683659
567af8aebcf404e04e9d6d97ba0de7fbb63a13db
| 2,292
|
py
|
Python
|
google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py
|
sakagarwal/python-aiplatform
|
62b4a1ea589235910c6e87f027899a29bf1bacb1
|
[
"Apache-2.0"
] | 1
|
2022-03-30T05:23:29.000Z
|
2022-03-30T05:23:29.000Z
|
google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py
|
sakagarwal/python-aiplatform
|
62b4a1ea589235910c6e87f027899a29bf1bacb1
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py
|
sakagarwal/python-aiplatform
|
62b4a1ea589235910c6e87f027899a29bf1bacb1
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1.schema.predict.instance",
manifest={"VideoClassificationPredictionInstance",},
)
class VideoClassificationPredictionInstance(proto.Message):
r"""Prediction input format for Video Classification.
Attributes:
content (str):
The Google Cloud Storage location of the
video on which to perform the prediction.
mime_type (str):
The MIME type of the content of the video.
Only the following are supported: video/mp4
video/avi video/quicktime
time_segment_start (str):
The beginning, inclusive, of the video's time
segment on which to perform the prediction.
Expressed as a number of seconds as measured
from the start of the video, with "s" appended
at the end. Fractions are allowed, up to a
microsecond precision.
time_segment_end (str):
The end, exclusive, of the video's time
segment on which to perform the prediction.
Expressed as a number of seconds as measured
from the start of the video, with "s" appended
at the end. Fractions are allowed, up to a
microsecond precision, and "inf" or "Infinity"
is allowed, which means the end of the video.
"""
content = proto.Field(proto.STRING, number=1,)
mime_type = proto.Field(proto.STRING, number=2,)
time_segment_start = proto.Field(proto.STRING, number=3,)
time_segment_end = proto.Field(proto.STRING, number=4,)
__all__ = tuple(sorted(__protobuf__.manifest))
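A minimal usage sketch of the message above; the Cloud Storage path is a hypothetical placeholder, and the time-segment strings follow the format described in the docstring:

```python
# Hypothetical example instance; the gs:// path is a placeholder.
instance = VideoClassificationPredictionInstance(
    content="gs://example-bucket/videos/clip.mp4",  # Cloud Storage location of the video
    mime_type="video/mp4",                           # video/mp4, video/avi or video/quicktime
    time_segment_start="0s",                         # seconds from the start, "s" appended
    time_segment_end="10.5s",                        # fractions allowed; "inf" = end of video
)
print(type(instance).__name__, instance.time_segment_end)
```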
avg_line_length: 38.2 | max_line_length: 74 | alphanum_fraction: 0.67801
d796592a9dbc404e04f2ab4cdfe7c60bb100e5ad
| 202
|
py
|
Python
|
GeneratorInterface/GenFilters/python/BCToEFilter_cfi.py
|
menglu21/cmssw
|
c3d6cb102c0aaddf652805743370c28044d53da6
|
[
"Apache-2.0"
] | null | null | null |
GeneratorInterface/GenFilters/python/BCToEFilter_cfi.py
|
menglu21/cmssw
|
c3d6cb102c0aaddf652805743370c28044d53da6
|
[
"Apache-2.0"
] | null | null | null |
GeneratorInterface/GenFilters/python/BCToEFilter_cfi.py
|
menglu21/cmssw
|
c3d6cb102c0aaddf652805743370c28044d53da6
|
[
"Apache-2.0"
] | null | null | null |
import FWCore.ParameterSet.Config as cms
bctoefilter = cms.EDFilter("BCToEFilter",
filterAlgoPSet = cms.PSet(
maxAbsEta = cms.double(3.05),
eTThreshold = cms.double(10.0)
)
)
avg_line_length: 18.363636 | max_line_length: 41 | alphanum_fraction: 0.658416
a68806eef7261d87a879fa40b52c88f47973860b
| 2,034
|
py
|
Python
|
Day 8/PROJECT: Caesar Cipher.py
|
Nishi-16-K/100DaysCodeChallenge-Python-
|
96df953bbc60c2bf8802cf31ed6c593469521482
|
[
"MIT"
] | 1
|
2021-08-29T12:44:23.000Z
|
2021-08-29T12:44:23.000Z
|
Day 8/PROJECT: Caesar Cipher.py
|
Nishi-16-K/100DaysofCodeChallenge-Python
|
96df953bbc60c2bf8802cf31ed6c593469521482
|
[
"MIT"
] | null | null | null |
Day 8/PROJECT: Caesar Cipher.py
|
Nishi-16-K/100DaysofCodeChallenge-Python
|
96df953bbc60c2bf8802cf31ed6c593469521482
|
[
"MIT"
] | null | null | null |
import math
alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
def caesar(t_text, shift_amount, direct):
l_text = ""
if direct == "decode":
shift_amount *= -1
    for i in t_text:  # shift each character of the input text (l_text starts empty and collects the output)
if i in alphabet:
position = alphabet.index(i)
new_position = position + shift_amount
l_text += alphabet[new_position]
else:
l_text = l_text+i
print(f"\nThe {direct}d result: {l_text}")
logo = """
,adPPYba, ,adPPYYba, ,adPPYba, ,adPPYba, ,adPPYYba, 8b,dPPYba,
a8" "" "" `Y8 a8P_____88 I8[ "" "" `Y8 88P' "Y8
8b ,adPPPPP88 8PP""""""" `"Y8ba, ,adPPPPP88 88
"8a, ,aa 88, ,88 "8b, ,aa aa ]8I 88, ,88 88
`"Ybbd8"' `"8bbdP"Y8 `"Ybbd8"' `"YbbdP"' `"8bbdP"Y8 88
88 88
"" 88
88
,adPPYba, 88 8b,dPPYba, 88,dPPYba, ,adPPYba, 8b,dPPYba,
a8" "" 88 88P' "8a 88P' "8a a8P_____88 88P' "Y8
8b 88 88 d8 88 88 8PP""""""" 88
"8a, ,aa 88 88b, ,a8" 88 88 "8b, ,aa 88
`"Ybbd8"' 88 88`YbbdP"' 88 88 `"Ybbd8"' 88
88
88
"""
print(logo)
s_continue = True
while s_continue:
direction = input("Type 'encode' to encrypt, type 'decode' to decrypt: ")
text = input("\nType your message: ").lower()
shift = int(input("\nType the shift number: "))
shift = shift % 26
caesar(t_text=text, shift_amount=shift, direct=direction)
result = input("Type 'yes' if you want to go again. Otherwise type 'no'")
if result == "no":
s_continue = False
print("Bye. See You")
avg_line_length: 42.375 | max_line_length: 271 | alphanum_fraction: 0.436087
37407c8b813d73c81e4cc168f0458556bc2b2f9e
| 1,224
|
py
|
Python
|
testapp/testapp/urls.py
|
shizus/django-afip
|
ee2f13923eea1c341d58fe1cd45c8a5736242e22
|
[
"ISC"
] | 1
|
2019-10-23T02:59:16.000Z
|
2019-10-23T02:59:16.000Z
|
testapp/testapp/urls.py
|
shizus/django-afip
|
ee2f13923eea1c341d58fe1cd45c8a5736242e22
|
[
"ISC"
] | null | null | null |
testapp/testapp/urls.py
|
shizus/django-afip
|
ee2f13923eea1c341d58fe1cd45c8a5736242e22
|
[
"ISC"
] | null | null | null |
"""testapp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf import settings
from django.conf.urls import url
from django.contrib import admin
from django.views.static import serve
from django_afip import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(
r'^invoices/pdf/(?P<pk>\d+)$',
views.ReceiptPDFView.as_view(),
name='receipt_displaypdf_view',
),
url(
r'^invoices/pdf/(?P<pk>\d+)$',
views.ReceiptPDFDownloadView.as_view(),
name='receipt_pdf_view',
),
url(
r'^media/(?P<path>.*)$',
serve,
{'document_root': settings.MEDIA_ROOT},
),
]
avg_line_length: 29.853659 | max_line_length: 77 | alphanum_fraction: 0.654412
b8596f10d86a62b0ed3bd16adff1b50b04b21849
| 254
|
py
|
Python
|
nc/lookups.py
|
OpenDataPolicingNC/Traffic-Stops
|
74e0d16ad2ac32addca6f04d34c2ddf36d023990
|
[
"MIT"
] | 25
|
2015-09-12T23:10:52.000Z
|
2021-03-24T08:39:46.000Z
|
nc/lookups.py
|
OpenDataPolicingNC/Traffic-Stops
|
74e0d16ad2ac32addca6f04d34c2ddf36d023990
|
[
"MIT"
] | 159
|
2015-07-01T03:57:23.000Z
|
2021-04-17T21:09:19.000Z
|
nc/lookups.py
|
copelco/NC-Traffic-Stops
|
74e0d16ad2ac32addca6f04d34c2ddf36d023990
|
[
"MIT"
] | 8
|
2015-10-02T16:56:40.000Z
|
2020-10-18T01:16:29.000Z
|
from selectable.base import ModelLookup
from selectable.registry import registry
from nc.models import Agency
class AgencyLookup(ModelLookup):
model = Agency
search_fields = (
'name__icontains',
)
registry.register(AgencyLookup)
avg_line_length: 16.933333 | max_line_length: 40 | alphanum_fraction: 0.748031
fa15c47ab1862a3b40dba9fc0294479fb7df7e8e
| 3,977
|
py
|
Python
|
awx/main/utils/ansible.py
|
Avinesh/awx
|
6310a2edd890d6062a9f6bcdeb2b46c4b876c2bf
|
[
"Apache-2.0"
] | 1
|
2019-10-22T09:55:28.000Z
|
2019-10-22T09:55:28.000Z
|
awx/main/utils/ansible.py
|
Avinesh/awx
|
6310a2edd890d6062a9f6bcdeb2b46c4b876c2bf
|
[
"Apache-2.0"
] | 2
|
2021-01-06T09:40:57.000Z
|
2022-03-02T09:54:55.000Z
|
awx/main/utils/ansible.py
|
Avinesh/awx
|
6310a2edd890d6062a9f6bcdeb2b46c4b876c2bf
|
[
"Apache-2.0"
] | 1
|
2020-01-28T05:34:09.000Z
|
2020-01-28T05:34:09.000Z
|
# Copyright (c) 2017 Ansible by Red Hat
# All Rights Reserved.
# Python
import codecs
import re
import os
import logging
from itertools import islice
from configparser import ConfigParser
# Django
from django.utils.encoding import smart_str
logger = logging.getLogger('awx.main.utils.ansible')
__all__ = ['skip_directory', 'could_be_playbook', 'could_be_inventory']
valid_playbook_re = re.compile(r'^\s*?-?\s*?(?:hosts|include|import_playbook):\s*?.*?$')
valid_inventory_re = re.compile(r'^[a-zA-Z0-9_.=\[\]]')
def skip_directory(relative_directory_path):
path_elements = relative_directory_path.split(os.sep)
# Exclude files in a roles subdirectory.
if 'roles' in path_elements:
return True
# Filter files in a tasks subdirectory.
if 'tasks' in path_elements:
return True
for element in path_elements:
# Do not include dot files or dirs
if element.startswith('.'):
return True
# Exclude anything inside of group or host vars directories
if 'group_vars' in path_elements or 'host_vars' in path_elements:
return True
return False
def could_be_playbook(project_path, dir_path, filename):
if os.path.splitext(filename)[-1] not in ['.yml', '.yaml']:
return None
playbook_path = os.path.join(dir_path, filename)
# Filter files that do not have either hosts or top-level
# includes. Use regex to allow files with invalid YAML to
# show up.
matched = False
try:
for n, line in enumerate(codecs.open(
playbook_path,
'r',
encoding='utf-8',
errors='ignore'
)):
if valid_playbook_re.match(line):
matched = True
break
# Any YAML file can also be encrypted with vault;
# allow these to be used as the main playbook.
elif n == 0 and line.startswith('$ANSIBLE_VAULT;'):
matched = True
break
except IOError:
return None
if not matched:
return None
return os.path.relpath(playbook_path, smart_str(project_path))
def could_be_inventory(project_path, dir_path, filename):
# Decisions based exclusively on filename
inventory_path = os.path.join(dir_path, filename)
inventory_rel_path = os.path.relpath(inventory_path, smart_str(project_path))
suspected_ext = os.path.splitext(filename)[-1]
if filename in ['inventory', 'hosts']:
# Users commonly name their inventory files these names
return inventory_rel_path
elif suspected_ext == '.ini' or os.access(inventory_path, os.X_OK):
# Files with any of these extensions are always included
return inventory_rel_path
elif '.' in suspected_ext:
# If not using those extensions, inventory must have _no_ extension
return None
# Filter files that do not use a character set consistent with
# Ansible inventory mainly
try:
# only read through first 10 lines for performance
with codecs.open(
inventory_path,
'r',
encoding='utf-8',
errors='ignore'
) as inv_file:
for line in islice(inv_file, 10):
if not valid_inventory_re.match(line):
return None
except IOError:
return None
return inventory_rel_path
def read_ansible_config(project_path, variables_of_interest):
fnames = ['/etc/ansible/ansible.cfg']
if project_path:
fnames.insert(0, os.path.join(project_path, 'ansible.cfg'))
values = {}
try:
parser = ConfigParser()
parser.read(fnames)
if 'defaults' in parser:
for var in variables_of_interest:
if var in parser['defaults']:
values[var] = parser['defaults'][var]
except Exception:
logger.exception('Failed to read ansible configuration(s) {}'.format(fnames))
return values
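A small usage sketch of the helpers above; the project path and directory layout are hypothetical placeholders:

```python
# Hypothetical usage; /var/lib/awx/projects/demo is a placeholder project checkout.
import os

project = "/var/lib/awx/projects/demo"
for dir_path, dir_names, file_names in os.walk(project):
    rel_dir = os.path.relpath(dir_path, project)
    if rel_dir != "." and skip_directory(rel_dir):
        continue  # roles/, tasks/, dotted dirs and group_vars/host_vars are skipped
    for name in file_names:
        playbook = could_be_playbook(project, dir_path, name)
        inventory = could_be_inventory(project, dir_path, name)
        if playbook:
            print("playbook: ", playbook)
        elif inventory:
            print("inventory:", inventory)

# Pull selected [defaults] keys from the project's (or /etc/ansible/) ansible.cfg.
print(read_ansible_config(project, ["roles_path", "collections_paths"]))
```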
avg_line_length: 32.867769 | max_line_length: 88 | alphanum_fraction: 0.644456
e99066c8cdf1ab45bfaa4446e1a2efe4691be8dc
| 651
|
py
|
Python
|
LeetCode/0110_balanced_binary_tree.py
|
LenartBucar/PythonAlgorithms
|
7256a9e3bb71d37614c8cfa7bbb6d011ac456ae3
|
[
"MIT"
] | 144
|
2020-09-13T22:54:57.000Z
|
2022-02-24T21:54:25.000Z
|
LeetCode/0110_balanced_binary_tree.py
|
theGreenJedi/PythonAlgorithms
|
72e0608ba0ce41229653e75f0b03ea212be8f88e
|
[
"MIT"
] | 587
|
2020-05-06T18:55:07.000Z
|
2021-09-20T13:14:53.000Z
|
LeetCode/0110_balanced_binary_tree.py
|
theGreenJedi/PythonAlgorithms
|
72e0608ba0ce41229653e75f0b03ea212be8f88e
|
[
"MIT"
] | 523
|
2020-09-09T12:07:13.000Z
|
2022-02-24T21:54:31.000Z
|
#class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
# @param root, a tree node
# @return a boolean
def isBalanced(self, root):
def getHeight(root):
if root is None:
return 0
left_height, right_height = \
getHeight(root.left), getHeight(root.right)
if left_height < 0 or right_height < 0 or \
abs(left_height - right_height) > 1:
return -1
return max(left_height, right_height) + 1
return (getHeight(root) >= 0)
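A minimal usage sketch, assuming the `TreeNode` class from the commented-out block above is defined:

```python
# Hypothetical check, assuming TreeNode from the commented block above is defined.
#
#        1                 getHeight returns -1 as soon as any pair of subtrees
#       / \                differs in height by more than 1, so isBalanced is a
#      2   3               single O(n) post-order pass.
#     /
#    4
root = TreeNode(1)
root.left, root.right = TreeNode(2), TreeNode(3)
root.left.left = TreeNode(4)
print(Solution().isBalanced(root))   # True  (heights 2 and 1 at the root)

root.left.left.left = TreeNode(5)    # deepen the left branch
print(Solution().isBalanced(root))   # False (node 2 now has subtree heights 2 and 0)
```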
avg_line_length: 31 | max_line_length: 59 | alphanum_fraction: 0.539171
4522c801b6279c5cbd8d964ac5ffdc93112e6513
| 1,074
|
py
|
Python
|
10/adapter_array.py
|
tsalgie/advent_of_code_2020
|
b944a9f04cf7cf24ca74edfa6f1e713b9bae1f11
|
[
"MIT"
] | null | null | null |
10/adapter_array.py
|
tsalgie/advent_of_code_2020
|
b944a9f04cf7cf24ca74edfa6f1e713b9bae1f11
|
[
"MIT"
] | null | null | null |
10/adapter_array.py
|
tsalgie/advent_of_code_2020
|
b944a9f04cf7cf24ca74edfa6f1e713b9bae1f11
|
[
"MIT"
] | null | null | null |
def adapter_array_01(jolts):
differences = [0, 0, 1]
for i in range(len(jolts) - 1):
differences[jolts[i + 1] - jolts[i] - 1] += 1
return differences[0] * differences[2]
def adapter_array_02(jolts):
groups = [True if jolts[i + 2] - jolts[i] <= 3 else False for i in range(len(jolts) - 2)]
group_counts = [0]
for g in groups:
if g:
group_counts[-1] += 1
else:
group_counts += [0] if group_counts[-1] != 0 else []
mult = lambda l : l[0] * mult(l[1:]) if len(l) > 1 else l[0]
valid_subchains = lambda length : len(["{0:b}".format(i) for i in range(pow(2, length)) if '111' not in "{0:b}".format(i)])
return mult(list(map(valid_subchains, group_counts)))
if __name__ == "__main__":
with open('input.txt') as f:
contents = sorted([int(l.strip()) for l in f.readlines()])
with open('output.txt', 'w') as f:
f.write("Part one: {}\n".format(adapter_array_01([0] + contents)))
f.write("Part two: {}\n".format(adapter_array_02([0] + contents + [contents[-1] + 3])))
avg_line_length: 42.96 | max_line_length: 127 | alphanum_fraction: 0.581006
b9f4a8d1f9f372c6b78fc54736d833b63d573a58
| 4,087
|
py
|
Python
|
tests/metadata/test_roundtrips.py
|
zeburek/cattrs
|
c1aea4cf3a74cc42c90bf4e8a1e79390a323d774
|
[
"MIT"
] | 3
|
2019-12-13T23:54:38.000Z
|
2021-06-08T18:29:51.000Z
|
tests/metadata/test_roundtrips.py
|
zeburek/cattrs
|
c1aea4cf3a74cc42c90bf4e8a1e79390a323d774
|
[
"MIT"
] | null | null | null |
tests/metadata/test_roundtrips.py
|
zeburek/cattrs
|
c1aea4cf3a74cc42c90bf4e8a1e79390a323d774
|
[
"MIT"
] | null | null | null |
"""Test both structuring and unstructuring."""
from dataclasses import MISSING, dataclass, fields, make_dataclass
from typing import Optional, Union
import pytest
from hypothesis import assume, given
from hypothesis.strategies import sampled_from
from convclasses import Converter, UnstructureStrategy, mod
from . import nested_typed_classes, simple_typed_attrs, simple_typed_classes
unstructure_strats = sampled_from(list(UnstructureStrategy))
@given(simple_typed_classes(), unstructure_strats)
def test_simple_roundtrip(cls_and_vals, strat):
"""
Simple classes with metadata can be unstructured and restructured.
"""
converter = Converter(unstruct_strat=strat)
cl, vals = cls_and_vals
inst = cl(*vals)
assert inst == converter.structure(converter.unstructure(inst), cl)
@given(simple_typed_attrs(defaults=True), unstructure_strats)
def test_simple_roundtrip_defaults(cls_and_vals, strat):
"""
Simple classes with metadata can be unstructured and restructured.
"""
a, _ = cls_and_vals
cl = make_dataclass("HypClass", [("a", a.type, a)])
converter = Converter(unstruct_strat=strat)
inst = cl()
assert inst == converter.structure(converter.unstructure(inst), cl)
@given(simple_typed_classes())
def test_simple_name_modifiers(cls_and_vals):
"""
Simple classes with metadata can be unstructured and restructured.
"""
a, vals = cls_and_vals
converter = Converter()
if len(fields(a)) > 0:
fld = mod.name("t-t", fields(a)[0])
cl = make_dataclass("HypClass", [("t_t", fld.type, fld)])
inst = cl(vals[0])
assert converter.unstructure(inst).get("t-t", MISSING) is not MISSING
else:
cl = make_dataclass("HypClass", [])
inst = cl()
assert inst == converter.structure(converter.unstructure(inst), cl)
@given(nested_typed_classes, unstructure_strats)
def test_nested_roundtrip(cls_and_vals, strat):
"""
Nested classes with metadata can be unstructured and restructured.
"""
converter = Converter(unstruct_strat=strat)
cl, vals = cls_and_vals
# Vals are a tuple, convert into a dictionary.
inst = cl(*vals)
assert inst == converter.structure(converter.unstructure(inst), cl)
@given(
simple_typed_classes(defaults=False),
simple_typed_classes(defaults=False),
unstructure_strats,
)
def test_union_field_roundtrip(cl_and_vals_a, cl_and_vals_b, strat):
"""
Classes with union fields can be unstructured and structured.
"""
converter = Converter(unstruct_strat=strat)
cl_a, vals_a = cl_and_vals_a
cl_b, vals_b = cl_and_vals_b
a_field_names = {a.name for a in fields(cl_a)}
b_field_names = {a.name for a in fields(cl_b)}
assume(a_field_names)
assume(b_field_names)
common_names = a_field_names & b_field_names
assume(len(a_field_names) > len(common_names))
@dataclass
class C(object):
a: Union[cl_a, cl_b]
inst = C(a=cl_a(*vals_a))
if strat is UnstructureStrategy.AS_DICT:
assert inst == converter.structure(converter.unstructure(inst), C)
else:
# Our disambiguation functions only support dictionaries for now.
with pytest.raises(ValueError):
converter.structure(converter.unstructure(inst), C)
def handler(obj, _):
return converter.structure(obj, cl_a)
converter._union_registry[Union[cl_a, cl_b]] = handler
assert inst == converter.structure(converter.unstructure(inst), C)
del converter._union_registry[Union[cl_a, cl_b]]
@given(simple_typed_classes(defaults=False))
def test_optional_field_roundtrip(cl_and_vals):
"""
Classes with optional fields can be unstructured and structured.
"""
converter = Converter()
cl, vals = cl_and_vals
@dataclass
class C(object):
a: Optional[cl]
inst = C(a=cl(*vals))
assert inst == converter.structure(converter.unstructure(inst), C)
inst = C(a=None)
unstructured = converter.unstructure(inst)
assert inst == converter.structure(unstructured, C)
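A minimal, concrete version of the roundtrip these property-based tests exercise, assuming `convclasses`' `Converter` behaves like the cattrs-style API used above (the `Point` class is a made-up example):

```python
# Hypothetical concrete roundtrip; Point is a made-up example class.
from dataclasses import dataclass

from convclasses import Converter


@dataclass
class Point:
    x: int
    y: int


converter = Converter()
p = Point(1, 2)
unstructured = converter.unstructure(p)               # e.g. {"x": 1, "y": 2}
assert converter.structure(unstructured, Point) == p  # same roundtrip the tests assert
```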
avg_line_length: 31.682171 | max_line_length: 77 | alphanum_fraction: 0.706141
a00081a94558434b1dfea5311fb70543790c62b3
| 6,094
|
py
|
Python
|
advection-1d/run_sampling.py
|
themisbo/Rule-based-Bayesian-regr
|
9dc3e896e67117a43580f0a58199d3b8203f6f9d
|
[
"Apache-2.0"
] | null | null | null |
advection-1d/run_sampling.py
|
themisbo/Rule-based-Bayesian-regr
|
9dc3e896e67117a43580f0a58199d3b8203f6f9d
|
[
"Apache-2.0"
] | null | null | null |
advection-1d/run_sampling.py
|
themisbo/Rule-based-Bayesian-regr
|
9dc3e896e67117a43580f0a58199d3b8203f6f9d
|
[
"Apache-2.0"
] | 1
|
2022-02-11T14:20:12.000Z
|
2022-02-11T14:20:12.000Z
|
#!/usr/bin/env python
from cycler import cycler
import matplotlib.pyplot as plt
import numpy as np
import pymc3 as pm
import scipy as sp
import theano
from simulator import run
plt.style.use("ggplot")
np.random.seed(1000005)
scale = 0.002
y_true, x = run(amplitude=0.001, phi=np.pi)
y_true = y_true * 10
shape = y_true.shape
times = shape[0]
noise = np.random.normal(scale=scale, size=shape)
y = y_true + noise
y_flat = y.flatten()
N_KNOT = 10
knots = np.linspace(0, 2 * np.pi, N_KNOT)
N_MODEL_KNOTS = 5 * N_KNOT
basis_funcs = sp.interpolate.BSpline(knots, np.eye(N_MODEL_KNOTS), k=3)
colors = ["red", "green", "blue"]
fig = plt.figure(figsize=(7.5, 5))
ax = fig.gca()
for i in range(times):
plt.plot(
x, y_true[i, :], color=colors[i], label="Exact solution for t=" + str(i + 1)
)
plt.plot(x, y[i, :], "o", color=colors[i], label="Data for t=" + str(i + 1))
plt.axvline(x=np.pi, linestyle="--")
ax.set_ylabel("u")
ax.set_xlabel("x")
ax.legend()
# plt.savefig("plots/advection_1d_data.png")
class BasisFunc:
def __init__(self):
"""
Initialize the class
"""
self.Bx = None
self.Bx_ = None
def create_basis(self, x):
"""
Create the basis object
"""
Bx = basis_funcs(x)
Bx_ = theano.shared(Bx)
self.Bx = Bx
self.Bx_ = Bx_
class RuleBasisFunc(BasisFunc):
def __init__(self):
"""
Initialize the class
"""
super().__init__()
self.discr = None
def create_discr(self, no_points, min, max):
"""
Create discretization
"""
self.discr = np.linspace(min, max, no_points)
no_points = 25
xlow = 0
xmid = np.pi
xhi = 2 * np.pi
data_base = BasisFunc()
data_base.create_basis(x)
rule_first = RuleBasisFunc()
rule_first.create_discr(no_points, xlow, xmid)
rule_first.create_basis(rule_first.discr)
rule_second = RuleBasisFunc()
rule_second.create_discr(no_points, xmid, xhi)
rule_second.create_basis(rule_second.discr)
rule_third = RuleBasisFunc()
rule_third.create_discr(2, xlow, xhi)
rule_third.create_basis(rule_third.discr)
def logp_rule(a0, σ_a, Δ_a):
    """
    Construct the rule penalty
    """
    a = a0 + (σ_a * Δ_a).cumsum(axis=0)
points_r1 = rule_first.Bx_.dot(a).T.flatten()
points_r2 = rule_second.Bx_.dot(a).T.flatten()
points_r3 = rule_third.Bx_.dot(a).T.flatten()
rule_log_lik = 0
for i in range(no_points):
rule_log_lik = rule_log_lik + pm.math.switch(pm.math.lt(points_r1[i], 0), 1, 0)
rule_log_lik = rule_log_lik + pm.math.switch(pm.math.gt(points_r2[i], 0), 1, 0)
rule_log_lik = rule_log_lik + pm.math.switch(
pm.math.gt(pm.math.abs_(points_r3[0] - points_r3[1]), 0.001), 1, 0
)
rule_log_lik = rule_log_lik + pm.math.switch(
pm.math.gt(pm.math.abs_(points_r3[2] - points_r3[3]), 0.001), 1, 0
)
rule_log_lik = rule_log_lik + pm.math.switch(
pm.math.gt(pm.math.abs_(points_r3[4] - points_r3[5]), 0.001), 1, 0
)
for num in range(times - 1):
rule_log_lik = rule_log_lik + pm.math.switch(
pm.math.lt(
points_r1[i + (num + 1) * no_points], points_r1[i + num * no_points]
),
1,
0,
)
rule_log_lik = rule_log_lik + pm.math.switch(
pm.math.gt(
points_r2[i + (num + 1) * no_points], points_r2[i + num * no_points]
),
1,
0,
)
rule_ratio = rule_log_lik / ((times + 3) * no_points)
return pm.Beta.dist(alpha=1.0, beta=100.0).logp(rule_ratio)
use_rule = True
with pm.Model() as model:
    σ_a = pm.HalfCauchy("σ_a", 0.1, shape=times)
    a0 = pm.Normal("a0", 0.0, 0.1, shape=times)
    Δ_a = pm.Normal("Δ_a", 0.0, 5.0, shape=(N_MODEL_KNOTS, times))
    a = pm.Deterministic("a", a0 + (σ_a * Δ_a).cumsum(axis=0))
res = data_base.Bx_.dot(a)
res_fl = res.T.flatten()
obs = pm.Normal("obs", res_fl, scale, observed=y_flat)
if use_rule:
        LL_rule = pm.Potential("LL_rule", logp_rule(a0, σ_a, Δ_a))
with model:
trace = pm.sample_smc(draws=10000)
plt.rc("axes", prop_cycle=cycler("color", ["r", "g", "b"]))
thin = 10
fig = plt.figure(figsize=(7.5, 5))
ax = fig.gca()
for j in range(trace["a"].shape[2]):
a = trace["a"][0 * thin, :, j]
yvals = data_base.Bx.dot(a)
plt.plot(x, yvals, label="Posterior curves for t=" + str(j + 1))
for iter in range(int(trace["a"].shape[0] / thin)):
for j in range(trace["a"].shape[2]):
a = trace["a"][iter * thin, :, j]
yvals = data_base.Bx.dot(a)
plt.plot(x, yvals, alpha=0.1)
for j in range(times):
plt.scatter(x, y[j, :], label="Data for t=" + str(j + 1))
plt.plot(x, y_true[j, :], color="k")
plt.plot(x, y_true[j, :], color="k", label="Exact solutions")
if use_rule:
plt.axvline(x=np.pi, linestyle="--")
ax.set_ylabel("u")
ax.set_xlabel("x")
ax.legend()
plt.savefig("plots/advection_1d_posterior" + "_rule" * use_rule + ".png")
import pickle
pickle.dump(trace["a"], open("rule_spline", "wb"))
trace = pickle.load(open("rule_spline", "rb"))
mean_post = np.mean(trace["a"], axis=0)
mean_post = np.mean(trace, axis=0)
for j in range(mean_post.shape[1]):
a = mean_post[:, j]
yvals = data_base.Bx.dot(a)
plt.plot(x, yvals, label="Posterior curves for t=" + str(j + 1))
# y_pred = []
# for j in range(mean_post.shape[1]):
# a = mean_post[:, j]
# yvals = data_base.Bx.dot(a)
# y_pred.append(yvals)
# y_pred = np.array(y_pred)
res = data_base.Bx.dot(mean_post)
res_fl = res.T.flatten()
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import explained_variance_score
from sklearn.metrics import r2_score
mean_squared_error(res_fl, y_flat)
mean_absolute_error(res_fl, y_flat)
explained_variance_score(res_fl, y_flat)
r2_score(res_fl, y_flat)
norule_waic = pm.waic(trace, model)
norule_loo = pm.loo(trace, model)
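In equation form, the model sampled above (reconstructed from the code, not from a stated reference; $B_k$ is the cubic B-spline basis built by `basis_funcs`, $t$ indexes the three time snapshots and $k$ the spline coefficients):

$$\sigma_{a,t} \sim \mathrm{HalfCauchy}(0.1), \qquad a_{0,t} \sim \mathcal{N}(0,\,0.1^2), \qquad \Delta_{a,kt} \sim \mathcal{N}(0,\,5^2),$$

$$a_{kt} = a_{0,t} + \sum_{j \le k} \sigma_{a,t}\,\Delta_{a,jt}, \qquad u_t(x) = \sum_k B_k(x)\,a_{kt}, \qquad y_{it} \sim \mathcal{N}\big(u_t(x_i),\,\mathrm{scale}^2\big).$$

When `use_rule` is set, `logp_rule` adds a potential equal to the log-density of a Beta(1, 100) evaluated at the fraction of violated checks (sign of $u$ on each half of the domain, monotone change between snapshots, and near-equal values at the reference points), which pushes the sampler toward curves that satisfy the rules.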
avg_line_length: 26.611354 | max_line_length: 88 | alphanum_fraction: 0.615852
9afb106a80ff4e3c67efda3159e90bd69eee442e
| 202
|
py
|
Python
|
addons/google_account/__init__.py
|
SHIVJITH/Odoo_Machine_Test
|
310497a9872db7844b521e6dab5f7a9f61d365a4
|
[
"Apache-2.0"
] | null | null | null |
addons/google_account/__init__.py
|
SHIVJITH/Odoo_Machine_Test
|
310497a9872db7844b521e6dab5f7a9f61d365a4
|
[
"Apache-2.0"
] | null | null | null |
addons/google_account/__init__.py
|
SHIVJITH/Odoo_Machine_Test
|
310497a9872db7844b521e6dab5f7a9f61d365a4
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from . import models
from . import controllers
from .models.google_service import TIMEOUT # noqa
avg_line_length: 25.25 | max_line_length: 74 | alphanum_fraction: 0.727723
cffcd5ae6ca68f4643d868d6a6a7f0652ae5b102
| 8,022
|
py
|
Python
|
cw/simulation/gym_wrapper.py
|
aarondewindt/cw
|
6be9770da0f0fc34ea47d7ab83e6929c4823e98a
|
[
"MIT"
] | 1
|
2021-10-06T07:20:43.000Z
|
2021-10-06T07:20:43.000Z
|
cw/simulation/gym_wrapper.py
|
aarondewindt/cw
|
6be9770da0f0fc34ea47d7ab83e6929c4823e98a
|
[
"MIT"
] | 3
|
2019-02-18T13:49:39.000Z
|
2020-12-28T04:13:27.000Z
|
cw/simulation/gym_wrapper.py
|
aarondewindt/cw
|
6be9770da0f0fc34ea47d7ab83e6929c4823e98a
|
[
"MIT"
] | null | null | null |
from typing import Tuple, Any, Union
from abc import ABC, abstractmethod
from threading import Thread, RLock, Condition
from enum import Flag, auto
from concurrent.futures import Future
import gym
import xarray as xr
from gym import spaces
from tqdm.auto import trange
from tqdm import tqdm
from cw.simulation.module_base import ModuleBase
from cw.simulation.logging import BatchLogger
from cw.synchronization import BinarySemaphore
class ResetState(Flag):
no_reset = auto()
reset = auto()
running = auto()
first_step = auto()
no_reset_running = no_reset | running
no_reset_first_step = no_reset | first_step
reset_ending_episode = reset | running
reset_first_step = reset | first_step
class GymEnvironment(ModuleBase, gym.Env):
metadata = {'render.modes': []}
def __init__(self,
required_states=None,
target_time_step=None):
super().__init__(
is_discreet=True,
target_time_step=target_time_step,
required_states=required_states,
)
self.simulation_thread: Union[Thread, None] = None
self.environment_semaphore: Union[BinarySemaphore, None] = None
self.agent_semaphore: Union[BinarySemaphore, None] = None
self.reset_semaphore: Union[BinarySemaphore, None] = None
self.last_results: Union[xr.Dataset, Any, None] = None
self.last_batch_results: Union[xr.Dataset, None] = None
self.simulation_result_futures = []
self.simulation_result_futures_lock = RLock()
self.is_done_lock = RLock()
self.step_lock = RLock()
self.thread_running = False
self.reset_state = ResetState.no_reset_first_step
self.is_done = False
def initialize(self, simulation):
super().initialize(simulation)
def simulation_thread_target(self, n_steps) -> None:
"""
        Thread target running the simulation batch.
:param n_steps: Number of steps per episode.
"""
# Set a batch logger on the simulation.
batch_logger = BatchLogger()
batch_logger.initialize(self.simulation)
original_logger = self.simulation.logging
self.simulation.logging = batch_logger
# Set reset state to not resetting, and processing the first step.
self.reset_state = ResetState.no_reset_first_step
try:
while True:
# Break the loop if the thread is not supposed to be running anymore.
if not self.thread_running:
break
# Restore the initial simulation states at the start of each episode.
self.simulation.restore_states()
# Run simulation
self.last_results = self.simulation.run(n_steps)
with self.simulation_result_futures_lock:
for future in self.simulation_result_futures:
if not future.done():
future.set_result(self.last_results)
self.simulation_result_futures = []
# If resetting we need to run the first iteration up the agent and
# let the reset function return an observation of the first step.
if self.reset_state & ResetState.reset:
# Set reset state to resetting and first iteration.
self.reset_state = ResetState.reset_first_step
else:
# Set reset state to not resetting and first step.
self.reset_state = ResetState.no_reset_first_step
except KeyboardInterrupt:
self.thread_running = False
finally:
self.environment_semaphore = None
self.agent_semaphore = None
self.reset_semaphore = None
self.simulation_thread = None
self.thread_running = False
self.last_batch_results = batch_logger.finish_batch()
self.simulation.logging = original_logger
def start_simulation_thread(self, n_steps):
# Don't start simulation thread if it's already running.
if self.simulation_thread is None:
self.simulation.stash_states()
# Initialize binary Semaphores to False to they are blocked by default.
self.environment_semaphore = BinarySemaphore(False)
self.agent_semaphore = BinarySemaphore(False)
self.reset_semaphore = BinarySemaphore(False)
# Create and start thread.
self.simulation_thread = Thread(target=self.simulation_thread_target,
daemon=True, args=(n_steps,))
self.thread_running = True
self.simulation_thread.start()
else:
print("Simulation thread already running")
def end_simulation_thread(self):
if self.simulation_thread is not None:
self.thread_running = False
self.simulation.stop()
def run_step(self, is_last):
if self.reset_state == ResetState.reset_ending_episode:
return
with self.is_done_lock:
self.is_done = is_last
# Run the environment step function.
self.environment_step(is_last)
self.agent_semaphore.release()
# Release the reset function if this is the first step
# after a reset.
if self.reset_state == ResetState.reset_first_step:
self.reset_semaphore.release()
self.environment_semaphore.acquire()
def run_end(self):
pass
def environment_step(self, is_last):
pass
def step(self, action: Any) -> Tuple[Any, float, bool, dict]:
if self.reset_state & ResetState.first_step:
# We are running the first step, so change the reset state.
# And make sure we block on the agent_semaphore later on.
self.reset_state = ResetState.no_reset_running
self.agent_semaphore.acquire(False)
# Make sure we are running a simulation batch.
if self.simulation_thread is None:
raise Exception("Simulation thread not running.")
# Perform action.
self.act(action)
# Release the simulation.
self.environment_semaphore.release()
# Lock until the simulation has run the iteration.
self.agent_semaphore.acquire()
with self.is_done_lock:
# Observe environment.
observation, reward, info = self.observe(self.is_done)
if self.is_done:
self.environment_semaphore.release()
return observation, reward, self.is_done, info
@abstractmethod
def act(self, action: Any):
pass
@abstractmethod
def observe(self, done: bool) -> Tuple[Any, float, dict]:
pass
def reset(self):
if self.reset_state == ResetState.no_reset_first_step:
# No actions have been taken yet on the current episode
# so there is no need to reset it. Just return the current
# observation.
observation = self.observe(self.is_done)[0]
return observation
# Set the state to reset and still running (aka, ending episode)
# Stop simulation
# Release the environment semaphore to let the simulation finish.
self.reset_state = ResetState.reset_ending_episode
self.simulation.stop()
self.environment_semaphore.release()
# Wait for the new episode to start.
self.reset_semaphore.acquire()
# Return observation.
observation = self.observe(self.is_done)[0]
return observation
def render(self, mode='human'):
pass
def close(self):
pass
def seed(self, seed=None):
pass
def create_result_future(self):
future = Future()
with self.simulation_result_futures_lock:
self.simulation_result_futures.append(future)
return future
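A minimal subclass sketch showing the hooks a concrete environment fills in; the state fields, spaces and reward are hypothetical and not part of `cw`, and `self.simulation.states` is assumed to expose the simulation state vector:

```python
# Hypothetical subclass; the state fields (x, v, force), spaces and reward are placeholders,
# and self.simulation.states is assumed to expose the simulation state vector.
import numpy as np
from gym import spaces


class CartEnvironment(GymEnvironment):
    def __init__(self):
        super().__init__(required_states=["x", "v", "force"], target_time_step=0.01)
        self.observation_space = spaces.Box(-np.inf, np.inf, shape=(2,))
        self.action_space = spaces.Box(-1.0, 1.0, shape=(1,))

    def act(self, action):
        # Called by step() before the simulation thread is released for one iteration.
        self.simulation.states.force = float(action[0])

    def observe(self, done):
        # Called after the simulation thread has advanced (and once from reset()).
        s = self.simulation.states
        return np.array([s.x, s.v]), -abs(s.x), {}
```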
avg_line_length: 33.848101 | max_line_length: 85 | alphanum_fraction: 0.633009
1d9d11a4ccd8aa8bd286678bd9dea643b128a408
| 5,541
|
py
|
Python
|
otp/nametag/Nametag.py
|
CrankySupertoon01/Toontown-2
|
60893d104528a8e7eb4aced5d0015f22e203466d
|
[
"MIT"
] | 1
|
2021-02-13T22:40:50.000Z
|
2021-02-13T22:40:50.000Z
|
otp/nametag/Nametag.py
|
CrankySupertoonArchive/Toontown-2
|
60893d104528a8e7eb4aced5d0015f22e203466d
|
[
"MIT"
] | 1
|
2018-07-28T20:07:04.000Z
|
2018-07-30T18:28:34.000Z
|
otp/nametag/Nametag.py
|
CrankySupertoonArchive/Toontown-2
|
60893d104528a8e7eb4aced5d0015f22e203466d
|
[
"MIT"
] | 2
|
2019-12-02T01:39:10.000Z
|
2021-02-13T22:41:00.000Z
|
from NametagConstants import *
import NametagGlobals
from otp.margins.ClickablePopup import ClickablePopup
from otp.otpbase import OTPGlobals
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
class Nametag(ClickablePopup):
CName = 1
CSpeech = 2
CThought = 4
NAME_PADDING = 0.2
CHAT_ALPHA = 1.0
DEFAULT_CHAT_WORDWRAP = 10.0
IS_3D = False # 3D variants will set this to True.
def __init__(self):
if self.IS_3D:
ClickablePopup.__init__(self, NametagGlobals.camera)
else:
ClickablePopup.__init__(self)
self.contents = 0 # To be set by subclass.
self.innerNP = NodePath.anyPath(self).attachNewNode('nametag_contents')
self.wordWrap = 7.5
self.chatWordWrap = None
self.font = None
self.speechFont = None
self.name = ''
self.displayName = ''
self.qtColor = VBase4(1,1,1,1)
self.colorCode = CCNormal
self.avatar = None
self.icon = NodePath('icon')
self.frame = (0, 0, 0, 0)
self.nameFg = (0,0,0,1)
self.nameBg = (1,1,1,1)
self.chatFg = (0,0,0,1)
self.chatBg = (1,1,1,1)
self.chatString = ''
self.chatFlags = 0
def destroy(self):
ClickablePopup.destroy(self)
def setContents(self, contents):
self.contents = contents
self.update()
def setAvatar(self, avatar):
self.avatar = avatar
def setChatWordwrap(self, chatWordWrap):
self.chatWordWrap = chatWordWrap
def tick(self):
pass # Does nothing by default.
def clickStateChanged(self):
self.update(False)
def getButton(self):
cs = self.getClickState()
if self.buttons is None:
return None
elif cs in self.buttons:
return self.buttons[cs]
else:
return self.buttons.get(0)
def update(self, scale=True):
if self.colorCode in NAMETAG_COLORS:
cc = self.colorCode
else:
cc = CCNormal
self.nameFg, self.nameBg, self.chatFg, self.chatBg = NAMETAG_COLORS[cc][self.getClickState()]
self.innerNP.node().removeAllChildren()
if self.contents & self.CThought and self.chatFlags & CFThought:
balloon = self.showBalloon(self.getThoughtBalloon(), self.chatString)
elif self.contents & self.CSpeech and self.chatFlags&CFSpeech:
balloon = self.showBalloon(self.getSpeechBalloon(), self.chatString)
elif self.contents & self.CName and self.displayName:
self.showName()
return
else:
return
if scale and self.IS_3D:
balloon.setScale(0)
scaleLerp = Sequence(Wait(0.10), LerpScaleInterval(balloon, 0.2, VBase3(1, 1, 1), VBase3(0, 0, 0), blendType='easeInOut'))
scaleLerp.start()
def showBalloon(self, balloon, text):
if not self.speechFont:
# If no font is set, we can't display anything yet...
return
color = self.qtColor if (self.chatFlags&CFQuicktalker) else self.chatBg
if color[3] > self.CHAT_ALPHA:
color = (color[0], color[1], color[2], self.CHAT_ALPHA)
reversed = (self.IS_3D and (self.chatFlags&CFReversed))
balloon, frame = balloon.generate(text, self.speechFont, textColor=self.chatFg,
balloonColor=color,
wordWrap=self.chatWordWrap or \
self.DEFAULT_CHAT_WORDWRAP,
button=self.getButton(),
reversed=reversed)
balloon.reparentTo(self.innerNP)
self.frame = frame
return balloon
def showName(self):
if not self.font:
# If no font is set, we can't actually display a name yet...
return
# Create text node:
self.innerNP.attachNewNode(self.icon)
t = self.innerNP.attachNewNode(TextNode('name'), 1)
t.node().setFont(self.font)
t.node().setAlign(TextNode.ACenter)
t.node().setWordwrap(self.wordWrap)
t.node().setText(self.displayName)
t.node().setTextColor(self.nameFg)
t.setTransparency(self.nameFg[3] < 1.0)
width, height = t.node().getWidth(), t.node().getHeight()
# Put the actual written name a little in front of the nametag and
# disable depth write so the text appears nice and clear, free from
# z-fighting and bizarre artifacts. The text renders *after* the tag
# behind it, due to both being in the transparency bin,
# so there's really no problem with doing this.
t.setY(-0.05)
t.setAttrib(DepthWriteAttrib.make(0))
# Apply panel behind the text:
panel = NametagGlobals.nametagCardModel.copyTo(self.innerNP, 0)
panel.setPos((t.node().getLeft()+t.node().getRight())/2.0, 0,
(t.node().getTop()+t.node().getBottom())/2.0)
panel.setScale(width + self.NAME_PADDING, 1, height + self.NAME_PADDING)
panel.setColor(self.nameBg)
panel.setTransparency(self.nameBg[3] < 1.0)
self.frame = (t.node().getLeft()-self.NAME_PADDING/2.0,
t.node().getRight()+self.NAME_PADDING/2.0,
t.node().getBottom()-self.NAME_PADDING/2.0,
t.node().getTop()+self.NAME_PADDING/2.0)
| 34.416149
| 134
| 0.590507
|
ec7379ebdebe4ffc10d5c05215a99e8b909d4cf0
| 28,110
|
py
|
Python
|
coordination/formation_communication_demo/control/script/sim_addleader_1.py
|
robin-shaun/xtdrone
|
f255d001e2b83e2dd54e8086f881c58a4efd53ee
|
[
"MIT"
] | null | null | null |
coordination/formation_communication_demo/control/script/sim_addleader_1.py
|
robin-shaun/xtdrone
|
f255d001e2b83e2dd54e8086f881c58a4efd53ee
|
[
"MIT"
] | null | null | null |
coordination/formation_communication_demo/control/script/sim_addleader_1.py
|
robin-shaun/xtdrone
|
f255d001e2b83e2dd54e8086f881c58a4efd53ee
|
[
"MIT"
] | null | null | null |
# -*- coding: UTF-8 -*-
"""
* This is the version that leader can hover, last modify on 2022.1.27 by Lan
Framework for Parallel Simulation
* To control all UAV in Gazebo(simulation environment) using MPI
Before running this code, you need to launch your simulation Environment and px4 via:
$ roslaunch px4 iris3_parallel_simulation.launch # 3 UAVs in simulation
$ roslaunch px4 iris10_parallel_simulation.launch # 10 UAVs in simulation
And then, you can run this code via:
$ mpiexec -n 3 python px4_control_sim_addleader.py # 3 UAVs in simulation
$ mpiexec -n 10 python px4_control_sim_addleader.py # 10 UAVs in simulation
"""
import tf
import time
import math
from pyquaternion import Quaternion
from mpi4py import MPI
import rospy
from mavros_msgs.msg import GlobalPositionTarget, State, PositionTarget
from mavros_msgs.srv import CommandBool, CommandTOL, SetMode, SetMavFrame
from geometry_msgs.msg import PoseStamped, Twist, TwistStamped
from sensor_msgs.msg import Imu, NavSatFix
from std_msgs.msg import Float32, Float64, String
from control.msg import UAVDataMsg, AllUAVData, NeighborMsg, AllNeighborMsg, CommVerify, UavComm
import numpy as np
import copy
import tf_conversions as tfc
comm = MPI.COMM_WORLD
uav_id = comm.Get_rank()
uav_num_sim = comm.Get_size()
uav_num_real = 0
uav_num = uav_num_sim + uav_num_real
uav_bias = [[0,0,0],[1.2,-1.2,0],[-1.2,-1.2,0],[1.2,1.2,0],[-1.2,1.2,0],[2.4,-2.4,0],[0,-2.4,0],[-2.4,-2.4,0],[2.4,0,0],[-2.4,0,0],[2.4,2.4,0],[0,2.4,0],[-2.4,2.4,0],[-7,-7,0],[-6,-7,0],[-5,-7,0],[-4,-7,0],[3,6,0],[4,6,0],[5,6,0]]
#uav_bias = [[2,-6,0],[-2,-6,0],[2,2,0],[-2,2,0],[4,-6,0],[4,-8,0],[2,-8,0],[3,-7,0],[-4,-6,0],[-4,-8,0],[-2,-8,0],[-3,-7,0],[4,2,0],[4,4,0],[2,4,0],[3,3,0],[-4,2,0],[-4,4,0],[-2,4,0],[-3,3,0]]
formation_keys = ['waiting', 'FORM_1', 'FORM_2', 'FORM_3', 'AUTO.TAKEOFF', 'OFFBOARD', 'HOVER', 'AUTO.LAND']
class Px4Controller:
def __init__(self):
self.uav_id = uav_id + uav_num_real
self.namespace = "UAV{proc}".format(proc=self.uav_id)
self.imu = None
self.gps = None
self.local_pose = PoseStamped()
self.local_velocity = TwistStamped()
self.current_state = None
self.current_heading = None
self.takeoff_height = 0.6
self.local_enu_position = None
self.cur_target_pose = None
self.global_target = None
self.target_yaw = 0.0
self.global_pose = PoseStamped()
self.takeoff_target_pose = PoseStamped()
self.hover_target_pose = PoseStamped()
self.target_pose = None
self.target_vel = TwistStamped()
self.motion_type = 0 # 0:position 1:vel 2: accel
self.frame_id = 7 # 1 :flu, 7: position
self.received_new_task = False
self.arm_state = False
self.mavros_state = None
self.received_imu = False
self.frame = "BODY"
self.formation_config = 'waiting'
self.cmd = None
self.gcs_cmd = String()
self.last_gcs_cmd = String()
self.form_flag = 0
self.first_form3_flag = True
self.first_form1_flag = True
self.first_form2_flag = True
self.leader_desire_pose = None
self.last_form = 0
self.uav_data = UAVDataMsg()
self.all_uav_data_sim = AllUAVData()
self.all_uav_data_real = AllUAVData()
self.all_comm_data = CommVerify()
# Expand to uav_num lines,add neighbor information in "def read_set_file"
self.neighbor_num = 0
self.neighbor_id = []
self.comm_succ = []
for i in range(0, uav_num):
self.neighbor_id.append([])
self.comm_succ.append([])
self.leader_id = [0]
self.all_desired_position = []
self.all_neighbor_data = AllNeighborMsg()
# control parameters
self.Kpx = 0.5
self.Kpy = 0.5
self.Kpz = 0.5
self.Kpangz = 1
self.velxy_max = 1.5
self.velz_max = 1.5
self.velangz_max = 0.5
self.gamma = 1
        self.integral_x = 0  # integral term
        self.integral_y = 0  # integral term
        self.integral_z = 0  # integral term
# ros subscribers
self.local_pose_sub = rospy.Subscriber(self.namespace + "/mavros/local_position/pose", PoseStamped,
self.local_pose_callback)
self.local_velocity_sub = rospy.Subscriber(self.namespace + "/mavros/local_position/velocity_local",
TwistStamped, self.local_velocity_callback)
self.mavros_sub = rospy.Subscriber(self.namespace + "/mavros/state", State, self.mavros_state_callback)
self.gps_sub = rospy.Subscriber(self.namespace + "/mavros/global_position/global", NavSatFix, self.gps_callback)
self.imu_sub = rospy.Subscriber(self.namespace + "/mavros/imu/data", Imu, self.imu_callback)
self.gcs_cmd_sub = rospy.Subscriber("/xtdrone_gcs/cmd", String, self.gcs_cmd_callback)
self.keyboard_cmd_sub = rospy.Subscriber("/xtdrone/leader/cmd", String, self.cmd_callback)
self.keyboard_cmd_sub2 = rospy.Subscriber("/xtdrone/iris_1/cmd", String, self.cmd_callback)
self.all_uav_data_sub = rospy.Subscriber("/xtdrone_gcs/all_uav_data", AllUAVData,
self.all_uav_data_callback)
self.communication_verify_sub = rospy.Subscriber("/communication_verify", CommVerify, self.communication_verify_callback)
# ros publishers
# self.pose_target_pub = rospy.Publisher(self.namespace + '/mavros/setpoint_position/local', PoseStamped, queue_size=10)
self.local_target_pub = rospy.Publisher(self.namespace + '/mavros/setpoint_raw/local', PositionTarget,
queue_size=10)
# self.twist_target_pub = rospy.Publisher(self.namespace + '/mavros/setpoint_velocity/cmd_vel', TwistStamped, queue_size=10)
self.uav_data_pub = rospy.Publisher(self.namespace + '/formation/uav_data', UAVDataMsg, queue_size=10)
self.global_position_pub = rospy.Publisher(self.namespace + '/global_position/uav_data', PoseStamped,
queue_size=10)
# ros services
self.armService = rospy.ServiceProxy(self.namespace + '/mavros/cmd/arming', CommandBool)
self.takeoffService = rospy.ServiceProxy(self.namespace + '/mavros/cmd/takeoff', CommandTOL)
self.landService = rospy.ServiceProxy(self.namespace + '/mavros/cmd/land', CommandTOL)
self.flightModeService = rospy.ServiceProxy(self.namespace + '/mavros/set_mode', SetMode)
self.frameService = rospy.ServiceProxy(self.namespace + '/mavros/setpoint_velocity/mav_frame', SetMavFrame)
print(self.namespace, ": Px4 Controller Start!")
self.data_velocity = []
self.data_position = []
self.read_set_file('UAV_pos1')
def working(self):
rospy.init_node(self.namespace + "_control_node")
for i in range(10):
if self.current_heading is not None:
break
else:
print(self.namespace, ": Waiting for initialization.")
time.sleep(0.5)
rate = rospy.Rate(15)
loop_count = 0
loop_factor = 4
# self.read_set_file('UAV_pos')
while rospy.is_shutdown() is False:
self.neighbor_num = len(self.neighbor_id[self.uav_id])
loop_count += 1
self.global_position_pub.publish(self.global_pose)
self.construct_uav_data()
self.uav_data_pub.publish(self.uav_data) # 15Hz
# if loop_count % 10 == 0:
# print(str(self.uav_id)+"'s neighbor number: ", self.neighbor_num)
if self.gcs_cmd == "AUTO.TAKEOFF":
self.last_gcs_cmd = "AUTO.TAKEOFF"
self.form_flag = 0
self.motion_type = 0
self.frame_id = 7
if self.arm_state == False:
self.arm()
if self.mavros_state != "OFFBOARD":
self.flight_mode_set(mode='OFFBOARD')
self.target_pose = self.construct_target(x=self.takeoff_target_pose.pose.position.x,
y=self.takeoff_target_pose.pose.position.y,
z=self.takeoff_height,
yaw=self.current_heading)
self.local_target_pub.publish(self.target_pose)
elif self.gcs_cmd == 'FORM_1':
self.motion_type = 1
self.frame_id = 1
self.last_gcs_cmd = 'FORM_1'
if self.form_flag != 1:
self.form_flag = 1
if self.mavros_state != "OFFBOARD":
self.flight_mode_set(mode='OFFBOARD')
if self.uav_id == 0 or self.neighbor_num == 0:
self.motion_type = 1
if self.first_form1_flag:
self.leader_desire_pose = copy.deepcopy(self.uav_data)
self.last_form = 1
self.first_form1_flag = False
if self.uav_id == 0:
self.leader_desire_pose.pose.position.y += 0.04
self.leader_formation_control()
self.target_pose = self.construct_target(vx=self.target_vel.twist.linear.x,
vy=self.target_vel.twist.linear.y,
vz=self.target_vel.twist.linear.z,
yaw_rate=self.target_vel.twist.angular.z)
else:
self.leader_desire_pose = copy.deepcopy(self.uav_data)
self.formation_control()
self.target_pose = self.construct_target(vx=self.target_vel.twist.linear.x,
vy=self.target_vel.twist.linear.y,
vz=self.target_vel.twist.linear.z,
yaw_rate=self.target_vel.twist.angular.z)
self.local_target_pub.publish(self.target_pose)
elif self.gcs_cmd == 'FORM_2':
self.motion_type = 1
self.frame_id = 1
self.last_gcs_cmd = 'FORM_2'
if self.form_flag != 2:
self.form_flag = 2
if self.mavros_state != "OFFBOARD":
self.flight_mode_set(mode='OFFBOARD')
if self.uav_id == 0 or self.neighbor_num == 0:
self.motion_type = 1
if self.first_form2_flag:
self.leader_desire_pose = copy.deepcopy(self.uav_data)
self.last_form = 2
self.first_form2_flag = False
if self.uav_id == 0:
self.leader_desire_pose.pose.position.y -= 0.04
self.leader_formation_control()
self.target_pose = self.construct_target(vx=self.target_vel.twist.linear.x,
vy=self.target_vel.twist.linear.y,
vz=self.target_vel.twist.linear.z,
yaw_rate=self.target_vel.twist.angular.z)
else:
self.formation_control()
self.leader_desire_pose = copy.deepcopy(self.uav_data)
self.target_pose = self.construct_target(vx=self.target_vel.twist.linear.x,
vy=self.target_vel.twist.linear.y,
vz=self.target_vel.twist.linear.z,
yaw_rate=self.target_vel.twist.angular.z)
self.local_target_pub.publish(self.target_pose)
elif self.gcs_cmd == 'AUTO.LAND':
self.last_gcs_cmd = 'AUTO.LAND'
self.form_flag = 0
self.last_form = 0
if self.mavros_state != "AUTO.LAND":
self.flight_mode_set(mode='AUTO.LAND')
if (self.mavros_state == 'AUTO.LAND') and (self.local_pose.pose.position.z < 0.15):
if self.arm_state == True:
self.disarm()
# print(self.namespace, ": Land Success!")
elif self.gcs_cmd == "HOVER":
self.last_gcs_cmd = "HOVER"
self.form_flag = 0
self.motion_type = 0
self.frame_id = 7
if self.arm_state == False:
self.arm()
if self.mavros_state != "OFFBOARD":
self.flight_mode_set(mode='OFFBOARD')
self.target_pose = self.construct_target(x=self.hover_target_pose.pose.position.x,
y=self.hover_target_pose.pose.position.y,
z=self.hover_target_pose.pose.position.z,
yaw=self.current_heading)
self.local_target_pub.publish(self.target_pose)
else:
self.gcs_cmd = self.last_gcs_cmd
self.form_flag = 0
# self.state = self.flight_mode_set(mode='OFFBOARD')
# if (self.uav_id == 11) or self.uav_id == 18:
# print(str(self.uav_id),"'s neighbors: ", self.neighbor_id[self.uav_id])
rate.sleep()
print("over over over")
def formation_control(self):
local_desire_relative_pose = self.all_desired_position[self.uav_id]
delta_x = 0
delta_vx = 0
delta_y = 0
delta_vy = 0
delta_z = 0
delta_vz = 0
self.target_vel.twist.angular.x = 0
self.target_vel.twist.angular.y = 0
self.target_vel.twist.angular.z = self.Kpangz * (self.target_yaw - self.current_heading)
self.neighbor_num = len(self.neighbor_id[self.uav_id])
if self.neighbor_num > 0:
weight = 4.0 / (self.gamma * self.gamma*self.neighbor_num)
else:
weight = 0.0
for i in range(self.neighbor_num):
try:
delta_x += self.all_uav_data_sim.data[self.neighbor_id[self.uav_id][i]].pose.position.x - self.uav_data.pose.position.x - \
self.all_desired_position[self.neighbor_id[self.uav_id][i]][0] + local_desire_relative_pose[0]
delta_vx += self.all_uav_data_sim.data[self.neighbor_id[self.uav_id][i]].velocity.linear.x - self.uav_data.velocity.linear.x
delta_y += self.all_uav_data_sim.data[self.neighbor_id[self.uav_id][i]].pose.position.y - self.uav_data.pose.position.y - \
self.all_desired_position[self.neighbor_id[self.uav_id][i]][1] + local_desire_relative_pose[1]
delta_vy += self.all_uav_data_sim.data[self.neighbor_id[self.uav_id][i]].velocity.linear.y - self.uav_data.velocity.linear.y
delta_z += self.all_uav_data_sim.data[self.neighbor_id[self.uav_id][i]].pose.position.z - self.uav_data.pose.position.z - \
self.all_desired_position[self.neighbor_id[self.uav_id][i]][2] + local_desire_relative_pose[2]
delta_vz += self.all_uav_data_sim.data[self.neighbor_id[self.uav_id][i]].velocity.linear.z - self.uav_data.velocity.linear.z
except:
print("id: ",self.uav_id)
print("neighbor: ", self.neighbor_id[self.uav_id])
print("num: ", self.neighbor_num)
if self.uav_id == 10:
print(delta_z)
print(delta_vz)
# accel
self.integral_x = weight * (delta_x + self.gamma * delta_vx)
self.integral_y = weight * (delta_y + self.gamma * delta_vy)
self.integral_z = weight * (delta_z + self.gamma * delta_vz)
# v(k+1)=vk+ak*T
self.target_vel.twist.linear.x = self.uav_data.velocity.linear.x + self.integral_x * 0.5 * self.Kpx
self.target_vel.twist.linear.y = self.uav_data.velocity.linear.y + self.integral_y * 0.5 * self.Kpy
self.target_vel.twist.linear.z = self.uav_data.velocity.linear.z + self.integral_z * 0.5 * self.Kpz
self.target_vel.twist.linear.x = self.limit_amplitude(self.target_vel.twist.linear.x,
self.velxy_max)
self.target_vel.twist.linear.y = self.limit_amplitude(self.target_vel.twist.linear.y,
self.velxy_max)
self.target_vel.twist.linear.z = self.limit_amplitude(self.target_vel.twist.linear.z,
self.velz_max)
self.target_vel.twist.angular.z = self.limit_amplitude(self.target_vel.twist.angular.z * self.Kpangz,
self.velangz_max)
self.data_position.append(
[self.uav_data.pose.position.x, self.uav_data.pose.position.y, self.uav_data.pose.position.z])
self.data_velocity.append(
[self.uav_data.velocity.linear.x, self.uav_data.velocity.linear.y, self.uav_data.velocity.linear.z])
#np.save('data/'+str(self.uav_id) + ' data_position_demo1.txt', self.data_position)
#np.save('data/'+str(self.uav_id) + ' data_velocity_demo1.txt', self.data_velocity)
def leader_formation_control(self):
self.target_vel.twist.angular.x = 0
self.target_vel.twist.angular.y = 0
self.target_vel.twist.angular.z = self.Kpangz * (self.target_yaw - self.current_heading)
self.target_vel.twist.linear.x = self.leader_desire_pose.pose.position.x - self.uav_data.pose.position.x
self.target_vel.twist.linear.y = self.leader_desire_pose.pose.position.y - self.uav_data.pose.position.y
self.target_vel.twist.linear.z = self.takeoff_height - self.uav_data.pose.position.z
# print(self.target_vel.twist.linear)
self.target_vel.twist.linear.x = self.limit_amplitude(self.target_vel.twist.linear.x * 0.7, self.velxy_max)
self.target_vel.twist.linear.y = self.limit_amplitude(self.target_vel.twist.linear.y * 0.7, self.velxy_max)
self.target_vel.twist.linear.z = self.limit_amplitude(self.target_vel.twist.linear.z * 0.7, self.velz_max)
self.target_vel.twist.angular.z = self.limit_amplitude(self.target_vel.twist.angular.z * self.Kpangz,
self.velangz_max)
self.data_position.append(
[self.uav_data.pose.position.x, self.uav_data.pose.position.y, self.uav_data.pose.position.z])
self.data_velocity.append(
[self.uav_data.velocity.linear.x, self.uav_data.velocity.linear.y, self.uav_data.velocity.linear.z])
#np.save('data/' + str(self.uav_id) + ' data_position_demo1.txt', self.data_position)
#np.save('data/' + str(self.uav_id) + ' data_velocity_demo1.txt', self.data_velocity)
def trans_flu2enu(self):
self.q = np.array(
[self.local_pose.pose.orientation.x, self.local_pose.pose.orientation.y, self.local_pose.pose.orientation.z,
self.local_pose.pose.orientation.w])
Pw = np.array(
[self.target_vel.twist.linear.x, self.target_vel.twist.linear.y, self.target_vel.twist.linear.z, 0])
self.q_ = self.qconj(self.q)
Ps = self.qAd(self.q_, Pw)
self.target_vel.twist.linear.x = Ps[0]
self.target_vel.twist.linear.y = Ps[1]
self.target_vel.twist.linear.z = Ps[2]
def limit_amplitude(self, data, max_amplitude):
if max_amplitude < 0:
print('Warning! Max Amplitude should be positive!')
if data <= -max_amplitude:
data = -max_amplitude
elif data >= max_amplitude:
data = max_amplitude
return data
def communication_verify_callback(self, msg):
self.all_comm_data = msg
self.leader_id = msg.leader
self.neighbor_id[self.uav_id] = []
self.comm_succ[self.uav_id] = msg.transMatrix[self.uav_id].transMatrix
for j in range(len(self.leader_id)):
if self.uav_id in self.leader_id:
if self.leader_id[j]< self.uav_id and self.comm_succ[self.uav_id][j] == 1:
self.neighbor_id[self.uav_id].append(self.leader_id[j])
else:
if self.comm_succ[self.uav_id][j] == 1:
self.neighbor_id[self.uav_id].append(self.leader_id[j])
self.neighbor_num = len(self.neighbor_id[self.uav_id])
# print("UAV",self.uav_id,"'s neighbor_num: ", self.neighbor_num)
def read_set_file(self, txt_all_pos):
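        # Load the desired formation positions for every UAV from txt/<txt_all_pos>.txt via np.loadtxt.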
all_pos_path = 'txt/' + txt_all_pos + '.txt'
self.all_desired_position = np.loadtxt(all_pos_path)
def construct_uav_data(self):
self.uav_data.header.stamp = rospy.Time.now()
self.uav_data.uav_id = self.uav_id
self.uav_data.pose = self.global_pose.pose
self.uav_data.velocity = self.local_velocity.twist
self.uav_data.heading = self.current_heading
self.uav_data.is_sim = 1
def construct_target(self, x=0, y=0, z=0, vx=0, vy=0, vz=0, afx=0, afy=0, afz=0, yaw=0, yaw_rate=0):
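        # Build a MAVROS PositionTarget; the type_mask chosen below tells the autopilot which setpoint
        # fields to honour (motion_type 0 = position, 1 = velocity, 2 = acceleration).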
target_raw_pose = PositionTarget()
target_raw_pose.coordinate_frame = self.frame_id
target_raw_pose.position.x = x
target_raw_pose.position.y = y
target_raw_pose.position.z = z
target_raw_pose.velocity.x = vx
target_raw_pose.velocity.y = vy
target_raw_pose.velocity.z = vz
target_raw_pose.acceleration_or_force.x = afx
target_raw_pose.acceleration_or_force.y = afy
target_raw_pose.acceleration_or_force.z = afz
target_raw_pose.yaw = yaw
target_raw_pose.yaw_rate = yaw_rate
if (self.motion_type == 0):
target_raw_pose.type_mask = PositionTarget.IGNORE_VX + PositionTarget.IGNORE_VY + PositionTarget.IGNORE_VZ \
+ PositionTarget.IGNORE_AFX + PositionTarget.IGNORE_AFY + PositionTarget.IGNORE_AFZ \
+ PositionTarget.IGNORE_YAW_RATE
if (self.motion_type == 1):
target_raw_pose.type_mask = PositionTarget.IGNORE_PX + PositionTarget.IGNORE_PY + PositionTarget.IGNORE_PZ \
+ PositionTarget.IGNORE_AFX + PositionTarget.IGNORE_AFY + PositionTarget.IGNORE_AFZ \
+ PositionTarget.IGNORE_YAW
if (self.motion_type == 2):
target_raw_pose.type_mask = PositionTarget.IGNORE_PX + PositionTarget.IGNORE_PY + PositionTarget.IGNORE_PZ \
+ PositionTarget.IGNORE_VX + PositionTarget.IGNORE_VY + PositionTarget.IGNORE_VZ \
+ PositionTarget.IGNORE_YAW
return target_raw_pose
'''
cur_p : poseStamped
target_p: positionTarget
'''
def position_distance(self, cur_p, target_p, threshold=0.1):
delta_x = math.fabs(cur_p.pose.position.x - target_p.position.x)
delta_y = math.fabs(cur_p.pose.position.y - target_p.position.y)
delta_z = math.fabs(cur_p.pose.position.z - target_p.position.z)
if delta_x + delta_y + delta_z < threshold:
return True
else:
return False
def cmd_yaw(self, yaw):
quaternion = tf.transformations.quaternion_from_euler(0, 0, yaw)
self.target_pose.pose.orientation.x = quaternion[0]
self.target_pose.pose.orientation.y = quaternion[1]
self.target_pose.pose.orientation.z = quaternion[2]
self.target_pose.pose.orientation.w = quaternion[3]
def gcs_cmd_callback(self, msg):
self.gcs_cmd = msg.data
def all_uav_data_callback(self, msg):
self.all_uav_data_sim = msg
def local_pose_callback(self, msg):
self.local_pose = msg
self.local_enu_position = msg
self.global_pose = copy.deepcopy(msg)
if self.gcs_cmd != "AUTO.TAKEOFF":
self.takeoff_target_pose = msg
if self.gcs_cmd != "HOVER":
self.hover_target_pose = msg
if self.gcs_cmd != "FORM_1":
self.first_form1_flag = True
if self.gcs_cmd != "FORM_2":
self.first_form2_flag = True
if self.gcs_cmd != "FORM_3":
self.first_form3_flag = True
if self.uav_id < len(uav_bias):
self.global_pose.pose.position.x += uav_bias[self.uav_id][0]
self.global_pose.pose.position.y += uav_bias[self.uav_id][1]
self.global_pose.pose.position.z += uav_bias[self.uav_id][2]
def local_velocity_callback(self, msg):
self.local_velocity = msg
def mavros_state_callback(self, msg):
self.mavros_state = msg.mode
self.arm_state = msg.armed
def imu_callback(self, msg):
global global_imu, current_heading
self.imu = msg
self.current_heading = self.q2yaw(self.imu.orientation)
self.received_imu = True
def cmd_vel_flu_callback(self, msg):
# self.target_vel.twist.linear.x, self.target_vel.twist.linear.y = self.flu2enu(msg.linear.x, msg.linear.y)
self.target_vel.twist.linear.x = msg.linear.x
self.target_vel.twist.linear.y = msg.linear.y
self.target_vel.twist.linear.z = msg.linear.z
self.target_vel.twist.angular.x = 0
self.target_vel.twist.angular.y = 0
self.target_vel.twist.angular.z = msg.angular.z
def cmd_vel_enu_callback(self, msg):
self.target_vel.twist = msg
def gps_callback(self, msg):
self.gps = msg
def cmd_callback(self, msg):
if (msg.data in formation_keys and not msg.data == self.formation_config):
self.formation_config = msg.data
#print("keyboard cmd: ", self.formation_config)
self.gcs_cmd = self.formation_config
def q2yaw(self, q):
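        # Extract the yaw angle (rotation about z) from a quaternion, accepting either a pyquaternion
        # Quaternion object or a ROS geometry_msgs quaternion field.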
if isinstance(q, Quaternion):
rotate_z_rad = q.yaw_pitch_roll[0]
else:
q_ = Quaternion(q.w, q.x, q.y, q.z)
rotate_z_rad = q_.yaw_pitch_roll[0]
return rotate_z_rad
def arm(self):
if self.armService(True):
return True
else:
print("Vehicle arming failed!")
return False
def takeoff(self):
if self.takeoffService(True):
return True
else:
print("Vehicle takeoff failed!")
return False
def disarm(self):
if self.armService(False):
return True
else:
print("Vehicle disarming failed!")
return False
def flight_mode_set(self, mode):
""" mode selectable
MANUAL, ACRO, ALTCTL, POSCTL, OFFBOARD, STABILIZED, RATTITUDE
AUTO.MISSION, AUTO.LOITER, AUTO.RTL, AUTO.LAND, AUTO.RTGS, AUTO.READY, AUTO.TAKEOFF
"""
if self.flightModeService(custom_mode=mode):
return True
else:
print(self.namespace + mode + "Failed")
def takeoff_detection(self):
if self.local_pose.pose.position.z > 0.2 and self.arm_state:
return True
else:
return False
def qprod(self, q1, q2):
return tfc.transformations.quaternion_multiply(q1, q2)
def qconj(self, q):
return np.hstack((-q[:3], q[3]))
def qAd(self, q, p):
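        # Conjugation q * p * self.q applied to the pure quaternion p; this relies on self.q having just
        # been set in trans_flu2enu, so calling it with q = self.q_ rotates p by the inverse attitude.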
return self.qprod(self.qprod(q, p), self.q)
if __name__ == '__main__':
controller = Px4Controller()
controller.working()
| 48.216123
| 230
| 0.591996
|
c86a454c43579753111d971d356c495e671ae1f1
| 8,447
|
py
|
Python
|
api/serializers.py
|
cusele/DB_OPS
|
f666bc56844cb6985937314190508ed393ce2171
|
[
"Apache-2.0"
] | null | null | null |
api/serializers.py
|
cusele/DB_OPS
|
f666bc56844cb6985937314190508ed393ce2171
|
[
"Apache-2.0"
] | null | null | null |
api/serializers.py
|
cusele/DB_OPS
|
f666bc56844cb6985937314190508ed393ce2171
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from rest_framework import serializers
from OpsManage.models import *
from wiki.models import *
from orders.models import *
from filemanage.models import *
from django.contrib.auth.models import Group,User
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id','last_login','is_superuser','username',
'first_name','last_name','email','is_staff',
'is_active','date_joined')
class ServiceSerializer(serializers.ModelSerializer):
project_name = serializers.CharField(source='project.project_name', read_only=True)
project_id = serializers.IntegerField(source='project.id', read_only=True)
class Meta:
model = Service_Assets
fields = ('id','service_name','project_name','project_id')
class ProjectSerializer(serializers.ModelSerializer):
service_assets = ServiceSerializer(many=True, read_only=True,required=False)
class Meta:
model = Project_Assets
fields = ('id','project_name','service_assets')
class GroupSerializer(serializers.ModelSerializer):
class Meta:
model = Group
fields = ('id','name')
class ZoneSerializer(serializers.ModelSerializer):
class Meta:
model = Zone_Assets
fields = ('id','zone_name','zone_network','zone_contact','zone_number')
# class AssetStatusSerializer(serializers.ModelSerializer):
# class Meta:
# model = Assets_Satus
# fields = ('id','status_name')
class CronSerializer(serializers.ModelSerializer):
class Meta:
model = Cron_Config
fields = ('id','cron_minute','cron_hour','cron_day',
'cron_week','cron_month','cron_user',
'cron_name','cron_desc','cron_server',
'cron_command','cron_script','cron_status')
class AssetsSerializer(serializers.ModelSerializer):
class Meta:
model = Assets
fields = ('id','assets_type','name','sn','buy_time','expire_date',
'buy_user','management_ip','manufacturer','provider',
'model','status','put_zone','group','business','project')
class AssetsLogsSerializer(serializers.ModelSerializer):
class Meta:
model = Log_Assets
fields = ('id','assets_id','assets_user','assets_content','assets_type','create_time')
class ProjectConfigSerializer(serializers.ModelSerializer):
project_number = serializers.StringRelatedField(many=True)
class Meta:
model = Project_Config
fields = ('id','project_env','project_name','project_local_command',
'project_repo_dir','project_dir','project_exclude',
"project_type",'project_address','project_repertory',
'project_status','project_remote_command','project_user',
'project_uuid','project_number')
class DeployLogsSerializer(serializers.ModelSerializer):
class Meta:
model = Log_Project_Config
fields = ('id','project_id','project_user','project_name',
'project_content','project_branch','create_time')
class AnbiblePlaybookSerializer(serializers.ModelSerializer):
server_number = serializers.StringRelatedField(many=True)
class Meta:
model = Ansible_Playbook
fields = ('id','playbook_name','playbook_desc','playbook_vars',
'playbook_uuid','playbook_file','playbook_auth_group',
'playbook_auth_user','server_number')
class AnsibleModelLogsSerializer(serializers.ModelSerializer):
class Meta:
model = Log_Ansible_Model
fields = ('id','ans_user','ans_model','ans_args',
'ans_server','create_time')
class AnsiblePlaybookLogsSerializer(serializers.ModelSerializer):
class Meta:
model = Log_Ansible_Playbook
fields = ('id','ans_user','ans_name','ans_content','ans_id',
'ans_server','ans_content','create_time')
class CronLogsSerializer(serializers.ModelSerializer):
class Meta:
model = Log_Cron_Config
fields = ('id','cron_id','cron_user','cron_name','cron_content',
'cron_server','create_time')
class ServerSerializer(serializers.ModelSerializer):
assets = AssetsSerializer(required=False)
# keyfile = serializers.FileField(max_length=None, use_url=True)
class Meta:
model = Server_Assets
fields = ('id','ip','hostname','username','port','passwd',
'cpu','cpu_number','vcpu_number','keyfile',
'cpu_core','disk_total','ram_total','kernel',
'selinux','swap','system','assets',
'sudo_passwd')
def create(self, data):
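        # Writable nested serializer: create (or default) the related Assets record first, then the
        # Server_Assets row that references it.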
        if data.get('assets'):
            assets_data = data.pop('assets')
            assets = Assets.objects.create(**assets_data)
        else:
            assets = Assets()
        data['assets'] = assets
server = Server_Assets.objects.create(**data)
return server
class NetworkSerializer(serializers.ModelSerializer):
assets = AssetsSerializer(required=False)
class Meta:
model = Network_Assets
fields = ('id','ip','bandwidth','port_number','firmware',
'cpu','stone','configure_detail','assets',
'port','passwd','sudo_passwd','username')
def create(self, data):
        if data.get('assets'):
            assets_data = data.pop('assets')
            assets = Assets.objects.create(**assets_data)
        else:
            assets = Assets()
        data['assets'] = assets
server = Network_Assets.objects.create(**data)
return server
class DeployOrderSerializer(serializers.ModelSerializer):
class Meta:
model = Project_Order
fields = ('id','order_project','order_subject','order_content',
'order_branch','order_comid','order_tag','order_audit',
'order_status','order_level','order_cancel','create_time',
'order_user')
class InceptionSerializer(serializers.ModelSerializer):
class Meta:
model = Inception_Server_Config
fields = ('id','db_name','db_host','db_user','db_passwd','db_port',
'db_backup_host','db_backup_user','db_backup_port',
'db_backup_passwd')
class OrderSerializer(serializers.ModelSerializer):
class Meta:
model = Order_System
fields = ('id','order_subject','order_status','order_cancel','order_level')
class DataBaseServerSerializer(serializers.ModelSerializer):
class Meta:
model = DataBase_Server_Config
fields = ('id','db_env','db_name','db_host','db_user',
'db_passwd','db_port','db_mark','db_service',
'db_group','db_project','db_type',"db_mode")
class CustomSQLSerializer(serializers.ModelSerializer):
class Meta:
model = Custom_High_Risk_SQL
fields = ('id','sql')
class HistroySQLSerializer(serializers.ModelSerializer):
class Meta:
model = SQL_Execute_Histroy
fields = ('id','exe_sql','exe_user','exec_status','exe_result')
class TagSerializer(serializers.ModelSerializer):
class Meta:
model = Tag
fields = ('id','name')
class CategorySerializer(serializers.ModelSerializer):
class Meta:
model = Category
fields = ('id','name')
class PostSerializer(serializers.ModelSerializer):
class Meta:
model = Post
fields = ('id','title','content','category','author')
class UploadFilesSerializer(serializers.ModelSerializer):
class Meta:
model = UploadFiles
fields = ('file_path', 'file_type')
class UploadFilesOrderSerializer(serializers.ModelSerializer):
files = UploadFilesSerializer(many=True)
class Meta:
model = FileUpload_Audit_Order
fields = ('id', 'dest_path', 'dest_server',
'chown_user','chown_rwx','files')
class DownloadFilesSerializer(serializers.ModelSerializer):
class Meta:
model = FileDownload_Audit_Order
fields = ('id','order_content', 'dest_server','dest_path')
| 37.542222
| 95
| 0.626613
|
410747b6e64a6aac948591d4c25638b708fa92d1
| 184
|
py
|
Python
|
users/urls.py
|
GuillaumeStaub/client-manager
|
81298dc43956499f05c0f4d55992fd7dfced49b0
|
[
"MIT"
] | null | null | null |
users/urls.py
|
GuillaumeStaub/client-manager
|
81298dc43956499f05c0f4d55992fd7dfced49b0
|
[
"MIT"
] | null | null | null |
users/urls.py
|
GuillaumeStaub/client-manager
|
81298dc43956499f05c0f4d55992fd7dfced49b0
|
[
"MIT"
] | null | null | null |
from django.urls import path
from . import views
urlpatterns = [
path('login/', views.UsersLogin.as_view(), name='login'),
path('logout/',views.logout_view, name='logout'),
]
| 23
| 61
| 0.684783
|
2299238e662bfd4a024a5d2be64439c166def86b
| 263
|
py
|
Python
|
.vscode/desafios/desafio 010.py
|
FonsecaThay/Curso-de-python
|
58095dcb1f59d9e61aeab5a9de332e463f330d12
|
[
"MIT"
] | null | null | null |
.vscode/desafios/desafio 010.py
|
FonsecaThay/Curso-de-python
|
58095dcb1f59d9e61aeab5a9de332e463f330d12
|
[
"MIT"
] | null | null | null |
.vscode/desafios/desafio 010.py
|
FonsecaThay/Curso-de-python
|
58095dcb1f59d9e61aeab5a9de332e463f330d12
|
[
"MIT"
] | null | null | null |
v = float(input('Enter your balance here and find out how many dollars you can buy: R$'))
uss = v / 5.15
#uss2= v / 3.27
euro = v / 5.82
print(f'With R${v:.2f} you can buy US${uss:.2f} dollars')
print(f'With R${v:.2f} you can buy €{euro:.2f} euros')
| 43.833333
| 91
| 0.665399
|
518409a322cf2cf5de0a5302471422cdcb77b500
| 521
|
py
|
Python
|
examples/dind/python/__main__.py
|
jaxxstorm/pulumi-rke
|
9031a8509ce0f210348ece7419c1e7392174d603
|
[
"ECL-2.0",
"Apache-2.0"
] | 3
|
2020-03-13T20:27:32.000Z
|
2020-12-10T15:47:31.000Z
|
examples/dind/python/__main__.py
|
jaxxstorm/pulumi-rke
|
9031a8509ce0f210348ece7419c1e7392174d603
|
[
"ECL-2.0",
"Apache-2.0"
] | 97
|
2020-03-24T01:00:33.000Z
|
2021-04-25T20:27:28.000Z
|
examples/dind/python/__main__.py
|
jaxxstorm/pulumi-rke
|
9031a8509ce0f210348ece7419c1e7392174d603
|
[
"ECL-2.0",
"Apache-2.0"
] | 4
|
2020-03-23T21:44:29.000Z
|
2021-05-13T14:28:22.000Z
|
"""A Python Pulumi program"""
import pulumi
import pulumi_rke as rke
cluster = rke.Cluster("actions",
cluster_name="python-test-cluster",
ignore_docker_version=True,
dind=True,
dind_dns_server=True,
nodes=[{
"user": "docker",
"roles": [ "controlplane", "worker", "etcd" ],
"address": "pulumi-gha-node1"
}])
| 28.944444
| 72
| 0.424184
|
7622ad8bc4ee5e99bfe7e20f6f7600399a6cae21
| 963
|
py
|
Python
|
00.basic_interaction/code/13.globals/main.py
|
Gaetz/python-training
|
542f658883c66aaa932fb9e385225cfd573bb6de
|
[
"MIT"
] | 1
|
2021-10-05T11:45:28.000Z
|
2021-10-05T11:45:28.000Z
|
00.basic_interaction/code/13.globals/main.py
|
Gaetz/python-training
|
542f658883c66aaa932fb9e385225cfd573bb6de
|
[
"MIT"
] | null | null | null |
00.basic_interaction/code/13.globals/main.py
|
Gaetz/python-training
|
542f658883c66aaa932fb9e385225cfd573bb6de
|
[
"MIT"
] | null | null | null |
import pygame, time, sys
def main():
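    # Classic load / inputs / update / draw game loop; all shared state lives in module-level globals.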
pygame.init()
load()
while not quit_game:
inputs()
update()
draw()
    time.sleep(2)  # keep the final frame on screen for 2 seconds before the program exits
def load():
global screen, font, text, key, quit_game
screen = pygame.display.set_mode((800, 600))
font = pygame.font.Font(None, 24)
text = font.render("Hello, world :)", False, (50, 200, 100))
key = False
quit_game = False
def inputs():
global key
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
if event.type == pygame.KEYDOWN:
key = True
def update():
    global key, text, font, quit_game
    if key:
        text = font.render("How are you ?", False, (255, 255, 255))
        quit_game = True
def draw():
global screen, text
screen.fill((0, 0, 150))
screen.blit(text, (10, 10))
pygame.display.update()
if __name__ == "__main__":
main()
| 21.4
| 67
| 0.559709
|
53b773b6e2a5fdd2946d8d850d51f5c467d3e8ca
| 555
|
py
|
Python
|
src/radios/__init__.py
|
frenck/python-radios
|
f5b36cab45b639f2d11fe003033914473e66db08
|
[
"MIT"
] | 7
|
2022-02-21T13:20:15.000Z
|
2022-02-26T10:58:17.000Z
|
src/radios/__init__.py
|
frenck/python-radios
|
f5b36cab45b639f2d11fe003033914473e66db08
|
[
"MIT"
] | 2
|
2022-03-07T16:46:22.000Z
|
2022-03-30T06:34:03.000Z
|
src/radios/__init__.py
|
frenck/python-radios
|
f5b36cab45b639f2d11fe003033914473e66db08
|
[
"MIT"
] | null | null | null |
"""Asynchronous Python client for the Radio Browser APIs."""
from .const import FilterBy, Order
from .exceptions import (
RadioBrowserConnectionError,
RadioBrowserConnectionTimeoutError,
RadioBrowserError,
)
from .models import Country, Language, Station, Stats, Tag
from .radio_browser import RadioBrowser
__all__ = [
"RadioBrowser",
"Stats",
"Station",
"Order",
"Country",
"Language",
"Tag",
"FilterBy",
"RadioBrowserConnectionError",
"RadioBrowserConnectionTimeoutError",
"RadioBrowserError",
]
| 23.125
| 60
| 0.708108
|
041c6a8b85eb7e24e55a1ecfd4b1878ed03000b9
| 3,675
|
py
|
Python
|
huaweicloud-sdk-kms/huaweicloudsdkkms/v1/model/import_key_material_request.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 64
|
2020-06-12T07:05:07.000Z
|
2022-03-30T03:32:50.000Z
|
huaweicloud-sdk-kms/huaweicloudsdkkms/v1/model/import_key_material_request.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 11
|
2020-07-06T07:56:54.000Z
|
2022-01-11T11:14:40.000Z
|
huaweicloud-sdk-kms/huaweicloudsdkkms/v1/model/import_key_material_request.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 24
|
2020-06-08T11:42:13.000Z
|
2022-03-04T06:44:08.000Z
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ImportKeyMaterialRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'version_id': 'str',
'body': 'ImportKeyMaterialRequestBody'
}
attribute_map = {
'version_id': 'version_id',
'body': 'body'
}
def __init__(self, version_id=None, body=None):
"""ImportKeyMaterialRequest - a model defined in huaweicloud sdk"""
self._version_id = None
self._body = None
self.discriminator = None
self.version_id = version_id
if body is not None:
self.body = body
@property
def version_id(self):
"""Gets the version_id of this ImportKeyMaterialRequest.
        API version number
:return: The version_id of this ImportKeyMaterialRequest.
:rtype: str
"""
return self._version_id
@version_id.setter
def version_id(self, version_id):
"""Sets the version_id of this ImportKeyMaterialRequest.
        API version number
:param version_id: The version_id of this ImportKeyMaterialRequest.
:type: str
"""
self._version_id = version_id
@property
def body(self):
"""Gets the body of this ImportKeyMaterialRequest.
:return: The body of this ImportKeyMaterialRequest.
:rtype: ImportKeyMaterialRequestBody
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this ImportKeyMaterialRequest.
:param body: The body of this ImportKeyMaterialRequest.
:type: ImportKeyMaterialRequestBody
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
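        # Walk every declared attribute, recursively serialising nested models and masking any field
        # listed in sensitive_list.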
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ImportKeyMaterialRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 26.438849
| 79
| 0.564082
|
26ecf1356a94670f7ec987d7db7041d9ccdadc0e
| 14,904
|
py
|
Python
|
pandas/tests/frame/methods/test_rename.py
|
mathause/pandas
|
72327f32e2328d3e13b6c55617d71036fccdd0e1
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2
|
2021-01-20T21:13:08.000Z
|
2021-01-31T09:59:18.000Z
|
pandas/tests/frame/methods/test_rename.py
|
mathause/pandas
|
72327f32e2328d3e13b6c55617d71036fccdd0e1
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2019-11-13T22:07:00.000Z
|
2019-11-14T13:46:34.000Z
|
pandas/tests/frame/methods/test_rename.py
|
mathause/pandas
|
72327f32e2328d3e13b6c55617d71036fccdd0e1
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2017-05-08T13:57:21.000Z
|
2017-05-08T13:57:21.000Z
|
from collections import ChainMap
import inspect
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
merge,
)
import pandas._testing as tm
class TestRename:
def test_rename_signature(self):
sig = inspect.signature(DataFrame.rename)
parameters = set(sig.parameters)
assert parameters == {
"self",
"mapper",
"index",
"columns",
"axis",
"inplace",
"copy",
"level",
"errors",
}
@pytest.mark.parametrize("klass", [Series, DataFrame])
def test_rename_mi(self, klass):
obj = klass(
[11, 21, 31],
index=MultiIndex.from_tuples([("A", x) for x in ["a", "B", "c"]]),
)
obj.rename(str.lower)
def test_rename(self, float_frame):
mapping = {"A": "a", "B": "b", "C": "c", "D": "d"}
renamed = float_frame.rename(columns=mapping)
renamed2 = float_frame.rename(columns=str.lower)
tm.assert_frame_equal(renamed, renamed2)
tm.assert_frame_equal(
renamed2.rename(columns=str.upper), float_frame, check_names=False
)
# index
data = {"A": {"foo": 0, "bar": 1}}
# gets sorted alphabetical
df = DataFrame(data)
renamed = df.rename(index={"foo": "bar", "bar": "foo"})
tm.assert_index_equal(renamed.index, Index(["foo", "bar"]))
renamed = df.rename(index=str.upper)
tm.assert_index_equal(renamed.index, Index(["BAR", "FOO"]))
# have to pass something
with pytest.raises(TypeError, match="must pass an index to rename"):
float_frame.rename()
# partial columns
renamed = float_frame.rename(columns={"C": "foo", "D": "bar"})
tm.assert_index_equal(renamed.columns, Index(["A", "B", "foo", "bar"]))
# other axis
renamed = float_frame.T.rename(index={"C": "foo", "D": "bar"})
tm.assert_index_equal(renamed.index, Index(["A", "B", "foo", "bar"]))
# index with name
index = Index(["foo", "bar"], name="name")
renamer = DataFrame(data, index=index)
renamed = renamer.rename(index={"foo": "bar", "bar": "foo"})
tm.assert_index_equal(renamed.index, Index(["bar", "foo"], name="name"))
assert renamed.index.name == renamer.index.name
@pytest.mark.parametrize(
"args,kwargs",
[
((ChainMap({"A": "a"}, {"B": "b"}),), {"axis": "columns"}),
((), {"columns": ChainMap({"A": "a"}, {"B": "b"})}),
],
)
def test_rename_chainmap(self, args, kwargs):
# see gh-23859
colAData = range(1, 11)
colBdata = np.random.randn(10)
df = DataFrame({"A": colAData, "B": colBdata})
result = df.rename(*args, **kwargs)
expected = DataFrame({"a": colAData, "b": colBdata})
tm.assert_frame_equal(result, expected)
def test_rename_multiindex(self):
tuples_index = [("foo1", "bar1"), ("foo2", "bar2")]
tuples_columns = [("fizz1", "buzz1"), ("fizz2", "buzz2")]
index = MultiIndex.from_tuples(tuples_index, names=["foo", "bar"])
columns = MultiIndex.from_tuples(tuples_columns, names=["fizz", "buzz"])
df = DataFrame([(0, 0), (1, 1)], index=index, columns=columns)
#
# without specifying level -> across all levels
renamed = df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
new_index = MultiIndex.from_tuples(
[("foo3", "bar1"), ("foo2", "bar3")], names=["foo", "bar"]
)
new_columns = MultiIndex.from_tuples(
[("fizz3", "buzz1"), ("fizz2", "buzz3")], names=["fizz", "buzz"]
)
tm.assert_index_equal(renamed.index, new_index)
tm.assert_index_equal(renamed.columns, new_columns)
assert renamed.index.names == df.index.names
assert renamed.columns.names == df.columns.names
#
# with specifying a level (GH13766)
# dict
new_columns = MultiIndex.from_tuples(
[("fizz3", "buzz1"), ("fizz2", "buzz2")], names=["fizz", "buzz"]
)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=0)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="fizz")
tm.assert_index_equal(renamed.columns, new_columns)
new_columns = MultiIndex.from_tuples(
[("fizz1", "buzz1"), ("fizz2", "buzz3")], names=["fizz", "buzz"]
)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=1)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="buzz")
tm.assert_index_equal(renamed.columns, new_columns)
# function
func = str.upper
new_columns = MultiIndex.from_tuples(
[("FIZZ1", "buzz1"), ("FIZZ2", "buzz2")], names=["fizz", "buzz"]
)
renamed = df.rename(columns=func, level=0)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns=func, level="fizz")
tm.assert_index_equal(renamed.columns, new_columns)
new_columns = MultiIndex.from_tuples(
[("fizz1", "BUZZ1"), ("fizz2", "BUZZ2")], names=["fizz", "buzz"]
)
renamed = df.rename(columns=func, level=1)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns=func, level="buzz")
tm.assert_index_equal(renamed.columns, new_columns)
# index
new_index = MultiIndex.from_tuples(
[("foo3", "bar1"), ("foo2", "bar2")], names=["foo", "bar"]
)
renamed = df.rename(index={"foo1": "foo3", "bar2": "bar3"}, level=0)
tm.assert_index_equal(renamed.index, new_index)
@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) setitem copy/view
def test_rename_nocopy(self, float_frame):
renamed = float_frame.rename(columns={"C": "foo"}, copy=False)
assert np.shares_memory(renamed["foo"]._values, float_frame["C"]._values)
renamed.loc[:, "foo"] = 1.0
assert (float_frame["C"] == 1.0).all()
def test_rename_inplace(self, float_frame):
float_frame.rename(columns={"C": "foo"})
assert "C" in float_frame
assert "foo" not in float_frame
c_id = id(float_frame["C"])
float_frame = float_frame.copy()
return_value = float_frame.rename(columns={"C": "foo"}, inplace=True)
assert return_value is None
assert "C" not in float_frame
assert "foo" in float_frame
assert id(float_frame["foo"]) != c_id
def test_rename_bug(self):
# GH 5344
# rename set ref_locs, and set_index was not resetting
df = DataFrame({0: ["foo", "bar"], 1: ["bah", "bas"], 2: [1, 2]})
df = df.rename(columns={0: "a"})
df = df.rename(columns={1: "b"})
df = df.set_index(["a", "b"])
df.columns = ["2001-01-01"]
expected = DataFrame(
[[1], [2]],
index=MultiIndex.from_tuples(
[("foo", "bah"), ("bar", "bas")], names=["a", "b"]
),
columns=["2001-01-01"],
)
tm.assert_frame_equal(df, expected)
def test_rename_bug2(self):
# GH 19497
# rename was changing Index to MultiIndex if Index contained tuples
df = DataFrame(data=np.arange(3), index=[(0, 0), (1, 1), (2, 2)], columns=["a"])
df = df.rename({(1, 1): (5, 4)}, axis="index")
expected = DataFrame(
data=np.arange(3), index=[(0, 0), (5, 4), (2, 2)], columns=["a"]
)
tm.assert_frame_equal(df, expected)
def test_rename_errors_raises(self):
df = DataFrame(columns=["A", "B", "C", "D"])
with pytest.raises(KeyError, match="'E'] not found in axis"):
df.rename(columns={"A": "a", "E": "e"}, errors="raise")
@pytest.mark.parametrize(
"mapper, errors, expected_columns",
[
({"A": "a", "E": "e"}, "ignore", ["a", "B", "C", "D"]),
({"A": "a"}, "raise", ["a", "B", "C", "D"]),
(str.lower, "raise", ["a", "b", "c", "d"]),
],
)
def test_rename_errors(self, mapper, errors, expected_columns):
# GH 13473
# rename now works with errors parameter
df = DataFrame(columns=["A", "B", "C", "D"])
result = df.rename(columns=mapper, errors=errors)
expected = DataFrame(columns=expected_columns)
tm.assert_frame_equal(result, expected)
def test_rename_objects(self, float_string_frame):
renamed = float_string_frame.rename(columns=str.upper)
assert "FOO" in renamed
assert "foo" not in renamed
def test_rename_axis_style(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["X", "Y"])
expected = DataFrame({"a": [1, 2], "b": [1, 2]}, index=["X", "Y"])
result = df.rename(str.lower, axis=1)
tm.assert_frame_equal(result, expected)
result = df.rename(str.lower, axis="columns")
tm.assert_frame_equal(result, expected)
result = df.rename({"A": "a", "B": "b"}, axis=1)
tm.assert_frame_equal(result, expected)
result = df.rename({"A": "a", "B": "b"}, axis="columns")
tm.assert_frame_equal(result, expected)
# Index
expected = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["x", "y"])
result = df.rename(str.lower, axis=0)
tm.assert_frame_equal(result, expected)
result = df.rename(str.lower, axis="index")
tm.assert_frame_equal(result, expected)
result = df.rename({"X": "x", "Y": "y"}, axis=0)
tm.assert_frame_equal(result, expected)
result = df.rename({"X": "x", "Y": "y"}, axis="index")
tm.assert_frame_equal(result, expected)
result = df.rename(mapper=str.lower, axis="index")
tm.assert_frame_equal(result, expected)
def test_rename_mapper_multi(self):
df = DataFrame({"A": ["a", "b"], "B": ["c", "d"], "C": [1, 2]}).set_index(
["A", "B"]
)
result = df.rename(str.upper)
expected = df.rename(index=str.upper)
tm.assert_frame_equal(result, expected)
def test_rename_positional_named(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = DataFrame({"a": [1, 2], "b": [1, 2]}, index=["X", "Y"])
result = df.rename(index=str.lower, columns=str.upper)
expected = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["x", "y"])
tm.assert_frame_equal(result, expected)
def test_rename_axis_style_raises(self):
# see gh-12392
df = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["0", "1"])
# Named target and axis
over_spec_msg = "Cannot specify both 'axis' and any of 'index' or 'columns'"
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(index=str.lower, axis=1)
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(index=str.lower, axis="columns")
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(columns=str.lower, axis="columns")
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(index=str.lower, axis=0)
# Multiple targets and axis
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(str.lower, index=str.lower, axis="columns")
# Too many targets
over_spec_msg = "Cannot specify both 'mapper' and any of 'index' or 'columns'"
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(str.lower, index=str.lower, columns=str.lower)
# Duplicates
with pytest.raises(TypeError, match="multiple values"):
df.rename(id, mapper=id)
def test_rename_positional_raises(self):
# GH 29136
df = DataFrame(columns=["A", "B"])
msg = r"rename\(\) takes from 1 to 2 positional arguments"
with pytest.raises(TypeError, match=msg):
df.rename(None, str.lower)
def test_rename_no_mappings_raises(self):
# GH 29136
df = DataFrame([[1]])
msg = "must pass an index to rename"
with pytest.raises(TypeError, match=msg):
df.rename()
with pytest.raises(TypeError, match=msg):
df.rename(None, index=None)
with pytest.raises(TypeError, match=msg):
df.rename(None, columns=None)
with pytest.raises(TypeError, match=msg):
df.rename(None, columns=None, index=None)
def test_rename_mapper_and_positional_arguments_raises(self):
# GH 29136
df = DataFrame([[1]])
msg = "Cannot specify both 'mapper' and any of 'index' or 'columns'"
with pytest.raises(TypeError, match=msg):
df.rename({}, index={})
with pytest.raises(TypeError, match=msg):
df.rename({}, columns={})
with pytest.raises(TypeError, match=msg):
df.rename({}, columns={}, index={})
@td.skip_array_manager_not_yet_implemented
def test_rename_with_duplicate_columns(self):
# GH#4403
df4 = DataFrame(
{"RT": [0.0454], "TClose": [22.02], "TExg": [0.0422]},
index=MultiIndex.from_tuples(
[(600809, 20130331)], names=["STK_ID", "RPT_Date"]
),
)
df5 = DataFrame(
{
"RPT_Date": [20120930, 20121231, 20130331],
"STK_ID": [600809] * 3,
"STK_Name": ["้ฅก้ฉฆ", "้ฅก้ฉฆ", "้ฅก้ฉฆ"],
"TClose": [38.05, 41.66, 30.01],
},
index=MultiIndex.from_tuples(
[(600809, 20120930), (600809, 20121231), (600809, 20130331)],
names=["STK_ID", "RPT_Date"],
),
)
# TODO: can we construct this without merge?
k = merge(df4, df5, how="inner", left_index=True, right_index=True)
result = k.rename(columns={"TClose_x": "TClose", "TClose_y": "QT_Close"})
str(result)
result.dtypes
expected = DataFrame(
[[0.0454, 22.02, 0.0422, 20130331, 600809, "้ฅก้ฉฆ", 30.01]],
columns=[
"RT",
"TClose",
"TExg",
"RPT_Date",
"STK_ID",
"STK_Name",
"QT_Close",
],
).set_index(["STK_ID", "RPT_Date"], drop=False)
tm.assert_frame_equal(result, expected)
| 36.440098
| 88
| 0.558038
|
d1442331e870f10fa1cbba13ab526e098c1c29fb
| 1,081
|
py
|
Python
|
elasticsearch6/client/xpack/monitoring.py
|
turtle321/elasticsearch-py
|
80cd96ef96f34e3bb291fdf4f643da5a1016a8d7
|
[
"Apache-2.0"
] | null | null | null |
elasticsearch6/client/xpack/monitoring.py
|
turtle321/elasticsearch-py
|
80cd96ef96f34e3bb291fdf4f643da5a1016a8d7
|
[
"Apache-2.0"
] | null | null | null |
elasticsearch6/client/xpack/monitoring.py
|
turtle321/elasticsearch-py
|
80cd96ef96f34e3bb291fdf4f643da5a1016a8d7
|
[
"Apache-2.0"
] | null | null | null |
from elasticsearch6.client.utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
class MonitoringClient(NamespacedClient):
@query_params('interval', 'system_api_version', 'system_id')
def bulk(self, body, doc_type=None, params=None):
"""
`<http://www.elastic.co/guide/en/monitoring/current/appendix-api-bulk.html>`_
:arg body: The operation definition and data (action-data pairs),
separated by newlines
:arg doc_type: Default document type for items which don't provide one
:arg interval: Collection interval (e.g., '10s' or '10000ms') of the
payload
:arg system_api_version: API Version of the monitored system
:arg system_id: Identifier of the monitored system
"""
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.")
return self.transport.perform_request('POST', _make_path('_xpack',
'monitoring', doc_type, '_bulk'), params=params,
body=self.client._bulk_body(body))
| 49.136364
| 96
| 0.678076
|
f746b62b4f4b6d73439630f4f196227fbc7aaa2f
| 3,836
|
py
|
Python
|
cirq/ion/convert_to_ion_gates_test.py
|
kunalq/Cirq
|
e73c9bef672e83143ab04e7f169988149055d630
|
[
"Apache-2.0"
] | 1
|
2019-09-04T16:55:30.000Z
|
2019-09-04T16:55:30.000Z
|
cirq/ion/convert_to_ion_gates_test.py
|
1eedaegon/Cirq
|
de0c5e855069bba71e55b070fc9b06f58c07a861
|
[
"Apache-2.0"
] | null | null | null |
cirq/ion/convert_to_ion_gates_test.py
|
1eedaegon/Cirq
|
de0c5e855069bba71e55b070fc9b06f58c07a861
|
[
"Apache-2.0"
] | 1
|
2020-12-18T16:36:41.000Z
|
2020-12-18T16:36:41.000Z
|
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy as np
import cirq
class OtherX(cirq.SingleQubitGate):
def _unitary_(self) -> np.ndarray:
return np.array([[0, 1], [1, 0]])
class NoUnitary(cirq.SingleQubitGate):
pass
class OtherCNOT(cirq.TwoQubitGate):
def _unitary_(self) -> np.ndarray:
return np.array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 0]])
def test_convert_to_ion_gates():
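    # ConvertToIonGates rewrites arbitrary operations into the trapped-ion native gate set
    # (single-qubit rotations plus Mølmer-Sørensen MS gates).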
q0 = cirq.GridQubit(0, 0)
q1 = cirq.GridQubit(0, 1)
op = cirq.CNOT(q0, q1)
circuit = cirq.Circuit()
with pytest.raises(TypeError):
cirq.ion.ConvertToIonGates().convert_one(circuit)
with pytest.raises(TypeError):
cirq.ion.ConvertToIonGates().convert_one(NoUnitary().on(q0))
no_unitary_op = NoUnitary().on(q0)
assert cirq.ion.ConvertToIonGates(ignore_failures=True).convert_one(
no_unitary_op) == [no_unitary_op]
rx = cirq.ion.ConvertToIonGates().convert_one(OtherX().on(q0))
rop = cirq.ion.ConvertToIonGates().convert_one(op)
rcnot = cirq.ion.ConvertToIonGates().convert_one(OtherCNOT().on(q0, q1))
assert rx == [
cirq.PhasedXPowGate(phase_exponent=1).on(cirq.GridQubit(0, 0))
]
assert rop == [cirq.Ry(np.pi/2).on(op.qubits[0]),
cirq.ion.MS(np.pi/4).on(op.qubits[0], op.qubits[1]),
cirq.ops.Rx(-1*np.pi/2).on(op.qubits[0]),
cirq.ops.Rx(-1*np.pi/2).on(op.qubits[1]),
cirq.ops.Ry(-1*np.pi/2).on(op.qubits[0])]
assert rcnot == [
cirq.PhasedXPowGate(phase_exponent=-0.75,
exponent=0.5).on(cirq.GridQubit(0, 0)),
cirq.PhasedXPowGate(phase_exponent=1,
exponent=0.25).on(cirq.GridQubit(0, 1)),
cirq.T.on(cirq.GridQubit(0, 0)),
cirq.MS(-0.5 * np.pi / 2).on(cirq.GridQubit(0, 0), cirq.GridQubit(0,
1)),
(cirq.Y**0.5).on(cirq.GridQubit(0, 0)),
cirq.PhasedXPowGate(phase_exponent=1,
exponent=0.25).on(cirq.GridQubit(0, 1)),
(cirq.Z**-0.75).on(cirq.GridQubit(0, 0))
]
def test_convert_to_ion_circuit():
q0 = cirq.LineQubit(0)
q1 = cirq.LineQubit(1)
us = cirq.Duration(nanos=1000)
ion_device = cirq.IonDevice(us, us, us, [q0, q1])
clifford_circuit_1 = cirq.Circuit()
clifford_circuit_1.append([cirq.X(q0), cirq.H(q1),
cirq.MS(np.pi/4).on(q0, q1)])
ion_circuit_1 = cirq.ion.ConvertToIonGates().convert_circuit(
clifford_circuit_1)
ion_device.validate_circuit(ion_circuit_1)
cirq.testing.assert_circuits_with_terminal_measurements_are_equivalent(
clifford_circuit_1, ion_circuit_1, atol=1e-6)
clifford_circuit_2 = cirq.Circuit()
clifford_circuit_2.append([cirq.X(q0), cirq.CNOT(q1, q0), cirq.MS(
np.pi/4).on(q0, q1)])
ion_circuit_2 = cirq.ion.ConvertToIonGates().convert_circuit(
clifford_circuit_2)
ion_device.validate_circuit(ion_circuit_2)
cirq.testing.assert_circuits_with_terminal_measurements_are_equivalent(
clifford_circuit_2, ion_circuit_2, atol=1e-6)
| 36.884615
| 78
| 0.62513
|
698db812382f1e3a23f2fa7b39ff96f689930cf8
| 40,774
|
py
|
Python
|
tensorflow_probability/python/distributions/vector_diffeomixture.py
|
timudk/probability
|
8bdbf1c0b0f801edaf342f4ffc9caf1cfd6f1103
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_probability/python/distributions/vector_diffeomixture.py
|
timudk/probability
|
8bdbf1c0b0f801edaf342f4ffc9caf1cfd6f1103
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_probability/python/distributions/vector_diffeomixture.py
|
timudk/probability
|
8bdbf1c0b0f801edaf342f4ffc9caf1cfd6f1103
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The VectorDiffeomixture distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import affine_linear_operator as affine_linear_operator_bijector
from tensorflow_probability.python.bijectors import softmax_centered as softmax_centered_bijector
from tensorflow_probability.python.distributions import categorical
from tensorflow_probability.python.distributions import distribution as distribution_lib
from tensorflow_probability.python.distributions import normal
from tensorflow_probability.python.distributions import seed_stream
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import distribution_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import reparameterization
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow.python.ops.linalg import linear_operator_addition as linop_add_lib # pylint: disable=g-direct-tensorflow-import
__all__ = [
"VectorDiffeomixture",
"quadrature_scheme_softmaxnormal_gauss_hermite",
"quadrature_scheme_softmaxnormal_quantiles",
]
def quadrature_scheme_softmaxnormal_gauss_hermite(
normal_loc, normal_scale, quadrature_size,
validate_args=False, name=None):
"""Use Gauss-Hermite quadrature to form quadrature on `K - 1` simplex.
A `SoftmaxNormal` random variable `Y` may be generated via
```
Y = SoftmaxCentered(X),
X = Normal(normal_loc, normal_scale)
```
Note: for a given `quadrature_size`, this method is generally less accurate
than `quadrature_scheme_softmaxnormal_quantiles`.
Args:
normal_loc: `float`-like `Tensor` with shape `[b1, ..., bB, K-1]`, B>=0.
The location parameter of the Normal used to construct the SoftmaxNormal.
normal_scale: `float`-like `Tensor`. Broadcastable with `normal_loc`.
The scale parameter of the Normal used to construct the SoftmaxNormal.
quadrature_size: Python `int` scalar representing the number of quadrature
points.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class.
Returns:
grid: Shape `[b1, ..., bB, K, quadrature_size]` `Tensor` representing the
convex combination of affine parameters for `K` components.
`grid[..., :, n]` is the `n`-th grid point, living in the `K - 1` simplex.
    probs: Shape `[b1, ..., bB, K, quadrature_size]` `Tensor` representing the
      probability mass associated with each grid point.
"""
with tf.name_scope(
name or "quadrature_scheme_softmaxnormal_gauss_hermite"):
normal_loc = tf.convert_to_tensor(value=normal_loc, name="normal_loc")
npdt = dtype_util.as_numpy_dtype(normal_loc.dtype)
normal_scale = tf.convert_to_tensor(
value=normal_scale, dtype=npdt, name="normal_scale")
normal_scale = maybe_check_quadrature_param(
normal_scale, "normal_scale", validate_args)
grid, probs = np.polynomial.hermite.hermgauss(deg=quadrature_size)
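    # Gauss-Hermite nodes/weights target integrals against exp(-x**2); the shift by
    # loc + sqrt(2) * scale below maps them onto the Normal(normal_loc, normal_scale) density
    # before the softmax pushes the grid onto the simplex.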
grid = grid.astype(npdt)
probs = probs.astype(npdt)
probs /= np.linalg.norm(probs, ord=1, keepdims=True)
probs = tf.convert_to_tensor(value=probs, name="probs", dtype=npdt)
grid = softmax(
-distribution_util.pad(
(normal_loc[..., tf.newaxis] +
np.sqrt(2.) * normal_scale[..., tf.newaxis] * grid),
axis=-2,
front=True),
axis=-2) # shape: [B, components, deg]
return grid, probs
def quadrature_scheme_softmaxnormal_quantiles(
normal_loc, normal_scale, quadrature_size,
validate_args=False, name=None):
"""Use SoftmaxNormal quantiles to form quadrature on `K - 1` simplex.
A `SoftmaxNormal` random variable `Y` may be generated via
```
Y = SoftmaxCentered(X),
X = Normal(normal_loc, normal_scale)
```
Args:
normal_loc: `float`-like `Tensor` with shape `[b1, ..., bB, K-1]`, B>=0.
The location parameter of the Normal used to construct the SoftmaxNormal.
normal_scale: `float`-like `Tensor`. Broadcastable with `normal_loc`.
The scale parameter of the Normal used to construct the SoftmaxNormal.
quadrature_size: Python `int` scalar representing the number of quadrature
points.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class.
Returns:
grid: Shape `[b1, ..., bB, K, quadrature_size]` `Tensor` representing the
convex combination of affine parameters for `K` components.
`grid[..., :, n]` is the `n`-th grid point, living in the `K - 1` simplex.
    probs: Shape `[b1, ..., bB, K, quadrature_size]` `Tensor` representing the
      probability mass associated with each grid point.
"""
with tf.name_scope(name or "softmax_normal_grid_and_probs"):
normal_loc = tf.convert_to_tensor(value=normal_loc, name="normal_loc")
dt = dtype_util.base_dtype(normal_loc.dtype)
normal_scale = tf.convert_to_tensor(
value=normal_scale, dtype=dt, name="normal_scale")
normal_scale = maybe_check_quadrature_param(
normal_scale, "normal_scale", validate_args)
dist = normal.Normal(loc=normal_loc, scale=normal_scale)
def _get_batch_ndims():
"""Helper to get rank(dist.batch_shape), statically if possible."""
ndims = tensorshape_util.rank(dist.batch_shape)
if ndims is None:
ndims = tf.shape(input=dist.batch_shape_tensor())[0]
return ndims
batch_ndims = _get_batch_ndims()
def _get_final_shape(qs):
"""Helper to build `TensorShape`."""
bs = tensorshape_util.with_rank_at_least(dist.batch_shape, 1)
num_components = tf.compat.dimension_value(bs[-1])
if num_components is not None:
num_components += 1
tail = tf.TensorShape([num_components, qs])
return bs[:-1].concatenate(tail)
def _compute_quantiles():
"""Helper to build quantiles."""
# Omit {0, 1} since they might lead to Inf/NaN.
zero = tf.zeros([], dtype=dist.dtype)
edges = tf.linspace(zero, 1., quadrature_size + 3)[1:-1]
# Expand edges so its broadcast across batch dims.
edges = tf.reshape(
edges,
shape=tf.concat(
[[-1], tf.ones([batch_ndims], dtype=tf.int32)], axis=0))
quantiles = dist.quantile(edges)
quantiles = softmax_centered_bijector.SoftmaxCentered().forward(quantiles)
# Cyclically permute left by one.
perm = tf.concat([tf.range(1, 1 + batch_ndims), [0]], axis=0)
quantiles = tf.transpose(a=quantiles, perm=perm)
tensorshape_util.set_shape(
quantiles, _get_final_shape(quadrature_size + 1))
return quantiles
quantiles = _compute_quantiles()
# Compute grid as quantile midpoints.
grid = (quantiles[..., :-1] + quantiles[..., 1:]) / 2.
# Set shape hints.
tensorshape_util.set_shape(grid, _get_final_shape(quadrature_size))
# By construction probs is constant, i.e., `1 / quadrature_size`. This is
# important, because non-constant probs leads to non-reparameterizable
# samples.
probs = tf.fill(
dims=[quadrature_size], value=1. / tf.cast(quadrature_size, dist.dtype))
return grid, probs
class VectorDiffeomixture(distribution_lib.Distribution):
"""VectorDiffeomixture distribution.
A vector diffeomixture (VDM) is a distribution parameterized by a convex
combination of `K` component `loc` vectors, `loc[k], k = 0,...,K-1`, and `K`
`scale` matrices `scale[k], k = 0,..., K-1`. It approximates the following
[compound distribution]
(https://en.wikipedia.org/wiki/Compound_probability_distribution)
```none
p(x) = int p(x | z) p(z) dz,
where z is in the K-simplex, and
p(x | z) := p(x | loc=sum_k z[k] loc[k], scale=sum_k z[k] scale[k])
```
The integral `int p(x | z) p(z) dz` is approximated with a quadrature scheme
adapted to the mixture density `p(z)`. The `N` quadrature points `z_{N, n}`
and weights `w_{N, n}` (which are non-negative and sum to 1) are chosen
such that
```q_N(x) := sum_{n=1}^N w_{n, N} p(x | z_{N, n}) --> p(x)```
as `N --> infinity`.
Since `q_N(x)` is in fact a mixture (of `N` points), we may sample from
`q_N` exactly. It is important to note that the VDM is *defined* as `q_N`
above, and *not* `p(x)`. Therefore, sampling and pdf may be implemented as
exact (up to floating point error) methods.
A common choice for the conditional `p(x | z)` is a multivariate Normal.
The implemented marginal `p(z)` is the `SoftmaxNormal`, which is a
`K-1` dimensional Normal transformed by a `SoftmaxCentered` bijector, making
it a density on the `K`-simplex. That is,
```
Z = SoftmaxCentered(X),
X = Normal(mix_loc / temperature, 1 / temperature)
```
The default quadrature scheme chooses `z_{N, n}` as `N` midpoints of
the quantiles of `p(z)` (generalized quantiles if `K > 2`).
See [Dillon and Langmore (2018)][1] for more details.
#### About `Vector` distributions in TensorFlow.
The `VectorDiffeomixture` is a non-standard distribution that has properties
particularly useful in [variational Bayesian
methods](https://en.wikipedia.org/wiki/Variational_Bayesian_methods).
Conditioned on a draw from the SoftmaxNormal, `X|z` is a vector whose
components are linear combinations of affine transformations, thus is itself
an affine transformation.
Note: The marginals `X_1|v, ..., X_d|v` are *not* generally identical to some
parameterization of `distribution`. This is due to the fact that the sum of
draws from `distribution` are not generally itself the same `distribution`.
#### About `Diffeomixture`s and reparameterization.
The `VectorDiffeomixture` is designed to be reparameterized, i.e., its
parameters are only used to transform samples from a distribution which has no
trainable parameters. This property is important because backprop stops at
sources of stochasticity. That is, as long as the parameters are used *after*
the underlying source of stochasticity, the computed gradient is accurate.
Reparametrization means that we can use gradient-descent (via backprop) to
optimize Monte-Carlo objectives. Such objectives are a finite-sample
approximation of an expectation and arise throughout scientific computing.
WARNING: If you backprop through a VectorDiffeomixture sample and the "base"
distribution is both: not `FULLY_REPARAMETERIZED` and a function of trainable
variables, then the gradient is not guaranteed correct!
#### Examples
```python
tfd = tfp.distributions
# Create two batches of VectorDiffeomixtures, one with mix_loc=[0.],
# another with mix_loc=[1]. In both cases, `K=2` and the affine
# transformations involve:
# k=0: loc=zeros(dims) scale=LinearOperatorScaledIdentity
# k=1: loc=[2.]*dims scale=LinOpDiag
dims = 5
vdm = tfd.VectorDiffeomixture(
mix_loc=[[0.], [1]],
temperature=[1.],
distribution=tfd.Normal(loc=0., scale=1.),
loc=[
None, # Equivalent to `np.zeros(dims, dtype=np.float32)`.
np.float32([2.]*dims),
],
scale=[
tf.linalg.LinearOperatorScaledIdentity(
num_rows=dims,
multiplier=np.float32(1.1),
is_positive_definite=True),
tf.linalg.LinearOperatorDiag(
diag=np.linspace(2.5, 3.5, dims, dtype=np.float32),
is_positive_definite=True),
],
validate_args=True)
```
#### References
[1]: Joshua Dillon and Ian Langmore. Quadrature Compound: An approximating
family of distributions. _arXiv preprint arXiv:1801.03080_, 2018.
https://arxiv.org/abs/1801.03080
"""
def __init__(self,
mix_loc,
temperature,
distribution,
loc=None,
scale=None,
quadrature_size=8,
quadrature_fn=quadrature_scheme_softmaxnormal_quantiles,
validate_args=False,
allow_nan_stats=True,
name="VectorDiffeomixture"):
"""Constructs the VectorDiffeomixture on `R^d`.
The vector diffeomixture (VDM) approximates the compound distribution
```none
p(x) = int p(x | z) p(z) dz,
where z is in the K-simplex, and
p(x | z) := p(x | loc=sum_k z[k] loc[k], scale=sum_k z[k] scale[k])
```
Args:
mix_loc: `float`-like `Tensor` with shape `[b1, ..., bB, K-1]`.
In terms of samples, larger `mix_loc[..., k]` ==>
`Z` is more likely to put more weight on its `kth` component.
temperature: `float`-like `Tensor`. Broadcastable with `mix_loc`.
In terms of samples, smaller `temperature` means one component is more
likely to dominate. I.e., smaller `temperature` makes the VDM look more
like a standard mixture of `K` components.
distribution: `tfp.distributions.Distribution`-like instance. Distribution
from which `d` iid samples are used as input to the selected affine
transformation. Must be a scalar-batch, scalar-event distribution.
Typically `distribution.reparameterization_type = FULLY_REPARAMETERIZED`
or it is a function of non-trainable parameters. WARNING: If you
backprop through a VectorDiffeomixture sample and the `distribution`
is not `FULLY_REPARAMETERIZED` yet is a function of trainable variables,
then the gradient will be incorrect!
loc: Length-`K` list of `float`-type `Tensor`s. The `k`-th element
represents the `shift` used for the `k`-th affine transformation. If
the `k`-th item is `None`, `loc` is implicitly `0`. When specified,
must have shape `[B1, ..., Bb, d]` where `b >= 0` and `d` is the event
size.
scale: Length-`K` list of `LinearOperator`s. Each should be
positive-definite and operate on a `d`-dimensional vector space. The
`k`-th element represents the `scale` used for the `k`-th affine
transformation. `LinearOperator`s must have shape `[B1, ..., Bb, d, d]`,
`b >= 0`, i.e., characterizes `b`-batches of `d x d` matrices
quadrature_size: Python `int` scalar representing number of
quadrature points. Larger `quadrature_size` means `q_N(x)` better
approximates `p(x)`.
quadrature_fn: Python callable taking `normal_loc`, `normal_scale`,
`quadrature_size`, `validate_args` and returning `tuple(grid, probs)`
        representing the SoftmaxNormal grid and the corresponding normalized
        weights.
Default value: `quadrature_scheme_softmaxnormal_quantiles`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
ValueError: if `not scale or len(scale) < 2`.
ValueError: if `len(loc) != len(scale)`
ValueError: if `quadrature_grid_and_probs is not None` and
`len(quadrature_grid_and_probs[0]) != len(quadrature_grid_and_probs[1])`
ValueError: if `validate_args` and any not scale.is_positive_definite.
TypeError: if any scale.dtype != scale[0].dtype.
TypeError: if any loc.dtype != scale[0].dtype.
NotImplementedError: if `len(scale) != 2`.
ValueError: if `not distribution.is_scalar_batch`.
ValueError: if `not distribution.is_scalar_event`.
"""
parameters = dict(locals())
with tf.name_scope(name) as name:
if not scale or len(scale) < 2:
raise ValueError("Must specify list (or list-like object) of scale "
"LinearOperators, one for each component with "
"num_component >= 2.")
if loc is None:
loc = [None]*len(scale)
if len(loc) != len(scale):
raise ValueError("loc/scale must be same-length lists "
"(or same-length list-like objects).")
dtype = dtype_util.base_dtype(scale[0].dtype)
loc = [
tf.convert_to_tensor(value=loc_, dtype=dtype, name="loc{}".format(k))
if loc_ is not None else None for k, loc_ in enumerate(loc)
]
for k, scale_ in enumerate(scale):
if validate_args and not scale_.is_positive_definite:
raise ValueError("scale[{}].is_positive_definite = {} != True".format(
k, scale_.is_positive_definite))
if dtype_util.base_dtype(scale_.dtype) != dtype:
raise TypeError(
"dtype mismatch; scale[{}].base_dtype=\"{}\" != \"{}\"".format(
k, dtype_util.name(scale_.dtype), dtype_util.name(dtype)))
self._endpoint_affine = [
affine_linear_operator_bijector.AffineLinearOperator( # pylint: disable=g-complex-comprehension
shift=loc_, scale=scale_,
validate_args=validate_args,
name="endpoint_affine_{}".format(k))
for k, (loc_, scale_) in enumerate(zip(loc, scale))]
# TODO(jvdillon): Remove once we support k-mixtures.
# We make this assertion here because otherwise `grid` would need to be a
# vector not a scalar.
if len(scale) != 2:
raise NotImplementedError("Currently only bimixtures are supported; "
"len(scale)={} is not 2.".format(len(scale)))
mix_loc = tf.convert_to_tensor(value=mix_loc, dtype=dtype, name="mix_loc")
temperature = tf.convert_to_tensor(
value=temperature, dtype=dtype, name="temperature")
self._grid, probs = tuple(quadrature_fn(
mix_loc / temperature,
1. / temperature,
quadrature_size,
validate_args))
# Note: by creating the logits as `log(prob)` we ensure that
# `self.mixture_distribution.logits` is equivalent to
# `math_ops.log(self.mixture_distribution.probs)`.
self._mixture_distribution = categorical.Categorical(
logits=tf.math.log(probs),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats)
asserts = distribution_util.maybe_check_scalar_distribution(
distribution, dtype, validate_args)
if asserts:
self._grid = distribution_util.with_dependencies(
asserts, self._grid)
self._distribution = distribution
self._interpolated_affine = [
affine_linear_operator_bijector.AffineLinearOperator( # pylint: disable=g-complex-comprehension
shift=loc_, scale=scale_,
validate_args=validate_args,
name="interpolated_affine_{}".format(k))
for k, (loc_, scale_) in enumerate(zip(
interpolate_loc(self._grid, loc),
interpolate_scale(self._grid, scale)))]
[
self._batch_shape_,
self._batch_shape_tensor_,
self._event_shape_,
self._event_shape_tensor_,
] = determine_batch_event_shapes(self._grid,
self._endpoint_affine)
super(VectorDiffeomixture, self).__init__(
dtype=dtype,
# We hard-code `FULLY_REPARAMETERIZED` because when
# `validate_args=True` we verify that indeed
# `distribution.reparameterization_type == FULLY_REPARAMETERIZED`. A
# distribution which is a function of only non-trainable parameters
# also implies we can use `FULLY_REPARAMETERIZED`. However, we cannot
# easily test for that possibility thus we use `validate_args=False`
# as a "back-door" to allow users a way to use non
# `FULLY_REPARAMETERIZED` distribution. In such cases IT IS THE USERS
# RESPONSIBILITY to verify that the base distribution is a function of
# non-trainable parameters.
reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=(
distribution._graph_parents # pylint: disable=protected-access
+ [loc_ for loc_ in loc if loc_ is not None] +
[p for scale_ in scale for p in scale_.graph_parents]), # pylint: disable=g-complex-comprehension
name=name)
@property
def mixture_distribution(self):
"""Distribution used to select a convex combination of affine transforms."""
return self._mixture_distribution
@property
def distribution(self):
"""Base scalar-event, scalar-batch distribution."""
return self._distribution
@property
def grid(self):
"""Grid of mixing probabilities, one for each grid point."""
return self._grid
@property
def endpoint_affine(self):
"""Affine transformation for each of `K` components."""
return self._endpoint_affine
@property
def interpolated_affine(self):
"""Affine transformation for each convex combination of `K` components."""
return self._interpolated_affine
def _batch_shape_tensor(self):
return self._batch_shape_tensor_
def _batch_shape(self):
return self._batch_shape_
def _event_shape_tensor(self):
return self._event_shape_tensor_
def _event_shape(self):
return self._event_shape_
def _sample_n(self, n, seed=None):
stream = seed_stream.SeedStream(seed, salt="VectorDiffeomixture")
x = self.distribution.sample(
sample_shape=concat_vectors(
[n],
self.batch_shape_tensor(),
self.event_shape_tensor()),
seed=stream()) # shape: [n, B, e]
x = [aff.forward(x) for aff in self.endpoint_affine]
# Get ids as a [n, batch_size]-shaped matrix, unless batch_shape=[] then get
# ids as a [n]-shaped vector.
batch_size = tensorshape_util.num_elements(self.batch_shape)
if batch_size is None:
batch_size = tf.reduce_prod(input_tensor=self.batch_shape_tensor())
mix_batch_size = tensorshape_util.num_elements(
self.mixture_distribution.batch_shape)
if mix_batch_size is None:
mix_batch_size = tf.reduce_prod(
input_tensor=self.mixture_distribution.batch_shape_tensor())
ids = self.mixture_distribution.sample(
sample_shape=concat_vectors(
[n],
distribution_util.pick_vector(
self.is_scalar_batch(),
np.int32([]),
[batch_size // mix_batch_size])),
seed=stream())
# We need to flatten batch dims in case mixture_distribution has its own
# batch dims.
ids = tf.reshape(
ids,
shape=concat_vectors([n],
distribution_util.pick_vector(
self.is_scalar_batch(), np.int32([]),
np.int32([-1]))))
# Stride `components * quadrature_size` for `batch_size` number of times.
stride = tensorshape_util.num_elements(
tensorshape_util.with_rank_at_least(self.grid.shape, 2)[-2:])
if stride is None:
stride = tf.reduce_prod(input_tensor=tf.shape(input=self.grid)[-2:])
offset = tf.range(
start=0, limit=batch_size * stride, delta=stride, dtype=ids.dtype)
weight = tf.gather(tf.reshape(self.grid, shape=[-1]), ids + offset)
# At this point, weight flattened all batch dims into one.
# We also need to append a singleton to broadcast with event dims.
if tensorshape_util.is_fully_defined(self.batch_shape):
new_shape = [-1] + tensorshape_util.as_list(self.batch_shape) + [1]
else:
new_shape = tf.concat(([-1], self.batch_shape_tensor(), [1]), axis=0)
weight = tf.reshape(weight, shape=new_shape)
if len(x) != 2:
# We actually should have already triggered this exception. However as a
# policy we're putting this exception wherever we exploit the bimixture
# assumption.
raise NotImplementedError("Currently only bimixtures are supported; "
"len(scale)={} is not 2.".format(len(x)))
# Alternatively:
# x = weight * x[0] + (1. - weight) * x[1]
x = weight * (x[0] - x[1]) + x[1]
return x
def _log_prob(self, x):
# By convention, we always put the grid points right-most.
y = tf.stack([aff.inverse(x) for aff in self.interpolated_affine], axis=-1)
log_prob = tf.reduce_sum(
input_tensor=self.distribution.log_prob(y), axis=-2)
# Because the affine transformation has a constant Jacobian, it is the case
# that `affine.fldj(x) = -affine.ildj(x)`. This is not true in general.
fldj = tf.stack(
[
aff.forward_log_det_jacobian(
x, event_ndims=tf.rank(self.event_shape_tensor()))
for aff in self.interpolated_affine
],
axis=-1)
return tf.reduce_logsumexp(
input_tensor=self.mixture_distribution.logits - fldj + log_prob,
axis=-1)
def _mean(self):
p = self._expand_mix_distribution_probs()
m = self._expand_base_distribution_mean()
mean = None
for k, aff in enumerate(self.interpolated_affine):
# aff.forward is going to do this:
# y = tf.squeeze(aff.scale.matmul(m), axis=[-1])
# if aff.shift is not None:
# y += aff.shift
mean = add(mean, p[..., k] * aff.forward(m))
return mean
def _covariance(self):
# Law of total variance:
#
# Cov[Z] = E[Cov[Z | V]] + Cov[E[Z | V]]
#
# where,
#
# E[Cov[Z | V]] = sum_i mix_prob[i] Scale[i]
# Cov[E[Z | V]] = sum_i mix_prob[i] osquare(loc[i])
# - osquare(sum_i mix_prob[i] loc[i])
#
# osquare(x) = x.transpose @ x
return add(
self._mean_of_covariance_given_quadrature_component(diag_only=False),
self._covariance_of_mean_given_quadrature_component(diag_only=False))
def _variance(self):
# Equivalent to: tf.diag_part(self._covariance()),
return add(
self._mean_of_covariance_given_quadrature_component(diag_only=True),
self._covariance_of_mean_given_quadrature_component(diag_only=True))
def _mean_of_covariance_given_quadrature_component(self, diag_only):
p = self.mixture_distribution.probs_parameter()
# To compute E[Cov(Z|V)], we'll add matrices within three categories:
# scaled-identity, diagonal, and full. Then we'll combine these at the end.
scale_identity_multiplier = None
diag = None
full = None
for k, aff in enumerate(self.interpolated_affine):
s = aff.scale # Just in case aff.scale has side-effects, we'll call once.
if (s is None or isinstance(s, tf.linalg.LinearOperatorIdentity)):
scale_identity_multiplier = add(scale_identity_multiplier,
p[..., k, tf.newaxis])
elif isinstance(s, tf.linalg.LinearOperatorScaledIdentity):
scale_identity_multiplier = add(
scale_identity_multiplier,
(p[..., k, tf.newaxis] * tf.square(s.multiplier)))
elif isinstance(s, tf.linalg.LinearOperatorDiag):
diag = add(diag, (p[..., k, tf.newaxis] * tf.square(s.diag_part())))
else:
x = (
p[..., k, tf.newaxis, tf.newaxis] * s.matmul(
s.to_dense(), adjoint_arg=True))
if diag_only:
x = tf.linalg.diag_part(x)
full = add(full, x)
# We must now account for the fact that the base distribution might have a
# non-unity variance. Recall that, since X ~ iid Law(X_0),
# `Cov(SX+m) = S Cov(X) S.T = S S.T Diag(Var(X_0))`.
# We can scale by `Var(X)` (vs `Cov(X)`) since X corresponds to `d` iid
# samples from a scalar-event distribution.
v = self.distribution.variance()
if scale_identity_multiplier is not None:
scale_identity_multiplier = scale_identity_multiplier * v
if diag is not None:
diag = diag * v[..., tf.newaxis]
if full is not None:
full = full * v[..., tf.newaxis]
if diag_only:
# Apparently we don't need the full matrix, just the diagonal.
r = add(diag, full)
if r is None and scale_identity_multiplier is not None:
ones = tf.ones(self.event_shape_tensor(), dtype=self.dtype)
return scale_identity_multiplier[..., tf.newaxis] * ones
return add(r, scale_identity_multiplier)
# `None` indicates we don't know if the result is positive-definite.
is_positive_definite = (True if all(aff.scale.is_positive_definite
for aff in self.endpoint_affine)
else None)
to_add = []
if diag is not None:
to_add.append(
tf.linalg.LinearOperatorDiag(
diag=diag, is_positive_definite=is_positive_definite))
if full is not None:
to_add.append(
tf.linalg.LinearOperatorFullMatrix(
matrix=full, is_positive_definite=is_positive_definite))
if scale_identity_multiplier is not None:
to_add.append(
tf.linalg.LinearOperatorScaledIdentity(
num_rows=self.event_shape_tensor()[0],
multiplier=scale_identity_multiplier,
is_positive_definite=is_positive_definite))
return (linop_add_lib.add_operators(to_add)[0].to_dense()
if to_add else None)
def _covariance_of_mean_given_quadrature_component(self, diag_only):
square = tf.square if diag_only else vec_osquare
p = self._expand_mix_distribution_probs()
if not diag_only:
p = p[..., tf.newaxis, :] # Assuming tensorshape_util.rank(event)=1.
m = self._expand_base_distribution_mean()
cov_e_z_given_v = None
e_z_given_v = self._mean()
for k, aff in enumerate(self.interpolated_affine):
y = aff.forward(m)
cov_e_z_given_v = add(cov_e_z_given_v,
p[..., k] * square(y - e_z_given_v))
return cov_e_z_given_v
def _expand_base_distribution_mean(self):
"""Ensures `self.distribution.mean()` has `[batch, event]` shape."""
single_draw_shape = concat_vectors(self.batch_shape_tensor(),
self.event_shape_tensor())
m = tf.reshape(
self.distribution.mean(), # A scalar.
shape=tf.ones_like(single_draw_shape, dtype=tf.int32))
m = tf.tile(m, multiples=single_draw_shape)
tensorshape_util.set_shape(
m, tensorshape_util.concatenate(self.batch_shape, self.event_shape))
return m
def _expand_mix_distribution_probs(self):
p = self.mixture_distribution.probs_parameter() # [B, deg]
deg = tf.compat.dimension_value(
tensorshape_util.with_rank_at_least(p.shape, 1)[-1])
if deg is None:
deg = tf.shape(input=p)[-1]
event_ndims = tensorshape_util.rank(self.event_shape)
if event_ndims is None:
event_ndims = tf.shape(input=self.event_shape_tensor())[0]
expand_shape = tf.concat(
[
self.mixture_distribution.batch_shape_tensor(),
tf.ones([event_ndims], dtype=tf.int32),
[deg],
],
axis=0)
return tf.reshape(p, shape=expand_shape)
def maybe_check_quadrature_param(param, name, validate_args):
"""Helper which checks validity of `loc` and `scale` init args."""
with tf.name_scope("check_" + name):
assertions = []
if tensorshape_util.rank(param.shape) is not None:
if tensorshape_util.rank(param.shape) == 0:
raise ValueError("Mixing params must be a (batch of) vector; "
"{}.rank={} is not at least one.".format(
name, tensorshape_util.rank(param.shape)))
elif validate_args:
assertions.append(
assert_util.assert_rank_at_least(
param,
1,
message=("Mixing params must be a (batch of) vector; "
"{}.rank is not at least one.".format(name))))
# TODO(jvdillon): Remove once we support k-mixtures.
if tensorshape_util.with_rank_at_least(param.shape, 1)[-1] is not None:
if tf.compat.dimension_value(param.shape[-1]) != 1:
raise NotImplementedError("Currently only bimixtures are supported; "
"{}.shape[-1]={} is not 1.".format(
name,
tf.compat.dimension_value(
param.shape[-1])))
elif validate_args:
assertions.append(
assert_util.assert_equal(
tf.shape(input=param)[-1],
1,
message=("Currently only bimixtures are supported; "
"{}.shape[-1] is not 1.".format(name))))
if assertions:
return distribution_util.with_dependencies(assertions, param)
return param
def determine_batch_event_shapes(grid, endpoint_affine):
"""Helper to infer batch_shape and event_shape."""
with tf.name_scope("determine_batch_event_shapes"):
# grid # shape: [B, k, q]
# endpoint_affine # len=k, shape: [B, d, d]
batch_shape = grid.shape[:-2]
batch_shape_tensor = tf.shape(input=grid)[:-2]
event_shape = None
event_shape_tensor = None
def _set_event_shape(shape, shape_tensor):
if event_shape is None:
return shape, shape_tensor
return (tf.broadcast_static_shape(event_shape, shape),
tf.broadcast_dynamic_shape(event_shape_tensor, shape_tensor))
for aff in endpoint_affine:
if aff.shift is not None:
batch_shape = tf.broadcast_static_shape(batch_shape,
aff.shift.shape[:-1])
batch_shape_tensor = tf.broadcast_dynamic_shape(
batch_shape_tensor,
tf.shape(input=aff.shift)[:-1])
event_shape, event_shape_tensor = _set_event_shape(
aff.shift.shape[-1:],
tf.shape(input=aff.shift)[-1:])
if aff.scale is not None:
batch_shape = tf.broadcast_static_shape(batch_shape,
aff.scale.batch_shape)
batch_shape_tensor = tf.broadcast_dynamic_shape(
batch_shape_tensor, aff.scale.batch_shape_tensor())
event_shape, event_shape_tensor = _set_event_shape(
tf.TensorShape([aff.scale.range_dimension]),
aff.scale.range_dimension_tensor()[tf.newaxis])
return batch_shape, batch_shape_tensor, event_shape, event_shape_tensor
def interpolate_loc(grid, loc):
"""Helper which interpolates between two locs."""
if len(loc) != 2:
raise NotImplementedError("Currently only bimixtures are supported; "
"len(scale)={} is not 2.".format(len(loc)))
deg = tf.compat.dimension_value(
tensorshape_util.with_rank_at_least(grid.shape, 1)[-1])
if deg is None:
raise ValueError("Num quadrature grid points must be known prior "
"to graph execution.")
with tf.name_scope("interpolate_loc"):
if loc is None or loc[0] is None and loc[1] is None:
return [None]*deg
# shape: [B, 1, k, deg]
w = grid[..., tf.newaxis, :, :]
loc = [
x[..., tf.newaxis] # shape: [B, e, 1]
if x is not None else None for x in loc
]
if loc[0] is None:
x = w[..., 1, :] * loc[1] # shape: [B, e, deg]
elif loc[1] is None:
x = w[..., 0, :] * loc[0] # shape: [B, e, deg]
else:
delta = loc[0] - loc[1]
x = w[..., 0, :] * delta + loc[1] # shape: [B, e, deg]
return [x[..., k] for k in range(deg)] # list(shape:[B, e])
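# Aside on the interpolation above (plain algebra, shapes as annotated):
#   w[..., 0, :] * delta + loc[1] == w0 * loc[0] + (1 - w0) * loc[1],
# so whenever the two grid columns sum to one (w0 + w1 == 1) the result is the
# convex combination w0 * loc[0] + w1 * loc[1] of the two endpoint locations.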
def interpolate_scale(grid, scale):
"""Helper which interpolates between two scales."""
if len(scale) != 2:
raise NotImplementedError("Currently only bimixtures are supported; "
"len(scale)={} is not 2.".format(len(scale)))
deg = tf.compat.dimension_value(
tensorshape_util.with_rank_at_least(grid.shape, 1)[-1])
if deg is None:
raise ValueError("Num quadrature grid points must be known prior "
"to graph execution.")
with tf.name_scope("interpolate_scale"):
return [linop_add_lib.add_operators([ # pylint: disable=g-complex-comprehension
linop_scale(grid[..., k, q], s)
for k, s in enumerate(scale)
])[0] for q in range(deg)]
def linop_scale(w, op):
"""Creates weighted `LinOp` from existing `LinOp`."""
# We assume w > 0. (This assumption only relates to the is_* attributes.)
with tf.name_scope("linop_scale"):
# TODO(b/35301104): LinearOperatorComposition doesn't combine operators, so
# special case combinations here. Once it does, this function can be
# replaced by:
# return linop_composition_lib.LinearOperatorComposition([
# scaled_identity(w), op])
def scaled_identity(w):
return tf.linalg.LinearOperatorScaledIdentity(
num_rows=op.range_dimension_tensor(),
multiplier=w,
is_non_singular=op.is_non_singular,
is_self_adjoint=op.is_self_adjoint,
is_positive_definite=op.is_positive_definite)
if isinstance(op, tf.linalg.LinearOperatorIdentity):
return scaled_identity(w)
if isinstance(op, tf.linalg.LinearOperatorScaledIdentity):
return scaled_identity(w * op.multiplier)
if isinstance(op, tf.linalg.LinearOperatorDiag):
return tf.linalg.LinearOperatorDiag(
diag=w[..., tf.newaxis] * op.diag_part(),
is_non_singular=op.is_non_singular,
is_self_adjoint=op.is_self_adjoint,
is_positive_definite=op.is_positive_definite)
if isinstance(op, tf.linalg.LinearOperatorLowerTriangular):
return tf.linalg.LinearOperatorLowerTriangular(
tril=w[..., tf.newaxis, tf.newaxis] * op.to_dense(),
is_non_singular=op.is_non_singular,
is_self_adjoint=op.is_self_adjoint,
is_positive_definite=op.is_positive_definite)
raise NotImplementedError(
"Unsupported Linop type ({})".format(type(op).__name__))
def concat_vectors(*args):
"""Concatenates input vectors, statically if possible."""
args_ = [tf.get_static_value(x) for x in args]
if any(vec is None for vec in args_):
return tf.concat(args, axis=0)
return [val for vec in args_ for val in vec] # pylint: disable=g-complex-comprehension
def add(x, y):
"""Adds inputs; interprets `None` as zero."""
if x is None:
return y
if y is None:
return x
return x + y
def vec_osquare(x):
"""Computes the outer-product of a (batch of) vector, i.e., x.T x."""
return x[..., :, tf.newaxis] * x[..., tf.newaxis, :]
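# For a single vector x = [x1, x2] this is the outer product
#   [[x1*x1, x1*x2],
#    [x2*x1, x2*x2]],
# broadcast over any leading batch dimensions, e.g.
#   vec_osquare(tf.constant([1., 2.]))  # -> [[1., 2.], [2., 4.]]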
def softmax(x, axis, name=None):
"""Equivalent to tf.nn.softmax but works around b/70297725."""
with tf.name_scope(name or "softmax"):
x = tf.convert_to_tensor(value=x, name="x")
ndims = (
tensorshape_util.rank(x.shape)
if tensorshape_util.rank(x.shape) is not None else tf.rank(
x, name="ndims"))
axis = tf.convert_to_tensor(value=axis, dtype=tf.int32, name="axis")
axis_ = tf.get_static_value(axis)
if axis_ is not None:
axis = np.int(ndims + axis_ if axis_ < 0 else axis_)
else:
axis = tf1.where(axis < 0, ndims + axis, axis)
return tf.nn.softmax(x, axis=axis)
| 41.862423
| 128
| 0.658189
|
82201950d4ce249a659d7a43b3faedd9956a3c4f
| 574
|
py
|
Python
|
adventofcode2021/main.py
|
barthogenes/AdventOfCode2021
|
b54ed6dca83df48ede5efff8ed1e3b6d827eac94
|
[
"MIT"
] | null | null | null |
adventofcode2021/main.py
|
barthogenes/AdventOfCode2021
|
b54ed6dca83df48ede5efff8ed1e3b6d827eac94
|
[
"MIT"
] | null | null | null |
adventofcode2021/main.py
|
barthogenes/AdventOfCode2021
|
b54ed6dca83df48ede5efff8ed1e3b6d827eac94
|
[
"MIT"
] | null | null | null |
from dive import dive_part_1, dive_part_2
from get_puzzle_input import get_puzzle_input
from sonar_sweep import sonar_sweep_part_1, sonar_sweep_part_2
if __name__ == "__main__":
puzzle_input = get_puzzle_input(2021, 1)
print("Day 1: Sonar Sweep")
print(f"Part 1 answer = {sonar_sweep_part_1(puzzle_input)}")
print(f"Part 2 answer = {sonar_sweep_part_2(puzzle_input)}")
puzzle_input = get_puzzle_input(2021, 2)
print("Day 2: Dive!")
print(f"Part 1 answer = {dive_part_1(puzzle_input)}")
print(f"Part 2 answer = {dive_part_2(puzzle_input)}")
| 38.266667
| 64
| 0.740418
|
3c3cda57663be55e45eb15947104f25bd3b93659
| 487
|
py
|
Python
|
local.py
|
Alyetama/CarbonDate
|
576f53fa9809a61a896ecf9286f172fded1cb3bf
|
[
"MIT"
] | 69
|
2016-09-27T10:43:28.000Z
|
2022-03-01T22:24:07.000Z
|
local.py
|
Alyetama/CarbonDate
|
576f53fa9809a61a896ecf9286f172fded1cb3bf
|
[
"MIT"
] | 27
|
2016-10-05T20:32:04.000Z
|
2019-01-16T15:02:00.000Z
|
local.py
|
Alyetama/CarbonDate
|
576f53fa9809a61a896ecf9286f172fded1cb3bf
|
[
"MIT"
] | 12
|
2016-05-12T04:10:43.000Z
|
2021-12-13T23:42:28.000Z
|
#!/usr/bin/env python3
import json
import logging
def start(args, mod):
'''
Start local version of Carbon Tool
'''
result = {}
loglevel = logging.WARNING
if args.verbose:
loglevel = logging.DEBUG
logging.basicConfig(level=loglevel, format='%(message)s')
logger = logging.getLogger('local')
result['self'] = ""
results = mod.run(args=args, resultDict=result,
logger=logger)
print(json.dumps(results, indent=4))
| 23.190476
| 61
| 0.622177
|
88c91722e0b11da7f83c8e07a5f9db9f40724eb8
| 2,406
|
py
|
Python
|
entertainment_center.py
|
angnaerxisen/Udacity
|
fb2c9ddb16582ec36ec7f400c8b0c1530c769bbc
|
[
"Apache-2.0"
] | null | null | null |
entertainment_center.py
|
angnaerxisen/Udacity
|
fb2c9ddb16582ec36ec7f400c8b0c1530c769bbc
|
[
"Apache-2.0"
] | null | null | null |
entertainment_center.py
|
angnaerxisen/Udacity
|
fb2c9ddb16582ec36ec7f400c8b0c1530c769bbc
|
[
"Apache-2.0"
] | null | null | null |
import fresh_tomatoes
import media
# Deadpool: the story line, poster and trailer were found on IMDB
dead_pool = media.Movie(
"Dead Pool",
"A fast-talking mercenary with a morbid sense of humor is subjected to\
a rogue experiment that leaves him with accelerated healing powers and \
a quest for revenge.",
# naqa
"http://bit.ly/2m5Kw07",
"https://www.youtube.com/watch?v=17CUxUduw-g"
)
# The Shawshank Redemption
Shawshank_redepemtion = media.Movie(
"Shawshank redepemtion",
"Two imprisoned men bond over a number of years,\
finding solace and eventual redemption\
through acts of common decency.",
# naqa
"http://bit.ly/2e2Yn28",
"https://www.youtube.com/watch?v=6hB3S9bIaco"
)
# batman VS superman
Batman_VS_Superman = media.Movie(
"Batman VS Superman",
"Two DC superhero fight on the City",
# naqa
"http://bit.ly/2hvTqP9",
"https://www.youtube.com/watch?v=0WWzgGyAH6Y"
)
# Kill Bill: Vol. 2
Killing_Bills_Vol_2 = media.Movie(
"Killing Bills Vol 2",
"A Woman's revenge her boss",
# napa
"http://bit.ly/2lnggKh",
"https://www.youtube.com/watch?v=WTt8cCIvGYI"
)
# Zootopia
Zootopia = media.Movie("Zootopia",
"In a city of anthropomorphic animals,\
a rookie bunny cop and a cynical con artist \
fox must work together to uncover a conspiracy.",
# naqa
"http://bit.ly/2m5Nphr",
"https://www.youtube.com/watch?v=yCOPJi0Urq4"
)
# Account
The_Accountant = media.Movie("The Accountant",
"As a math savant uncooks the books for a new client, \
the Treasury Department closes in on his activities \
and the body count starts to rise.",
# napa
"http://bit.ly/2mr9QPd",
"http://www.imdb.com/video/imdb/vi2433726233?playlistId=tt2140479&ref_=tt_ov_vi"
)
movies = [dead_pool, Shawshank_redepemtion, Batman_VS_Superman,
Killing_Bills_Vol_2, Zootopia, The_Accountant]
fresh_tomatoes.open_movies_page(movies)
| 33.416667
| 110
| 0.583541
|
c6dccbd1259492fa15f2d9fc161aaef790c4e61a
| 10,594
|
py
|
Python
|
synthetic/orbitreducer.py
|
adrn/GalaxySynth
|
784e99706b6ca9b4d9402da3a453d0066fa76e8b
|
[
"MIT"
] | 1
|
2015-04-11T01:03:33.000Z
|
2015-04-11T01:03:33.000Z
|
synthetic/orbitreducer.py
|
adrn/GalaxySynth
|
784e99706b6ca9b4d9402da3a453d0066fa76e8b
|
[
"MIT"
] | null | null | null |
synthetic/orbitreducer.py
|
adrn/GalaxySynth
|
784e99706b6ca9b4d9402da3a453d0066fa76e8b
|
[
"MIT"
] | null | null | null |
# coding: utf-8
""" Turn a collection of orbits into something we can make into music. """
from __future__ import division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Third-party
import gary.dynamics as gd
import numpy as np
from scipy.signal import argrelmin, argrelmax
__all__ = ['cyl_orbit_to_events', 'cyl_orbit_to_events2', 'xyz_orbit_to_events',
'halo_orbit_to_events', 'elliptical_orbit_to_events', 'elliptical_orbit_to_events2']
def quantize(x, nbins, min=None, max=None):
if min is None:
min = x.min()
if max is None:
max = x.max()
if max > min:
q = np.round((x - min) / (max - min) * (nbins-1)).astype(int)
q[x > max] = nbins-1
q[x < min] = 0
else:
max = -max
min = -min
x = -x.copy()
q = np.round((x - min) / (max - min) * (nbins-1)).astype(int)
q[x > max] = nbins-1
q[x < min] = 0
return q
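# A minimal usage sketch (toy values assumed): with nbins=3 over the data
# range [0., 1.] the values are binned by rounding their normalized position,
#
#   >>> quantize(np.array([0.0, 0.5, 1.0]), nbins=3)
#   array([0, 1, 2])
#
# Passing min > max (as several callers below do) flips the mapping, so that
# smaller inputs land in higher-numbered bins.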
def cyl_orbit_to_events(t, w, midi_pool_hi, midi_pool_lo, time_resolution=None):
"""
Convert an orbit to MIDI events using cylindrical coordinates and rules.
For cylindrical orbits, crossing the disk midplane (x-y plane) triggers a
high note. Crossing the x-z plane triggers a low note. The pitch of the note
is set by the cylindrical radius at the time of either crossing. Smaller
radius triggers a higher pitch note.
Parameters
----------
t : array_like
w : array_like
midi_pool : array_like
"""
R = np.sqrt(w[:,:,0]**2 + w[:,:,1]**2)
phi = np.arctan2(w[:,:,1], w[:,:,0]) % (2*np.pi)
z = w[:,:,2]
# set amplitudes from size of z oscillations
all_amps = np.abs(z).max(axis=0) / 10.
# variable length arrays
phi_cross = np.array([argrelmin(pphi)[0] for pphi in phi.T])
z_cross = np.array([argrelmin(zz**2)[0] for zz in z.T])
# quantize R orbit
RR = np.sqrt(R)
nbins_hi = len(midi_pool_hi)
q_R_hi = quantize(RR, nbins=nbins_hi, min=RR.max(), max=RR.min())
nbins_lo = len(midi_pool_lo)
q_R_lo = quantize(RR, nbins=nbins_lo, min=RR.max(), max=RR.min())
delays = []
notes = []
amps = []
for j in range(w.shape[0]):
_no = []
_amps = []
for i in range(w.shape[1]):
if j in z_cross[i]:
_no.append(midi_pool_hi[q_R_hi[j,i]])
_amps.append(all_amps[i])
if j in phi_cross[i]:
_no.append(midi_pool_lo[q_R_lo[j,i]])
_amps.append(all_amps[i])
if len(_no) > 0:
delays.append(t[j])
notes.append(np.unique(_no).tolist())
amps.append(_amps)
delays = np.array(delays)
notes = np.array(notes)
amps = np.array(amps)
return delays, notes, amps
# if time_resolution is None:
# return delays, notes
# new_delays = []
# new_notes = []
# q_delays = quantize(delays, nbins=int(delays.max()/time_resolution))
# for xx in np.unique(q_delays):
# ix = q_delays == xx
# new_delays.append(delays[ix][0])
# new_notes.append([item for sublist in notes[ix] for item in sublist])
# return np.array(new_delays), np.array(new_notes)
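# A hedged usage sketch (the MIDI pools below are made-up values; `w` is an
# orbit array of shape [ntimes, norbits, ndim] with x, y, z in the first three
# slots of the last axis, and `t` the matching array of times):
#
#   delays, notes, amps = cyl_orbit_to_events(
#       t, w,
#       midi_pool_hi=[72, 74, 76, 79],   # assumed high-register pitches
#       midi_pool_lo=[36, 38, 40, 43])   # assumed low-register pitches
#
# `delays` holds the event times, `notes` the MIDI pitches triggered at each
# event, and `amps` per-note amplitudes derived from the z-oscillation size.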
def cyl_orbit_to_events2(t, w, midi_pool_hi, midi_pool_lo):
"""
Convert an orbit to MIDI events using cylindrical coordinates and rules.
For cylindrical orbits, crossing the disk midplane (x-y plane) triggers a
high note with pitch set by the vertical oscillation frequency. Crossing
the x-z plane triggers a low note with pitch set by the azimuthal frequency.
The radial oscillations modulate the volume of the note.
Parameters
----------
t : array_like
w : array_like
"""
ntimes,norbits,_ = w.shape
R = np.sqrt(w[:,:,0]**2 + w[:,:,1]**2)
phi = np.arctan2(w[:,:,1], w[:,:,0]) % (2*np.pi)
z = w[:,:,2]
# normalized R for oscillations
normed_R = (R - R.min()) / (R.max() - R.min())
# variable length arrays
phi_cross = np.array([argrelmin(pphi)[0] for pphi in phi.T])
z_cross = np.array([argrelmin(zz**2)[0] for zz in z.T])
# estimate periods
T_z = np.array([gd.peak_to_peak_period(t, z[:,i]) for i in range(norbits)])
T_phi = np.array([gd.peak_to_peak_period(t, phi[:,i]) for i in range(norbits)])
# quantize the periods and map on to notes
# q_z = quantize(T_z, nbins=len(midi_pool_hi), min=T_z.max(), max=T_z.min())
# q_phi = quantize(T_phi, nbins=len(midi_pool_lo), min=T_phi.max(), max=T_phi.min())
q_z = quantize(T_z, nbins=len(midi_pool_hi), min=120., max=30.) # TOTAL HACk
q_phi = quantize(T_phi, nbins=len(midi_pool_lo), min=350., max=50.) # TOTAL HACk
delays = []
notes = []
Rphase = []
for j in range(w.shape[0]):
_no = []
_ph = []
for i in range(w.shape[1]):
if j in z_cross[i]:
_no.append(midi_pool_hi[q_z[i]])
_ph.append(normed_R[j,i])
if j in phi_cross[i]:
_no.append(midi_pool_lo[q_phi[i]])
_ph.append(1.)
if len(_no) > 0:
delays.append(t[j])
notes.append(np.unique(_no).tolist())
Rphase.append(_ph)
delays = np.array(delays)
notes = np.array(notes)
Rphase = np.array(Rphase)
return delays, notes, Rphase
def xyz_orbit_to_events(t, w, midi_pool):
"""
Convert an orbit to MIDI events using Cartesian coordinates and rules.
For Cartesian orbits...
Parameters
----------
t : array_like
w : array_like
midi_pool : array_like
"""
x,y,z = w.T[:3]
r = np.sqrt(x**2 + y**2 + z**2)
phi = np.arctan2(y,x)
theta = np.arccos(z/r)
# variable length arrays
per = np.array([argrelmin(rr)[0] for rr in r])
apo = np.array([argrelmax(rr)[0] for rr in r])
# quantize the periods and map on to notes
q_theta = quantize(theta, nbins=len(midi_pool))
q_phi = quantize(phi, nbins=len(midi_pool))
delays = []
notes = []
for j in range(w.shape[0]):
_no = []
for i in range(w.shape[1]):
# if j in per[i]:
# _no.append(midi_pool[q_theta[i,j]])
if j in apo[i]:
_no.append(midi_pool[q_phi[i,j]])
if len(_no) > 0:
delays.append(t[j])
notes.append(np.unique(_no).tolist())
delays = np.array(delays)
notes = np.array(notes)
return delays, notes
def halo_orbit_to_events(t, w, midi_pool):
"""
Convert an orbit to MIDI events using Cartesian coordinates and rules.
For Cartesian orbits...
Parameters
----------
t : array_like
w : array_like
midi_pool : array_like
"""
x,y,z = w.T[:3]
r = np.sqrt(x**2 + y**2 + z**2)
# quantize the periods and map on to notes
x_cross = np.array([argrelmin(xx**2)[0] for xx in x])
y_cross = np.array([argrelmin(yy**2)[0] for yy in y])
z_cross = np.array([argrelmin(zz**2)[0] for zz in z])
q_r = quantize(np.sqrt(r), nbins=len(midi_pool),
max=np.sqrt(r).min(), min=np.sqrt(r).max())
delays = []
notes = []
for j in range(w.shape[0]):
_no = []
for i in range(w.shape[1]):
if j in x_cross[i] or j in y_cross[i] or j in z_cross[i]:
_no.append(midi_pool[q_r[i,j]])
if len(_no) > 0:
delays.append(t[j])
notes.append(np.unique(_no).tolist())
delays = np.array(delays)
notes = np.array(notes)
return delays, notes
def elliptical_orbit_to_events(t, w):
"""
Convert an orbit to MIDI events using Cartesian coordinates and rules.
Parameters
----------
t : array_like
w : array_like
midi_pool : array_like
"""
loop = gd.classify_orbit(w)
# apocenters
x,y,z = w.T[:3]
r = np.sqrt(x**2 + y**2 + z**2)
apo = np.array([argrelmax(rr)[0] for rr in r])
# get periods
periods = []
for i in range(w.shape[1]):
if np.any(loop[i] == 1):
w2 = gd.align_circulation_with_z(w[:,i], loop[i])
R = np.sqrt(w2[:,0]**2 + w2[:,1]**2)
phi = np.arctan2(w2[:,1], w2[:,0]) % (2*np.pi)
z = w2[:,2]
# loop
T1 = gd.peak_to_peak_period(t, R)
T2 = gd.peak_to_peak_period(t, phi)
T3 = gd.peak_to_peak_period(t, z)
else:
# box
T1 = gd.peak_to_peak_period(t, w[:,i,0])
T2 = gd.peak_to_peak_period(t, w[:,i,1])
T3 = gd.peak_to_peak_period(t, w[:,i,2])
periods.append([T1,T2,T3])
freqs = (2*np.pi / np.array(periods)) * 10000.
delays = []
notes = []
for j in range(w.shape[0]):
_no = []
for i in range(w.shape[1]):
if j in apo[i]:
_no.append(freqs[i].tolist())
if len(_no) > 0:
delays.append(t[j])
notes.append(np.unique(_no).tolist())
delays = np.array(delays)
notes = np.array(notes)
return delays, notes
def elliptical_orbit_to_events2(t, w, x_pool, y_pool, z_pool):
"""
Convert an orbit to MIDI events using Cartesian coordinates and rules.
For Cartesian orbits...
Parameters
----------
t : array_like
w : array_like
midi_pool : array_like
"""
x,y,z = w.T[:3]
# quantize the periods and map on to notes
x_cross = np.array([argrelmin(xx**2)[0] for xx in x])
y_cross = np.array([argrelmin(yy**2)[0] for yy in y])
z_cross = np.array([argrelmin(zz**2)[0] for zz in z])
r_x = np.sqrt(y**2 + z**2)
r_y = np.sqrt(x**2 + z**2)
r_z = np.sqrt(x**2 + y**2)
q_r_x = quantize(np.sqrt(r_x), nbins=len(x_pool),
max=np.sqrt(r_x).min(), min=np.sqrt(r_x).max())
q_r_y = quantize(np.sqrt(r_y), nbins=len(y_pool),
max=np.sqrt(r_y).min(), min=np.sqrt(r_y).max())
q_r_z = quantize(np.sqrt(r_z), nbins=len(z_pool),
max=np.sqrt(r_z).min(), min=np.sqrt(r_z).max())
delays = []
notes = []
for j in range(w.shape[0]):
_no = []
for i in range(w.shape[1]):
if j in x_cross[i]:
_no.append(x_pool[q_r_x[i,j]])
if j in y_cross[i]:
_no.append(y_pool[q_r_y[i,j]])
if j in z_cross[i]:
_no.append(z_pool[q_r_z[i,j]])
if len(_no) > 0:
delays.append(t[j])
notes.append(np.unique(_no).tolist())
delays = np.array(delays)
notes = np.array(notes)
return delays, notes
| 27.878947
| 95
| 0.560695
|
ee298c6bbf583e839324553e23380bd69afee7f1
| 41
|
py
|
Python
|
music/__init__.py
|
dkassen/music
|
0a356dcf9891fd1d5a2f07150868c06905de5ef4
|
[
"MIT"
] | null | null | null |
music/__init__.py
|
dkassen/music
|
0a356dcf9891fd1d5a2f07150868c06905de5ef4
|
[
"MIT"
] | null | null | null |
music/__init__.py
|
dkassen/music
|
0a356dcf9891fd1d5a2f07150868c06905de5ef4
|
[
"MIT"
] | null | null | null |
from models.accidentals import Accidental
| 41
| 41
| 0.902439
|
b364bc8a4ae53249b86e3496c38c0799a9b766d1
| 2,939
|
py
|
Python
|
fine-tune/evaluation/eval_gen.py
|
muyeby/AMRBART
|
8f52f9d6a23dcf4132d2cc1272d6605ebc78a16f
|
[
"MIT"
] | 14
|
2022-03-16T02:36:30.000Z
|
2022-03-26T07:46:18.000Z
|
fine-tune/evaluation/eval_gen.py
|
muyeby/AMRBART
|
8f52f9d6a23dcf4132d2cc1272d6605ebc78a16f
|
[
"MIT"
] | 2
|
2022-03-24T12:45:27.000Z
|
2022-03-30T03:25:15.000Z
|
fine-tune/evaluation/eval_gen.py
|
muyeby/AMRBART
|
8f52f9d6a23dcf4132d2cc1272d6605ebc78a16f
|
[
"MIT"
] | 2
|
2022-03-24T06:12:19.000Z
|
2022-03-31T10:44:57.000Z
|
import sys
import os
import argparse
from typing import Iterable, Optional
import datasets
import re
def argument_parser():
parser = argparse.ArgumentParser(description="Preprocess AMR data")
# Multiple input parameters
parser.add_argument("--in-tokens", help="input tokens", required=True, type=str)
parser.add_argument("--in-reference-tokens", help="refrence tokens to compute metric", type=str)
args = parser.parse_args()
return args
def tokenize_sentence(text, debug=False):
text = re.sub(r"('ll|n't|'m|'s|'d|'re)", r" \1", text)
text = re.sub(r"(\s+)", r" ", text)
return text
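# A quick illustration (example sentence assumed): a space is inserted before
# common English contractions and repeated whitespace is collapsed,
#
#   >>> tokenize_sentence("I can't go, it's late")
#   "I ca n't go, it 's late"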
def raw_corpus_bleu(
hypothesis: Iterable[str], reference: Iterable[str], offset: Optional[float] = 0.01
) -> float:
bleu = datasets.load_metric("bleu")
hypothesis = [itm.strip().split() for itm in hypothesis]
reference = [[itm.strip().split()] for itm in reference]
res = bleu.compute(predictions=hypothesis, references=reference)
return res
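# For example (toy strings assumed), the corpus score is read from the dict
# returned by the `bleu` metric of the `datasets` library:
#
#   res = raw_corpus_bleu(["the cat sat on the mat"], ["the cat sat on the mat"])
#   res["bleu"]  # corpus-level BLEU score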
def raw_corpus_chrf(hypotheses: Iterable[str], references: Iterable[str]) -> float:
chrf = datasets.load_metric("chrf")
hypotheses = [itm.strip() for itm in hypotheses]
references = [[itm.strip()] for itm in references]
res = chrf.compute(predictions=hypotheses, references=references)
return res
def raw_corpus_meteor(hypotheses: Iterable[str], references: Iterable[str]):
hypotheses = [itm.strip() for itm in hypotheses]
references = [[itm.strip()] for itm in references]
meteor = datasets.load_metric("meteor")
res = meteor.compute(predictions=hypotheses, references=references)
return res
def raw_corpus_bleurt(hypotheses: Iterable[str], references: Iterable[str]):
hypotheses = [itm.strip() for itm in hypotheses]
references = [itm.strip() for itm in references]
bleurt = datasets.load_metric("bleurt", 'bleurt-base-512')
res = bleurt.compute(predictions=hypotheses, references=references)
return res
def read_tokens(in_tokens_file):
with open(in_tokens_file) as fid:
lines = fid.readlines()
return lines
if __name__ == "__main__":
    # Argument handling
args = argument_parser()
# read files
ref = read_tokens(args.in_reference_tokens)
hyp = read_tokens(args.in_tokens)
# Lower evaluation
for i in range(len(ref)):
ref[i] = ref[i].lower()
# Lower case output
for i in range(len(hyp)):
if "<generate>" in hyp[i]:
hyp[i] = hyp[i].split("<generate>")[-1]
hyp[i] = tokenize_sentence(hyp[i].lower())
# results
bleu = raw_corpus_bleu(hyp, ref)
print("BLEU {}".format(bleu))
chrFpp = raw_corpus_chrf(hyp, ref)
print("chrF++ {}".format(chrFpp))
#meteor = raw_corpus_meteor(hyp, ref)
#print("meteor {}".format(meteor))
#bleurt = raw_corpus_bleurt(hyp, ref)
#b_res = sum(bleurt["scores"]) / len(bleurt["scores"])
#print("bleurt {}".format(b_res))
| 29.686869
| 100
| 0.674039
|
569958d364ae0330878f0255f2506ef39c0b709c
| 2,172
|
py
|
Python
|
src/sage/categories/ring_ideals.py
|
sensen1/sage
|
d6c5cd9be78cc448ee4c54bac93385b1244a234c
|
[
"BSL-1.0"
] | 10
|
2018-06-01T21:54:53.000Z
|
2022-03-14T20:11:34.000Z
|
src/sage/categories/ring_ideals.py
|
sensen1/sage
|
d6c5cd9be78cc448ee4c54bac93385b1244a234c
|
[
"BSL-1.0"
] | 2
|
2021-04-02T20:43:29.000Z
|
2021-04-05T23:38:58.000Z
|
src/sage/categories/ring_ideals.py
|
sensen1/sage
|
d6c5cd9be78cc448ee4c54bac93385b1244a234c
|
[
"BSL-1.0"
] | 15
|
2020-07-23T10:46:25.000Z
|
2022-01-25T15:37:24.000Z
|
r"""
Ring ideals
"""
#*****************************************************************************
# Copyright (C) 2005 David Kohel <kohel@maths.usyd.edu>
# William Stein <wstein@math.ucsd.edu>
# 2008-2009 Nicolas M. Thiery <nthiery at users.sf.net>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#******************************************************************************
from .category_types import Category_ideal
from .modules import Modules
from sage.categories.rings import Rings
_Rings = Rings()
class RingIdeals(Category_ideal):
"""
The category of two-sided ideals in a fixed ring.
EXAMPLES::
sage: Ideals(Integers(200))
Category of ring ideals in Ring of integers modulo 200
sage: C = Ideals(IntegerRing()); C
Category of ring ideals in Integer Ring
sage: I = C([8,12,18])
sage: I
Principal ideal (2) of Integer Ring
See also: :class:`CommutativeRingIdeals`.
.. TODO::
- If useful, implement ``RingLeftIdeals`` and ``RingRightIdeals``
of which ``RingIdeals`` would be a subcategory.
- Make ``RingIdeals(R)``, return ``CommutativeRingIdeals(R)``
when ``R`` is commutative.
"""
def __init__(self, R):
"""
EXAMPLES::
sage: RingIdeals(ZZ)
Category of ring ideals in Integer Ring
sage: RingIdeals(3)
Traceback (most recent call last):
...
TypeError: R (=3) must be a ring
TESTS::
sage: TestSuite(RingIdeals(ZZ)).run()
"""
if not R in _Rings:
raise TypeError("R (=%s) must be a ring"%R)
Category_ideal.__init__(self, R)
def super_categories(self):
"""
EXAMPLES::
sage: RingIdeals(ZZ).super_categories()
[Category of modules over Integer Ring]
sage: RingIdeals(QQ).super_categories()
[Category of vector spaces over Rational Field]
"""
R = self.ring()
return [Modules(R)]
| 30.166667
| 79
| 0.533149
|
9161fd3f0a514f37a97cfd735eafbe5ae6ff4b84
| 5,956
|
py
|
Python
|
src/data/account.py
|
vincent-lg/talismud
|
645bdae3d2e71cde51a25fe48c8f1bde15319631
|
[
"BSD-3-Clause"
] | 4
|
2020-05-16T21:58:55.000Z
|
2020-08-29T11:17:31.000Z
|
src/data/account.py
|
vincent-lg/talismud
|
645bdae3d2e71cde51a25fe48c8f1bde15319631
|
[
"BSD-3-Clause"
] | 1
|
2020-12-15T11:22:32.000Z
|
2020-12-15T11:22:32.000Z
|
src/data/account.py
|
vincent-lg/talismud
|
645bdae3d2e71cde51a25fe48c8f1bde15319631
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2020-2021, LE GOFF Vincent
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
"""Account entity.
An account plays the role of an intermediate between a connection
from a player (a session object), and one or more characters. The
account contains the username, hashed password and email address of
the player. One or more characters can be gathered in the same account.
Even if you plan to have only one character per account, this separation
is useful (no need to put passwords in characters, after all).
"""
from datetime import datetime
import hashlib
import os
import pickle
import typing as ty
from pony.orm import Optional, Required, Set
from data.base import db, PicklableEntity
from data.decorators import lazy_property
from data.handlers import BlueprintHandler, OptionHandler
import settings
class Account(PicklableEntity, db.Entity):
"""Account entity, to connect a session to characters."""
username = Required(str, max_len=64, unique=True)
hashed_password = Required(bytes)
email = Optional(str, max_len=320, unique=True, index=True)
created_on = Required(datetime, default=datetime.utcnow)
updated_on = Required(datetime, default=datetime.utcnow)
sessions = Set("Session")
web_sessions = Set("WebSession")
players = Set("Player")
binary_options = Required(bytes, default=pickle.dumps({}))
@lazy_property
def blueprints(self):
return BlueprintHandler(self)
@lazy_property
def options(self):
"""Return the session option handler."""
return OptionHandler(self)
def before_update(self):
"""Change the 'updated_on' datetime."""
self.updated_on = datetime.utcnow()
def is_correct_password(self, password: str) -> bool:
"""
Return whether the given password is correct for this account.
Args:
password (str): the plain text password.
Returns:
correct (bool): whether this password is correct.
"""
return self.test_password(self.hashed_password, password)
def change_password(self, new_password: str):
"""
Change the account password, hashing it.
Args:
new_password (str): the new account password (not hashed).
"""
self.hashed_password = self.hash_password(new_password)
@classmethod
def create_with_password(cls, username: str, plain_password: str,
email: ty.Optional[str]) -> "Account":
"""
Create a new account object and hash its plain password.
Passwords aren't stored in clear in the database. This method
will hash the password according to settings and will create and
return an account object with this hashed password.
Args:
username (str): the username.
plain_password (str): the password (in plain text).
email (str, optional): the optional email address.
Returns:
new_account (Account): the new account with a hashed password.
"""
password = cls.hash_password(plain_password)
return cls(username=username, hashed_password=password, email=email)
@staticmethod
def hash_password(plain_password: str,
salt: ty.Optional[bytes] = None) -> bytes:
"""
Hash the given plain password, return it hashed.
If the salt is provided, it is used for hashing. If not,
it is randomly generated.
Args:
plain_password (str): the plain password.
salt (bytes, optional): the salt to use to hash the password.
        Returns:
            hashed_password (bytes): the hashed password containing
the salt and key.
"""
if salt is None:
# Generate a random salt
salt = os.urandom(settings.SALT_SIZE)
# Hash the password with pbkdf2_hmac
key = hashlib.pbkdf2_hmac(settings.HASH_ALGORITHM,
plain_password.encode("utf-8"), salt,
settings.HASH_ITERATIONS, settings.KEY_SIZE)
return salt + key
@staticmethod
def test_password(hashed_password: bytes, plain_password: str) -> bool:
"""Return whether the hashed and non hashed password match."""
salt = hashed_password[:settings.SALT_SIZE]
key = hashed_password[settings.SALT_SIZE:]
hashed_attempt = Account.hash_password(plain_password, salt)
return hashed_password == hashed_attempt
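# A minimal round-trip sketch (assumes the project's `settings` values for
# SALT_SIZE, HASH_ALGORITHM, HASH_ITERATIONS and KEY_SIZE are importable):
#
#   hashed = Account.hash_password("correct horse battery staple")
#   # layout: hashed[:settings.SALT_SIZE] is the random salt, the rest the PBKDF2 key
#   Account.test_password(hashed, "correct horse battery staple")  # -> True
#   Account.test_password(hashed, "wrong password")                # -> False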
| 36.765432
| 78
| 0.695601
|
cd879934339a82d66a76c1d28a53db05e5460943
| 900
|
py
|
Python
|
tests/plugins/hold_htlcs.py
|
mosqueiro/lightning
|
793a25a0e5ee0c3c80d1403d556a4a013ee233fd
|
[
"MIT"
] | 2
|
2019-10-03T16:52:59.000Z
|
2019-10-04T14:35:16.000Z
|
tests/plugins/hold_htlcs.py
|
mosqueiro/lightning
|
793a25a0e5ee0c3c80d1403d556a4a013ee233fd
|
[
"MIT"
] | 1
|
2019-09-18T23:38:38.000Z
|
2019-09-18T23:38:38.000Z
|
tests/plugins/hold_htlcs.py
|
jtimon/lightning
|
61383408a45960bf7fd045fc420c95478de699c0
|
[
"MIT"
] | 2
|
2020-01-11T13:14:39.000Z
|
2021-05-10T14:10:36.000Z
|
#!/usr/bin/env python3
"""Plugin that holds on to HTLCs for 10 seconds.
Used to test restarts / crashes while HTLCs were accepted, but not yet
settled/forwarded.
"""
from lightning import Plugin
import json
import os
import tempfile
import time
plugin = Plugin()
@plugin.hook("htlc_accepted")
def on_htlc_accepted(htlc, onion, plugin, **kwargs):
# Stash the onion so the test can check it
fname = os.path.join(tempfile.mkdtemp(), "onion.json")
with open(fname, 'w') as f:
f.write(json.dumps(onion))
plugin.log("Holding onto an incoming htlc for 10 seconds")
time.sleep(10)
print("Onion written to {}".format(fname))
# Give the tester something to look for
plugin.log("htlc_accepted hook called")
return {'result': 'continue'}
@plugin.init()
def init(options, configuration, plugin):
plugin.log("hold_htlcs.py initializing")
plugin.run()
| 20.930233
| 70
| 0.698889
|
b2becbfb76f5d22b95737f1c020cdb78f56491bd
| 1,200
|
py
|
Python
|
fedlearner/common/hooks.py
|
Hsy-Intel/fedlearner
|
d5d0bb5549e115eaf0dec5a00a78dcb21ac0909d
|
[
"Apache-2.0"
] | 772
|
2020-01-21T13:59:42.000Z
|
2022-03-30T08:20:16.000Z
|
fedlearner/common/hooks.py
|
Hsy-Intel/fedlearner
|
d5d0bb5549e115eaf0dec5a00a78dcb21ac0909d
|
[
"Apache-2.0"
] | 126
|
2020-03-03T07:54:39.000Z
|
2022-03-08T23:24:03.000Z
|
fedlearner/common/hooks.py
|
Hsy-Intel/fedlearner
|
d5d0bb5549e115eaf0dec5a00a78dcb21ac0909d
|
[
"Apache-2.0"
] | 198
|
2020-01-22T02:16:17.000Z
|
2022-03-31T01:13:05.000Z
|
# Copyright 2021 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import os
import importlib
from typing import Any
def parse_and_call_fn(module_fn_path: str) -> Any:
if module_fn_path.find(':') == -1:
raise RuntimeError(f'Invalid module_fn_path: {module_fn_path}')
module_path, func_name = module_fn_path.split(':')
module = importlib.import_module(module_path)
# Dynamically run the function
return getattr(module, func_name)()
def pre_start_hook() -> Any:
before_hook_path = os.getenv('PRE_START_HOOK', None)
if before_hook_path:
return parse_and_call_fn(before_hook_path)
return None
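# A hedged usage sketch (the module and function names below are made up):
# with PRE_START_HOOK set to "my_project.hooks:warm_up", pre_start_hook()
# imports `my_project.hooks` and returns the result of calling `warm_up()`.
#
#   os.environ["PRE_START_HOOK"] = "my_project.hooks:warm_up"
#   result = pre_start_hook()  # same as my_project.hooks.warm_up()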
| 33.333333
| 74
| 0.743333
|
79affd92ba3ca5b74d1bde5149d5507ee187a5a3
| 933
|
py
|
Python
|
file_syncer/constants.py
|
Kami/python-file-syncer
|
4ce8e771653324388d9b0ff5c3b1040089d58525
|
[
"Apache-2.0"
] | 5
|
2015-01-10T18:22:54.000Z
|
2021-07-11T00:11:31.000Z
|
file_syncer/constants.py
|
ogrisel/python-file-syncer
|
4bab4c9a046b61de1b9631a0aa4d3513bc872abe
|
[
"Apache-2.0"
] | null | null | null |
file_syncer/constants.py
|
ogrisel/python-file-syncer
|
4bab4c9a046b61de1b9631a0aa4d3513bc872abe
|
[
"Apache-2.0"
] | 4
|
2016-08-20T04:44:28.000Z
|
2021-11-05T05:38:49.000Z
|
# Licensed to Tomaz Muraus under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# Tomaz Muraus licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'VALID_LOG_LEVELS',
'MANIFEST_FILE'
]
VALID_LOG_LEVELS = ['DEBUG', 'ERROR', 'FATAL', 'CRITICAL', 'INFO', 'WARNING']
MANIFEST_FILE = 'manifest.json'
| 38.875
| 78
| 0.754555
|
a7f462909a12b385e0135018764efe9d121113e3
| 17,705
|
py
|
Python
|
tests/labeling/text_classification/test_label_models.py
|
InquestGeronimo/rubrix
|
04b6e619c900cc67f79020197d7ef242501b360e
|
[
"Apache-2.0"
] | 888
|
2021-03-26T20:39:21.000Z
|
2022-03-31T18:09:37.000Z
|
tests/labeling/text_classification/test_label_models.py
|
InquestGeronimo/rubrix
|
04b6e619c900cc67f79020197d7ef242501b360e
|
[
"Apache-2.0"
] | 805
|
2021-04-29T09:24:44.000Z
|
2022-03-31T18:29:34.000Z
|
tests/labeling/text_classification/test_label_models.py
|
InquestGeronimo/rubrix
|
04b6e619c900cc67f79020197d7ef242501b360e
|
[
"Apache-2.0"
] | 72
|
2021-04-30T08:53:36.000Z
|
2022-03-31T10:48:33.000Z
|
# coding=utf-8
# Copyright 2021-present, the Recognai S.L. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import numpy as np
import pytest
from rubrix import TextClassificationRecord
from rubrix.labeling.text_classification import FlyingSquid, Snorkel, WeakLabels
from rubrix.labeling.text_classification.label_models import (
LabelModel,
MissingAnnotationError,
NotFittedError,
TieBreakPolicy,
TooFewRulesError,
)
@pytest.fixture
def weak_labels(monkeypatch):
def mock_load(*args, **kwargs):
return [TextClassificationRecord(inputs="test", id=i) for i in range(4)]
monkeypatch.setattr(
"rubrix.labeling.text_classification.weak_labels.load", mock_load
)
def mock_apply(self, *args, **kwargs):
weak_label_matrix = np.array(
[[0, 1, -1], [2, 0, -1], [-1, -1, -1], [0, 2, 2]],
dtype=np.short,
)
annotation_array = np.array([0, 1, -1, 2], dtype=np.short)
label2int = {None: -1, "negative": 0, "positive": 1, "neutral": 2}
return weak_label_matrix, annotation_array, label2int
monkeypatch.setattr(WeakLabels, "_apply_rules", mock_apply)
return WeakLabels(rules=[lambda: None] * 3, dataset="mock")
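# In the mocked weak labels above: the matrix has one row per record and one
# column per rule, with -1 meaning the rule abstained; the annotation array
# holds the gold label per record (-1 = not annotated), both encoded via
# label2int.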
@pytest.fixture
def weak_labels_from_guide(monkeypatch, resources):
matrix_and_annotation = np.load(
str(resources / "weak-supervision-guide-matrix.npy")
)
matrix, annotation = matrix_and_annotation[:, :-1], matrix_and_annotation[:, -1]
def mock_load(*args, **kwargs):
return [
TextClassificationRecord(inputs="mock", id=i) for i in range(len(matrix))
]
monkeypatch.setattr(
"rubrix.labeling.text_classification.weak_labels.load", mock_load
)
def mock_apply(self, *args, **kwargs):
return matrix, annotation, {None: -1, "SPAM": 0, "HAM": 1}
monkeypatch.setattr(WeakLabels, "_apply_rules", mock_apply)
return WeakLabels(rules=[lambda x: "mock"] * matrix.shape[1], dataset="mock")
def test_tie_break_policy_enum():
with pytest.raises(ValueError, match="mock is not a valid TieBreakPolicy"):
TieBreakPolicy("mock")
class TestLabelModel:
def test_weak_label_property(self):
weak_labels = object()
label_model = LabelModel(weak_labels)
assert label_model.weak_labels is weak_labels
def test_abstract_methods(self):
label_model = LabelModel(None)
with pytest.raises(NotImplementedError):
label_model.fit()
with pytest.raises(NotImplementedError):
label_model.score()
with pytest.raises(NotImplementedError):
label_model.predict()
class TestSnorkel:
def test_not_installed(self, monkeypatch):
monkeypatch.setitem(sys.modules, "snorkel", None)
with pytest.raises(ModuleNotFoundError, match="pip install snorkel"):
Snorkel(None)
def test_init(self, weak_labels):
from snorkel.labeling.model import LabelModel as SnorkelLabelModel
label_model = Snorkel(weak_labels)
assert label_model.weak_labels is weak_labels
assert isinstance(label_model._model, SnorkelLabelModel)
assert label_model._model.cardinality == 3
@pytest.mark.parametrize(
"wrong_mapping,expected",
[
(
{None: -10, "negative": 0, "positive": 1, "neutral": 2},
{-10: -1, 0: 0, 1: 1, 2: 2},
),
(
{None: -1, "negative": 1, "positive": 3, "neutral": 4},
{-1: -1, 1: 0, 3: 1, 4: 2},
),
],
)
def test_init_wrong_mapping(self, weak_labels, wrong_mapping, expected):
weak_labels.change_mapping(wrong_mapping)
label_model = Snorkel(weak_labels)
assert label_model._weaklabels2snorkel == expected
assert label_model._snorkel2weaklabels == {k: v for v, k in expected.items()}
@pytest.mark.parametrize(
"include_annotated_records",
[True, False],
)
def test_fit(self, monkeypatch, weak_labels, include_annotated_records):
def mock_fit(self, L_train, *args, **kwargs):
if include_annotated_records:
assert (L_train == weak_labels.matrix()).all()
else:
assert (L_train == weak_labels.matrix(has_annotation=False)).all()
assert kwargs == {"passed_on": None}
monkeypatch.setattr(
"snorkel.labeling.model.LabelModel.fit",
mock_fit,
)
label_model = Snorkel(weak_labels)
label_model.fit(
include_annotated_records=include_annotated_records, passed_on=None
)
def test_fit_automatically_added_kwargs(self, weak_labels):
label_model = Snorkel(weak_labels)
with pytest.raises(ValueError, match="provided automatically"):
label_model.fit(L_train=None)
@pytest.mark.parametrize(
"policy,include_annotated_records,include_abstentions,expected",
[
("abstain", True, False, (2, ["positive", "negative"], [0.8, 0.9])),
(
"abstain",
True,
True,
(4, [None, None, "positive", "negative"], [None, None, 0.8, 0.9]),
),
("random", False, True, (1, ["positive"], [0.8])),
(
"random",
True,
True,
(
4,
["positive", "negative", "positive", "negative"],
[0.4 + 0.0001, 1.0 / 3 + 0.0001, 0.8, 0.9],
),
),
],
)
def test_predict(
self,
weak_labels,
monkeypatch,
policy,
include_annotated_records,
include_abstentions,
expected,
):
def mock_predict(self, L, return_probs, tie_break_policy, *args, **kwargs):
assert tie_break_policy == policy
assert return_probs is True
if include_annotated_records:
assert len(L) == 4
preds = np.array([-1, -1, 1, 0])
if policy == "random":
preds = np.array([1, 0, 1, 0])
return preds, np.array(
[
[0.4, 0.4, 0.2],
[1.0 / 3, 1.0 / 3, 1.0 / 3],
[0.1, 0.8, 0.1],
[0.9, 0.05, 0.05],
]
)
else:
assert len(L) == 1
return np.array([1]), np.array([[0.1, 0.8, 0.1]])
monkeypatch.setattr(
"snorkel.labeling.model.LabelModel.predict",
mock_predict,
)
label_model = Snorkel(weak_labels)
records = label_model.predict(
tie_break_policy=policy,
include_annotated_records=include_annotated_records,
include_abstentions=include_abstentions,
)
assert len(records) == expected[0]
assert [
rec.prediction[0][0] if rec.prediction else None for rec in records
] == expected[1]
assert [
rec.prediction[0][1] if rec.prediction else None for rec in records
] == expected[2]
@pytest.mark.parametrize("policy,expected", [("abstain", 0.5), ("random", 2.0 / 3)])
def test_score(self, monkeypatch, weak_labels, policy, expected):
def mock_predict(self, L, return_probs, tie_break_policy):
assert (L == weak_labels.matrix(has_annotation=True)).all()
assert return_probs is True
assert tie_break_policy == policy
if policy == "abstain":
predictions = np.array([-1, 1, 0])
elif policy == "random":
predictions = np.array([0, 1, 0])
else:
raise ValueError("Untested policy!")
probabilities = None # accuracy does not need probabs ...
return predictions, probabilities
monkeypatch.setattr(
"snorkel.labeling.model.LabelModel.predict",
mock_predict,
)
label_model = Snorkel(weak_labels)
assert label_model.score(tie_break_policy=policy)["accuracy"] == pytest.approx(
expected
)
def test_score_without_annotations(self, weak_labels):
weak_labels._annotation_array = np.array([], dtype=np.short)
label_model = Snorkel(weak_labels)
with pytest.raises(MissingAnnotationError, match="need annotated records"):
label_model.score()
@pytest.mark.parametrize(
"change_mapping",
[False, True],
)
def test_integration(self, weak_labels_from_guide, change_mapping):
if change_mapping:
weak_labels_from_guide.change_mapping({None: -10, "HAM": 2, "SPAM": 5})
label_model = Snorkel(weak_labels_from_guide)
label_model.fit(seed=43)
metrics = label_model.score()
assert metrics["accuracy"] == pytest.approx(0.8947368421052632)
records = label_model.predict()
assert len(records) == 1177
assert records[0].prediction == [
("SPAM", pytest.approx(0.5633776670811805)),
("HAM", pytest.approx(0.4366223329188196)),
]
class TestFlyingSquid:
def test_not_installed(self, monkeypatch):
monkeypatch.setitem(sys.modules, "flyingsquid", None)
with pytest.raises(ModuleNotFoundError, match="pip install pgmpy flyingsquid"):
FlyingSquid(None)
def test_init(self, weak_labels):
label_model = FlyingSquid(weak_labels)
assert label_model._labels == ["negative", "positive", "neutral"]
with pytest.raises(ValueError, match="must not contain 'm'"):
FlyingSquid(weak_labels, m="mock")
weak_labels._rules = weak_labels.rules[:2]
with pytest.raises(TooFewRulesError, match="at least three"):
FlyingSquid(weak_labels)
@pytest.mark.parametrize("include_annotated,expected", [(False, 1), (True, 4)])
def test_fit(self, monkeypatch, weak_labels, include_annotated, expected):
def mock_fit(*args, **kwargs):
if not include_annotated:
assert (kwargs["L_train"] == np.array([0, 0, 0])).all()
assert len(kwargs["L_train"]) == expected
monkeypatch.setattr(
"flyingsquid.label_model.LabelModel.fit",
mock_fit,
)
label_model = FlyingSquid(weak_labels)
label_model.fit(include_annotated_records=include_annotated)
assert len(label_model._models) == 3
def test_fit_init_kwargs(self, monkeypatch, weak_labels):
class MockLabelModel:
def __init__(self, m, mock):
assert m == len(weak_labels.rules)
assert mock == "mock"
def fit(self, L_train, mock):
assert mock == "mock_fit_kwargs"
monkeypatch.setattr(
"flyingsquid.label_model.LabelModel",
MockLabelModel,
)
label_model = FlyingSquid(weak_labels, mock="mock")
label_model.fit(mock="mock_fit_kwargs")
@pytest.mark.parametrize(
"policy,include_annotated_records,include_abstentions,verbose,expected",
[
(
"abstain",
False,
False,
True,
{
"verbose": True,
"L_matrix_length": 1,
"return": np.array([[0.5, 0.5]]),
"nr_of_records": 0,
},
),
(
"abstain",
True,
True,
False,
{
"verbose": False,
"L_matrix_length": 4,
"return": np.array([[0.5, 0.5] * 4]),
"nr_of_records": 4,
"prediction": None,
},
),
(
"random",
False,
False,
False,
{
"verbose": False,
"L_matrix_length": 1,
"return": np.array([[0.5, 0.5]]),
"nr_of_records": 1,
"prediction": [
("negative", 0.3334333333333333),
("neutral", 0.3332833333333333),
("positive", 0.3332833333333333),
],
},
),
],
)
def test_predict(
self,
weak_labels,
monkeypatch,
policy,
include_annotated_records,
include_abstentions,
verbose,
expected,
):
class MockPredict:
calls_count = 0
@classmethod
def __call__(cls, L_matrix, verbose):
assert verbose is expected["verbose"]
assert len(L_matrix) == expected["L_matrix_length"]
cls.calls_count += 1
return expected["return"]
monkeypatch.setattr(
"flyingsquid.label_model.LabelModel.predict_proba",
MockPredict(),
)
label_model = FlyingSquid(weak_labels)
label_model.fit()
records = label_model.predict(
tie_break_policy=policy,
include_annotated_records=include_annotated_records,
include_abstentions=include_abstentions,
verbose=verbose,
)
assert MockPredict.calls_count == 3
assert len(records) == expected["nr_of_records"]
if records:
assert records[0].prediction == expected["prediction"]
def test_predict_binary(self, monkeypatch, weak_labels):
class MockPredict:
calls_count = 0
@classmethod
def __call__(cls, L_matrix, verbose):
cls.calls_count += 1
return np.array([[0.6, 0.4]])
monkeypatch.setattr(
"flyingsquid.label_model.LabelModel.predict_proba",
MockPredict(),
)
weak_labels._label2int = {None: -1, "negative": 0, "positive": 1}
label_model = FlyingSquid(weak_labels=weak_labels)
label_model.fit()
records = label_model.predict()
assert MockPredict.calls_count == 1
assert len(records) == 1
assert records[0].prediction == [("negative", 0.6), ("positive", 0.4)]
def test_predict_not_implented_tbp(self, weak_labels):
label_model = FlyingSquid(weak_labels)
label_model.fit()
with pytest.raises(NotImplementedError, match="true-random"):
label_model.predict(tie_break_policy="true-random")
def test_predict_not_fitted_error(self, weak_labels):
label_model = FlyingSquid(weak_labels)
with pytest.raises(NotFittedError, match="not fitted yet"):
label_model.predict()
def test_score_not_fitted_error(self, weak_labels):
label_model = FlyingSquid(weak_labels)
with pytest.raises(NotFittedError, match="not fitted yet"):
label_model.score()
def test_score(self, monkeypatch, weak_labels):
def mock_predict(self, weak_label_matrix, verbose):
assert verbose is False
assert len(weak_label_matrix) == 3
return np.array([[0.8, 0.1, 0.1], [0.1, 0.8, 0.1], [0.1, 0.1, 0.8]])
monkeypatch.setattr(FlyingSquid, "_predict", mock_predict)
label_model = FlyingSquid(weak_labels)
metrics = label_model.score()
assert "accuracy" in metrics
assert metrics["accuracy"] == pytest.approx(1.0)
@pytest.mark.parametrize(
"tbp,vrb,expected", [("abstain", False, 1.0), ("random", True, 2 / 3.0)]
)
def test_score_tbp(self, monkeypatch, weak_labels, tbp, vrb, expected):
def mock_predict(self, weak_label_matrix, verbose):
assert verbose is vrb
assert len(weak_label_matrix) == 3
return np.array(
[[0.8, 0.1, 0.1], [0.4, 0.4, 0.2], [1 / 3.0, 1 / 3.0, 1 / 3.0]]
)
monkeypatch.setattr(FlyingSquid, "_predict", mock_predict)
label_model = FlyingSquid(weak_labels)
metrics = label_model.score(tie_break_policy=tbp, verbose=vrb)
assert metrics["accuracy"] == pytest.approx(expected)
def test_score_not_implemented_tbp(self, weak_labels):
label_model = FlyingSquid(weak_labels)
label_model.fit()
with pytest.raises(NotImplementedError, match="true-random"):
label_model.score(tie_break_policy="true-random")
def test_integration(self, weak_labels_from_guide):
label_model = FlyingSquid(weak_labels_from_guide)
label_model.fit()
metrics = label_model.score()
assert metrics["accuracy"] == pytest.approx(0.9282296650717703)
records = label_model.predict()
assert len(records) == 1177
assert records[0].prediction == [
("SPAM", 0.8236983486087645),
("HAM", 0.17630165139123552),
]
| 33.917625
| 88
| 0.577633
|
2068526162b27630307c7b206043a5d49a87b6bd
| 7,966
|
py
|
Python
|
smartmeter.py
|
BasvanderWorp/smartmeter
|
76fd3a0f55ff8081879f1e7884030e8a2e46d2a4
|
[
"MIT"
] | null | null | null |
smartmeter.py
|
BasvanderWorp/smartmeter
|
76fd3a0f55ff8081879f1e7884030e8a2e46d2a4
|
[
"MIT"
] | null | null | null |
smartmeter.py
|
BasvanderWorp/smartmeter
|
76fd3a0f55ff8081879f1e7884030e8a2e46d2a4
|
[
"MIT"
] | null | null | null |
# Dutch Smart Meter Reader (P1)
import sys
import serial
from util import read_config
import argparse
import os
import errno
import logging
from datetime import datetime
import socket
##############################################################################
# Part 0.1: Set global variables
##############################################################################
PROGRAM_NAME = 'dsmr'
PROGRAM_VERSION = "0.01"
PROGRAM_VERSION_DATE = "11-07-2021"
PROGRAM_AUTHOR = "Bas van der Worp"
CONFIG_STORE = '/dsmr/dsmr_config.json'
CONFIG = read_config(CONFIG_STORE)
LOG_PATH_BASE = CONFIG['LOG_PATH_BASE']
OUTPUT_PATH_BASE = CONFIG['OUTPUT_PATH_BASE']
DSMR_PORT = CONFIG['DSMR_PORT']
DSMR_BAUDRATE = eval(CONFIG['DSMR_BAUDRATE'])
DSMR_BYTESIZE = eval(CONFIG['DSMR_BYTESIZE'])
DSMR_PARITY = eval(CONFIG['DSMR_PARITY'])
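# Illustrative sketch (not from the original repo): the JSON file loaded by
# read_config() above is assumed to carry entries like the ones below. The
# serial-related values are strings that get eval()'d into pyserial constants;
# the paths and port name here are hypothetical.
#
# {
#     "LOG_PATH_BASE": "/var/log/",
#     "OUTPUT_PATH_BASE": "/var/lib/dsmr/",
#     "DSMR_PORT": "/dev/ttyUSB0",
#     "DSMR_BAUDRATE": "115200",
#     "DSMR_BYTESIZE": "serial.EIGHTBITS",
#     "DSMR_PARITY": "serial.PARITY_NONE"
# }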
##############################################################################
# Main program
##############################################################################
if __name__ == '__main__':
##########################################################################
# === Part 0.2: Commandline parameter initialisation
##########################################################################
parser = argparse.ArgumentParser()
requiredNamed = parser.add_argument_group('required named arguments')
requiredNamed.add_argument("-d", "--dummy", help="Dummy")
args = parser.parse_args()
if args.dummy:
dummy = args.dummy
else:
dummy = ""
##########################################################################
# Part 0.3: Initialise logging
##########################################################################
# Check whether logfolder exists. If not, write to 'log' folder
LOG_PATH = f'{LOG_PATH_BASE}{PROGRAM_NAME}/'
if not os.path.exists(LOG_PATH):
try:
os.makedirs(LOG_PATH)
except OSError as e:
if e.errno != errno.EEXIST:
LOG_PATH = ""
raise
OS_USER = os.getlogin().lower()
LOGLEVEL_DEBUG = eval('logging.DEBUG')
LOGLEVEL_INFO = eval('logging.INFO')
LOGFILE = os.path.normpath(LOG_PATH + "log"
"_" + "{:%Y%m%d}".format(datetime.now()) +
".log")
logging.basicConfig(
filename=LOGFILE,
level='INFO',
format='%(asctime)s %(levelname)s ' +
'%(name)s %(funcName)s: %(message)s',
datefmt='%d-%m-%Y %H:%M:%S')
logger = logging.getLogger(__name__)
msg_1 = '='*80
logger.info(msg_1)
logger.info(f'Start program : {PROGRAM_NAME}')
logger.info(f'Version : {PROGRAM_VERSION}')
logger.info(f'Version date : {PROGRAM_VERSION_DATE}')
logger.info(f'Host : {socket.gethostname()}')
logger.info(f'parameters : {CONFIG}')
##########################################################################
# Part 0.4: Start logging
##########################################################################
print ("Digital Smart Meter Reader (P1), version ", PROGRAM_VERSION)
print ("Press Control-C to stop")
# Set COM port config
ser = serial.Serial()
ser.port = DSMR_PORT
ser.baudrate = DSMR_BAUDRATE
ser.bytesize = DSMR_BYTESIZE
ser.parity = DSMR_PARITY
# Open COM port
try:
ser.open()
logger.info('Serial port opened!')
except serial.serialutil.SerialException as err1:
if err1.args[0] == 'Port is already open.':
logger.warning(err1)
pass
else:
msg = f'Error opening port {ser.name}, error: {err1}'
logger.warning(msg)
print (msg, ' program terminated')
sys.exit(msg)
# Initialize
telegram_counter = 1
line_counter = 0
while telegram_counter < 25:
p1_line = ''
telegram_start_line_found = False
# Read lines from serial port, search start of telegram
while not telegram_start_line_found:
try:
p1_raw = ser.readline()
line_counter += 1
except:
                sys.exit(f"Serial port {ser.name} cannot be read.")
if p1_raw == b'/ISK5\\2M550T-1011\r\n':
telegram_start_line_found = True
print(f'{str(line_counter).zfill(4)} telegram {telegram_counter}', end="")
else:
# skip line, not a start line
print(f'{str(line_counter).zfill(4)} SKIPPED: {p1_raw}')
if telegram_start_line_found:
# read first line
p1_raw = ser.readline()
p1_str = p1_raw.decode('utf-8')
line_counter += 1
if p1_str == '!':
telegram_last_line_found = True
else:
telegram_last_line_found = False
else:
msg = 'start line not found'
logger.error(msg)
sys.exit(msg)
# Read all telegram lines
while not telegram_last_line_found:
p1_str = p1_raw.decode('utf-8')
msg = f'{str(line_counter).zfill(4)}: RAW: {p1_raw}, DECODE:{p1_str}'
# print(msg, end="")
if p1_str[0] == '!':
# print(f'{str(line_counter).zfill(4)} LAST LINE FOUND: {p1_raw}')
telegram_last_line_found = True
telegram_counter += 1
else:
telegram_last_line_found = False
if p1_str[-2:] == '\r\n':
p1_str = p1_str[:-2]
else:
msg = f'Unexpected end of line in telegram line {p1_raw}'
logger.error(msg)
sys.exit(msg)
if len(p1_str) > 3:
if p1_str[3] == ':':
p1_str = p1_str[4:]
else:
# skip line
try:
# print(f'{str(line_counter).zfill(4)} SKIPPED2: {p1_raw}')
p1_raw = ser.readline()
line_counter += 1
except:
                        sys.exit(f"Serial port {ser.name} cannot be read.")
continue
else:
# skip line (maybe error)
try:
# print(f'{str(line_counter).zfill(4)} SKIPPED3: {p1_raw}')
p1_raw = ser.readline()
line_counter += 1
except:
                    sys.exit(f"Serial port {ser.name} cannot be read.")
if p1_raw == b'!5AC3\r\n':
telegram_last_line_found = True
telegram_counter += 1
print(f'{str(line_counter).zfill(4)} LAST LINE FOUND: {p1_raw}')
continue
# print(p1_str)
measure = ""
value = ""
obis_code = p1_str.split('(')[0]
p1_value = p1_str.split('(')[1][:-1]
# remove closing bracket
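            # Illustrative walk-through (assumed DSMR-style line, not a captured
            # reading): a raw line such as b'1-0:1.7.0(00.193*kW)\r\n' has its
            # '\r\n' and the '1-0:' prefix stripped above, leaving
            # '1.7.0(00.193*kW)'; splitting on '(' then yields
            # obis_code == '1.7.0' and p1_value == '00.193*kW'.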
if obis_code == '1.7.0':
measure = "actual_delivery (kW)"
value = p1_value[:-1]
print(f'---- {measure}:{value}')
# print(f'{str(line_counter).zfill(4)}: {p1_raw}, {measure}:{value}')
try:
p1_raw = ser.readline()
line_counter += 1
except:
                sys.exit(f"Serial port {ser.name} cannot be read.")
# Close port and show status
try:
ser.close()
        msg = f"Serial port {ser.name} successfully closed."
logger.info(msg)
except:
        msg = f"Oops {ser.name}. Program terminated. Could not close serial port."
        logger.error(msg)
        sys.exit(msg)
| 37.051163
| 90
| 0.467989
|
8c66b2ca4c7662321a5ec627105265e1275aa02b
| 218
|
py
|
Python
|
app/logic/bluesteel/controllers/__init__.py
|
imvu/bluesteel
|
ab52133249a693b3cd2d8593c5d47408a3b0fce6
|
[
"MIT"
] | 10
|
2017-01-13T06:28:04.000Z
|
2020-11-18T13:00:26.000Z
|
app/logic/bluesteel/controllers/__init__.py
|
imvu/bluesteel
|
ab52133249a693b3cd2d8593c5d47408a3b0fce6
|
[
"MIT"
] | null | null | null |
app/logic/bluesteel/controllers/__init__.py
|
imvu/bluesteel
|
ab52133249a693b3cd2d8593c5d47408a3b0fce6
|
[
"MIT"
] | 2
|
2018-03-29T14:10:53.000Z
|
2019-11-20T08:21:57.000Z
|
""" Automatic file """
from app.logic.bluesteel.controllers.BluesteelLayoutController import BluesteelLayoutController
from app.logic.bluesteel.controllers.BluesteelProjectController import BluesteelProjectController
| 43.6
| 97
| 0.876147
|
86a0861b7a37452603165bac313e8b230c774011
| 1,474
|
py
|
Python
|
vframe/vframe/commands/save_images.py
|
kant/vframe
|
28e49ca62d9036a78a25b26eb0fb7e3cf8c79031
|
[
"MIT"
] | 1
|
2021-04-18T10:42:10.000Z
|
2021-04-18T10:42:10.000Z
|
vframe/vframe/commands/save_images.py
|
vframeio/_vframe_v0_archived
|
28e49ca62d9036a78a25b26eb0fb7e3cf8c79031
|
[
"MIT"
] | null | null | null |
vframe/vframe/commands/save_images.py
|
vframeio/_vframe_v0_archived
|
28e49ca62d9036a78a25b26eb0fb7e3cf8c79031
|
[
"MIT"
] | null | null | null |
"""
Generates metadata using Yolo/Darknet Python interface
- about 20-30 FPS on NVIDIA 1080 Ti GPU
- SPP currently not working
- ensure image size matches network image size
"""
import click
from vframe.settings import types
from vframe.utils import click_utils
from vframe.settings import vframe_cfg as cfg
from cli_vframe import processor
@click.command()
@click.option('-o', '--output', 'opt_dir_media', required=True,
help='Path to media folder')
@processor
@click.pass_context
def cli(ctx, sink, opt_dir_media):
"""Saves keyframes for still-frame-video"""
# -------------------------------------------------
# imports
from os.path import join
import cv2 as cv
from vframe.utils import file_utils, logger_utils
from vframe.settings.paths import Paths
# -------------------------------------------------
# initialize
log = logger_utils.Logger.getLogger()
log.debug('init saves images')
file_utils.mkdirs(opt_dir_media)
# -------------------------------------------------
# process
frame_count = 0
while True:
chair_item = yield
for frame_idx, frame in chair_item.drawframes.items():
# save frame to the output folder
fp_im = join(opt_dir_media, 'frame_{}.png'.format(file_utils.zpad(frame_count)))
cv.imwrite(fp_im, frame)
frame_count += 1
# ------------------------------------------------------------
# send back to generator
sink.send(chair_item)
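# Reading of the control flow above (descriptive note, not original
# documentation): the decorated cli() function is a generator-based processor.
# Each loop iteration receives a chair_item via `yield`, writes every frame in
# chair_item.drawframes to opt_dir_media as frame_<zero-padded count>.png via
# cv.imwrite, and then forwards the item downstream with sink.send(chair_item).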
| 23.396825
| 86
| 0.603121
|
175d6896e49edee309c13404160dfc62f4db8196
| 1,471
|
py
|
Python
|
ngram-protein-classifier/generate.py
|
yasithmilinda/uom-projects
|
d1810a78ff8de1c57b760836cf102f63c5945962
|
[
"MIT"
] | null | null | null |
ngram-protein-classifier/generate.py
|
yasithmilinda/uom-projects
|
d1810a78ff8de1c57b760836cf102f63c5945962
|
[
"MIT"
] | null | null | null |
ngram-protein-classifier/generate.py
|
yasithmilinda/uom-projects
|
d1810a78ff8de1c57b760836cf102f63c5945962
|
[
"MIT"
] | null | null | null |
import re
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
ds = pd.read_csv("data/dataset_clean_p1.csv")
# Remove class labels with low frequencies
ds_vc = ds['Classification'].value_counts()
ds_vc = ds_vc[(ds_vc >= ds_vc.mean() + ds_vc.std())].head()
print(ds_vc)
ds_clean = ds[ds['Classification'].isin(ds_vc.index)]
# Data Cleaning
ds_clean['Sequence'] = ds_clean['Sequence'].map(lambda x: re.sub(r'([^A-Z]|\s)+', '-', str(x).upper()))
# Sample Dataset
ds_sampled = ds_clean.groupby('Classification')
ds_sampled = pd.DataFrame(ds_sampled.apply(lambda x: x.sample(ds_sampled.size().min())).reset_index(drop=True))
ds_sampled.insert(len(ds_sampled.columns) - 1, 'Sequence Length', ds_sampled['Sequence'].apply(lambda x: len(x)))
print(ds_sampled)
# Feature expansion
V = CountVectorizer(lowercase=False, ngram_range=(2, 2), tokenizer=lambda x: list(x), dtype=np.int)
X = V.fit_transform(ds_sampled['Sequence'].tolist())
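# Illustrative example (toy sequence, not from the dataset): with
# tokenizer=list and ngram_range=(2, 2), CountVectorizer treats each sequence
# as a character stream and counts overlapping residue bigrams, e.g.
#
#   list("MKVL") -> ['M', 'K', 'V', 'L'] -> bigram features 'M K', 'K V', 'V L'
#
# so each column of X holds the count of one such bigram per sequence.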
print(len(V.get_feature_names()))
vec_df = pd.DataFrame(X.toarray(), columns=V.get_feature_names())
ds_sampled = ds_sampled.drop(['Sequence'], axis=1)
df = pd.merge(left=vec_df, right=ds_sampled, left_index=True, right_index=True).drop_duplicates()
# Postprocessing
df['Classification'] = df['Classification'].apply(lambda x: str(x).replace(',', '&'))
# Write out
df.to_csv("data/dataset_out.csv", index=False)
| 35.878049
| 113
| 0.738273
|
b3ad70037afe5dfce34b8a0a06b590f171553ffb
| 7,067
|
py
|
Python
|
accelbyte_py_sdk/api/iam/operations/users_v4/public_generate_my_back_da569a.py
|
AccelByte/accelbyte-python-sdk
|
dcd311fad111c59da828278975340fb92e0f26f7
|
[
"MIT"
] | null | null | null |
accelbyte_py_sdk/api/iam/operations/users_v4/public_generate_my_back_da569a.py
|
AccelByte/accelbyte-python-sdk
|
dcd311fad111c59da828278975340fb92e0f26f7
|
[
"MIT"
] | 1
|
2021-10-13T03:46:58.000Z
|
2021-10-13T03:46:58.000Z
|
accelbyte_py_sdk/api/iam/operations/users_v4/public_generate_my_back_da569a.py
|
AccelByte/accelbyte-python-sdk
|
dcd311fad111c59da828278975340fb92e0f26f7
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: justice_py_sdk_codegen/__main__.py
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
# justice-iam-service (5.10.1)
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from .....core import Operation
from .....core import HeaderStr
from .....core import HttpResponse
from ...models import ModelBackupCodesResponseV4
from ...models import RestErrorResponse
class PublicGenerateMyBackupCodesV4(Operation):
"""Generate backup codes (PublicGenerateMyBackupCodesV4)
This endpoint is used to generate 8-digits backup codes.
Each code is a one-time code and will be deleted once used.
This endpoint Requires valid user access token
Properties:
url: /iam/v4/public/namespaces/{namespace}/users/me/mfa/backupCode
method: POST
tags: ["Users V4"]
consumes: []
produces: ["application/json"]
securities: [BEARER_AUTH]
namespace: (namespace) REQUIRED str in path
Responses:
200: OK - ModelBackupCodesResponseV4 (Backup codes generated)
400: Bad Request - RestErrorResponse (10191: email address not verified | 10192: factor not enabled | 10171: email address not found)
401: Unauthorized - RestErrorResponse (20001: unauthorized access)
403: Forbidden - RestErrorResponse (20003: forbidden access)
404: Not Found - RestErrorResponse (10139: platform account not found | 20008: user not found)
500: Internal Server Error - RestErrorResponse (20000: internal server error)
"""
# region fields
_url: str = "/iam/v4/public/namespaces/{namespace}/users/me/mfa/backupCode"
_method: str = "POST"
_consumes: List[str] = []
_produces: List[str] = ["application/json"]
_securities: List[List[str]] = [["BEARER_AUTH"]]
_location_query: str = None
namespace: str # REQUIRED in [path]
# endregion fields
# region properties
@property
def url(self) -> str:
return self._url
@property
def method(self) -> str:
return self._method
@property
def consumes(self) -> List[str]:
return self._consumes
@property
def produces(self) -> List[str]:
return self._produces
@property
def securities(self) -> List[List[str]]:
return self._securities
@property
def location_query(self) -> str:
return self._location_query
# endregion properties
# region get methods
# endregion get methods
# region get_x_params methods
def get_all_params(self) -> dict:
return {
"path": self.get_path_params(),
}
def get_path_params(self) -> dict:
result = {}
if hasattr(self, "namespace"):
result["namespace"] = self.namespace
return result
# endregion get_x_params methods
# region is/has methods
# endregion is/has methods
# region with_x methods
def with_namespace(self, value: str) -> PublicGenerateMyBackupCodesV4:
self.namespace = value
return self
# endregion with_x methods
# region to methods
def to_dict(self, include_empty: bool = False) -> dict:
result: dict = {}
if hasattr(self, "namespace") and self.namespace:
result["namespace"] = str(self.namespace)
elif include_empty:
result["namespace"] = ""
return result
# endregion to methods
# region response methods
# noinspection PyMethodMayBeStatic
def parse_response(self, code: int, content_type: str, content: Any) -> Tuple[Union[None, ModelBackupCodesResponseV4], Union[None, HttpResponse, RestErrorResponse]]:
"""Parse the given response.
200: OK - ModelBackupCodesResponseV4 (Backup codes generated)
400: Bad Request - RestErrorResponse (10191: email address not verified | 10192: factor not enabled | 10171: email address not found)
401: Unauthorized - RestErrorResponse (20001: unauthorized access)
403: Forbidden - RestErrorResponse (20003: forbidden access)
404: Not Found - RestErrorResponse (10139: platform account not found | 20008: user not found)
500: Internal Server Error - RestErrorResponse (20000: internal server error)
---: HttpResponse (Undocumented Response)
---: HttpResponse (Unexpected Content-Type Error)
---: HttpResponse (Unhandled Error)
"""
pre_processed_response, error = self.pre_process_response(code=code, content_type=content_type, content=content)
if error is not None:
return None, None if error.is_no_content() else error
code, content_type, content = pre_processed_response
if code == 200:
return ModelBackupCodesResponseV4.create_from_dict(content), None
if code == 400:
return None, RestErrorResponse.create_from_dict(content)
if code == 401:
return None, RestErrorResponse.create_from_dict(content)
if code == 403:
return None, RestErrorResponse.create_from_dict(content)
if code == 404:
return None, RestErrorResponse.create_from_dict(content)
if code == 500:
return None, RestErrorResponse.create_from_dict(content)
return None, self.handle_undocumented_response(code=code, content_type=content_type, content=content)
# endregion response methods
# region static methods
@classmethod
def create(
cls,
namespace: str,
) -> PublicGenerateMyBackupCodesV4:
instance = cls()
instance.namespace = namespace
return instance
@classmethod
def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> PublicGenerateMyBackupCodesV4:
instance = cls()
if "namespace" in dict_ and dict_["namespace"] is not None:
instance.namespace = str(dict_["namespace"])
elif include_empty:
instance.namespace = ""
return instance
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"namespace": "namespace",
}
@staticmethod
def get_required_map() -> Dict[str, bool]:
return {
"namespace": True,
}
# endregion static methods
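# Illustrative usage sketch (an assumption based on this file alone, not
# official SDK documentation): an operation instance is typically built with
# create() and its HTTP result decoded with parse_response(), e.g.
#
#   op = PublicGenerateMyBackupCodesV4.create(namespace="my-namespace")
#   # ... run `op` through the SDK's HTTP client to obtain code/content_type/content ...
#   result, error = op.parse_response(code=code, content_type=content_type, content=content)
#   if error is None:
#       print(result)
#
# The namespace value and the surrounding client call are hypothetical.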
| 29.569038
| 169
| 0.660959
|
c289bf3ba0b8566e47489cad6a057ac4349350fb
| 58,232
|
py
|
Python
|
ugali/utils/plotting.py
|
DarkEnergySurvey/ugali
|
82abffcc92bddf830d89f85cb3966870f7d9f720
|
[
"MIT"
] | 12
|
2016-10-26T20:45:33.000Z
|
2021-11-24T04:07:43.000Z
|
ugali/utils/plotting.py
|
DarkEnergySurvey/ugali
|
82abffcc92bddf830d89f85cb3966870f7d9f720
|
[
"MIT"
] | 64
|
2017-04-14T15:04:24.000Z
|
2022-02-03T19:42:57.000Z
|
ugali/utils/plotting.py
|
DarkEnergySurvey/ugali
|
82abffcc92bddf830d89f85cb3966870f7d9f720
|
[
"MIT"
] | 12
|
2016-06-23T21:42:46.000Z
|
2021-06-19T05:29:49.000Z
|
"""
Basic plotting tools.
"""
import os
import collections
import copy
import matplotlib
try: os.environ['DISPLAY']
except KeyError: matplotlib.use('Agg')
import yaml
import numpy as np
import pylab as plt
import healpy as hp
import fitsio
import scipy.ndimage as nd
import scipy.misc
from mpl_toolkits.axes_grid1 import AxesGrid,Grid,ImageGrid, make_axes_locatable
from matplotlib.ticker import MaxNLocator
import mpl_toolkits.axes_grid1.axes_divider as axes_divider
import ugali.utils.config
import ugali.observation.roi
import ugali.observation.catalog
import ugali.utils.skymap
import ugali.utils.projector
import ugali.utils.healpix
import ugali.isochrone
import ugali.analysis.loglike
from ugali.utils import fileio
from ugali.utils.healpix import ang2pix, get_nside
from ugali.utils.projector import mod2dist,gal2cel,cel2gal
from ugali.utils.projector import sphere2image,image2sphere
from ugali.utils.config import Config
from ugali.utils.logger import logger
from ugali.utils.mlab import isstring
params = {
#'backend': 'eps',
'axes.labelsize': 12,
#'text.fontsize': 12,
'xtick.labelsize': 10,
'ytick.labelsize': 10,
    'xtick.major.size': 3, # major tick size in points
    'xtick.minor.size': 1.5, # minor tick size in points
#'text.usetex': True,
##'figure.figsize': fig_size,
#'font.family':'serif',
#'font.serif':'Computer Modern Roman',
#'font.size': 10
}
matplotlib.rcParams.update(params)
############################################################
def histogram(title, title_x, title_y,
x, bins_x):
"""
Plot a basic histogram.
"""
plt.figure()
plt.hist(x, bins_x)
plt.xlabel(title_x)
plt.ylabel(title_y)
plt.title(title)
############################################################
def twoDimensionalHistogram(title, title_x, title_y,
z, bins_x, bins_y,
lim_x=None, lim_y=None,
vmin=None, vmax=None):
"""
    Create a two-dimensional histogram plot or binned map.
If using the outputs of np.histogram2d, remember to transpose the histogram.
INPUTS
"""
plt.figure()
mesh_x, mesh_y = np.meshgrid(bins_x, bins_y)
if vmin != None and vmin == vmax:
plt.pcolor(mesh_x, mesh_y, z)
else:
plt.pcolor(mesh_x, mesh_y, z, vmin=vmin, vmax=vmax)
plt.xlabel(title_x)
plt.ylabel(title_y)
plt.title(title)
plt.colorbar()
if lim_x:
plt.xlim(lim_x[0], lim_x[1])
if lim_y:
plt.ylim(lim_y[0], lim_y[1])
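# Illustrative call (assumed data, following the docstring's transpose note):
#
#   counts, xedges, yedges = np.histogram2d(x, y, bins=[bins_x, bins_y])
#   twoDimensionalHistogram('counts', 'x', 'y', counts.T, xedges, yedges)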
############################################################
def twoDimensionalScatter(title, title_x, title_y,
x, y,
lim_x = None, lim_y = None,
color = 'b', size = 20, alpha=None):
"""
Create a two-dimensional scatter plot.
INPUTS
"""
plt.figure()
plt.scatter(x, y, c=color, s=size, alpha=alpha, edgecolors='none')
plt.xlabel(title_x)
plt.ylabel(title_y)
plt.title(title)
if type(color) is not str:
plt.colorbar()
if lim_x:
plt.xlim(lim_x[0], lim_x[1])
if lim_y:
plt.ylim(lim_y[0], lim_y[1])
############################################################
def zoomedHealpixMap(title, map, lon, lat, radius,
xsize=1000, **kwargs):
"""
Inputs: lon (deg), lat (deg), radius (deg)
"""
reso = 60. * 2. * radius / xsize # Deg to arcmin
hp.gnomview(map=map, rot=[lon, lat, 0], title=title, xsize=xsize, reso=reso, degree=False, **kwargs)
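# Worked example of the reso conversion above (illustrative numbers): with
# radius = 1.0 deg and xsize = 1000 pixels the cutout spans 2 deg = 120 arcmin,
# so reso = 60. * 2. * 1.0 / 1000 = 0.12 arcmin per pixel.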
############################################################
def projScatter(lon, lat, **kwargs):
"""
Create a scatter plot on HEALPix projected axes.
Inputs: lon (deg), lat (deg)
"""
hp.projscatter(lon, lat, lonlat=True, **kwargs)
############################################################
def sparseHealpixFiles(title, infiles, field='MAGLIM',**kwargs):
"""
Inputs: field
"""
#map = ugali.utils.skymap.readSparseHealpixMaps(infiles,field)
map = ugali.utils.skymap.read_partial_map(infiles,field)
ax = hp.mollview(map=map, title=title, **kwargs)
return ax, map
############################################################
def drawHealpixMap(hpxmap, lon, lat, size=1.0, xsize=501, coord='GC', **kwargs):
"""
Draw local projection of healpix map.
"""
ax = plt.gca()
x = np.linspace(-size,size,xsize)
y = np.linspace(-size,size,xsize)
xx, yy = np.meshgrid(x,y)
coord = coord.upper()
if coord == 'GC':
#Assumes map and (lon,lat) are Galactic, but plotting celestial
llon, llat = image2sphere(*gal2cel(lon,lat),x=xx.flat,y=yy.flat)
pix = ang2pix(get_nside(hpxmap),*cel2gal(llon,llat))
elif coord == 'CG':
#Assumes map and (lon,lat) are celestial, but plotting Galactic
llon, llat = image2sphere(*cel2gal(lon,lat),x=xx.flat,y=yy.flat)
pix = ang2pix(get_nside(hpxmap),*gal2cel(llon,llat))
else:
#Assumes plotting the native coordinates
llon, llat = image2sphere(lon,lat,xx.flat,yy.flat)
pix = ang2pix(get_nside(hpxmap),llon,llat)
values = hpxmap[pix].reshape(xx.shape)
zz = np.ma.array(values,mask=(values==hp.UNSEEN),fill_value=np.nan)
return drawProjImage(xx,yy,zz,coord=coord,**kwargs)
def drawProjImage(xx, yy, zz=None, coord='C',**kwargs):
ax = plt.gca()
coord = coord.upper()
if coord[-1] == 'G':
ax.set_xlabel(r'$\Delta \ell\,(\deg)}$')
ax.set_ylabel(r'$\Delta b\,(\deg)$')
elif coord[-1] == 'C':
ax.set_xlabel(r'$\Delta \alpha_{2000}\,(\deg)$')
ax.set_ylabel(r'$\Delta \delta_{2000}\,(\deg)$')
else:
        msg = "Unrecognized coordinate: %s"%coord
logger.warning(msg)
# Celestial orientation (increasing to the east)
#ax.set_xlim(xx.max(),xx.min())
ax.set_xlim(xx.min(),xx.max())
ax.set_ylim(yy.min(),yy.max())
if zz is None: return None
return ax.pcolormesh(xx,yy,zz,**kwargs)
############################################################
def getSDSSImage(ra,dec,radius=1.0,xsize=800,opt='GML',**kwargs):
"""
Download Sloan Digital Sky Survey images
http://skyserver.sdss3.org/dr9/en/tools/chart/chart.asp
radius (degrees)
opts: (G) Grid, (L) Label, P (PhotoObj), S (SpecObj), O (Outline), (B) Bounding Box,
(F) Fields, (M) Mask, (Q) Plates, (I) Invert
"""
import subprocess
import tempfile
url="http://skyservice.pha.jhu.edu/DR10/ImgCutout/getjpeg.aspx?"
scale = 2. * radius * 3600. / xsize
params=dict(ra=ra,dec=dec,
width=xsize,height=xsize,
scale=scale,opt=opt)
query='&'.join("%s=%s"%(k,v) for k,v in params.items())
tmp = tempfile.NamedTemporaryFile(suffix='.jpeg')
cmd='wget --progress=dot:mega -O %s "%s"'%(tmp.name,url+query)
subprocess.call(cmd,shell=True)
im = plt.imread(tmp.name)
tmp.close()
return im
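# Illustrative call (coordinates hypothetical): getSDSSImage(ra=56.1, dec=-32.5,
# radius=0.5, xsize=400) fetches a JPEG cutout centred on (ra, dec) in which the
# 2*radius field of view is mapped onto xsize pixels (scale in arcsec/pixel).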
def getDSSImage(ra,dec,radius=1.0,xsize=800,**kwargs):
"""
Download Digitized Sky Survey images
https://archive.stsci.edu/cgi-bin/dss_form
https://archive.stsci.edu/cgi-bin/dss_search
Image is in celestial orientation (RA increases to the right)
https://archive.stsci.edu/dss/script_usage.html
ra (r) - right ascension
dec (d) - declination
equinox (e) - equinox (B1950 or J2000; default: J2000)
height (h) - height of image (arcminutes; default: 15.0)
width (w) - width of image (arcminutes; default: 15.0)
format (f) - image format (FITS or GIF; default: FITS)
compression (c) - compression (UNIX, GZIP, or NONE; default: NONE; compression
applies to FITS only)
version (v) - Which version of the survey to use:
1 - First Generation survey (garden variety)
2 - Second generation survey (incomplete)
3 - Check the 2nd generation; if no image is available,
then go to the 1st generation.
4 - The Quick V survey (whence came the Guide Stars Catalog;
used mostly for Phase II proposal submission)
save (s) - Save the file to disk instead of trying to display.
(ON (or anything) or not defined; default: not defined.)
For the skyview service, see:
https://skyview.gsfc.nasa.gov/current/docs/batchpage.html
"""
import subprocess
import tempfile
service = 'skyview'
if service == 'stsci':
url="https://archive.stsci.edu/cgi-bin/dss_search?"
scale = 2.0 * radius * 60.
params=dict(ra='%.3f'%ra,dec='%.3f'%dec,width=scale,height=scale,
format='gif',version=1)
#v='poss2ukstu_red'
elif service == 'skyview':
url="https://skyview.gsfc.nasa.gov/cgi-bin/images?"
params=dict(survey='DSS',position='%.3f,%.3f'%(ra,dec),scaling='Linear',
Return='GIF',size=2*radius,projection='Car',pixels=xsize)
else:
raise Exception("Unrecognized service.")
query='&'.join("%s=%s"%(k,v) for k,v in params.items())
tmp = tempfile.NamedTemporaryFile(suffix='.gif')
cmd='wget --progress=dot:mega -O %s "%s"'%(tmp.name,url+query)
subprocess.call(cmd,shell=True)
im = plt.imread(tmp.name)
tmp.close()
if service == 'stsci' and xsize:
im = scipy.misc.imresize(im,size=(xsize,xsize))
return im
############################################################
class BasePlotter(object):
def __init__(self,glon,glat,config,radius=1.0):
self.config = ugali.utils.config.Config(config)
self.coordsys = self.config['coords']['coordsys'].lower()
self.coord = 'G' if (self.coordsys=='gal') else 'C'
self.glon,self.glat = glon,glat
self.ra,self.dec = ugali.utils.projector.galToCel(self.glon,self.glat)
if self.coordsys=='gal': self.lon,self.lat = self.glon,self.glat
else: self.lon,self.lat = self.ra,self.dec
self.roi = ugali.observation.roi.ROI(self.config,self.lon,self.lat)
self.nside = self.config.params['coords']['nside_pixel']
self.radius = radius
xsize=800
reso = 60. * 2. * radius / xsize
self.image_kwargs = dict(ra=self.ra,dec=self.dec,radius=self.radius)
self.gnom_kwargs = dict(rot=[self.ra,self.dec],reso=reso,xsize=xsize,coord=self.coord,
return_projected_map=True,hold=True)
self.label_kwargs = dict(xy=(0.05,0.05),xycoords='axes fraction', xytext=(0, 0),
textcoords='offset points',ha='left', va='bottom',size=10,
bbox={'boxstyle':"round",'fc':'1'}, zorder=10)
def _create_catalog(self):
if hasattr(self,'catalog'): return
self.catalog = self.get_stars()
def get_objects(self,select=None):
config = copy.deepcopy(self.config)
config['catalog']['selection'] = select
#catalog = ugali.observation.catalog.Catalog(config,roi=self.roi)
catalog = ugali.analysis.loglike.createCatalog(config,roi=self.roi)
mask = ugali.analysis.loglike.createMask(config,roi=self.roi)
cut = mask.restrictCatalogToObservableSpace(catalog)
catalog = catalog.applyCut(cut)
sep = ugali.utils.projector.angsep(self.lon, self.lat, catalog.lon, catalog.lat)
radius = self.radius*np.sqrt(2)
cut = (sep < radius)
return catalog.applyCut(cut)
def get_stars(self,select=None):
if hasattr(self,'stars'): return self.stars
if select is None: select = self.config['catalog']['selection']
self.stars = self.get_objects(select)
return self.stars
def get_galaxies(self,select=None):
if hasattr(self,'galaxies'): return self.galaxies
if select is not None:
self.galaxies = self.get_objects(select)
else:
catalog = self.get_objects()
stars = self.get_stars()
cut = ~np.in1d(catalog.objid,stars.objid)
self.galaxies = catalog.applyCut(cut)
return self.galaxies
def get_likelihood(self,select=None):
nside = self.config.params['coords']['nside_merge']
pixel = ang2pix(nside, self.lon, self.lat)
pixels = np.append([pixel],hp.get_all_neighbours(nside,pixel))
filenames = []
for p in pixels:
f = self.config.mergefile%p
if os.path.exists(f): filenames.append(f)
return ugali.utils.healpix.merge_partial_maps(filenames,None)
def drawSmoothCatalog(self, catalog, label=None, **kwargs):
ax = plt.gca()
ra,dec = catalog.ra_dec
x, y = sphere2image(self.ra,self.dec,ra,dec)
delta_x = self.radius/100.
smoothing = 2*delta_x
bins = np.arange(-self.radius, self.radius + 1.e-10, delta_x)
h, xbins, ybins = np.histogram2d(x, y, bins=[bins, bins])
blur = nd.filters.gaussian_filter(h.T, smoothing / delta_x)
defaults = dict(cmap='gray_r',rasterized=True)
kwargs = dict(list(defaults.items())+list(kwargs.items()))
xx,yy = np.meshgrid(xbins,ybins)
im = drawProjImage(xx,yy,blur,coord='C',**kwargs)
if label:
plt.text(0.05, 0.95, label, fontsize=10, ha='left', va='top',
color='k', transform=plt.gca().transAxes,
bbox=dict(facecolor='white', alpha=1., edgecolor='none'))
def drawROI(self, ax=None, value=None, pixel=None):
if not ax: ax = plt.gca()
roi_map = np.array(hp.UNSEEN*np.ones(hp.nside2npix(self.nside)))
if value is None:
roi_map[self.roi.pixels] = 1
roi_map[self.roi.pixels_annulus] = 0
roi_map[self.roi.pixels_target] = 2
elif value is not None and pixel is None:
roi_map[self.pixels] = value
elif value is not None and pixel is not None:
roi_map[pixel] = value
else:
logger.warning('Unable to parse input')
#im = hp.gnomview(roi_map,**self.gnom_kwargs)
im = drawHealpixMap(roi_map,self.lon,self.lat,self.radius,coord=self.coord)
return im
def drawImage(self,ax=None,invert=True):
if not ax: ax = plt.gca()
if self.config['data']['survey']=='sdss':
# Optical Image
im = ugali.utils.plotting.getSDSSImage(**self.image_kwargs)
# Flipping JPEG:
# https://github.com/matplotlib/matplotlib/issues/101
im = im[::-1]
ax.annotate("SDSS Image",**self.label_kwargs)
else:
im = ugali.utils.plotting.getDSSImage(**self.image_kwargs)
im = im[::-1,::-1]
ax.annotate("DSS Image",**self.label_kwargs)
size=self.image_kwargs.get('radius',1.0)
# Celestial coordinates
x = np.linspace(-size,size,im.shape[0])
y = np.linspace(-size,size,im.shape[1])
xx, yy = np.meshgrid(x,y)
#kwargs = dict(cmap='gray',interpolation='none')
kwargs = dict(cmap='gray',coord='C')
im = drawProjImage(xx,yy,im,**kwargs)
try: plt.gcf().delaxes(ax.cax)
except AttributeError: pass
return im
def drawStellarDensity(self,ax=None,nside=None):
if not ax: ax = plt.gca()
if nside is None: nside = self.nside
# Stellar Catalog
self._create_catalog()
catalog = self.catalog
#catalog=ugali.observation.catalog.Catalog(self.config,roi=self.roi)
pix = ang2pix(nside, catalog.lon, catalog.lat)
counts = collections.Counter(pix)
pixels, number = np.array(sorted(counts.items())).T
star_map = hp.UNSEEN * np.ones(hp.nside2npix(nside))
star_map[pixels] = number
star_map[star_map == 0] = hp.UNSEEN
#im = hp.gnomview(star_map,**self.gnom_kwargs)
#hp.graticule(dpar=1,dmer=1,color='0.5',verbose=False)
#plt.close()
im = drawHealpixMap(star_map,self.lon,self.lat,self.radius,coord=self.coord)
#im = ax.imshow(im,origin='bottom')
try: ax.cax.colorbar(im)
except: plt.colorbar(im,ax=ax)
ax.annotate("Stars",**self.label_kwargs)
return im
def drawMask(self,ax=None, mask=None, mtype='maglim'):
""" Draw the maglim from the mask. """
if not ax: ax = plt.gca()
if mask is None:
mask = ugali.analysis.loglike.createMask(self.config,roi=self.roi)
mask_map = hp.UNSEEN*np.ones(hp.nside2npix(self.nside))
if mtype.lower() == 'maglim':
mask_map[mask.roi.pixels] = mask.mask_1.mask_roi_sparse
elif mtype.lower() == 'fracdet':
mask_map[mask.roi.pixels] = mask.mask_1.frac_roi_sparse
else:
raise Exception("Unrecognized type: %s"%mtype)
masked = (mask_map==hp.UNSEEN) | (mask_map==0)
mask_map = np.ma.array(mask_map,mask=masked,fill_value=np.nan)
im = drawHealpixMap(mask_map,self.lon,self.lat,self.radius,coord=self.coord)
try: cbar = ax.cax.colorbar(im)
except: cbar = plt.colorbar(im)
cbar.ax.set_xticklabels(cbar.ax.get_xticklabels(),rotation=90)
ax.annotate(mtype,**self.label_kwargs)
return im
def drawMaglim(self,ax=None, mask=None):
""" Draw the maglim from the mask. """
return self.drawMask(ax,mask,mtype='maglim')
def drawFracdet(self,ax=None, mask=None):
""" Draw the fracdet map from the mask. """
return self.drawMask(ax,mask,mtype='fracdet')
def drawTS(self,ax=None, filename=None, zidx=0):
if not ax: ax = plt.gca()
if filename:
data = fitsio.read(filename)
else:
data = self.get_likelihood()[1]
pixels = data['PIXEL']
values = 2*data['LOG_LIKELIHOOD']
if values.ndim == 1: values = values.reshape(-1,1)
ts_map = hp.UNSEEN * np.ones(hp.nside2npix(self.nside))
# Sum through all distance_moduli
#ts_map[pixels] = values.sum(axis=1)
# Just at maximum slice from object
ts_map[pixels] = values[:,zidx]
im = drawHealpixMap(ts_map,self.lon,self.lat,self.radius,coord=self.coord)
try: ax.cax.colorbar(im)
except: plt.colorbar(im)
ax.annotate("TS",**self.label_kwargs)
return im
def drawCatalog(self, ax=None):
if not ax: ax = plt.gca()
# Stellar Catalog
self._create_catalog()
hp.projscatter(self.catalog.lon,self.catalog.lat,c='k',marker='.',lonlat=True,coord=self.gnom_kwargs['coord'])
ax.annotate("Stars",**self.label_kwargs)
def drawSpatial(self, ax=None):
if not ax: ax = plt.gca()
# Stellar Catalog
self._create_catalog()
cut = (self.catalog.color > 0) & (self.catalog.color < 1)
catalog = self.catalog.applyCut(cut)
ax.scatter(catalog.lon,catalog.lat,c='k',marker='.',s=1)
ax.set_xlim(self.lon-0.5,self.lon+0.5)
ax.set_ylim(self.lat-0.5,self.lat+0.5)
if self.coordsys == 'gal':
ax.set_xlabel('GLON (deg)'); ax.set_ylabel('GLAT (deg)')
else:
ax.set_xlabel('RA (deg)'); ax.set_ylabel('DEC (deg)')
ax.invert_xaxis()
def drawCMD(self, ax=None, radius=None, zidx=None):
""" Draw color magnitude diagram with isochrone
Parameters:
ax : matplotlib axis
radius : selection radius
zidx : distance modulus index
Returns:
None
"""
if not ax: ax = plt.gca()
import ugali.isochrone
if zidx is not None:
distance_modulus = self.get_likelihood()[2][zidx]
iso = ugali.isochrone.Padova(age=12,z=0.0002,mod=distance_modulus)
#drawIsochrone(iso,ls='',marker='.',ms=1,c='k')
drawIsochrone(iso)
# Stellar Catalog
self._create_catalog()
if radius is not None:
sep = ugali.utils.projector.angsep(self.lon,self.lat,
self.catalog.lon,self.catalog.lat)
cut = (sep < radius)
catalog_cmd = self.catalog.applyCut(cut)
else:
catalog_cmd = self.catalog
ax.scatter(catalog_cmd.color, catalog_cmd.mag,color='b',marker='.',s=1)
ax.set_xlim(self.roi.bins_color[0],self.roi.bins_color[-1])
ax.set_ylim(self.roi.bins_mag[-1],self.roi.bins_mag[0])
ax.set_xlabel('Color (mag)')
ax.set_ylabel('Magnitude (mag)')
ax.annotate("Stars",**self.label_kwargs)
def drawMembership(self, ax=None, radius=None, zidx=0, mc_source_id=1):
if not ax: ax = plt.gca()
import ugali.analysis.scan
distance_modulus = self.get_likelihood()[2]
for ii, name in enumerate(self.config.params['isochrone']['infiles']):
logger.info('%s %s'%(ii, name))
isochrone = ugali.isochrone.Isochrone(self.config, name)
mag = isochrone.mag + distance_modulus
ax.scatter(isochrone.color,mag, color='0.5', s=800, zorder=0)
pix = ang2pix(self.nside, self.lon, self.lat)
likelihood_pix = ugali.utils.skymap.superpixel(pix,self.nside,self.config.params['coords']['nside_likelihood'])
config = self.config
scan = ugali.analysis.scan.Scan(self.config,likelihood_pix)
likelihood = scan.likelihood
distance_modulus_array = [self.config.params['scan']['distance_modulus_array'][zidx]]
likelihood.precomputeGridSearch(distance_modulus_array)
likelihood.gridSearch()
p = likelihood.membershipGridSearch()
sep = ugali.utils.projector.angsep(self.lon, self.lat, likelihood.catalog.lon, likelihood.catalog.lat)
radius = self.radius if radius is None else radius
cut = (sep < radius)
catalog = likelihood.catalog.applyCut(cut)
p = p[cut]
cut_mc_source_id = (catalog.mc_source_id == mc_source_id)
ax.scatter(catalog.color[cut_mc_source_id], catalog.mag[cut_mc_source_id], c='gray', s=100, edgecolors='none')
sc = ax.scatter(catalog.color, catalog.mag, c=p, edgecolors='none')
ax.set_xlim(likelihood.roi.bins_color[0], likelihood.roi.bins_color[-1])
ax.set_ylim(likelihood.roi.bins_mag[-1], likelihood.roi.bins_mag[0])
ax.set_xlabel('Color (mag)')
ax.set_ylabel('Magnitude (mag)')
try: ax.cax.colorbar(sc)
except: plt.colorbar(sc)
def plotDistance(self):
_,d,distances = self.get_likelihood()
pixels,values = d['PIXEL'],2*d['LOG_LIKELIHOOD']
if values.ndim == 1: values = values.reshape(-1,1)
if distances.ndim == 1: distances = distances.reshape(-1,1)
ts_map = hp.UNSEEN * np.ones(hp.nside2npix(self.nside))
ndim = len(distances)
nrows = int(np.sqrt(ndim))
ncols = ndim // nrows + (ndim%nrows > 0)
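        # Illustrative arithmetic: for ndim = 5 distance slices, nrows = int(sqrt(5)) = 2
        # and ncols = 5 // 2 + (5 % 2 > 0) = 2 + 1 = 3, i.e. a 2 x 3 grid.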
# Create the healpy images, but close the figures
images = []
for i,val in enumerate(values.T):
ts_map[pixels] = val
im = hp.gnomview(ts_map,**self.gnom_kwargs)
plt.close()
images.append(im)
data = np.array(images); mask = (data == hp.UNSEEN)
images = np.ma.array(data=data,mask=mask)
vmin = np.ma.min(images)
vmax = np.ma.max(images)
# Create the image grid
fig = plt.figure()
axes = AxesGrid(fig, 111, nrows_ncols = (nrows, ncols),
axes_pad=0,label_mode='1',
cbar_mode='single',cbar_pad=0,cbar_size='5%',
share_all=True,add_all=False)
for i,val in enumerate(values.T):
ax = axes[i]
#https://github.com/matplotlib/matplotlib/issues/9720/
im = ax.imshow(images[i].data,origin='bottom',vmin=vmin,vmax=vmax)
try:
ax.cax.colorbar(im)
except TypeError as e:
print(e)
#ax.annotate(r"$\mu = %g$"%distances[i],**self.label_kwargs)
ax.annotate(r"$d = %.0f$ kpc"%mod2dist(distances[i]),**self.label_kwargs)
ax.axis["left"].major_ticklabels.set_visible(False)
ax.axis["bottom"].major_ticklabels.set_visible(False)
fig.add_axes(ax)
fig.add_axes(ax.cax)
plt.draw()
return fig,axes
def plot3(self):
fig = plt.figure(figsize=(8,4))
axes = AxesGrid(fig, 111,nrows_ncols = (1, 3),axes_pad=0.1,
cbar_mode='each',cbar_pad=0,cbar_size='5%',
cbar_location='top',share_all=True)
for ax in axes:
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
self.drawImage(axes[0])
self.drawTS(axes[1])
#self.drawStellarDensity(axes[1])
self.drawMaglim(axes[2])
return fig,axes
def plot4(self):
fig = plt.figure(figsize=(10,8))
axes = AxesGrid(fig, 111,nrows_ncols=(2, 2), axes_pad=0.35,
cbar_mode='each',cbar_pad=0,cbar_size='5%',
share_all=True,aspect=True,
label_mode='L')
#fig,axes = plt.subplots(2,2)
#axes = axes.flatten()
#for ax in axes:
# ax.get_xaxis().set_visible(False)
# ax.get_yaxis().set_visible(False)
#plt.sca(axes[0]); self.drawImage(axes[0])
#plt.sca(axes[1]); self.drawStellarDensity(axes[1])
#plt.sca(axes[2]); self.drawMask(axes[2])
#plt.sca(axes[3]); self.drawTS(axes[3])
# To draw DSS image
#try: plt.sca(axes[0]); self.drawImage()
#except IOError as e: logger.warn(str(e))
plt.sca(axes[0]); self.drawFracdet()
plt.sca(axes[1]); self.drawStellarDensity()
plt.sca(axes[2]); self.drawMaglim()
try: plt.sca(axes[3]); self.drawTS()
except IOError as e: logger.warn(str(e))
axes[0].set_xlim(self.radius,-self.radius)
axes[0].set_ylim(-self.radius,self.radius)
plt.subplots_adjust(wspace=0.2)
return fig,axes
plot = plot3
class ObjectPlotter(BasePlotter):
""" For plotting 'Objects' identified through candidate search. """
def __init__(self,obj,config,radius=1.0):
self.obj = obj
glon,glat = self.obj['GLON'],self.obj['GLAT']
super(ObjectPlotter,self).__init__(glon,glat,config,radius)
self.set_zidx()
def set_zidx(self):
#names = [n.upper() for n in self.obj.array.dtype.names]
names = [n.upper() for n in self.obj.dtype.names]
mod = np.array(self.config['scan']['distance_modulus_array'])
if 'ZIDX_MAX' in names:
self.zidx = self.obj['ZIDX_MAX']
elif 'DISTANCE_MODULUS' in names:
dist_mod = self.obj['DISTANCE_MODULUS']
self.zidx = np.abs(mod - dist_mod).argmin()
elif 'MODULUS' in names:
dist_mod = self.obj['MODULUS']
self.zidx = np.abs(mod - dist_mod).argmin()
elif 'DISTANCE' in names:
dist_mod = mod2dist(self.obj['DISTANCE'])
self.zidx = np.argmax((mod - dist_mod) > 0)
else:
msg = "Failed to parse distance index"
raise Exception(msg)
def drawTS(self, ax=None, filename=None, zidx=None):
if zidx is None: zidx = self.zidx
super(ObjectPlotter,self).drawTS(ax,filename,zidx)
def drawCMD(self, ax=None, radius=None, zidx=None):
if zidx is None: zidx = self.zidx
super(ObjectPlotter,self).drawCMD(ax,radius,zidx)
def drawMembership(self, ax=None, radius=None, zidx=None, mc_source_id=1):
if zidx is None: zidx = self.zidx
super(ObjectPlotter,self).drawMembership(ax,radius,zidx,mc_source_id)
class SourcePlotter(BasePlotter):
""" For plotting 'Objects' identified through candidate search. """
# Expects GLON, GLAT...
def __init__(self,source,config,radius=1.0):
if Config(config)['coords']['coordsys'].lower()=='gal':
glon,glat = source.lon,source.lat
else:
glon,glat = cel2gal(source.lon,source.lat)
super(SourcePlotter,self).__init__(glon,glat,config,radius)
#self.select = self.config['catalog'].pop('selection')
self.source = source
self.isochrone = self.source.isochrone
self.kernel = self.source.kernel
self.set_zidx()
def isochrone_selection(self,catalog,dist=0.1):
# Cookie cutter
return cutIsochronePath(catalog.mag_1, catalog.mag_2,
catalog.mag_err_1, catalog.mag_err_2,
self.isochrone, radius=dist)
def set_zidx(self):
mod = np.array(self.config['scan']['distance_modulus_array'])
dist_mod = self.isochrone.distance_modulus
self.zidx = np.abs(mod - dist_mod).argmin()
def drawSmoothStars(self,**kwargs):
stars = self.get_stars()
sel = self.isochrone_selection(stars,dist=0.1)
self.drawSmoothCatalog(stars.applyCut(sel),'Filtered Stars',**kwargs)
def drawSmoothGalaxies(self,**kwargs):
galaxies = self.get_galaxies()
sel = self.isochrone_selection(galaxies,dist=0.1)
self.drawSmoothCatalog(galaxies.applyCut(sel),'Filtered Galaxies',**kwargs)
def drawHessDiagram(self,catalog=None):
ax = plt.gca()
if not catalog: catalog = self.get_stars()
r_peak = self.kernel.extension
angsep = ugali.utils.projector.angsep(self.ra, self.dec, catalog.ra, catalog.dec)
cut_inner = (angsep < r_peak)
cut_annulus = (angsep > 0.5) & (angsep < 1.) # deg
mmin, mmax = 16., 24.
cmin, cmax = -0.5, 1.0
mbins = np.linspace(mmin, mmax, 150)
cbins = np.linspace(cmin, cmax, 150)
color = catalog.color[cut_annulus]
mag = catalog.mag[cut_annulus]
h, xbins, ybins = np.histogram2d(color, mag, bins=[cbins,mbins])
blur = nd.filters.gaussian_filter(h.T, 2)
kwargs = dict(extent=[xbins.min(),xbins.max(),ybins.min(),ybins.max()],
cmap='gray_r', aspect='auto', origin='lower',
rasterized=True, interpolation='none')
ax.imshow(blur, **kwargs)
plt.scatter(catalog.color[cut_inner], catalog.mag[cut_inner],
c='red', s=7, edgecolor='none')# label=r'$r < %.2f$ deg'%(r_peak))
ugali.utils.plotting.drawIsochrone(self.isochrone, c='b', zorder=10)
ax.set_xlim(-0.5, 1.)
ax.set_ylim(24., 16.)
plt.xlabel(r'$%s - %s$' % (self.isochrone.band_1, self.isochrone.band_2))
plt.ylabel(r'$%s$' % self.isochrone.band_1)
plt.xticks([-0.5, 0., 0.5, 1.])
plt.yticks(np.arange(mmax - 1., mmin - 1., -1.))
radius_string = (r'${\rm r}<%.1f$ arcmin'%( 60 * r_peak))
plt.text(0.05, 0.95, radius_string,
fontsize=10, ha='left', va='top', color='red',
transform=plt.gca().transAxes,
bbox=dict(facecolor='white', alpha=1., edgecolor='none'))
def drawMembersSpatial(self,data):
ax = plt.gca()
if isstring(data):
filename = data
data = fitsio.read(filename)
xmin, xmax = -0.25,0.25
ymin, ymax = -0.25,0.25
xx,yy = np.meshgrid(np.linspace(xmin,xmax),np.linspace(ymin,ymax))
x_prob, y_prob = sphere2image(self.ra, self.dec, data['RA'], data['DEC'])
sel = (x_prob > xmin)&(x_prob < xmax) & (y_prob > ymin)&(y_prob < ymax)
sel_prob = data['PROB'][sel] > 5.e-2
index_sort = np.argsort(data['PROB'][sel][sel_prob])
plt.scatter(x_prob[sel][~sel_prob], y_prob[sel][~sel_prob],
marker='o', s=2, c='0.75', edgecolor='none')
sc = plt.scatter(x_prob[sel][sel_prob][index_sort],
y_prob[sel][sel_prob][index_sort],
c=data['PROB'][sel][sel_prob][index_sort],
marker='o', s=10, edgecolor='none', cmap='jet', vmin=0., vmax=1.) # Spectral_r
drawProjImage(xx,yy,None,coord='C')
#ax.set_xlim(xmax, xmin)
#ax.set_ylim(ymin, ymax)
#plt.xlabel(r'$\Delta \alpha_{2000}\,(\deg)$')
#plt.ylabel(r'$\Delta \delta_{2000}\,(\deg)$')
plt.xticks([-0.2, 0., 0.2])
plt.yticks([-0.2, 0., 0.2])
divider = make_axes_locatable(ax)
ax_cb = divider.new_horizontal(size="7%", pad=0.1)
plt.gcf().add_axes(ax_cb)
plt.colorbar(sc, cax=ax_cb, orientation='vertical', ticks=[0, 0.2, 0.4, 0.6, 0.8, 1.0], label='Membership Probability')
ax_cb.yaxis.tick_right()
def drawMembersCMD(self,data):
ax = plt.gca()
if isstring(data):
filename = data
data = fitsio.read(filename)
xmin, xmax = -0.25,0.25
ymin, ymax = -0.25,0.25
mmin, mmax = 16., 24.
cmin, cmax = -0.5, 1.0
mbins = np.linspace(mmin, mmax, 150)
cbins = np.linspace(cmin, cmax, 150)
mag_1 = data[self.config['catalog']['mag_1_field']]
mag_2 = data[self.config['catalog']['mag_2_field']]
x_prob, y_prob = sphere2image(self.ra, self.dec, data['RA'], data['DEC'])
sel = (x_prob > xmin)&(x_prob < xmax) & (y_prob > ymin)&(y_prob < ymax)
sel_prob = data['PROB'][sel] > 5.e-2
index_sort = np.argsort(data['PROB'][sel][sel_prob])
plt.scatter(data['COLOR'][sel][~sel_prob], mag_1[sel][~sel_prob],
marker='o',s=2,c='0.75',edgecolor='none')
sc = plt.scatter(data['COLOR'][sel][sel_prob][index_sort], mag_1[sel][sel_prob][index_sort],
c=data['PROB'][sel][sel_prob][index_sort],
marker='o', s=10, edgecolor='none', cmap='jet', vmin=0., vmax=1)
plt.xlim(cmin, cmax)
plt.ylim(mmax, mmin)
plt.xlabel(r'$%s - %s$' % (self.isochrone.band_1, self.isochrone.band_2))
plt.ylabel(r'$%s$' % self.isochrone.band_1)
#axes[1].yaxis.set_major_locator(MaxNLocator(prune='lower'))
plt.xticks([-0.5, 0., 0.5, 1.])
plt.yticks(np.arange(mmax - 1., mmin - 1., -1.))
ugali.utils.plotting.drawIsochrone(self.isochrone, c='k', zorder=10)
plt.text(0.05, 0.95, r'$\Sigma p_{i} = %i$'%(data['PROB'].sum()),
fontsize=10, horizontalalignment='left', verticalalignment='top', color='k', transform=plt.gca().transAxes,
bbox=dict(facecolor='white', alpha=1., edgecolor='none'))
divider = make_axes_locatable(plt.gca())
ax_cb = divider.new_horizontal(size="7%", pad=0.1)
plt.gcf().add_axes(ax_cb)
plt.colorbar(sc, cax=ax_cb, orientation='vertical', ticks=[0, 0.2, 0.4, 0.6, 0.8, 1.0], label='Membership Probability')
ax_cb.yaxis.tick_right()
def drawDensityProfile(self, catalog=None):
rmax = 24. # arcmin
bins = np.arange(0, rmax + 1.e-10, 2.)
centers = 0.5 * (bins[1:] + bins[0:-1])
area = np.pi * (bins[1:]**2 - bins[0:-1]**2)
r_peak = self.kernel.extension
stars = self.get_stars()
angsep = ugali.utils.projector.angsep(self.ra, self.dec,
stars.ra, stars.dec)
angsep_arcmin = angsep * 60 # arcmin
cut_iso = self.isochrone_selection(stars)
h = np.histogram(angsep_arcmin[(angsep_arcmin < rmax) & cut_iso], bins=bins)[0]
h_out = np.histogram(angsep_arcmin[(angsep_arcmin < rmax) & (~cut_iso)], bins=bins)[0]
gals = self.get_galaxies()
if len(gals):
angsep_gal = ugali.utils.projector.angsep(self.ra, self.dec,
gals.ra, gals.dec)
angsep_gal_arcmin = angsep_gal * 60 # arcmin
cut_iso_gal = self.isochrone_selection(gals)
h_gal = np.histogram(angsep_gal_arcmin[(angsep_gal_arcmin < rmax) & cut_iso_gal], bins=bins)[0]
h_gal_out = np.histogram(angsep_gal_arcmin[(angsep_gal_arcmin < rmax) & (~cut_iso_gal)], bins=bins)[0]
plt.plot(centers, h/area, c='red', label='Filtered Stars')
plt.errorbar(centers, h/area, yerr=(np.sqrt(h) / area), ecolor='red', c='red')
plt.scatter(centers, h/area, edgecolor='none', c='red', zorder=22)
plt.plot(centers, h_out/area, c='gray', label='Unfiltered Stars')
plt.errorbar(centers, h_out/area, yerr=(np.sqrt(h_out) / area), ecolor='gray', c='gray')
plt.scatter(centers, h_out/area, edgecolor='none', c='gray', zorder=21)
if len(gals):
plt.plot(centers, h_gal/area, c='black', label='Filtered Galaxies')
plt.errorbar(centers, h_gal/area, yerr=(np.sqrt(h_gal) / area), ecolor='black', c='black')
plt.scatter(centers, h_gal/area, edgecolor='none', c='black', zorder=20)
plt.xlabel('Angular Separation (arcmin)')
plt.ylabel(r'Density (arcmin$^{-2}$)')
plt.xlim(0., rmax)
ymax = plt.ylim()[1]
#plt.ylim(0, ymax)
plt.ylim(0, 4)
plt.legend(loc='upper right', frameon=False, fontsize=10)
def plot6(self, filename, title=None):
fig = plt.figure('summary', figsize=(11, 6))
fig.subplots_adjust(wspace=0.4, hspace=0.25)
fdg = r'{.}\!^\circ'
coordstring = ('%.2f, %.2f'%(self.ra, self.dec)).replace('.',fdg)
if title is None:
#title = r'%s; ($\alpha_{2000}$, $\delta_{2000}$, $m-M$) = (%s, %.2f)'%(self.source.name, coordstring, self.isochrone.distance_modulus)
title = r'$(\alpha_{2000}, \delta_{2000}, m-M) = (%s, %.1f)$'%(coordstring, self.isochrone.distance_modulus)
if title:
plt.suptitle(title, fontsize=14)
logger.debug("Drawing smooth stars...")
plt.subplot(2, 3, 1)
self.drawSmoothStars()
logger.debug("Drawing density profile...")
plt.subplot(2, 3, 2)
self.drawDensityProfile()
logger.debug("Drawing spatial distribution of members...")
plt.subplot(2, 3, 3)
self.drawMembersSpatial(filename)
logger.debug("Drawing smooth galaxies...")
plt.subplot(2, 3, 4)
self.drawSmoothGalaxies()
logger.debug("Drawing Hess diagram...")
plt.subplot(2,3,5)
self.drawHessDiagram()
logger.debug("Drawing CMD of members...")
plt.subplot(2, 3, 6)
self.drawMembersCMD(filename)
def plot_candidates(candidates, config, ts_min=50, outdir='./'):
for candidate in candidates:
if candidate['TS'] < ts_min: continue
logger.info("Plotting %s (%.2f,%.2f)..."%(candidate['name'],candidate['glon'],candidate['glat']))
plotter = ugali.utils.plotting.ObjectPlotter(candidate,config)
fig,ax = plotter.plot4()
basename = '%s_plot.png'%candidate['name']
outfile = os.path.join(outdir,basename)
plt.savefig(outfile)
###################################################
def draw_slices(hist, func=np.sum, **kwargs):
""" Draw horizontal and vertical slices through histogram """
from mpl_toolkits.axes_grid1 import make_axes_locatable
kwargs.setdefault('ls','-')
ax = plt.gca()
data = hist
# Slices
vslice = func(data,axis=0)
hslice = func(data,axis=1)
npix = np.array(data.shape)
#xlim,ylim = plt.array(zip([0,0],npix-1))
xlim = ax.get_xlim()
ylim = ax.get_ylim()
#extent = ax.get_extent()
#xlim =extent[:2]
#ylim = extent[2:]
# Bin centers
xbin = np.linspace(xlim[0],xlim[1],len(vslice))#+0.5
ybin = np.linspace(ylim[0],ylim[1],len(hslice))#+0.5
divider = make_axes_locatable(ax)
#gh2 = pywcsgrid2.GridHelperSimple(wcs=self.header, axis_nums=[2, 1])
hax = divider.append_axes("right", size=1.2, pad=0.05,sharey=ax,
axes_class=axes_divider.LocatableAxes)
hax.axis["left"].toggle(label=False, ticklabels=False)
#hax.plot(hslice, plt.arange(*ylim)+0.5,'-') # Bin center
hax.plot(hslice, ybin, **kwargs) # Bin center
hax.xaxis.set_major_locator(MaxNLocator(4,prune='both'))
hax.set_ylim(*ylim)
#gh1 = pywcsgrid2.GridHelperSimple(wcs=self.header, axis_nums=[0, 2])
vax = divider.append_axes("top", size=1.2, pad=0.05, sharex=ax,
axes_class=axes_divider.LocatableAxes)
vax.axis["bottom"].toggle(label=False, ticklabels=False)
vax.plot(xbin, vslice, **kwargs)
vax.yaxis.set_major_locator(MaxNLocator(4,prune='lower'))
vax.set_xlim(*xlim)
return vax,hax
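# Usage sketch (illustrative; assumes a 2D array is already displayed on the
# current axes, e.g. via plt.imshow, so that the axis limits are set):
#   img = np.random.poisson(5., size=(100, 100))
#   plt.imshow(img, origin='lower')
#   vax, hax = draw_slices(img, func=np.sum, color='k')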
def draw_sum_slices(hist, **kwargs):
return draw_slices(hist,func=np.sum, **kwargs)
def draw_max_slices(hist, **kwargs):
return draw_slices(hist,func=np.max, **kwargs)
def plotKernel(kernel):
fig = plt.figure()
axes = AxesGrid(fig, 111, nrows_ncols = (1,1),
cbar_mode='none',cbar_pad=0,cbar_size='5%',
cbar_location='top', share_all=True)
    plt.sca(axes[0])
    drawKernel(kernel)
def drawKernelHist(ax, kernel):
ext = kernel.extension
theta = kernel.theta
lon, lat = kernel.lon, kernel.lat
xmin,xmax = -5*ext,5*ext
    ymin,ymax = -5*ext,5*ext
x = np.linspace(xmin,xmax,100)+kernel.lon
y = np.linspace(ymin,ymax,100)+kernel.lat
xx,yy = np.meshgrid(x,y)
zz = kernel.pdf(xx,yy)
im = ax.imshow(zz)#,extent=[xmin,xmax,ymin,ymax])
    plt.sca(ax)
    vax,hax = draw_slices(zz,color='k')
mc_lon,mc_lat = kernel.sample(1e5)
hist,xedges,yedges = np.histogram2d(mc_lon,mc_lat,bins=[len(x),len(y)],
range=[[x.min(),x.max()],[y.min(),y.max()]])
xbins,ybins = np.arange(hist.shape[0])+0.5,np.arange(hist.shape[1])+0.5
vzz = zz.sum(axis=0)
hzz = zz.sum(axis=1)
vmc = hist.sum(axis=0)
hmc = hist.sum(axis=1)
vscale = vzz.max()/vmc.max()
hscale = hzz.max()/hmc.max()
kwargs = dict(marker='.',ls='',color='r')
hax.errorbar(hmc*hscale, ybins, xerr=np.sqrt(hmc)*hscale,**kwargs)
vax.errorbar(xbins, vmc*vscale,yerr=np.sqrt(vmc)*vscale,**kwargs)
ax.set_ylim(0,len(y))
ax.set_xlim(0,len(x))
#try: ax.cax.colorbar(im)
#except: plt.colorbar(im)
#a0 = np.array([0.,0.])
#a1 =kernel.a*np.array([np.sin(np.deg2rad(theta)),-np.cos(np.deg2rad(theta))])
#ax.plot([a0[0],a1[0]],[a0[1],a1[1]],'-ob')
#
#b0 = np.array([0.,0.])
#b1 =kernel.b*np.array([np.cos(np.radians(theta)),np.sin(np.radians(theta))])
#ax.plot([b0[0],b1[0]],[b0[1],b1[1]],'-or')
label_kwargs = dict(xy=(0.05,0.05),xycoords='axes fraction', xytext=(0, 0),
textcoords='offset points',ha='left', va='bottom',size=10,
bbox={'boxstyle':"round",'fc':'1'}, zorder=10)
norm = zz.sum() * (x[1]-x[0])**2
ax.annotate("Sum = %.2f"%norm,**label_kwargs)
#ax.set_xlabel(r'$\Delta$ LON (deg)')
#ax.set_ylabel(r'$\Delta$ LAT (deg)')
###################################################
def plotMembership(config, data=None, kernel=None, isochrone=None, **kwargs):
from mpl_toolkits.axes_grid1 import make_axes_locatable
config = ugali.utils.config.Config(config)
if isstring(data):
data,header = fitsio.read(data,header=True)
defaults = dict(s=20,edgecolor='none',vmin=0,vmax=1,zorder=3)
kwargs = dict(list(defaults.items())+list(kwargs.items()))
bkg_kwargs = dict(s=3,zorder=0,c='0.70')
bkg_kwargs = dict(list(kwargs.items())+list(bkg_kwargs.items()))
try:
sort = np.argsort(data['PROB'])
prob = data['PROB'][sort]
except:
        sort = np.arange(len(data['RA']))
        prob = np.zeros(len(data['RA']))+1
lon,lat = data['RA'][sort],data['DEC'][sort]
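    # Project the coordinates about the median position, recenter on the
    # midpoint of the projected bounding box, and re-project about the
    # recomputed center.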
lon0,lat0 = np.median(lon),np.median(lat)
x,y = sphere2image(lon0,lat0,lon,lat)
lon0,lat0 = image2sphere(lon0,lat0,(x.max()+x.min())/2.,(y.max()+y.min())/2.)
lon,lat = sphere2image(lon0,lat0,lon,lat)
color = data['COLOR'][sort]
cut = (prob > 0.01)
# ADW: Sometimes may be mag_2
mag = data[config['catalog']['mag_1_field']][sort]
mag_err_1 = data[config['catalog']['mag_err_1_field']][sort]
mag_err_2 = data[config['catalog']['mag_err_2_field']][sort]
fig,axes = plt.subplots(1,2,figsize=(10,5))
#proj = ugali.utils.projector.Projector(np.median(lon),np.median(lat))
#x,y = proj.sphereToImage(lon,lat)
#sc = axes[0].scatter(x,y,c=prob,vmin=0,vmax=1)
axes[0].scatter(lon[~cut],lat[~cut],**bkg_kwargs)
axes[0].scatter(lon[cut],lat[cut],c=prob[cut],**kwargs)
#if kernel is not None:
# plt.sca(axes[0])
# k = copy.deepcopy(kernel)
# levels=[0,k._pdf(k.extension),np.inf]
# k.lon,k.lat = cel2gal(0,0)
# drawKernel(k,contour=True,linewidths=2,zorder=0,levels=levels)
#axes[0].set_xlim(lon0-0.4,lon0+0.4)
#axes[0].set_ylim(lat0-0.4,lat0+0.4)
#axes[0].set_xlabel('RA (deg)')
#axes[0].set_ylabel('DEC (deg)')
axes[0].set_xlim(lon.min(),lon.max())
axes[0].set_ylim(lat.min(),lat.max())
axes[0].set_ylabel(r'$\Delta$ DEC (deg)')
axes[0].set_xlabel(r'$\Delta$ RA (deg)')
axes[0].xaxis.set_major_locator(MaxNLocator(4))
axes[0].yaxis.set_major_locator(MaxNLocator(4))
axes[0].invert_xaxis()
axes[1].errorbar(color[cut],mag[cut],yerr=mag_err_1[cut],fmt='.',c='k',zorder=0.5)
axes[1].scatter(color[~cut],mag[~cut],**bkg_kwargs)
sc = axes[1].scatter(color[cut],mag[cut],c=prob[cut],**kwargs)
if isochrone is not None:
plt.sca(axes[1])
drawIsochrone(isochrone,cookie=False)
axes[1].set_xlabel(r'$%s - %s$' % (config['catalog']['mag_1_band'], config['catalog']['mag_2_band']))
axes[1].set_ylabel(r'$%s$' % config['catalog']['mag_1_band'])
axes[1].set_ylim(config['mag']['max'],config['mag']['min'])
axes[1].set_xlim(config['color']['min'],config['color']['max'])
axes[1].xaxis.set_major_locator(MaxNLocator(4))
try:
divider = make_axes_locatable(axes[1])
#ax_cb = divider.new_vertical(size="5%", pad=0.05)
ax_cb = divider.new_horizontal(size="7%", pad=0.1)
fig.add_axes(ax_cb)
plt.colorbar(sc, cax=ax_cb, orientation='vertical')
ax_cb.yaxis.tick_right()
except:
logger.warning("No colorbar")
return fig,axes
def drawIsochrone(isochrone, **kwargs):
ax = plt.gca()
logger.debug(str(isochrone))
if kwargs.pop('cookie',None):
# Broad cookie cutter
defaults = dict(alpha=0.5, color='0.5', zorder=0,
linewidth=15, linestyle='-')
else:
# Thin lines
defaults = dict(color='k', linestyle='-')
kwargs = dict(list(defaults.items())+list(kwargs.items()))
isos = isochrone.isochrones if hasattr(isochrone,'isochrones') else [isochrone]
for iso in isos:
iso = copy.deepcopy(iso)
logger.debug(iso.filename)
iso.hb_spread = False
mass_init,mass_pdf,mass_act,mag_1,mag_2 = iso.sample(mass_steps=1e3)
mag = mag_1 + isochrone.distance_modulus
color = mag_1 - mag_2
# Find discontinuities in the color magnitude distributions
dmag = np.fabs(mag[1:]-mag[:-1])
dcolor = np.fabs(color[1:]-color[:-1])
idx = np.where( (dmag>1.0) | (dcolor>0.25))[0]
# +1 to map from difference array to original array
mags = np.split(mag,idx+1)
colors = np.split(color,idx+1)
for i,(c,m) in enumerate(zip(colors,mags)):
msg = '%-4i (%g,%g) -- (%g,%g)'%(i,m[0],c[0],m[-1],c[-1])
logger.debug(msg)
if i > 0:
kwargs['label'] = None
ax.plot(c,m,**kwargs)
return ax
def drawKernel(kernel, contour=False, coords='C', **kwargs):
ax = plt.gca()
if 'colors' not in kwargs:
kwargs.setdefault('cmap',matplotlib.cm.jet)
kwargs.setdefault('origin','lower')
ext = kernel.extension
theta = kernel.theta
xmin,xmax = -kernel.edge,kernel.edge
ymin,ymax = -kernel.edge,kernel.edge
if coords[-1] == 'G':
lon, lat = kernel.lon, kernel.lat
elif coords[-1] == 'C':
lon,lat = gal2cel(kernel.lon, kernel.lat)
else:
msg = 'Unrecognized coordinate: %s'%coords
raise Exception(msg)
x = np.linspace(xmin,xmax,500)+lon
y = np.linspace(ymin,ymax,500)+lat
xx,yy = np.meshgrid(x,y)
extent = [x[0],x[-1],y[0],y[-1]]
kwargs.setdefault('extent',extent)
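    # The kernel PDF is defined in Galactic coordinates, so a grid built in
    # celestial coordinates is converted back with cel2gal before evaluation.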
if coords[-1] == 'C': xx,yy = cel2gal(xx,yy)
zz = kernel.pdf(xx.flat,yy.flat).reshape(xx.shape)
zmax = zz.max()
if contour:
levels = kwargs.pop('levels',10)
#levels = np.logspace(np.log10(zmax)-1,np.log10(zmax),7)
ret = ax.contour(zz,levels,**kwargs)
else:
val = np.ma.array(zz,mask=zz<zz.max()/100.)
ret = ax.imshow(val,**kwargs)
return ret
###################################################
def drawChernoff(ax,ts,bands='smooth',pdf=False,color='r'):
from scipy.stats import chi2
logger.debug("Drawing %i simulations..."%len(ts))
    x = np.linspace(0.1,50,5000)
bins = np.linspace(-1e-2,50,501)
centers = (bins[1:]+bins[:-1])/2.
ax.set_xscale('linear')
ax.set_yscale('log',nonposy='clip')
dof = 1
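    # Under the null hypothesis, with the signal strength bounded at zero, the
    # TS is asymptotically distributed as an equal mixture of a delta function
    # at zero and a chi-square with 1 dof (Chernoff 1954); hence the factor of
    # 1/2 applied to the chi-square curves below.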
patches,labels = [],[]
label = r"$\chi^2_{1} / 2$"
kwargs = dict(label=label, lw=2, c='k',dashes=(5,2))
clip_ts = np.where(ts<1e-4, 0, ts)
if not pdf:
ax.plot(x,(1-chi2.cdf(x,dof))/2.,**kwargs)
#fudge = 1/1.4
#ax.plot(x,(1-chi2.cdf(x,dof))/2.*fudge,**kwargs)
# Histogram is normalized so first bin = 1
n,b,p = ax.hist(clip_ts,cumulative=-1,bins=bins,normed=True,log=True,histtype='step',color=color)
else:
num,b = np.histogram(clip_ts,bins=bins)
c = (b[1:]+b[:-1])/2.
norm = float(num.sum()*(b[1]-b[0]))
n = num/norm
ax.plot(x,(chi2.pdf(x,dof))/2.,**kwargs)
err = np.sqrt(num)/norm
yerr = [np.where(err>=n,0.9999*n,err),err]
# Histogram is normalized so n = num/(len(x)*dbin)
ax.errorbar(c,n,yerr=yerr,fmt='_',color=color,zorder=0)
n,b,p = ax.hist(clip_ts,bins=bins,normed=True,log=True,color=color)
idx = np.argmax(n==0)
n = n[1:idx]; b=b[1:idx+1]
ax.set_xlim([0,np.ceil(ts.max())])
ax.set_ylim([10**np.floor(np.log10(n.min())),1])
if bands != 'none':
if bands == 'smooth':
xvals = np.hstack([b[0],((b[1:]+b[:-1])/2.),b[-1]])
yvals = np.hstack([n[0],n,n[-1]])
elif bands == 'sharp':
xvals = np.repeat(b,2)[1:-1]
yvals = np.repeat(n,2)
else:
msg = 'Unrecognized band type: %s'%bands
raise Exception(msg)
# Bands...
err = np.sqrt(yvals/float(len(ts)))
y_hi = np.clip(yvals+err,1e-32,np.inf)
y_lo = np.clip(yvals-err,1e-32,np.inf)
#cut = (y_lo > 0)
        kwargs = dict(color='r', alpha=0.5, zorder=0.5)
#ax.fill_between(c[cut], y_lo[cut], y_hi[cut], **kwargs)
ax.fill_between(xvals, y_lo, y_hi, **kwargs)
ax.add_patch(plt.Rectangle((0,0),0,0, **kwargs)) # Legend
#ax.annotate(r"$N=%i$"%len(ts), xy=(0.15,0.85), xycoords='axes fraction',
# bbox={'boxstyle':"round",'fc':'1'})
ax.set_xlabel('TS')
ax.set_ylabel('PDF' if pdf else 'CDF')
def plotChernoff(ts,bands='smooth',pdf=False):
fig,ax = plt.subplots(1,1)
drawChernoff(ax,ts,bands,pdf)
def plot_chain(chain,burn=None,clip=None):
#import triangle
import corner
from ugali.analysis.mcmc import Samples
samples = Samples(chain)
names = samples.names
results = samples.results(clip=clip,burn=burn)
truths = [results[n][0] for n in names]
data = samples[burn:].view((float,len(names)))
fig = corner.corner(data, labels=names, truths=truths)
return fig
###################################################
def drawSkymapCatalog(ax,lon,lat,**kwargs):
mapping = {
'ait':'aitoff',
'mol':'mollweide',
'lam':'lambert',
'ham':'hammer'
}
kwargs.setdefault('proj','aitoff')
kwargs.setdefault('s',2)
kwargs.setdefault('marker','.')
kwargs.setdefault('c','k')
proj = kwargs.pop('proj')
projection = mapping.get(proj,proj)
#ax.grid()
# Convert from
# [0. < lon < 360] -> [-pi < lon < pi]
# [-90 < lat < 90] -> [-pi/2 < lat < pi/2]
lon,lat= np.radians([lon-360.*(lon>180),lat])
ax.scatter(lon,lat,**kwargs)
def plotSkymap(skymap, proj='mol', **kwargs):
kwargs.setdefault('xsize',1000)
if proj.upper() == 'MOL':
im = hp.mollview(skymap,**kwargs)
elif proj.upper() == 'CAR':
im = hp.cartview(skymap,**kwargs)
return im
def plotTriangle(srcfile,samples,burn=0,**kwargs):
#import triangle
import corner
import ugali.analysis.source
import ugali.analysis.mcmc
#matplotlib.rcParams.update({'text.usetex': True})
source = ugali.analysis.source.Source()
source.load(srcfile,section='source')
params = source.get_params()
results = yaml.load(open(srcfile))['results']
samples = ugali.analysis.mcmc.Samples(samples)
names = samples.names
labels = names
truths = [params[n] for n in names]
chain = samples.get(burn=burn,clip=5)
### Triangle plot
#extents = [[0,15e3],[323.6,323.8],[-59.8,-59.7],[0,0.1],[19.5,20.5]]
kwargs.setdefault('range',None)
kwargs.setdefault('plot_contours',True)
kwargs.setdefault('plot_datapoints',True)
kwargs.setdefault('verbose',False)
kwargs.setdefault('quantiles',[0.16,0.84])
if len(names) > 1:
fig = corner.corner(chain,labels=labels,truths=truths,**kwargs)
else:
fig = plt.figure()
plt.hist(chain,bins=100)
plt.xlabel(names[0])
try:
text = 'RA,DEC = (%.2f,%.2f)\n'%(results['ra'][0],results['dec'][0])
text += '(m-M,D) = (%.1f, %.0f kpc)\n'%(results['distance_modulus'][0],results['distance'][0])
text += r'$r_h$ = %.1f arcmin'%(results['extension_arcmin'][0])+'\n'
text += 'TS = %.1f\n'%results['ts'][0]
text += 'NSamples = %i\n'%(len(chain))
#plt.figtext(0.65,0.90,text,ha='left',va='top')
except KeyError as e:
logger.warning(str(e))
pass
label = list(map(str.capitalize,source.name.split('_')))
label[-1] = label[-1].upper()
title = '%s'%' '.join(label)
plt.suptitle(title)
############################################################
def makePath(x_path, y_path, epsilon=1.e-10):
"""
Create closed path.
"""
x_path_closed = np.concatenate([x_path, x_path[::-1]])
y_path_closed = np.concatenate([y_path, epsilon + y_path[::-1]])
path = matplotlib.path.Path(list(zip(x_path_closed, y_path_closed)))
return path
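# Usage sketch (illustrative): because the returned path doubles back on itself
# with only an epsilon offset, Path.contains_points(..., radius=r) effectively
# selects points within ~r of the original curve (as done for the horizontal
# branch in cutIsochronePath below).
#   xs = np.linspace(0., 1., 50)
#   curve = makePath(xs, xs**2)
#   near = curve.contains_points([(0.5, 0.25), (0.5, 0.8)], radius=0.05)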
############################################################
def cutIsochronePath(g, r, g_err, r_err, isochrone, radius=0.1, return_all=False):
"""
Cut to identify objects within isochrone cookie-cutter.
ADW: This should be moved into the isochrone class.
"""
import scipy.interpolate
from ugali.isochrone import CompositeIsochrone
if isinstance(isochrone, CompositeIsochrone):
isochrone = isochrone.isochrones[0]
if len(g) == 0:
return np.array([],dtype=bool)
try:
if np.all(isochrone.stage == 'Main'):
# Dotter case
index_transition = len(isochrone.stage)
else:
# Other cases
index_transition = np.nonzero(isochrone.stage > 3)[0][0] + 1
except AttributeError:
index_transition = 1
mag_1_rgb = isochrone.mag_1[0: index_transition] + isochrone.distance_modulus
mag_2_rgb = isochrone.mag_2[0: index_transition] + isochrone.distance_modulus
mag_1_rgb = mag_1_rgb[::-1]
mag_2_rgb = mag_2_rgb[::-1]
# Cut one way...
f_isochrone = scipy.interpolate.interp1d(mag_2_rgb, mag_1_rgb - mag_2_rgb, bounds_error=False, fill_value = 999.)
color_diff = np.fabs((g - r) - f_isochrone(r))
cut_2 = (color_diff < np.sqrt(0.1**2 + r_err**2 + g_err**2))
# ...and now the other
f_isochrone = scipy.interpolate.interp1d(mag_1_rgb, mag_1_rgb - mag_2_rgb, bounds_error=False, fill_value = 999.)
color_diff = np.fabs((g - r) - f_isochrone(g))
cut_1 = (color_diff < np.sqrt(0.1**2 + r_err**2 + g_err**2))
cut = np.logical_or(cut_1, cut_2)
# Include horizontal branch if it exists
    if np.any(isochrone.stage == isochrone.hb_stage):
index_transition = np.nonzero(isochrone.stage==isochrone.hb_stage)[0][0]+1
mag_1_hb = isochrone.mag_1[index_transition:] + isochrone.distance_modulus
mag_2_hb = isochrone.mag_2[index_transition:] + isochrone.distance_modulus
path_hb = makePath(mag_1_hb, mag_2_hb)
cut_hb = path_hb.contains_points(list(zip(g, r)), radius=0.1)
logger.debug('Applying HB selection')
logger.debug(np.sum(cut))
cut = np.logical_or(cut, cut_hb)
logger.debug(np.sum(cut))
mag_bins = np.arange(16., 24.1, 0.1)
mag_centers = 0.5 * (mag_bins[1:] + mag_bins[0:-1])
magerr = np.tile(0., len(mag_centers))
for ii in range(0, len(mag_bins) - 1):
cut_mag_bin = (g > mag_bins[ii]) & (g < mag_bins[ii + 1])
magerr[ii] = np.median(np.sqrt(0.1**2 + r_err[cut_mag_bin]**2 + g_err[cut_mag_bin]**2))
if return_all:
return cut, mag_centers[f_isochrone(mag_centers) < 100], (f_isochrone(mag_centers) + magerr)[f_isochrone(mag_centers) < 100], (f_isochrone(mag_centers) - magerr)[f_isochrone(mag_centers) < 100]
else:
return cut
############################################################
| 36.902408
| 201
| 0.586018
|
288d0e694f21a4992a0c4f5f5d094fba80417cab
| 17,637
|
py
|
Python
|
tensorflow/python/data/kernel_tests/from_generator_test.py
|
xuzhezhaozhao/tensorflow
|
282828af67de29d13dd2c69d96413c030b02543c
|
[
"Apache-2.0"
] | 1
|
2020-03-25T01:18:44.000Z
|
2020-03-25T01:18:44.000Z
|
tensorflow/python/data/kernel_tests/from_generator_test.py
|
xuzhezhaozhao/tensorflow
|
282828af67de29d13dd2c69d96413c030b02543c
|
[
"Apache-2.0"
] | 2
|
2020-03-21T20:23:54.000Z
|
2020-03-21T20:25:05.000Z
|
tensorflow/python/data/kernel_tests/from_generator_test.py
|
xuzhezhaozhao/tensorflow
|
282828af67de29d13dd2c69d96413c030b02543c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.data.Dataset.from_generator()."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import test
class FromGeneratorTest(test_base.DatasetTestBase, parameterized.TestCase):
def _testFromGenerator(self, generator, elem_sequence, num_repeats,
requires_initialization):
dataset = dataset_ops.Dataset.from_generator(
generator, output_types=dtypes.int64).repeat(num_repeats).prefetch(5)
self.assertDatasetProduces(
dataset,
elem_sequence * num_repeats,
requires_initialization=requires_initialization,
num_test_iterations=2)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
num_repeats=[1, 5], requires_initialization=[True, False])))
def testFromGeneratorUsingFn(self, num_repeats, requires_initialization):
def generator():
for i in range(1, 100):
yield [i] * i
elem_sequence = list(generator())
self._testFromGenerator(
generator,
elem_sequence,
num_repeats=num_repeats,
requires_initialization=requires_initialization)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
num_repeats=[1, 5], requires_initialization=[True, False])))
def testFromGeneratorUsingList(self, num_repeats, requires_initialization):
generator = lambda: [[i] * i for i in range(1, 100)]
elem_sequence = list(generator())
self._testFromGenerator(
generator,
elem_sequence,
num_repeats=num_repeats,
requires_initialization=requires_initialization)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
num_repeats=[1, 5], requires_initialization=[True, False])))
def testFromGeneratorUsingNdarray(self, num_repeats, requires_initialization):
generator = lambda: np.arange(100, dtype=np.int64)
elem_sequence = list(generator())
self._testFromGenerator(
generator,
elem_sequence,
num_repeats=num_repeats,
requires_initialization=requires_initialization)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
num_repeats=[1, 5], requires_initialization=[True, False])))
def testFromGeneratorUsingGeneratorExpression(self, num_repeats,
requires_initialization):
# NOTE(mrry): Generator *expressions* are not repeatable (or in general
# reusable), because they eagerly evaluate the `for` expression as
# `iter(range(1, 100))` and discard the means of reconstructing
# `range(1, 100)`. Wrapping the generator expression in a `lambda` makes
# it repeatable.
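    # Illustrative contrast (not exercised by this test):
    #   gen_expr = ([i] * i for i in range(1, 100))        # single pass only
    #   gen_fn = lambda: ([i] * i for i in range(1, 100))  # fresh iterator per call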
generator = lambda: ([i] * i for i in range(1, 100))
elem_sequence = list(generator())
self._testFromGenerator(
generator,
elem_sequence,
num_repeats=num_repeats,
requires_initialization=requires_initialization)
@combinations.generate(test_base.default_test_combinations())
def testFromMultipleConcurrentGenerators(self):
num_inner_repeats = 5
num_outer_repeats = 100
def generator():
for i in range(1, 10):
yield ([i] * i, [i, i ** 2, i ** 3])
input_list = list(generator())
# The interleave transformation is essentially a flat map that
# draws from multiple input datasets concurrently (in a cyclic
# fashion). By placing `Dataset.from_generator()` inside an
# interleave, we test its behavior when multiple iterators are
# active at the same time; by additionally prefetching inside the
# interleave, we create the possibility of parallel (modulo GIL)
# invocations to several iterators created by the same dataset.
def interleave_fn(_):
return (dataset_ops.Dataset.from_generator(
generator, output_types=(dtypes.int64, dtypes.int64),
output_shapes=([None], [3]))
.repeat(num_inner_repeats).prefetch(5))
dataset = dataset_ops.Dataset.range(num_outer_repeats).interleave(
interleave_fn, cycle_length=10, block_length=len(input_list))
get_next = self.getNext(dataset)
for _ in range(num_inner_repeats * num_outer_repeats):
for elem in input_list:
val0, val1 = self.evaluate(get_next())
self.assertAllEqual(elem[0], val0)
self.assertAllEqual(elem[1], val1)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# TODO(b/67868766): Reenable this when the source of flakiness is discovered.
def _testFromGeneratorsRunningInParallel(self):
num_parallel_iterators = 3
# Define shared state that multiple iterator instances will access to
# demonstrate their concurrent activity.
lock = threading.Lock()
condition = threading.Condition(lock)
next_ticket = [0] # GUARDED_BY(lock)
def generator():
# NOTE(mrry): We yield one element before the barrier, because
# the current implementation of `Dataset.interleave()` must
# fetch one element from each incoming dataset to start the
# prefetching.
yield 0
# Define a barrier that `num_parallel_iterators` iterators must enter
# before any can proceed. Demonstrates that multiple iterators may be
# active at the same time.
condition.acquire()
ticket = next_ticket[0]
next_ticket[0] += 1
if ticket == num_parallel_iterators - 1:
# The last iterator to join the barrier notifies the others.
condition.notify_all()
else:
# Wait until the last iterator enters the barrier.
while next_ticket[0] < num_parallel_iterators:
condition.wait()
condition.release()
yield 1
# As in `testFromMultipleConcurrentGenerators()`, we use a combination of
# `Dataset.interleave()` and `Dataset.prefetch()` to cause multiple
# iterators to be active concurrently.
def interleave_fn(_):
return dataset_ops.Dataset.from_generator(
generator, output_types=dtypes.int64, output_shapes=[]).prefetch(2)
dataset = dataset_ops.Dataset.range(num_parallel_iterators).interleave(
interleave_fn, cycle_length=num_parallel_iterators, block_length=1)
get_next = self.getNext(dataset)
for elem in [0, 1]:
for _ in range(num_parallel_iterators):
self.assertAllEqual(elem, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(test_base.default_test_combinations())
def testFromGeneratorImplicitConversion(self):
def generator():
yield [1]
yield [2]
yield [3]
for dtype in [dtypes.int8, dtypes.int32, dtypes.int64]:
dataset = dataset_ops.Dataset.from_generator(
generator, output_types=dtype, output_shapes=[1])
get_next = self.getNext(dataset)
for expected in [[1], [2], [3]]:
next_val = self.evaluate(get_next())
self.assertEqual(dtype.as_numpy_dtype, next_val.dtype)
self.assertAllEqual(expected, next_val)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(test_base.default_test_combinations())
def testFromGeneratorString(self):
def generator():
yield "foo"
yield b"bar"
yield u"baz"
dataset = dataset_ops.Dataset.from_generator(
generator, output_types=dtypes.string, output_shapes=[])
self.assertDatasetProduces(
dataset, expected_output=[b"foo", b"bar", b"baz"])
@combinations.generate(test_base.default_test_combinations())
def testFromGeneratorTypeError(self):
def generator():
yield np.array([1, 2, 3], dtype=np.int64)
yield np.array([4, 5, 6], dtype=np.int64)
yield "ERROR"
yield np.array([7, 8, 9], dtype=np.int64)
dataset = dataset_ops.Dataset.from_generator(
generator, output_types=dtypes.int64, output_shapes=[3])
get_next = self.getNext(dataset)
self.assertAllEqual([1, 2, 3], self.evaluate(get_next()))
self.assertAllEqual([4, 5, 6], self.evaluate(get_next()))
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
self.assertAllEqual([7, 8, 9], self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(test_base.default_test_combinations())
def testFromGeneratorShapeError(self):
def generator():
yield np.array([1, 2, 3], dtype=np.int64)
yield np.array([4, 5, 6], dtype=np.int64)
yield np.array([7, 8, 9, 10], dtype=np.int64)
yield np.array([11, 12, 13], dtype=np.int64)
dataset = dataset_ops.Dataset.from_generator(
generator, output_types=dtypes.int64, output_shapes=[3])
get_next = self.getNext(dataset)
self.assertAllEqual([1, 2, 3], self.evaluate(get_next()))
self.assertAllEqual([4, 5, 6], self.evaluate(get_next()))
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
self.assertAllEqual([11, 12, 13], self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(test_base.default_test_combinations())
def testFromGeneratorStructureError(self):
def generator():
yield 1, 2
yield 3, 4
yield 5
yield 6, 7, 8
yield 9, 10
dataset = dataset_ops.Dataset.from_generator(
generator, output_types=(dtypes.int64, dtypes.int64))
get_next = self.getNext(dataset)
self.assertEqual((1, 2), self.evaluate(get_next()))
self.assertEqual((3, 4), self.evaluate(get_next()))
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
self.assertEqual((9, 10), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(test_base.default_test_combinations())
def testFromGeneratorHeterogeneous(self):
def generator():
yield 1
yield [2, 3]
dataset = dataset_ops.Dataset.from_generator(
generator, output_types=dtypes.int64)
self.assertDatasetProduces(dataset, expected_output=[1, [2, 3]])
@combinations.generate(test_base.default_test_combinations())
def testFromGeneratorStopShort(self):
def generator():
yield 0
yield 1
yield 2
dataset = dataset_ops.Dataset.from_generator(
generator, output_types=dtypes.int64)
get_next = self.getNext(dataset)
self.assertAllEqual(0, self.evaluate(get_next()))
self.assertAllEqual(1, self.evaluate(get_next()))
@combinations.generate(test_base.default_test_combinations())
def testFromGeneratorDestructorCalled(self):
# Use an `Event` to signal that the generator has been deleted.
event = threading.Event()
class GeneratorWrapper(object):
def __iter__(self):
return self
def next(self):
return self.__next__()
def __next__(self):
return 42
def __del__(self):
event.set()
dataset = dataset_ops.Dataset.from_generator(
GeneratorWrapper, output_types=dtypes.int64).take(2)
get_next = self.getNext(dataset)
self.assertAllEqual(42, self.evaluate(get_next()))
self.assertAllEqual(42, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `GeneratorWrapper` object is destroyed when the
# iterator terminates (and the generator iterator is deleted).
self.assertTrue(event.is_set())
@combinations.generate(test_base.default_test_combinations())
def testFromGeneratorWithArgs(self):
def flat_map_fn(elem):
def generator_with_arg(n):
for _ in range(n):
yield np.array(n, dtype=np.int64)
return dataset_ops.Dataset.from_generator(
generator_with_arg, output_types=dtypes.int64, output_shapes=(),
args=(elem,))
dataset = dataset_ops.Dataset.range(5).flat_map(flat_map_fn)
self.assertDatasetProduces(
dataset, expected_output=[1, 2, 2, 3, 3, 3, 4, 4, 4, 4])
@combinations.generate(test_base.default_test_combinations())
def testFromGeneratorWithTwoArgs(self):
def flat_map_fn(elem, message):
def generator_with_arg(n, msg):
for i in range(n):
yield i, msg
return dataset_ops.Dataset.from_generator(
generator_with_arg, output_types=(dtypes.int64, dtypes.string),
output_shapes=((), ()), args=(elem, message))
dataset = dataset_ops.Dataset.zip(
(dataset_ops.Dataset.range(5),
dataset_ops.Dataset.from_tensors("Hi!").repeat(None)
)).flat_map(flat_map_fn)
self.assertDatasetProduces(
dataset,
expected_output=[(0, b"Hi!"), (0, b"Hi!"), (1, b"Hi!"), (0, b"Hi!"),
(1, b"Hi!"), (2, b"Hi!"), (0, b"Hi!"), (1, b"Hi!"),
(2, b"Hi!"), (3, b"Hi!")])
@combinations.generate(test_base.default_test_combinations())
def testGeneratorDatasetFinalizeFunctionCalled(self):
# NOTE(mrry): This test tests the internal `_GeneratorDataset`,
# which affords more control over what the finalize function can do than
# the `Dataset.from_generator()` wrapper.
# Use an `Event` to signal that the generator has been deleted.
event = threading.Event()
def finalize_fn(_):
def finalize_py_func():
event.set()
return 0
return script_ops.py_func(finalize_py_func, [], [dtypes.int64],
stateful=True)
dummy = constant_op.constant(37)
dataset = dataset_ops._GeneratorDataset(
dummy, lambda x: x, lambda x: x, finalize_fn,
tensor_spec.TensorSpec((), dtypes.int32))
dataset = dataset.take(2)
get_next = self.getNext(dataset)
self.assertAllEqual(37, self.evaluate(get_next()))
self.assertAllEqual(37, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(test_base.default_test_combinations())
def testSharedName(self):
def generator():
for _ in range(10):
yield [20]
dataset = dataset_ops.Dataset.from_generator(
generator, output_types=(dtypes.int64))
get_next = self.getNext(
dataset, requires_initialization=True, shared_name="shared_dataset")
self.assertAllEqual([20], self.evaluate(get_next()))
@combinations.generate(test_base.default_test_combinations())
def testFromGeneratorRaggedTensor(self):
def generator():
yield ragged_factory_ops.constant([[1, 2], [3]],
dtype=dtypes.int64,
ragged_rank=1)
dataset = dataset_ops.Dataset.from_generator(
generator,
output_signature=ragged_tensor.RaggedTensorSpec(
shape=(2, None), dtype=dtypes.int64))
get_next = self.getNext(dataset)
ret = get_next()
self.assertIsInstance(ret, ragged_tensor.RaggedTensor)
self.assertAllEqual([1, 2, 3], ret.values)
@combinations.generate(test_base.default_test_combinations())
def testFromGeneratorSparseTensor(self):
def generator():
yield sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]],
values=constant_op.constant([1, 2], dtype=dtypes.int64),
dense_shape=[3, 4])
dataset = dataset_ops.Dataset.from_generator(
generator,
output_signature=sparse_tensor.SparseTensorSpec([3, 4], dtypes.int64))
get_next = self.getNext(dataset)
ret = get_next()
self.assertIsInstance(ret, sparse_tensor.SparseTensor)
self.assertAllEqual([[1, 0, 0, 0], [0, 0, 2, 0], [0, 0, 0, 0]],
sparse_ops.sparse_tensor_to_dense(ret))
if __name__ == "__main__":
test.main()
| 36.66736
| 80
| 0.688212
|
eff830e824fc5425b224ac3c81a82be04ee6f22d
| 1,047
|
py
|
Python
|
beamit/resources/signup.py
|
ksweta/BeamIt-Server
|
0678bab9fce6427c5af45c85e24d851ccd5fbdfb
|
[
"Apache-2.0"
] | null | null | null |
beamit/resources/signup.py
|
ksweta/BeamIt-Server
|
0678bab9fce6427c5af45c85e24d851ccd5fbdfb
|
[
"Apache-2.0"
] | null | null | null |
beamit/resources/signup.py
|
ksweta/BeamIt-Server
|
0678bab9fce6427c5af45c85e24d851ccd5fbdfb
|
[
"Apache-2.0"
] | null | null | null |
from beamit.resources.base import Resource
class SignupRequest(Resource):
MEDIA_TYPE = 'application/vnd.beamit.signup.request+json'
def __init__(self, email, password):
self.email = email
self.password = password
def __repr__(self):
return "<SignupRequest email: {}, password: {}>".format(
self.email,
self.password,
)
def to_dict(self):
return dict(email=self.email, password=self.password)
@classmethod
def from_dict(cls, dct):
return cls(
email=dct.get("email"),
password=dct.get("password"),
)
class SignupResponse(Resource):
MEDIA_TYPE = 'application/vnd.beamit.signup.response+json'
def __init__(self, user_id):
self.user_id = user_id
def __repr__(self):
return "<SignupResponse user_id: {}>".format(self.user_id)
def to_dict(self):
return dict(user_id=self.user_id)
@classmethod
def from_dict(cls, dct):
return cls(user_id=dct.get("user_id"))
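# Round-trip sketch (illustrative values):
#   req = SignupRequest(email="user@example.com", password="secret")
#   assert SignupRequest.from_dict(req.to_dict()).email == req.email
#   resp = SignupResponse(user_id=42)
#   assert SignupResponse.from_dict(resp.to_dict()).user_id == 42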
| 23.795455
| 66
| 0.624642
|
bd91533376a2bd7230bfda81126a33d879bed4f2
| 1,992
|
py
|
Python
|
tests/common/test_run/sigmoid_run.py
|
KnowingNothing/akg-test
|
114d8626b824b9a31af50a482afc07ab7121862b
|
[
"Apache-2.0"
] | null | null | null |
tests/common/test_run/sigmoid_run.py
|
KnowingNothing/akg-test
|
114d8626b824b9a31af50a482afc07ab7121862b
|
[
"Apache-2.0"
] | null | null | null |
tests/common/test_run/sigmoid_run.py
|
KnowingNothing/akg-test
|
114d8626b824b9a31af50a482afc07ab7121862b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from akg.utils import kernel_exec as utils
from tests.common.test_op import sigmoid
from tests.common.tensorio import compare_tensor
from tests.common.gen_random import random_gaussian
def sigmoid_run(shape, dtype, kernel_name, attrs):
input_shape = [shape]
input_dtype = [dtype]
if 'tuning' in attrs.keys():
t = attrs.get("tuning", False)
kernel_name = attrs.get("kernel_name", False)
mod = utils.op_build_test(sigmoid.sigmoid, input_shape, input_dtype, attrs=attrs,
kernel_name=kernel_name,
tuning=t)
if t:
expect, input, output = gen_data(dtype, shape)
return mod, expect, (input, output)
else:
return mod
else:
expect, input, output = gen_data(dtype, shape)
mod = utils.op_build_test(sigmoid.sigmoid, input_shape, input_dtype, kernel_name=kernel_name, attrs=attrs)
accOutput = utils.mod_launch(mod, (input, output), expect=expect)
return input, accOutput, expect, compare_tensor(accOutput, expect, rtol=5e-03, equal_nan=True)
def gen_data(dtype, shape):
input = random_gaussian(shape, miu=0, sigma=0.5).astype(dtype)
expect = 1. / (1. + np.exp(-input))
output = np.full(shape, np.nan, dtype)
return expect, input, output
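# Sanity check for the reference output: sigmoid(0) = 1 / (1 + exp(-0)) = 0.5;
# the kernel result is compared against this expectation with rtol=5e-03 above.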
| 41.5
| 115
| 0.670683
|
23ea30f38330d86ee376b5d2fbea7c49c138da7a
| 34,957
|
py
|
Python
|
src/AeroelasticSE/CaseLibrary.py
|
WISDEM/AeroelasticSE
|
486e89ce129f97796fd9b64e2c3562102287403d
|
[
"Apache-2.0"
] | 7
|
2015-01-19T18:22:32.000Z
|
2022-02-26T10:04:16.000Z
|
src/AeroelasticSE/CaseLibrary.py
|
WISDEM/AeroelasticSE
|
486e89ce129f97796fd9b64e2c3562102287403d
|
[
"Apache-2.0"
] | 3
|
2016-07-25T19:23:54.000Z
|
2018-09-24T16:09:26.000Z
|
src/AeroelasticSE/CaseLibrary.py
|
WISDEM/AeroelasticSE
|
486e89ce129f97796fd9b64e2c3562102287403d
|
[
"Apache-2.0"
] | 15
|
2015-03-03T17:38:40.000Z
|
2021-04-19T18:39:45.000Z
|
import os
import numpy as np
from AeroelasticSE.CaseGen_General import CaseGen_General
from AeroelasticSE.CaseGen_IEC import CaseGen_IEC
# def power_curve_fit(fst_vt, runDir, namebase, TMax, turbine_class, turbulence_class, Vrated, U_init=[], Omega_init=[], pitch_init=[], Turbsim_exe='', ptfm_U_init=[], ptfm_pitch_init=[], ptfm_surge_init=[], ptfm_heave_init=[], metocean_U_init=[], metocean_Hs_init=[], metocean_Tp_init=[]):
# # Default Runtime
# T = 240.
# TStart = 120.
# # T = 120.
# # TStart = 60.
# # Overwrite for testing
# if TMax < T:
# T = TMax
# TStart = 0.
# # Run conditions for points which will be used for a cubic polynomial fit
# # U = [10.]
# U = [4.,8.,9.,10.]
# omega = np.interp(U, U_init, Omega_init)
# pitch = np.interp(U, U_init, pitch_init)
# # Check if floating
# floating_dof = [fst_vt['ElastoDyn']['PtfmSgDOF'], fst_vt['ElastoDyn']['PtfmSwDOF'], fst_vt['ElastoDyn']['PtfmHvDOF'], fst_vt['ElastoDyn']['PtfmRDOF'], fst_vt['ElastoDyn']['PtfmPDOF'], fst_vt['ElastoDyn']['PtfmYDOF']]
# if any(floating_dof):
# floating = True
# if ptfm_U_init == []:
# ptfm_U_init = [4., 5., 6., 7., 8., 9., 10., 10.5, 11., 12., 14., 19., 24.]
# ptfm_surge_init = [3.8758245863838807, 5.57895688031965, 7.619719770801395, 9.974666446553552, 12.675469235464321, 16.173740623041965, 20.069526574594757, 22.141906121375552, 23.835466098954708, 22.976075549477354, 17.742743260748373, 14.464576583154068, 14.430969814391759]
# ptfm_heave_init = [0.030777174904620515, 0.008329930604820483, -0.022973502300090893, -0.06506947653943342, -0.12101317451310406, -0.20589689839069836, -0.3169518280533253, -0.3831692055885472, -0.4409624802614755, -0.41411738171337675, -0.2375323506471747, -0.1156867221814119, -0.07029955933167854]
# ptfm_pitch_init = [0.7519976895165884, 1.104483050851386, 1.5180416334025146, 1.9864587671004394, 2.5152769741130134, 3.1937704945765795, 3.951314212429935, 4.357929703098016, 4.693765745171944, 4.568760630312074, 3.495057478277534, 2.779958240049992, 2.69008798174216]
# if metocean_U_init == []:
# metocean_U_init = [4.00, 6.00, 8.00, 10.00, 12.00, 14.00, 16.00, 18.00, 20.00, 22.00, 24.00]
# metocean_Hs_init = [1.908567568, 1.960162595, 2.062722244, 2.224539415, 2.489931091, 2.802984019, 3.182301485, 3.652236101, 4.182596165, 4.695439504, 5.422289377]
# metocean_Tp_init = [12.23645701, 12.14497777, 11.90254947, 11.5196666, 11.05403739, 10.65483551, 10.27562225, 10.13693777, 10.27842325, 10.11660396, 10.96177917]
# ptfm_heave = np.interp(U, ptfm_U_init, ptfm_heave_init)
# ptfm_surge = np.interp(U, ptfm_U_init, ptfm_surge_init)
# ptfm_pitch = np.interp(U, ptfm_U_init, ptfm_pitch_init)
# metocean_Hs = np.interp(U, metocean_U_init, metocean_Hs_init)
# metocean_Tp = np.interp(U, metocean_U_init, metocean_Tp_init)
# else:
# floating = False
# case_inputs = {}
# # simulation settings
# # case_inputs[("ElastoDyn","PtfmSgDOF")] = {'vals':['False'], 'group':0}
# # case_inputs[("ElastoDyn","PtfmHvDOF")] = {'vals':['False'], 'group':0}
# # case_inputs[("ElastoDyn","PtfmPDOF")] = {'vals':['False'], 'group':0}
# case_inputs[("ElastoDyn","PtfmSwDOF")] = {'vals':['False'], 'group':0}
# case_inputs[("ElastoDyn","PtfmRDOF")] = {'vals':['False'], 'group':0}
# case_inputs[("ElastoDyn","PtfmYDOF")] = {'vals':['False'], 'group':0}
# case_inputs[("Fst","TMax")] = {'vals':[T], 'group':0}
# case_inputs[("Fst","TStart")] = {'vals':[TStart], 'group':0}
# case_inputs[("ElastoDyn","YawDOF")] = {'vals':['True'], 'group':0}
# case_inputs[("ElastoDyn","FlapDOF1")] = {'vals':['True'], 'group':0}
# case_inputs[("ElastoDyn","FlapDOF2")] = {'vals':['True'], 'group':0}
# case_inputs[("ElastoDyn","EdgeDOF")] = {'vals':['True'], 'group':0}
# case_inputs[("ElastoDyn","DrTrDOF")] = {'vals':['False'], 'group':0}
# case_inputs[("ElastoDyn","GenDOF")] = {'vals':['True'], 'group':0}
# case_inputs[("ElastoDyn","TwFADOF1")] = {'vals':['False'], 'group':0}
# case_inputs[("ElastoDyn","TwFADOF2")] = {'vals':['False'], 'group':0}
# case_inputs[("ElastoDyn","TwSSDOF1")] = {'vals':['False'], 'group':0}
# case_inputs[("ElastoDyn","TwSSDOF2")] = {'vals':['False'], 'group':0}
# case_inputs[("ServoDyn","PCMode")] = {'vals':[5], 'group':0}
# case_inputs[("ServoDyn","VSContrl")] = {'vals':[5], 'group':0}
# case_inputs[("ServoDyn","YCMode")] = {'vals':[5], 'group':0}
# case_inputs[("AeroDyn15","WakeMod")] = {'vals':[1], 'group':0}
# case_inputs[("AeroDyn15","AFAeroMod")] = {'vals':[2], 'group':0}
# case_inputs[("AeroDyn15","TwrPotent")] = {'vals':[0], 'group':0}
# case_inputs[("AeroDyn15","TwrShadow")] = {'vals':['False'], 'group':0}
# case_inputs[("AeroDyn15","TwrAero")] = {'vals':['False'], 'group':0}
# case_inputs[("AeroDyn15","SkewMod")] = {'vals':[1], 'group':0}
# case_inputs[("AeroDyn15","TipLoss")] = {'vals':['True'], 'group':0}
# case_inputs[("AeroDyn15","HubLoss")] = {'vals':['True'], 'group':0}
# case_inputs[("AeroDyn15","TanInd")] = {'vals':['True'], 'group':0}
# case_inputs[("AeroDyn15","AIDrag")] = {'vals':['True'], 'group':0}
# case_inputs[("AeroDyn15","TIDrag")] = {'vals':['True'], 'group':0}
# case_inputs[("AeroDyn15","IndToler")] = {'vals':[1.e-5], 'group':0}
# case_inputs[("AeroDyn15","MaxIter")] = {'vals':[5000], 'group':0}
# case_inputs[("AeroDyn15","UseBlCm")] = {'vals':['True'], 'group':0}
# # inital conditions
# case_inputs[("InflowWind","WindType")] = {'vals':[1], 'group':0}
# case_inputs[("InflowWind","HWindSpeed")] = {'vals':U, 'group':1}
# case_inputs[("ElastoDyn","RotSpeed")] = {'vals':omega, 'group':1}
# case_inputs[("ElastoDyn","BlPitch1")] = {'vals':pitch, 'group':1}
# case_inputs[("ElastoDyn","BlPitch2")] = case_inputs[("ElastoDyn","BlPitch1")]
# case_inputs[("ElastoDyn","BlPitch3")] = case_inputs[("ElastoDyn","BlPitch1")]
# if floating == True:
# case_inputs[("ElastoDyn","PtfmSurge")] = {'vals':ptfm_surge, 'group':1}
# case_inputs[("ElastoDyn","PtfmHeave")] = {'vals':ptfm_heave, 'group':1}
# case_inputs[("ElastoDyn","PtfmPitch")] = {'vals':ptfm_pitch, 'group':1}
# case_inputs[("HydroDyn","WaveHs")] = {'vals':metocean_Hs, 'group':1}
# case_inputs[("HydroDyn","WaveTp")] = {'vals':metocean_Tp, 'group':1}
# case_inputs[("HydroDyn","RdtnDT")] = {'vals':[fst_vt["Fst"]["DT"]], 'group':0}
# case_inputs[("HydroDyn","WaveMod")] = {'vals':[1], 'group':0}
# from CaseGen_General import CaseGen_General
# case_list, case_name_list = CaseGen_General(case_inputs, dir_matrix=runDir, namebase=namebase)
# channels = ['Wind1VelX','GenPwr']
# return case_list, case_name_list, channels
def power_curve(fst_vt, runDir, namebase, TMax, turbine_class, turbulence_class, Vrated, U_init=[], Omega_init=[], pitch_init=[], Turbsim_exe='', ptfm_U_init=[], ptfm_pitch_init=[], ptfm_surge_init=[], ptfm_heave_init=[], metocean_U_init=[], metocean_Hs_init=[], metocean_Tp_init=[], V_R25=0.):
# Default Runtime
T = 360.
TStart = 120.
# T = 120.
# TStart = 60.
# Overwrite for testing
if TMax < T:
T = TMax
TStart = 0.
# Run conditions
U_all = list(sorted([4., 6., 8., 9., 10., 10.5, 11., 11.5, 11.75, 12., 12.5, 13., 14., 19., 25., Vrated]))
if V_R25 != 0.:
U_all.append(V_R25)
U_all = list(sorted(U_all))
U = [Vi for Vi in U_all if Vi <= Vrated]
# print(U)
# dt = [0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001]
dt = [0.01]*len(U)
# U = [4.,8.,9.,10.]
omega = np.interp(U, U_init, Omega_init)
pitch = np.interp(U, U_init, pitch_init)
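    # Reset any interpolated positive pitch back to zero while the rotor is
    # still below its maximum speed; interpolation near rated would otherwise
    # feather the blades early (see the loop below).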
for i, (omegai, pitchi) in enumerate(zip(omega, pitch)):
if pitchi > 0. and omegai < Omega_init[-1]:
pitch[i] = 0.
# Check if floating
floating_dof = [fst_vt['ElastoDyn']['PtfmSgDOF'], fst_vt['ElastoDyn']['PtfmSwDOF'], fst_vt['ElastoDyn']['PtfmHvDOF'], fst_vt['ElastoDyn']['PtfmRDOF'], fst_vt['ElastoDyn']['PtfmPDOF'], fst_vt['ElastoDyn']['PtfmYDOF']]
if any(floating_dof):
floating = True
if ptfm_U_init == []:
ptfm_U_init = [3., 5., 6., 7., 8., 9., 10., 10.5, 11., 12., 14., 19., 25.]
ptfm_surge_init = [3.8758245863838807, 5.57895688031965, 7.619719770801395, 9.974666446553552, 12.675469235464321, 16.173740623041965, 20.069526574594757, 22.141906121375552, 23.835466098954708, 22.976075549477354, 17.742743260748373, 14.464576583154068, 14.430969814391759]
ptfm_heave_init = [0.030777174904620515, 0.008329930604820483, -0.022973502300090893, -0.06506947653943342, -0.12101317451310406, -0.20589689839069836, -0.3169518280533253, -0.3831692055885472, -0.4409624802614755, -0.41411738171337675, -0.2375323506471747, -0.1156867221814119, -0.07029955933167854]
ptfm_pitch_init = [0.7519976895165884, 1.104483050851386, 1.5180416334025146, 1.9864587671004394, 2.5152769741130134, 3.1937704945765795, 3.951314212429935, 4.357929703098016, 4.693765745171944, 4.568760630312074, 3.495057478277534, 2.779958240049992, 2.69008798174216]
if metocean_U_init == []:
metocean_U_init = [3.00, 6.00, 8.00, 10.00, 12.00, 14.00, 16.00, 18.00, 20.00, 22.00, 25.00]
metocean_Hs_init = [1.908567568, 1.960162595, 2.062722244, 2.224539415, 2.489931091, 2.802984019, 3.182301485, 3.652236101, 4.182596165, 4.695439504, 5.422289377]
metocean_Tp_init = [12.23645701, 12.14497777, 11.90254947, 11.5196666, 11.05403739, 10.65483551, 10.27562225, 10.13693777, 10.27842325, 10.11660396, 10.96177917]
ptfm_heave = np.interp(U, ptfm_U_init, ptfm_heave_init)
ptfm_surge = np.interp(U, ptfm_U_init, ptfm_surge_init)
ptfm_pitch = np.interp(U, ptfm_U_init, ptfm_pitch_init)
metocean_Hs = np.interp(U, metocean_U_init, metocean_Hs_init)
metocean_Tp = np.interp(U, metocean_U_init, metocean_Tp_init)
else:
floating = False
case_inputs = {}
# simulation settings
# case_inputs[("ElastoDyn","PtfmSgDOF")] = {'vals':['False'], 'group':0}
# case_inputs[("ElastoDyn","PtfmHvDOF")] = {'vals':['False'], 'group':0}
# case_inputs[("ElastoDyn","PtfmPDOF")] = {'vals':['False'], 'group':0}
case_inputs[("ElastoDyn","PtfmSwDOF")] = {'vals':['False'], 'group':0}
case_inputs[("ElastoDyn","PtfmRDOF")] = {'vals':['False'], 'group':0}
case_inputs[("ElastoDyn","PtfmYDOF")] = {'vals':['False'], 'group':0}
case_inputs[("Fst","TMax")] = {'vals':[T], 'group':0}
case_inputs[("Fst","TStart")] = {'vals':[TStart], 'group':0}
case_inputs[("Fst","DT")] = {'vals':dt, 'group':1}
case_inputs[("ElastoDyn","YawDOF")] = {'vals':['True'], 'group':0}
case_inputs[("ElastoDyn","FlapDOF1")] = {'vals':['False'], 'group':0}
case_inputs[("ElastoDyn","FlapDOF2")] = {'vals':['False'], 'group':0}
case_inputs[("ElastoDyn","EdgeDOF")] = {'vals':['False'], 'group':0}
case_inputs[("ElastoDyn","DrTrDOF")] = {'vals':['False'], 'group':0}
case_inputs[("ElastoDyn","GenDOF")] = {'vals':['True'], 'group':0}
case_inputs[("ElastoDyn","TwFADOF1")] = {'vals':['False'], 'group':0}
case_inputs[("ElastoDyn","TwFADOF2")] = {'vals':['False'], 'group':0}
case_inputs[("ElastoDyn","TwSSDOF1")] = {'vals':['False'], 'group':0}
case_inputs[("ElastoDyn","TwSSDOF2")] = {'vals':['False'], 'group':0}
case_inputs[("ServoDyn","PCMode")] = {'vals':[5], 'group':0}
case_inputs[("ServoDyn","VSContrl")] = {'vals':[5], 'group':0}
case_inputs[("ServoDyn","YCMode")] = {'vals':[5], 'group':0}
case_inputs[("AeroDyn15","WakeMod")] = {'vals':[1], 'group':0}
case_inputs[("AeroDyn15","AFAeroMod")] = {'vals':[2], 'group':0}
case_inputs[("AeroDyn15","TwrPotent")] = {'vals':[0], 'group':0}
case_inputs[("AeroDyn15","TwrShadow")] = {'vals':['False'], 'group':0}
case_inputs[("AeroDyn15","TwrAero")] = {'vals':['False'], 'group':0}
case_inputs[("AeroDyn15","SkewMod")] = {'vals':[1], 'group':0}
case_inputs[("AeroDyn15","TipLoss")] = {'vals':['True'], 'group':0}
case_inputs[("AeroDyn15","HubLoss")] = {'vals':['True'], 'group':0}
case_inputs[("AeroDyn15","TanInd")] = {'vals':['True'], 'group':0}
case_inputs[("AeroDyn15","AIDrag")] = {'vals':['True'], 'group':0}
case_inputs[("AeroDyn15","TIDrag")] = {'vals':['True'], 'group':0}
case_inputs[("AeroDyn15","IndToler")] = {'vals':[1.e-5], 'group':0}
case_inputs[("AeroDyn15","MaxIter")] = {'vals':[5000], 'group':0}
case_inputs[("AeroDyn15","UseBlCm")] = {'vals':['True'], 'group':0}
    # initial conditions
case_inputs[("InflowWind","WindType")] = {'vals':[1], 'group':0}
case_inputs[("InflowWind","HWindSpeed")] = {'vals':U, 'group':1}
case_inputs[("ElastoDyn","RotSpeed")] = {'vals':omega, 'group':1}
case_inputs[("ElastoDyn","BlPitch1")] = {'vals':pitch, 'group':1}
case_inputs[("ElastoDyn","BlPitch2")] = case_inputs[("ElastoDyn","BlPitch1")]
case_inputs[("ElastoDyn","BlPitch3")] = case_inputs[("ElastoDyn","BlPitch1")]
if floating == True:
case_inputs[("ElastoDyn","PtfmSurge")] = {'vals':ptfm_surge, 'group':1}
case_inputs[("ElastoDyn","PtfmHeave")] = {'vals':ptfm_heave, 'group':1}
case_inputs[("ElastoDyn","PtfmPitch")] = {'vals':ptfm_pitch, 'group':1}
case_inputs[("HydroDyn","WaveHs")] = {'vals':metocean_Hs, 'group':1}
case_inputs[("HydroDyn","WaveTp")] = {'vals':metocean_Tp, 'group':1}
case_inputs[("HydroDyn","RdtnDT")] = {'vals':dt, 'group':1}
case_inputs[("HydroDyn","WaveMod")] = {'vals':[1], 'group':0}
from AeroelasticSE.CaseGen_General import CaseGen_General
case_list, case_name_list = CaseGen_General(case_inputs, dir_matrix=runDir, namebase=namebase)
channels = ['Wind1VelX','GenPwr',"RtAeroCp", "RotTorq", "RotThrust", "RotSpeed", "BldPitch1"]
return case_list, case_name_list, channels
def RotorSE_rated(fst_vt, runDir, namebase, TMax, turbine_class, turbulence_class, Vrated, U_init=[], Omega_init=[], pitch_init=[], Turbsim_exe='', ptfm_U_init=[], ptfm_pitch_init=[], ptfm_surge_init=[], ptfm_heave_init=[], metocean_U_init=[], metocean_Hs_init=[], metocean_Tp_init=[]):
# Default Runtime
T = 240.
TStart = 120.
# dt = 0.001
dt = 0.01
# Overwrite for testing
if TMax < T:
T = TMax
TStart = 0.
omega = np.interp(Vrated, U_init, Omega_init)
pitch = np.interp(Vrated, U_init, pitch_init)
# Check if floating
floating_dof = [fst_vt['ElastoDyn']['PtfmSgDOF'], fst_vt['ElastoDyn']['PtfmSwDOF'], fst_vt['ElastoDyn']['PtfmHvDOF'], fst_vt['ElastoDyn']['PtfmRDOF'], fst_vt['ElastoDyn']['PtfmPDOF'], fst_vt['ElastoDyn']['PtfmYDOF']]
if any(floating_dof):
floating = True
if ptfm_U_init == []:
ptfm_U_init = [4., 5., 6., 7., 8., 9., 10., 10.5, 11., 12., 14., 19., 24.]
ptfm_surge_init = [3.8758245863838807, 5.57895688031965, 7.619719770801395, 9.974666446553552, 12.675469235464321, 16.173740623041965, 20.069526574594757, 22.141906121375552, 23.835466098954708, 22.976075549477354, 17.742743260748373, 14.464576583154068, 14.430969814391759]
ptfm_heave_init = [0.030777174904620515, 0.008329930604820483, -0.022973502300090893, -0.06506947653943342, -0.12101317451310406, -0.20589689839069836, -0.3169518280533253, -0.3831692055885472, -0.4409624802614755, -0.41411738171337675, -0.2375323506471747, -0.1156867221814119, -0.07029955933167854]
ptfm_pitch_init = [0.7519976895165884, 1.104483050851386, 1.5180416334025146, 1.9864587671004394, 2.5152769741130134, 3.1937704945765795, 3.951314212429935, 4.357929703098016, 4.693765745171944, 4.568760630312074, 3.495057478277534, 2.779958240049992, 2.69008798174216]
if metocean_U_init == []:
metocean_U_init = [4.00, 6.00, 8.00, 10.00, 12.00, 14.00, 16.00, 18.00, 20.00, 22.00, 24.00]
metocean_Hs_init = [1.908567568, 1.960162595, 2.062722244, 2.224539415, 2.489931091, 2.802984019, 3.182301485, 3.652236101, 4.182596165, 4.695439504, 5.422289377]
metocean_Tp_init = [12.23645701, 12.14497777, 11.90254947, 11.5196666, 11.05403739, 10.65483551, 10.27562225, 10.13693777, 10.27842325, 10.11660396, 10.96177917]
ptfm_heave = [np.interp(Vrated, ptfm_U_init, ptfm_heave_init)]
ptfm_surge = [np.interp(Vrated, ptfm_U_init, ptfm_surge_init)]
ptfm_pitch = [np.interp(Vrated, ptfm_U_init, ptfm_pitch_init)]
metocean_Hs = [np.interp(Vrated, metocean_U_init, metocean_Hs_init)]
metocean_Tp = [np.interp(Vrated, metocean_U_init, metocean_Tp_init)]
else:
floating = False
case_inputs = {}
case_inputs[("Fst","TMax")] = {'vals':[T], 'group':0}
case_inputs[("Fst","TStart")] = {'vals':[TStart], 'group':0}
case_inputs[("Fst","DT")] = {'vals':[dt], 'group':0}
case_inputs[("Fst","OutFileFmt")] = {'vals':[2], 'group':0}
case_inputs[("InflowWind","WindType")] = {'vals':[1], 'group':0}
case_inputs[("InflowWind","HWindSpeed")] = {'vals':[Vrated], 'group':0}
case_inputs[("ElastoDyn","RotSpeed")] = {'vals':[omega], 'group':0}
case_inputs[("ElastoDyn","BlPitch1")] = {'vals':[pitch], 'group':0}
case_inputs[("ElastoDyn","BlPitch2")] = {'vals':[pitch], 'group':0}
case_inputs[("ElastoDyn","BlPitch3")] = {'vals':[pitch], 'group':0}
case_inputs[("ElastoDyn","YawDOF")] = {'vals':['False'], 'group':0}
case_inputs[("ElastoDyn","FlapDOF1")] = {'vals':['False'], 'group':0}
case_inputs[("ElastoDyn","FlapDOF2")] = {'vals':['False'], 'group':0}
case_inputs[("ElastoDyn","EdgeDOF")] = {'vals':['False'], 'group':0}
case_inputs[("ElastoDyn","DrTrDOF")] = {'vals':['False'], 'group':0}
case_inputs[("ElastoDyn","GenDOF")] = {'vals':['True'], 'group':0}
case_inputs[("ElastoDyn","TwFADOF1")] = {'vals':['False'], 'group':0}
case_inputs[("ElastoDyn","TwFADOF2")] = {'vals':['False'], 'group':0}
case_inputs[("ElastoDyn","TwSSDOF1")] = {'vals':['False'], 'group':0}
case_inputs[("ElastoDyn","TwSSDOF2")] = {'vals':['False'], 'group':0}
case_inputs[("ServoDyn","PCMode")] = {'vals':[5], 'group':0}
case_inputs[("ServoDyn","VSContrl")] = {'vals':[5], 'group':0}
case_inputs[("AeroDyn15","WakeMod")] = {'vals':[1], 'group':0}
case_inputs[("AeroDyn15","AFAeroMod")] = {'vals':[2], 'group':0}
case_inputs[("AeroDyn15","TwrPotent")] = {'vals':[0], 'group':0}
case_inputs[("AeroDyn15","TwrShadow")] = {'vals':['False'], 'group':0}
case_inputs[("AeroDyn15","TwrAero")] = {'vals':['False'], 'group':0}
case_inputs[("AeroDyn15","SkewMod")] = {'vals':[1], 'group':0}
case_inputs[("AeroDyn15","TipLoss")] = {'vals':['True'], 'group':0}
case_inputs[("AeroDyn15","HubLoss")] = {'vals':['True'], 'group':0}
case_inputs[("AeroDyn15","TanInd")] = {'vals':['True'], 'group':0}
case_inputs[("AeroDyn15","AIDrag")] = {'vals':['True'], 'group':0}
case_inputs[("AeroDyn15","TIDrag")] = {'vals':['True'], 'group':0}
case_inputs[("AeroDyn15","IndToler")] = {'vals':[1.e-5], 'group':0}
case_inputs[("AeroDyn15","MaxIter")] = {'vals':[5000], 'group':0}
case_inputs[("AeroDyn15","UseBlCm")] = {'vals':['True'], 'group':0}
if floating == True:
case_inputs[("ElastoDyn","PtfmSurge")] = {'vals':ptfm_surge, 'group':1}
case_inputs[("ElastoDyn","PtfmHeave")] = {'vals':ptfm_heave, 'group':1}
case_inputs[("ElastoDyn","PtfmPitch")] = {'vals':ptfm_pitch, 'group':1}
case_inputs[("HydroDyn","WaveHs")] = {'vals':metocean_Hs, 'group':1}
case_inputs[("HydroDyn","WaveTp")] = {'vals':metocean_Tp, 'group':1}
case_inputs[("HydroDyn","RdtnDT")] = {'vals':[dt], 'group':0}
case_inputs[("HydroDyn","WaveMod")] = {'vals':[1], 'group':0}
namebase += '_rated'
case_list, case_name_list = CaseGen_General(case_inputs, dir_matrix=runDir, namebase=namebase)
channels = ["TipDxc1", "TipDyc1"]
channels += ["RootMxc1", "RootMyc1", "RootMzc1", "RootMxc2", "RootMyc2", "RootMzc2", "RootMxc3", "RootMyc3", "RootMzc3"]
channels += ["RootFxc1", "RootFyc1", "RootFzc1", "RootFxc2", "RootFyc2", "RootFzc2", "RootFxc3", "RootFyc3", "RootFzc3"]
channels += ["RtAeroCp", "RotTorq", "RotThrust", "RotSpeed"]
return case_list, case_name_list, channels
def RotorSE_DLC_1_4_Rated(fst_vt, runDir, namebase, TMax, turbine_class, turbulence_class, Vrated, U_init=[], Omega_init=[], pitch_init=[], Turbsim_exe=''):
# Default Runtime
T = 360.
TStart = 60.
# TStart = 0.
# Overwrite for testing
if TMax < T:
T = TMax
TStart = 0.
iec = CaseGen_IEC()
iec.init_cond[("ElastoDyn","RotSpeed")] = {'U': U_init}
iec.init_cond[("ElastoDyn","RotSpeed")]['val'] = Omega_init
iec.init_cond[("ElastoDyn","BlPitch1")] = {'U': U_init}
iec.init_cond[("ElastoDyn","BlPitch1")]['val'] = pitch_init
iec.init_cond[("ElastoDyn","BlPitch2")] = iec.init_cond[("ElastoDyn","BlPitch1")]
iec.init_cond[("ElastoDyn","BlPitch3")] = iec.init_cond[("ElastoDyn","BlPitch1")]
iec.Turbine_Class = turbine_class
iec.Turbulence_Class = turbulence_class
iec.D = fst_vt['ElastoDyn']['TipRad']*2.
iec.z_hub = fst_vt['InflowWind']['RefHt']
iec.TF = T
iec.Tstart = T*3./4.
iec.dlc_inputs = {}
iec.dlc_inputs['DLC'] = [1.4]
iec.dlc_inputs['U'] = [[Vrated]]
iec.dlc_inputs['Seeds'] = [[]]
iec.dlc_inputs['Yaw'] = [[]]
iec.transient_dir_change = '-' # '+','-','both': sign for transient events in EDC, EWS
iec.transient_shear_orientation = 'v' # 'v','h','both': vertical or horizontal shear for EWS
iec.wind_dir = runDir
iec.case_name_base = namebase + '_gust'
iec.Turbsim_exe = ''
iec.debug_level = 0
iec.parallel_windfile_gen = False
iec.run_dir = runDir
case_inputs = {}
case_inputs[("Fst","TMax")] = {'vals':[T], 'group':0}
case_inputs[("Fst","TStart")] = {'vals':[TStart], 'group':0}
case_inputs[("Fst","OutFileFmt")] = {'vals':[2], 'group':0}
case_inputs[("ElastoDyn","YawDOF")] = {'vals':['True'], 'group':0}
case_inputs[("ElastoDyn","FlapDOF1")] = {'vals':['True'], 'group':0}
case_inputs[("ElastoDyn","FlapDOF2")] = {'vals':['True'], 'group':0}
case_inputs[("ElastoDyn","EdgeDOF")] = {'vals':['True'], 'group':0}
case_inputs[("ElastoDyn","DrTrDOF")] = {'vals':['False'], 'group':0}
case_inputs[("ElastoDyn","GenDOF")] = {'vals':['True'], 'group':0}
case_inputs[("ElastoDyn","TwFADOF1")] = {'vals':['False'], 'group':0}
case_inputs[("ElastoDyn","TwFADOF2")] = {'vals':['False'], 'group':0}
case_inputs[("ElastoDyn","TwSSDOF1")] = {'vals':['False'], 'group':0}
case_inputs[("ElastoDyn","TwSSDOF2")] = {'vals':['False'], 'group':0}
case_inputs[("ServoDyn","PCMode")] = {'vals':[5], 'group':0}
case_inputs[("ServoDyn","VSContrl")] = {'vals':[5], 'group':0}
case_inputs[("ServoDyn","YCMode")] = {'vals':[5], 'group':0}
case_inputs[("AeroDyn15","WakeMod")] = {'vals':[1], 'group':0}
case_inputs[("AeroDyn15","AFAeroMod")] = {'vals':[2], 'group':0}
case_inputs[("AeroDyn15","TwrPotent")] = {'vals':[0], 'group':0}
case_inputs[("AeroDyn15","TwrShadow")] = {'vals':['False'], 'group':0}
case_inputs[("AeroDyn15","TwrAero")] = {'vals':['False'], 'group':0}
case_inputs[("AeroDyn15","SkewMod")] = {'vals':[1], 'group':0}
case_inputs[("AeroDyn15","TipLoss")] = {'vals':['True'], 'group':0}
case_inputs[("AeroDyn15","HubLoss")] = {'vals':['True'], 'group':0}
case_inputs[("AeroDyn15","TanInd")] = {'vals':['True'], 'group':0}
case_inputs[("AeroDyn15","AIDrag")] = {'vals':['True'], 'group':0}
case_inputs[("AeroDyn15","TIDrag")] = {'vals':['True'], 'group':0}
case_inputs[("AeroDyn15","IndToler")] = {'vals':[1.e-5], 'group':0}
case_inputs[("AeroDyn15","MaxIter")] = {'vals':[5000], 'group':0}
case_inputs[("AeroDyn15","UseBlCm")] = {'vals':['True'], 'group':0}
case_list, case_name_list = iec.execute(case_inputs=case_inputs)
channels = ["TipDxc1", "TipDyc1", "TipDzc1", "TipDxc2", "TipDyc2", "TipDzc2", "TipDxc3", "TipDyc3", "TipDzc3"]
channels += ["RootMxc1", "RootMyc1", "RootMzc1", "RootMxc2", "RootMyc2", "RootMzc2", "RootMxc3", "RootMyc3", "RootMzc3"]
channels += ["RootFxc1", "RootFyc1", "RootFzc1", "RootFxc2", "RootFyc2", "RootFzc2", "RootFxc3", "RootFyc3", "RootFzc3"]
channels += ["RtAeroCp", "RotTorq", "RotThrust", "RotSpeed", "NacYaw", "Wind1VelX"]
channels += ["B1N1Fx", "B1N2Fx", "B1N3Fx", "B1N4Fx", "B1N5Fx", "B1N6Fx", "B1N7Fx", "B1N8Fx", "B1N9Fx"]
channels += ["B1N1Fy", "B1N2Fy", "B1N3Fy", "B1N4Fy", "B1N5Fy", "B1N6Fy", "B1N7Fy", "B1N8Fy", "B1N9Fy"]
return case_list, case_name_list, channels
def RotorSE_DLC_7_1_Steady(fst_vt, runDir, namebase, TMax, turbine_class, turbulence_class, U, U_init=[], Omega_init=[], pitch_init=[], Turbsim_exe=''):
# Extreme 1yr return period wind speed with a power fault resulting in the blade not feathering
# Default Runtime
T = 60.
TStart = 30.
# Overwrite for testing
if TMax < T:
T = TMax
TStart = 0.
Pitch = 0.
Omega = 0.
case_inputs = {}
case_inputs[("Fst","TMax")] = {'vals':[T], 'group':0}
case_inputs[("Fst","TStart")] = {'vals':[TStart], 'group':0}
case_inputs[("Fst","OutFileFmt")] = {'vals':[2], 'group':0}
case_inputs[("InflowWind","WindType")] = {'vals':[1], 'group':0}
case_inputs[("InflowWind","HWindSpeed")] = {'vals':[U], 'group':0}
case_inputs[("InflowWind","PLexp")] = {'vals':[0.11], 'group':0}
case_inputs[("ElastoDyn","RotSpeed")] = {'vals':[Omega], 'group':0}
case_inputs[("ElastoDyn","BlPitch1")] = {'vals':[Pitch], 'group':0}
case_inputs[("ElastoDyn","BlPitch2")] = {'vals':[Pitch], 'group':0}
case_inputs[("ElastoDyn","BlPitch3")] = {'vals':[Pitch], 'group':0}
case_inputs[("ElastoDyn","YawDOF")] = {'vals':['True'], 'group':0}
case_inputs[("ElastoDyn","FlapDOF1")] = {'vals':['True'], 'group':0}
case_inputs[("ElastoDyn","FlapDOF2")] = {'vals':['True'], 'group':0}
case_inputs[("ElastoDyn","EdgeDOF")] = {'vals':['True'], 'group':0}
case_inputs[("ElastoDyn","DrTrDOF")] = {'vals':['False'], 'group':0}
case_inputs[("ElastoDyn","GenDOF")] = {'vals':['False'], 'group':0}
case_inputs[("ElastoDyn","TwFADOF1")] = {'vals':['False'], 'group':0}
case_inputs[("ElastoDyn","TwFADOF2")] = {'vals':['False'], 'group':0}
case_inputs[("ElastoDyn","TwSSDOF1")] = {'vals':['False'], 'group':0}
case_inputs[("ElastoDyn","TwSSDOF2")] = {'vals':['False'], 'group':0}
case_inputs[("ServoDyn","PCMode")] = {'vals':[0], 'group':0}
case_inputs[("ServoDyn","VSContrl")] = {'vals':[5], 'group':0}
case_inputs[("ServoDyn","YCMode")] = {'vals':[5], 'group':0}
case_inputs[("AeroDyn15","WakeMod")] = {'vals':[1], 'group':0}
case_inputs[("AeroDyn15","AFAeroMod")] = {'vals':[1], 'group':0}
case_inputs[("AeroDyn15","TwrPotent")] = {'vals':[0], 'group':0}
case_inputs[("AeroDyn15","TwrShadow")] = {'vals':['False'], 'group':0}
case_inputs[("AeroDyn15","TwrAero")] = {'vals':['False'], 'group':0}
case_inputs[("AeroDyn15","SkewMod")] = {'vals':[1], 'group':0}
case_inputs[("AeroDyn15","TipLoss")] = {'vals':['True'], 'group':0}
case_inputs[("AeroDyn15","HubLoss")] = {'vals':['True'], 'group':0}
case_inputs[("AeroDyn15","TanInd")] = {'vals':['True'], 'group':0}
case_inputs[("AeroDyn15","AIDrag")] = {'vals':['True'], 'group':0}
case_inputs[("AeroDyn15","TIDrag")] = {'vals':['True'], 'group':0}
case_inputs[("AeroDyn15","IndToler")] = {'vals':[1.e-5], 'group':0}
case_inputs[("AeroDyn15","MaxIter")] = {'vals':[5000], 'group':0}
case_inputs[("AeroDyn15","UseBlCm")] = {'vals':['True'], 'group':0}
namebase += '_idle50yr'
case_list, case_name_list = CaseGen_General(case_inputs, namebase=namebase, save_matrix=False)
channels = ["TipDxc1", "TipDyc1", "TipDzc1", "TipDxc2", "TipDyc2", "TipDzc2", "TipDxc3", "TipDyc3", "TipDzc3"]
channels += ["RootMxc1", "RootMyc1", "RootMzc1", "RootMxc2", "RootMyc2", "RootMzc2", "RootMxc3", "RootMyc3", "RootMzc3"]
channels += ["RootFxc1", "RootFyc1", "RootFzc1", "RootFxc2", "RootFyc2", "RootFzc2", "RootFxc3", "RootFyc3", "RootFzc3"]
channels += ["RtAeroCp", "RotTorq", "RotThrust", "RotSpeed", "NacYaw"]
channels += ["B1N1Fx", "B1N2Fx", "B1N3Fx", "B1N4Fx", "B1N5Fx", "B1N6Fx", "B1N7Fx", "B1N8Fx", "B1N9Fx"]
channels += ["B1N1Fy", "B1N2Fy", "B1N3Fy", "B1N4Fy", "B1N5Fy", "B1N6Fy", "B1N7Fy", "B1N8Fy", "B1N9Fy"]
return case_list, case_name_list, channels
def RotorSE_DLC_1_1_Turb(fst_vt, runDir, namebase, TMax, turbine_class, turbulence_class, U, U_init=[], Omega_init=[], pitch_init=[], Turbsim_exe='', debug_level=0, cores=0, mpi_run=False, mpi_comm_map_down=[]):
# Default Runtime
T = 630.
TStart = 30.
# Overwrite for testing
if TMax < T:
T = TMax
TStart = 0.
iec = CaseGen_IEC()
iec.init_cond[("ElastoDyn","RotSpeed")] = {'U': U_init}
iec.init_cond[("ElastoDyn","RotSpeed")]['val'] = [0.95*omega_i for omega_i in Omega_init]
iec.init_cond[("ElastoDyn","BlPitch1")] = {'U': U_init}
iec.init_cond[("ElastoDyn","BlPitch1")]['val'] = pitch_init
iec.init_cond[("ElastoDyn","BlPitch2")] = iec.init_cond[("ElastoDyn","BlPitch1")]
iec.init_cond[("ElastoDyn","BlPitch3")] = iec.init_cond[("ElastoDyn","BlPitch1")]
iec.Turbine_Class = turbine_class
iec.Turbulence_Class = turbulence_class
iec.D = fst_vt['ElastoDyn']['TipRad']*2.
iec.z_hub = fst_vt['InflowWind']['RefHt']
iec.dlc_inputs = {}
iec.dlc_inputs['DLC'] = [1.1]
iec.dlc_inputs['U'] = [[U]]
# iec.dlc_inputs['Seeds'] = [[1]]
iec.dlc_inputs['Seeds'] = [[310414237, 1764051066, 1935526301, 333954657, -960771537, 714191176]] # nothing special about these seeds, randomly generated
iec.dlc_inputs['Yaw'] = [[]]
iec.transient_dir_change = '-' # '+','-','both': sign for transient events in EDC, EWS
iec.transient_shear_orientation = 'v' # 'v','h','both': vertical or horizontal shear for EWS
iec.wind_dir = runDir
iec.case_name_base = namebase + '_turb'
iec.Turbsim_exe = Turbsim_exe
iec.debug_level = debug_level
iec.cores = cores
iec.run_dir = runDir
iec.overwrite = True
# iec.overwrite = False
if cores > 1:
iec.parallel_windfile_gen = True
else:
iec.parallel_windfile_gen = False
# mpi_run = False
if mpi_run:
iec.mpi_run = mpi_run
iec.comm_map_down = mpi_comm_map_down
case_inputs = {}
case_inputs[("Fst","TMax")] = {'vals':[T], 'group':0}
case_inputs[("Fst","TStart")] = {'vals':[TStart], 'group':0}
case_inputs[("Fst","OutFileFmt")] = {'vals':[2], 'group':0}
case_inputs[("ElastoDyn","YawDOF")] = {'vals':['True'], 'group':0}
case_inputs[("ElastoDyn","FlapDOF1")] = {'vals':['True'], 'group':0}
case_inputs[("ElastoDyn","FlapDOF2")] = {'vals':['True'], 'group':0}
case_inputs[("ElastoDyn","EdgeDOF")] = {'vals':['True'], 'group':0}
case_inputs[("ElastoDyn","DrTrDOF")] = {'vals':['False'], 'group':0}
case_inputs[("ElastoDyn","GenDOF")] = {'vals':['True'], 'group':0}
case_inputs[("ElastoDyn","TwFADOF1")] = {'vals':['False'], 'group':0}
case_inputs[("ElastoDyn","TwFADOF2")] = {'vals':['False'], 'group':0}
case_inputs[("ElastoDyn","TwSSDOF1")] = {'vals':['False'], 'group':0}
case_inputs[("ElastoDyn","TwSSDOF2")] = {'vals':['False'], 'group':0}
case_inputs[("ServoDyn","PCMode")] = {'vals':[5], 'group':0}
case_inputs[("ServoDyn","VSContrl")] = {'vals':[5], 'group':0}
case_inputs[("ServoDyn","YCMode")] = {'vals':[5], 'group':0}
case_inputs[("AeroDyn15","WakeMod")] = {'vals':[1], 'group':0}
case_inputs[("AeroDyn15","AFAeroMod")] = {'vals':[2], 'group':0}
case_inputs[("AeroDyn15","TwrPotent")] = {'vals':[0], 'group':0}
case_inputs[("AeroDyn15","TwrShadow")] = {'vals':['False'], 'group':0}
case_inputs[("AeroDyn15","TwrAero")] = {'vals':['False'], 'group':0}
case_inputs[("AeroDyn15","SkewMod")] = {'vals':[1], 'group':0}
case_inputs[("AeroDyn15","TipLoss")] = {'vals':['True'], 'group':0}
case_inputs[("AeroDyn15","HubLoss")] = {'vals':['True'], 'group':0}
case_inputs[("AeroDyn15","TanInd")] = {'vals':['True'], 'group':0}
case_inputs[("AeroDyn15","AIDrag")] = {'vals':['True'], 'group':0}
case_inputs[("AeroDyn15","TIDrag")] = {'vals':['True'], 'group':0}
case_inputs[("AeroDyn15","IndToler")] = {'vals':[1.e-5], 'group':0}
case_inputs[("AeroDyn15","MaxIter")] = {'vals':[5000], 'group':0}
case_inputs[("AeroDyn15","UseBlCm")] = {'vals':['True'], 'group':0}
case_list, case_name_list = iec.execute(case_inputs=case_inputs)
channels = ["TipDxc1", "TipDyc1", "TipDzc1", "TipDxc2", "TipDyc2", "TipDzc2", "TipDxc3", "TipDyc3", "TipDzc3"]
channels += ["RootMxc1", "RootMyc1", "RootMzc1", "RootMxc2", "RootMyc2", "RootMzc2", "RootMxc3", "RootMyc3", "RootMzc3"]
channels += ["RootFxc1", "RootFyc1", "RootFzc1", "RootFxc2", "RootFyc2", "RootFzc2", "RootFxc3", "RootFyc3", "RootFzc3"]
channels += ["RtAeroCp", "RotTorq", "RotThrust", "RotSpeed", "NacYaw"]
channels += ["B1N1Fx", "B1N2Fx", "B1N3Fx", "B1N4Fx", "B1N5Fx", "B1N6Fx", "B1N7Fx", "B1N8Fx", "B1N9Fx"]
channels += ["B1N1Fy", "B1N2Fy", "B1N3Fy", "B1N4Fy", "B1N5Fy", "B1N6Fy", "B1N7Fy", "B1N8Fy", "B1N9Fy"]
return case_list, case_name_list, channels
if __name__ == "__main__":
# power_curve()
case_list, case_name_list = RotorSE_rated('test', 60., 11., 12.1, 0.)
| 58.751261
| 314
| 0.597935
|
baa32c0ccbac1f941e696bd3040e70b51ad664c0
| 550
|
py
|
Python
|
bin/datapackage-dataset.py
|
anthonyrandell-madetech/specification
|
c675576c5cd2103b52938a6cd8cad546da3433e1
|
[
"MIT"
] | null | null | null |
bin/datapackage-dataset.py
|
anthonyrandell-madetech/specification
|
c675576c5cd2103b52938a6cd8cad546da3433e1
|
[
"MIT"
] | 6
|
2020-07-14T07:55:38.000Z
|
2022-01-11T09:50:59.000Z
|
bin/datapackage-dataset.py
|
anthonyrandell-madetech/specification
|
c675576c5cd2103b52938a6cd8cad546da3433e1
|
[
"MIT"
] | 2
|
2022-01-06T14:29:05.000Z
|
2022-01-07T09:52:52.000Z
|
#!/usr/bin/env python3
import sys
import csv
import frontmatter
fieldnames = ["datapackage", "dataset"]
w = csv.DictWriter(open(sys.argv[1], "w", newline=""), fieldnames=fieldnames, extrasaction='ignore')
w.writeheader()
for row in csv.DictReader(open("specification/datapackage.csv", newline="")):
datapackage = row["datapackage"]
path = "content/datapackage/%s.md" % datapackage
post = frontmatter.load(path)
for dataset in post.metadata.get("datasets", []):
w.writerow({"datapackage": datapackage, "dataset": dataset})
| 30.555556
| 100
| 0.701818
|
03b3505855d7610ca829c8d18ee7ba367efb94f7
| 450
|
py
|
Python
|
tests/test_toggle_fullscreen.py
|
vault-the/pywebview
|
8608378ab62732d52640478c572c4f83994c0cbf
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_toggle_fullscreen.py
|
vault-the/pywebview
|
8608378ab62732d52640478c572c4f83994c0cbf
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_toggle_fullscreen.py
|
vault-the/pywebview
|
8608378ab62732d52640478c572c4f83994c0cbf
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
import threading
from .util import run_test, destroy_window
def toggle_fullscreen():
import webview
def _toggle_fullscreen(webview):
webview.toggle_fullscreen()
t = threading.Thread(target=_toggle_fullscreen, args=(webview,))
t.start()
destroy_window(webview)
webview.create_window('Toggle fullscreen test', 'https://www.example.org')
def test_toggle_fullscreen():
run_test(toggle_fullscreen)
| 21.428571
| 78
| 0.742222
|
168068cf53acdfef09cd6f2571184f5e8e420004
| 2,980
|
py
|
Python
|
mlearn_for_image.py
|
busyyang/easy12306
|
707d34f7a868fd6155ed6e7a1703b79ef5f62c73
|
[
"Artistic-2.0"
] | null | null | null |
mlearn_for_image.py
|
busyyang/easy12306
|
707d34f7a868fd6155ed6e7a1703b79ef5f62c73
|
[
"Artistic-2.0"
] | null | null | null |
mlearn_for_image.py
|
busyyang/easy12306
|
707d34f7a868fd6155ed6e7a1703b79ef5f62c73
|
[
"Artistic-2.0"
] | null | null | null |
# coding: utf-8
import sys
import cv2
import numpy as np
from keras import models
from keras import layers
from keras import optimizers
from keras.applications import VGG16
from keras.callbacks import ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
def preprocess_input(x):
x = x.astype('float32')
    # Images are read with cv2, so they are already in BGR format
mean = [103.939, 116.779, 123.68]
x -= mean
return x
def load_data():
    # This is the training set derived from statistics
data = np.load('./data/captcha.npz')
train_x, train_y = data['images'], data['labels']
train_x = preprocess_input(train_x)
    # Since this information comes from statistics, assign a confidence weight here
sample_weight = train_y.max(axis=1) / np.sqrt(train_y.sum(axis=1))
sample_weight /= sample_weight.mean()
train_y = train_y.argmax(axis=1)
    # This is the manually labelled validation set
data = np.load('./data/captcha.test.npz')
test_x, test_y = data['images'], data['labels']
test_x = preprocess_input(test_x)
return (train_x, train_y, sample_weight), (test_x, test_y)
def learn():
(train_x, train_y, sample_weight), (test_x, test_y) = load_data()
datagen = ImageDataGenerator(horizontal_flip=True,
vertical_flip=True)
train_generator = datagen.flow(train_x, train_y, sample_weight=sample_weight)
base = VGG16(weights='imagenet', include_top=False, input_shape=(None, None, 3))
for layer in base.layers[:-4]:
layer.trainable = False
model = models.Sequential([
base,
layers.BatchNormalization(),
layers.Conv2D(64, (3, 3), activation='relu', padding='same'),
layers.GlobalAveragePooling2D(),
layers.BatchNormalization(),
layers.Dense(64, activation='relu'),
layers.BatchNormalization(),
layers.Dropout(0.20),
layers.Dense(80, activation='softmax')
])
model.compile(optimizer=optimizers.RMSprop(lr=1e-5),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.summary()
reduce_lr = ReduceLROnPlateau(verbose=1)
model.fit_generator(train_generator, epochs=400,
steps_per_epoch=100,
validation_data=(test_x[:800], test_y[:800]),
callbacks=[reduce_lr], verbose=2)
result = model.evaluate(test_x, test_y)
print(result)
model.save('./models/12306.image.model.h5', include_optimizer=False)
def predict(imgs):
imgs = preprocess_input(imgs)
model = models.load_model('./models/12306.image.model.h5')
labels = model.predict(imgs)
return labels
def _predict(fn):
imgs = cv2.imread(fn)
imgs = cv2.resize(imgs, (67, 67))
imgs.shape = (-1, 67, 67, 3)
labels = predict(imgs)
print(labels.max(axis=1))
print(labels.argmax(axis=1))
if __name__ == '__main__':
if len(sys.argv) >= 2:
_predict(sys.argv[1])
else:
learn()
| 32.043011
| 85
| 0.628523
|
382ff03bb286d4cd75a1a8a8d293686eb55bb82f
| 2,734
|
py
|
Python
|
test/test_cashbook_entry.py
|
fattureincloud/fattureincloud-python-sdk
|
f3a40fac345751014ea389680efdaef90f03bac1
|
[
"MIT"
] | 2
|
2022-02-17T08:33:17.000Z
|
2022-03-22T09:27:00.000Z
|
test/test_cashbook_entry.py
|
fattureincloud/fattureincloud-python-sdk
|
f3a40fac345751014ea389680efdaef90f03bac1
|
[
"MIT"
] | null | null | null |
test/test_cashbook_entry.py
|
fattureincloud/fattureincloud-python-sdk
|
f3a40fac345751014ea389680efdaef90f03bac1
|
[
"MIT"
] | null | null | null |
"""
Fatture in Cloud API v2 - API Reference
    Connect your software with Fatture in Cloud, the invoicing platform chosen by more than 400.000 businesses in Italy. The Fatture in Cloud API is based on REST and makes it possible to interact with user-related data after prior authorization via the OAuth2 protocol.  # noqa: E501
The version of the OpenAPI document: 2.0.9
Contact: info@fattureincloud.it
Generated by: https://openapi-generator.tech
"""
import json
import sys
import unittest
import datetime
import fattureincloud_python_sdk
from functions import json_serial
from functions import create_from_json
from fattureincloud_python_sdk.model.cashbook_entry_document import CashbookEntryDocument
from fattureincloud_python_sdk.model.cashbook_entry import CashbookEntry
from fattureincloud_python_sdk.model.cashbook_entry_kind import CashbookEntryKind
from fattureincloud_python_sdk.model.cashbook_entry_type import CashbookEntryType
from fattureincloud_python_sdk.model.payment_account import PaymentAccount
globals()['CashbookEntryDocument'] = CashbookEntryDocument
globals()['CashbookEntry'] = CashbookEntry
globals()['CashbookEntryKind'] = CashbookEntryKind
globals()['CashbookEntryType'] = CashbookEntryType
globals()['PaymentAccount'] = PaymentAccount
class TestCashbookEntry(unittest.TestCase):
"""CashbookEntry unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testCashbookEntry(self):
"""Test CashbookEntry"""
model = CashbookEntry(
id="1",
date=datetime.datetime.strptime("2022-02-02", '%Y-%m-%d').date(),
description="description",
kind=CashbookEntryKind("cashbook"),
type=CashbookEntryType("in"),
entity_name="name",
document=CashbookEntryDocument(
id=1,
path="/path",
type="doc"
),
amount_in=10.0,
payment_account_in=PaymentAccount(
id=1,
name="banca"
),
amount_out=0.0,
payment_account_out=PaymentAccount(
id=1,
name="banca"
)
)
expected_json = '{"id": "1", "date": "2022-02-02", "description": "description", "kind": "cashbook", "type": "in", "entity_name": "name", "document": {"id": 1, "path": "/path", "type": "doc"}, "amount_in": 10.0, "payment_account_in": {"id": 1, "name": "banca"}, "amount_out": 0.0, "payment_account_out": {"id": 1, "name": "banca"}}'
actual_json = json.dumps(model.to_dict(), default=json_serial)
assert actual_json == expected_json
if __name__ == '__main__':
unittest.main()
| 37.972222
| 340
| 0.66496
|
56f2f0dfea4f614ed6626a23e9b6b182c6203646
| 649
|
py
|
Python
|
eggs/docutils-0.7-py2.6.egg/EGG-INFO/scripts/rst2xml.py
|
psnehal/MethylSig
|
5efad71e71ff2515feff2e49579c856ef9a1bbd8
|
[
"CC-BY-3.0"
] | null | null | null |
eggs/docutils-0.7-py2.6.egg/EGG-INFO/scripts/rst2xml.py
|
psnehal/MethylSig
|
5efad71e71ff2515feff2e49579c856ef9a1bbd8
|
[
"CC-BY-3.0"
] | null | null | null |
eggs/docutils-0.7-py2.6.egg/EGG-INFO/scripts/rst2xml.py
|
psnehal/MethylSig
|
5efad71e71ff2515feff2e49579c856ef9a1bbd8
|
[
"CC-BY-3.0"
] | null | null | null |
#!/afs/bx.psu.edu/project/pythons/linux-x86_64-ucs4/bin/python2.6
# $Id: rst2xml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing Docutils XML.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates Docutils-native XML from standalone '
'reStructuredText sources. ' + default_description)
publish_cmdline(writer_name='xml', description=description)
| 27.041667
| 70
| 0.74114
|
aaaf8bcecbaeb47a96000cf95fc818ddf954c4e3
| 6,113
|
py
|
Python
|
kospeech/models/transformer/model.py
|
daiyaanarfeen/kospeech
|
5aff5c7647e5cceceddf7b22c991777fc3792400
|
[
"Apache-2.0"
] | 257
|
2020-06-06T14:20:47.000Z
|
2021-08-12T05:01:39.000Z
|
kospeech/models/transformer/model.py
|
daiyaanarfeen/kospeech
|
5aff5c7647e5cceceddf7b22c991777fc3792400
|
[
"Apache-2.0"
] | 100
|
2020-06-08T00:39:28.000Z
|
2021-08-04T11:22:02.000Z
|
kospeech/models/transformer/model.py
|
daiyaanarfeen/kospeech
|
5aff5c7647e5cceceddf7b22c991777fc3792400
|
[
"Apache-2.0"
] | 96
|
2020-06-10T06:12:52.000Z
|
2021-08-09T14:40:01.000Z
|
# Copyright (c) 2020, Soohwan Kim. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torch import Tensor
from typing import Tuple
from kospeech.models.model import EncoderDecoderModel
from kospeech.models.transformer.decoder import TransformerDecoder
from kospeech.models.transformer.encoder import TransformerEncoder
class SpeechTransformer(EncoderDecoderModel):
"""
A Speech Transformer model. User is able to modify the attributes as needed.
The model is based on the paper "Attention Is All You Need".
Args:
input_dim (int): dimension of input vector
num_classes (int): number of classification
extractor (str): type of CNN extractor (default: vgg)
        num_encoder_layers (int, optional): number of encoder layers (default: 12)
        num_decoder_layers (int, optional): number of decoder layers (default: 6)
encoder_dropout_p (float, optional): dropout probability of encoder (default: 0.2)
decoder_dropout_p (float, optional): dropout probability of decoder (default: 0.2)
d_model (int): dimension of model (default: 512)
d_ff (int): dimension of feed forward net (default: 2048)
pad_id (int): identification of <PAD_token> (default: 0)
sos_id (int): identification of <SOS_token> (default: 1)
eos_id (int): identification of <EOS_token> (default: 2)
num_heads (int): number of attention heads (default: 8)
max_length (int, optional): max decoding step (default: 400)
        joint_ctc_attention (bool, optional): flag indicating whether to use joint CTC-attention (default: False)
Inputs: inputs, input_lengths, targets, teacher_forcing_ratio
        - **inputs** (torch.Tensor): padded tensor of acoustic feature sequences of size
            ``(batch, seq_length, dimension)``. This information is forwarded to the encoder.
        - **input_lengths** (torch.Tensor): tensor containing the length of each input sequence.
- **targets** (torch.Tensor): tensor of sequences, whose length is the batch size and within which
each sequence is a list of token IDs. This information is forwarded to the decoder.
Returns:
(Tensor, Tensor, Tensor)
* predicted_log_probs (torch.FloatTensor): Log probability of model predictions.
* encoder_output_lengths: The length of encoder outputs. ``(batch)``
* encoder_log_probs: Log probability of encoder outputs will be passed to CTC Loss.
If joint_ctc_attention is False, return None.
"""
def __init__(
self,
input_dim: int,
num_classes: int,
extractor: str,
num_encoder_layers: int = 12,
num_decoder_layers: int = 6,
encoder_dropout_p: float = 0.2,
decoder_dropout_p: float = 0.2,
d_model: int = 512,
d_ff: int = 2048,
pad_id: int = 0,
sos_id: int = 1,
eos_id: int = 2,
num_heads: int = 8,
joint_ctc_attention: bool = False,
max_length: int = 400,
) -> None:
assert d_model % num_heads == 0, "d_model % num_heads should be zero."
encoder = TransformerEncoder(
input_dim=input_dim,
extractor=extractor,
d_model=d_model,
d_ff=d_ff,
num_layers=num_encoder_layers,
num_heads=num_heads,
dropout_p=encoder_dropout_p,
joint_ctc_attention=joint_ctc_attention,
num_classes=num_classes,
)
decoder = TransformerDecoder(
num_classes=num_classes,
d_model=d_model,
d_ff=d_ff,
num_layers=num_decoder_layers,
num_heads=num_heads,
dropout_p=decoder_dropout_p,
pad_id=pad_id,
sos_id=sos_id,
eos_id=eos_id,
max_length=max_length,
)
super(SpeechTransformer, self).__init__(encoder, decoder)
self.num_classes = num_classes
self.joint_ctc_attention = joint_ctc_attention
self.sos_id = sos_id
self.eos_id = eos_id
self.pad_id = pad_id
self.max_length = max_length
def forward(
self,
inputs: Tensor,
input_lengths: Tensor,
targets: Tensor,
target_lengths: Tensor,
) -> Tuple[Tensor, Tensor, Tensor]:
"""
Forward propagate a `inputs` and `targets` pair for training.
Args:
            inputs (torch.FloatTensor): An input sequence passed to the encoder. Typically this will be a padded
                `FloatTensor` of size ``(batch, seq_length, dimension)``.
            input_lengths (torch.LongTensor): The length of each input sequence. ``(batch)``
            targets (torch.LongTensor): A target sequence passed to the decoder. `LongTensor` of size ``(batch, seq_length)``
Returns:
(Tensor, Tensor, Tensor)
* predicted_log_probs (torch.FloatTensor): Log probability of model predictions.
* encoder_output_lengths: The length of encoder outputs. ``(batch)``
* encoder_log_probs: Log probability of encoder outputs will be passed to CTC Loss.
If joint_ctc_attention is False, return None.
"""
encoder_outputs, output_lengths, encoder_log_probs = self.encoder(inputs, input_lengths)
predicted_log_probs = self.decoder(targets, encoder_outputs, output_lengths, target_lengths)
return predicted_log_probs, output_lengths, encoder_log_probs
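# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# A minimal, hedged example of how SpeechTransformer might be instantiated and
# called for one training step, following the docstrings above. The feature
# dimension (80), vocabulary size (2000), sequence lengths and the 'vgg'
# extractor are assumptions made for this example only.
if __name__ == "__main__":
    import torch

    model = SpeechTransformer(input_dim=80, num_classes=2000, extractor='vgg')
    inputs = torch.randn(4, 500, 80)                          # (batch, seq_length, dimension)
    input_lengths = torch.full((4,), 500, dtype=torch.long)   # length of each input sequence
    targets = torch.randint(0, 2000, (4, 30))                 # token IDs fed to the decoder
    target_lengths = torch.full((4,), 30, dtype=torch.long)
    log_probs, output_lengths, encoder_log_probs = model(
        inputs, input_lengths, targets, target_lengths)
    print(log_probs.shape)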
| 43.978417
| 119
| 0.653852
|
1a2410005f2532d1763ca1c5df397a343ce96ef8
| 2,545
|
py
|
Python
|
fish_contents.py
|
v1ztep/fish_sale_TG_bot
|
adfb5fbcbea29a5e60f64940ec082ba733196189
|
[
"MIT"
] | null | null | null |
fish_contents.py
|
v1ztep/fish_sale_TG_bot
|
adfb5fbcbea29a5e60f64940ec082ba733196189
|
[
"MIT"
] | null | null | null |
fish_contents.py
|
v1ztep/fish_sale_TG_bot
|
adfb5fbcbea29a5e60f64940ec082ba733196189
|
[
"MIT"
] | null | null | null |
import textwrap
from telegram import InlineKeyboardButton
from telegram import InlineKeyboardMarkup
from moltin import get_all_products
def get_menu_keyboard(context):
all_products = get_all_products(context.bot_data['moltin_token'])
keyboard = []
for product in all_products['data']:
keyboard.append([InlineKeyboardButton(product['name'],
callback_data=product['id'])])
    keyboard.append([InlineKeyboardButton("Корзина", callback_data='to_cart')])
reply_markup = InlineKeyboardMarkup(keyboard)
return reply_markup
def get_description_text(product):
text = f'''
{product['name']}
{product['meta']['display_price']['with_tax']['formatted']} per kg
{product['meta']['stock']['level']}kg on stock
{product['description']} fish from deep-deep ocean
'''
return textwrap.dedent(text)
def get_description_keyboard(product_id):
    keyboard = [[InlineKeyboardButton('1 кг', callback_data=f'{product_id} 1'),
                 InlineKeyboardButton('5 кг', callback_data=f'{product_id} 5'),
                 InlineKeyboardButton('10 кг', callback_data=f'{product_id} 10')],
                [InlineKeyboardButton("Корзина", callback_data='to_cart')],
                [InlineKeyboardButton("В меню", callback_data='to_menu')]
]
reply_markup = InlineKeyboardMarkup(keyboard)
return reply_markup
def get_cart_text(cart_items):
text = ''
for product in cart_items['data']:
text += f'''
{product['name']}
{product['description']} fish from deep-deep ocean
{product['meta']['display_price']['with_tax']['unit']['formatted']} per kg
{product['quantity']}kg in cart for {product['meta']['display_price']
['with_tax']['value']['formatted']}
'''
text += f'''
Total: {cart_items['meta']['display_price']['with_tax']['formatted']}
'''
return textwrap.dedent(text)
def get_cart_keyboard(cart_items):
keyboard = []
for product in cart_items['data']:
        keyboard.append([InlineKeyboardButton(f"Убрать из корзины {product['name']}",
callback_data=product['id'])])
if keyboard:
keyboard.append(
            [InlineKeyboardButton('Оплата', callback_data='to_payment')])
    keyboard.append([InlineKeyboardButton('В меню', callback_data='to_menu')])
reply_markup = InlineKeyboardMarkup(keyboard)
return reply_markup
| 36.357143
| 86
| 0.628684
|
8a0956ecf8e08abcbfa8099cfdbe674f32a29ceb
| 3,403
|
py
|
Python
|
airflow/providers/amazon/aws/sensors/sagemaker_base.py
|
ChaseKnowlden/airflow
|
6b71eac1997a7c0db3b8e3aed6b4e65d01871440
|
[
"Apache-2.0"
] | 15,947
|
2019-01-05T13:51:02.000Z
|
2022-03-31T23:33:16.000Z
|
airflow/providers/amazon/aws/sensors/sagemaker_base.py
|
ChaseKnowlden/airflow
|
6b71eac1997a7c0db3b8e3aed6b4e65d01871440
|
[
"Apache-2.0"
] | 14,603
|
2019-01-05T09:43:19.000Z
|
2022-03-31T23:11:59.000Z
|
airflow/providers/amazon/aws/sensors/sagemaker_base.py
|
ChaseKnowlden/airflow
|
6b71eac1997a7c0db3b8e3aed6b4e65d01871440
|
[
"Apache-2.0"
] | 8,429
|
2019-01-05T19:45:47.000Z
|
2022-03-31T22:13:01.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Optional, Set
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.sagemaker import SageMakerHook
from airflow.sensors.base import BaseSensorOperator
class SageMakerBaseSensor(BaseSensorOperator):
"""
Contains general sensor behavior for SageMaker.
Subclasses should implement get_sagemaker_response()
and state_from_response() methods.
    Subclasses should also implement the non_terminal_states() and failed_states() methods.
"""
ui_color = '#ededed'
def __init__(self, *, aws_conn_id: str = 'aws_default', **kwargs):
super().__init__(**kwargs)
self.aws_conn_id = aws_conn_id
self.hook: Optional[SageMakerHook] = None
def get_hook(self) -> SageMakerHook:
"""Get SageMakerHook"""
if self.hook:
return self.hook
self.hook = SageMakerHook(aws_conn_id=self.aws_conn_id)
return self.hook
def poke(self, context):
response = self.get_sagemaker_response()
if not response['ResponseMetadata']['HTTPStatusCode'] == 200:
self.log.info('Bad HTTP response: %s', response)
return False
state = self.state_from_response(response)
self.log.info('Job currently %s', state)
if state in self.non_terminal_states():
return False
if state in self.failed_states():
failed_reason = self.get_failed_reason_from_response(response)
raise AirflowException(f'Sagemaker job failed for the following reason: {failed_reason}')
return True
    def non_terminal_states(self) -> Set[str]:
        """Placeholder for returning states which should not terminate."""
raise NotImplementedError('Please implement non_terminal_states() in subclass')
    def failed_states(self) -> Set[str]:
        """Placeholder for returning states which are considered failed."""
raise NotImplementedError('Please implement failed_states() in subclass')
def get_sagemaker_response(self) -> Optional[dict]:
"""Placeholder for checking status of a SageMaker task."""
raise NotImplementedError('Please implement get_sagemaker_response() in subclass')
def get_failed_reason_from_response(self, response: dict) -> str:
"""Placeholder for extracting the reason for failure from an AWS response."""
return 'Unknown'
def state_from_response(self, response: dict) -> str:
"""Placeholder for extracting the state from an AWS response."""
raise NotImplementedError('Please implement state_from_response() in subclass')
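# --- Illustrative sketch (added for clarity; not part of the original module) ---
# A minimal, hedged example of the subclassing contract described in the class
# docstring. The job name, state sets and describe call are assumptions made
# for illustration; this is not a sensor shipped with Airflow.
class _ExampleTrainingJobSensor(SageMakerBaseSensor):
    """Illustrative subclass that would poll one fixed SageMaker training job."""

    def non_terminal_states(self) -> Set[str]:
        return {'InProgress', 'Stopping'}

    def failed_states(self) -> Set[str]:
        return {'Failed'}

    def get_sagemaker_response(self) -> Optional[dict]:
        # 'example-training-job' is a placeholder job name
        return self.get_hook().describe_training_job('example-training-job')

    def state_from_response(self, response: dict) -> str:
        return response['TrainingJobStatus']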
| 39.569767
| 101
| 0.714076
|
56d8a0b2eb3ffcb2edd9f3465e463b18202aae96
| 41
|
py
|
Python
|
tests/__init__.py
|
bendhouseart/codecounter
|
e73c2043d33133ab330eb067c348198afda6ce88
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
bendhouseart/codecounter
|
e73c2043d33133ab330eb067c348198afda6ce88
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
bendhouseart/codecounter
|
e73c2043d33133ab330eb067c348198afda6ce88
|
[
"MIT"
] | null | null | null |
"""Unit test package for codecounter."""
| 20.5
| 40
| 0.707317
|
ab6a6140d2c356e59a03ce0d29766ad3285de944
| 4,499
|
py
|
Python
|
leather/scales/base.py
|
nickromano/django-daily-digest
|
8f9a289d772cfd6b6c72536dd40c2012516b9d28
|
[
"MIT"
] | 6
|
2019-03-02T09:16:12.000Z
|
2021-08-17T13:54:49.000Z
|
leather/scales/base.py
|
nickromano/django-daily-digest
|
8f9a289d772cfd6b6c72536dd40c2012516b9d28
|
[
"MIT"
] | 66
|
2018-01-04T07:25:13.000Z
|
2022-03-29T09:19:09.000Z
|
leather/scales/base.py
|
nickromano/django-daily-digest
|
8f9a289d772cfd6b6c72536dd40c2012516b9d28
|
[
"MIT"
] | 2
|
2019-09-03T09:35:44.000Z
|
2021-12-28T15:29:13.000Z
|
#!/usr/bin/env python
from datetime import date, datetime
import six
from leather.data_types import Date, DateTime, Number, Text
from leather.shapes import Bars, Columns
class Scale(object):
"""
Base class for various kinds of scale objects.
"""
@classmethod
def infer(cls, layers, dimension, data_type):
"""
        Infers an appropriate default scale for a given sequence of
        layers.
        :param layers:
            A sequence of (:class:`.Series`, :class:`.Shape`) pairs
:param dimension:
The dimension, :code:`X` or :code:`Y` of the data to infer for.
:param data_type:
The type of data contained in the series dimension.
"""
from leather.scales.linear import Linear
from leather.scales.ordinal import Ordinal
from leather.scales.temporal import Temporal
# Default Time scale is Temporal
if data_type is Date:
data_min = date.max
data_max = date.min
for series, shape in layers:
data_min = min(data_min, series.min(dimension))
data_max = max(data_max, series.max(dimension))
scale = Temporal(data_min, data_max)
elif data_type is DateTime:
data_min = datetime.max
data_max = datetime.min
for series, shape in layers:
data_min = min(data_min, series.min(dimension))
data_max = max(data_max, series.max(dimension))
scale = Temporal(data_min, data_max)
# Default Number scale is Linear
elif data_type is Number:
force_zero = False
data_min = None
data_max = None
for series, shape in layers:
if isinstance(shape, (Bars, Columns)):
force_zero = True
if data_min is None:
data_min = series.min(dimension)
else:
data_min = min(data_min, series.min(dimension))
if data_max is None:
data_max = series.max(dimension)
else:
data_max = max(data_max, series.max(dimension))
if force_zero:
if data_min > 0:
data_min = 0
if data_max < 0:
data_max = 0
scale = Linear(data_min, data_max)
# Default Text scale is Ordinal
elif data_type is Text:
scale_values = None
# First case: a single set of ordinal labels
if len(layers) == 1:
scale_values = layers[0][0].values(dimension)
else:
first_series = set(layers[0][0].values(dimension))
data_series = [series.values(dimension) for series, shape in layers]
all_same = True
for series in data_series:
if set(series) != first_series:
all_same = False
break
# Second case: multiple identical sets of ordinal labels
if all_same:
scale_values = layers[0][0].values(dimension)
# Third case: multiple different sets of ordinal labels
else:
scale_values = sorted(list(set().union(*data_series)))
scale = Ordinal(scale_values)
return scale
def contains(self, v):
"""
Return :code:`True` if a given value is contained within this scale's
displayed domain.
"""
raise NotImplementedError
def project(self, value, range_min, range_max):
"""
Project a value in this scale's domain to a target range.
"""
raise NotImplementedError
def project_interval(self, value, range_min, range_max):
"""
Project a value in this scale's domain to an interval in the target
        range. This is used for placing :class:`.Bars` and :class:`.Columns`.
"""
raise NotImplementedError
def ticks(self):
"""
Generate a series of ticks for this scale.
"""
raise NotImplementedError
def format_tick(self, value, i, count):
"""
Format ticks for display.
This method is used as a default which will be ignored if the user
provides a custom tick formatter to the axis.
"""
return six.text_type(value)
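# --- Illustrative usage sketch (added for clarity; not part of leather) ---
# A minimal, hedged example of the project() contract defined above, using the
# concrete Linear subclass already imported by infer(). The domain (0..10) and
# target range (0..200) are invented for illustration.
if __name__ == '__main__':
    from leather.scales.linear import Linear

    scale = Linear(0, 10)
    # The middle of the domain should project to the middle of the range (100)
    print(scale.project(5, 0, 200))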
| 31.683099
| 84
| 0.555012
|
9f1d9dc058546b8e18a3574c91fb8b66c19d5c97
| 19,020
|
py
|
Python
|
TM1py/Objects/Process.py
|
beckyconning/tm1py
|
1ed0be35c5d6e946ae2f9c110359facfd8c0f269
|
[
"MIT"
] | null | null | null |
TM1py/Objects/Process.py
|
beckyconning/tm1py
|
1ed0be35c5d6e946ae2f9c110359facfd8c0f269
|
[
"MIT"
] | null | null | null |
TM1py/Objects/Process.py
|
beckyconning/tm1py
|
1ed0be35c5d6e946ae2f9c110359facfd8c0f269
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import json
import re
from typing import Optional, Iterable, Dict, List, Union
from TM1py.Objects.TM1Object import TM1Object
class Process(TM1Object):
""" Abstraction of a TM1 Process.
IMPORTANT. doesn't work with Processes that were generated through the Wizard
"""
""" the auto_generated_string code is required to be in all code-tabs. """
BEGIN_GENERATED_STATEMENTS = "#****Begin: Generated Statements***"
END_GENERATED_STATEMENTS = "#****End: Generated Statements****"
AUTO_GENERATED_STATEMENTS = "{}\r\n{}\r\n".format(BEGIN_GENERATED_STATEMENTS, END_GENERATED_STATEMENTS)
MAX_STATEMENTS = 16380
@staticmethod
def add_generated_string_to_code(code: str) -> str:
pattern = r"(?s)#\*\*\*\*Begin: Generated Statements(.*)#\*\*\*\*End: Generated Statements\*\*\*\*"
if re.search(pattern=pattern, string=code):
return code
else:
return Process.AUTO_GENERATED_STATEMENTS + code
def __init__(self,
name: str,
has_security_access: Optional[bool] = False,
                 ui_data: str = "CubeAction=1511€DataAction=1503€CubeLogChanges=0€",
parameters: Iterable = None,
variables: Iterable = None,
variables_ui_data: Iterable = None,
prolog_procedure: str = '',
metadata_procedure: str = '',
data_procedure: str = '',
epilog_procedure: str = '',
datasource_type: str = 'None',
datasource_ascii_decimal_separator: str = '.',
datasource_ascii_delimiter_char: str = ';',
datasource_ascii_delimiter_type: str = 'Character',
datasource_ascii_header_records: int = 1,
datasource_ascii_quote_character: str = '',
datasource_ascii_thousand_separator: str = ',',
datasource_data_source_name_for_client: str = '',
datasource_data_source_name_for_server: str = '',
datasource_password: str = '',
datasource_user_name: str = '',
datasource_query: str = '',
datasource_uses_unicode: bool = True,
datasource_view: str = '',
                 datasource_subset: str = ''):
        """ Default constructor
:param name: name of the process - mandatory
:param has_security_access:
:param ui_data:
:param parameters:
:param variables:
:param variables_ui_data:
:param prolog_procedure:
:param metadata_procedure:
:param data_procedure:
:param epilog_procedure:
:param datasource_type:
:param datasource_ascii_decimal_separator:
:param datasource_ascii_delimiter_char:
:param datasource_ascii_delimiter_type:
:param datasource_ascii_header_records:
:param datasource_ascii_quote_character:
:param datasource_ascii_thousand_separator:
:param datasource_data_source_name_for_client:
:param datasource_data_source_name_for_server:
:param datasource_password:
:param datasource_user_name:
:param datasource_query:
:param datasource_uses_unicode:
:param datasource_view:
:param datasource_subset:
"""
self._name = name
self._has_security_access = has_security_access
self._ui_data = ui_data
self._parameters = list(parameters) if parameters else []
self._variables = list(variables) if variables else []
if variables_ui_data:
# Handle encoding issue in variable_ui_data for async requests
            self._variables_ui_data = [entry.replace("€", "\f") for entry in variables_ui_data]
else:
self._variables_ui_data = []
self._prolog_procedure = Process.add_generated_string_to_code(prolog_procedure)
self._metadata_procedure = Process.add_generated_string_to_code(metadata_procedure)
self._data_procedure = Process.add_generated_string_to_code(data_procedure)
self._epilog_procedure = Process.add_generated_string_to_code(epilog_procedure)
self._datasource_type = datasource_type
self._datasource_ascii_decimal_separator = datasource_ascii_decimal_separator
self._datasource_ascii_delimiter_char = datasource_ascii_delimiter_char
self._datasource_ascii_delimiter_type = datasource_ascii_delimiter_type
self._datasource_ascii_header_records = datasource_ascii_header_records
self._datasource_ascii_quote_character = datasource_ascii_quote_character
self._datasource_ascii_thousand_separator = datasource_ascii_thousand_separator
self._datasource_data_source_name_for_client = datasource_data_source_name_for_client
self._datasource_data_source_name_for_server = datasource_data_source_name_for_server
self._datasource_password = datasource_password
self._datasource_user_name = datasource_user_name
self._datasource_query = datasource_query
self._datasource_uses_unicode = datasource_uses_unicode
self._datasource_view = datasource_view
self._datasource_subset = datasource_subset
@classmethod
def from_json(cls, process_as_json: str) -> 'Process':
"""
:param process_as_json: response of /api/v1/Processes('x')?$expand=*
:return: an instance of this class
"""
process_as_dict = json.loads(process_as_json)
return cls.from_dict(process_as_dict)
@classmethod
def from_dict(cls, process_as_dict: Dict) -> 'Process':
"""
:param process_as_dict: Dictionary, process as dictionary
:return: an instance of this class
"""
return cls(name=process_as_dict['Name'],
has_security_access=process_as_dict['HasSecurityAccess'],
ui_data=process_as_dict['UIData'],
parameters=process_as_dict['Parameters'],
variables=process_as_dict['Variables'],
variables_ui_data=process_as_dict['VariablesUIData'],
prolog_procedure=process_as_dict['PrologProcedure'],
metadata_procedure=process_as_dict['MetadataProcedure'],
data_procedure=process_as_dict['DataProcedure'],
epilog_procedure=process_as_dict['EpilogProcedure'],
datasource_type=process_as_dict['DataSource'].get('Type', ''),
datasource_ascii_decimal_separator=process_as_dict['DataSource'].get('asciiDecimalSeparator', ''),
datasource_ascii_delimiter_char=process_as_dict['DataSource'].get('asciiDelimiterChar', ''),
datasource_ascii_delimiter_type=process_as_dict['DataSource'].get('asciiDelimiterType', ''),
datasource_ascii_header_records=process_as_dict['DataSource'].get('asciiHeaderRecords', ''),
datasource_ascii_quote_character=process_as_dict['DataSource'].get('asciiQuoteCharacter', ''),
datasource_ascii_thousand_separator=process_as_dict['DataSource'].get('asciiThousandSeparator', ''),
datasource_data_source_name_for_client=process_as_dict['DataSource'].get('dataSourceNameForClient',''),
datasource_data_source_name_for_server=process_as_dict['DataSource'].get('dataSourceNameForServer',''),
datasource_password=process_as_dict['DataSource'].get('password', ''),
datasource_user_name=process_as_dict['DataSource'].get('userName', ''),
datasource_query=process_as_dict['DataSource'].get('query', ''),
datasource_uses_unicode=process_as_dict['DataSource'].get('usesUnicode', ''),
datasource_view=process_as_dict['DataSource'].get('view', ''),
datasource_subset=process_as_dict['DataSource'].get('subset', ''))
@property
def body(self) -> str:
return self._construct_body()
@property
def name(self) -> str:
return self._name
@name.setter
def name(self, value: str):
self._name = value
@property
def has_security_access(self) -> bool:
return self._has_security_access
@has_security_access.setter
def has_security_access(self, value: bool):
self._has_security_access = value
@property
def variables(self) -> List:
return self._variables
@property
def parameters(self) -> List:
return self._parameters
@property
def prolog_procedure(self) -> str:
return self._prolog_procedure
@prolog_procedure.setter
def prolog_procedure(self, value: str):
self._prolog_procedure = Process.add_generated_string_to_code(value)
@property
def metadata_procedure(self) -> str:
return self._metadata_procedure
@metadata_procedure.setter
def metadata_procedure(self, value: str):
self._metadata_procedure = Process.add_generated_string_to_code(value)
@property
def data_procedure(self) -> str:
return self._data_procedure
@data_procedure.setter
def data_procedure(self, value: str):
self._data_procedure = Process.add_generated_string_to_code(value)
@property
def epilog_procedure(self) -> str:
return self._epilog_procedure
@epilog_procedure.setter
def epilog_procedure(self, value: str):
self._epilog_procedure = Process.add_generated_string_to_code(value)
@property
def datasource_type(self) -> str:
return self._datasource_type
@datasource_type.setter
def datasource_type(self, value: str):
self._datasource_type = value
@property
def datasource_ascii_decimal_separator(self) -> str:
return self._datasource_ascii_decimal_separator
@datasource_ascii_decimal_separator.setter
def datasource_ascii_decimal_separator(self, value: str):
self._datasource_ascii_decimal_separator = value
@property
def datasource_ascii_delimiter_char(self) -> str:
return self._datasource_ascii_delimiter_char
@datasource_ascii_delimiter_char.setter
def datasource_ascii_delimiter_char(self, value: str):
self._datasource_ascii_delimiter_char = value
@property
def datasource_ascii_delimiter_type(self) -> str:
return self._datasource_ascii_delimiter_type
@datasource_ascii_delimiter_type.setter
def datasource_ascii_delimiter_type(self, value: str):
self._datasource_ascii_delimiter_type = value
@property
def datasource_ascii_header_records(self) -> int:
return self._datasource_ascii_header_records
@datasource_ascii_header_records.setter
def datasource_ascii_header_records(self, value: int):
self._datasource_ascii_header_records = value
@property
def datasource_ascii_quote_character(self) -> str:
return self._datasource_ascii_quote_character
@datasource_ascii_quote_character.setter
def datasource_ascii_quote_character(self, value: str):
self._datasource_ascii_quote_character = value
@property
def datasource_ascii_thousand_separator(self) -> str:
return self._datasource_ascii_thousand_separator
@datasource_ascii_thousand_separator.setter
def datasource_ascii_thousand_separator(self, value: str):
self._datasource_ascii_thousand_separator = value
@property
def datasource_data_source_name_for_client(self) -> str:
return self._datasource_data_source_name_for_client
@datasource_data_source_name_for_client.setter
def datasource_data_source_name_for_client(self, value: str):
self._datasource_data_source_name_for_client = value
@property
def datasource_data_source_name_for_server(self) -> str:
return self._datasource_data_source_name_for_server
@datasource_data_source_name_for_server.setter
def datasource_data_source_name_for_server(self, value: str):
self._datasource_data_source_name_for_server = value
@property
def datasource_password(self) -> str:
return self._datasource_password
@datasource_password.setter
def datasource_password(self, value: str):
self._datasource_password = value
@property
def datasource_user_name(self) -> str:
return self._datasource_user_name
@datasource_user_name.setter
def datasource_user_name(self, value: str):
self._datasource_user_name = value
@property
def datasource_query(self) -> str:
return self._datasource_query
@datasource_query.setter
def datasource_query(self, value: str):
self._datasource_query = value
@property
def datasource_uses_unicode(self) -> bool:
return self._datasource_uses_unicode
@datasource_uses_unicode.setter
def datasource_uses_unicode(self, value: bool):
self._datasource_uses_unicode = value
@property
def datasource_view(self) -> str:
return self._datasource_view
@datasource_view.setter
def datasource_view(self, value: str):
self._datasource_view = value
@property
def datasource_subset(self) -> str:
return self._datasource_subset
@datasource_subset.setter
def datasource_subset(self, value: str):
self._datasource_subset = value
def add_variable(self, name: str, variable_type: str):
""" add variable to the process
:param name: -
:param variable_type: 'String' or 'Numeric'
:return:
"""
# variable consists of actual variable and UI-Information ('ignore','other', etc.)
# 1. handle Variable info
variable = {'Name': name,
'Type': variable_type,
'Position': len(self._variables) + 1,
'StartByte': 0,
'EndByte': 0}
self._variables.append(variable)
# 2. handle UI info
var_type = 33 if variable_type == 'Numeric' else 32
# '\f' !
variable_ui_data = 'VarType=' + str(var_type) + '\f' + 'ColType=' + str(827) + '\f'
"""
mapping VariableUIData:
VarType 33 -> Numeric
VarType 32 -> String
ColType 827 -> Other
"""
self._variables_ui_data.append(variable_ui_data)
def remove_variable(self, name: str):
for variable in self.variables[:]:
if variable['Name'] == name:
vuid = self._variables_ui_data[self._variables.index(variable)]
self._variables_ui_data.remove(vuid)
self._variables.remove(variable)
def add_parameter(self, name: str, prompt: str, value: Union[str, int, float],
parameter_type: Optional[str] = None):
"""
:param name:
:param prompt:
:param value:
        :param parameter_type: introduced in the TM1 11 REST API, therefore optional. If not given, the type is derived from the value
:return:
"""
if not parameter_type:
parameter_type = 'String' if isinstance(value, str) else 'Numeric'
parameter = {'Name': name,
'Prompt': prompt,
'Value': value,
'Type': parameter_type}
self._parameters.append(parameter)
def remove_parameter(self, name: str):
for parameter in self.parameters[:]:
if parameter['Name'] == name:
self._parameters.remove(parameter)
def drop_parameter_types(self):
for p in range(len(self.parameters)):
if 'Type' in self.parameters[p]:
del self.parameters[p]['Type']
# construct self.body (json) from the class-attributes
def _construct_body(self) -> str:
# general parameters
body_as_dict = {
'Name': self._name,
'PrologProcedure': self._prolog_procedure,
'MetadataProcedure': self._metadata_procedure,
'DataProcedure': self._data_procedure,
'EpilogProcedure': self._epilog_procedure,
'HasSecurityAccess': self._has_security_access,
'UIData': self._ui_data,
'DataSource': {},
'Parameters': self._parameters,
'Variables': self._variables,
'VariablesUIData': self._variables_ui_data}
# specific parameters (depending on datasource type)
if self._datasource_type == 'ASCII':
body_as_dict['DataSource'] = {
"Type": self._datasource_type,
"asciiDecimalSeparator": self._datasource_ascii_decimal_separator,
"asciiDelimiterChar": self._datasource_ascii_delimiter_char,
"asciiDelimiterType": self._datasource_ascii_delimiter_type,
"asciiHeaderRecords": self._datasource_ascii_header_records,
"asciiQuoteCharacter": self._datasource_ascii_quote_character,
"asciiThousandSeparator": self._datasource_ascii_thousand_separator,
"dataSourceNameForClient": self._datasource_data_source_name_for_client,
"dataSourceNameForServer": self._datasource_data_source_name_for_server
}
if self._datasource_ascii_delimiter_type == 'FixedWidth':
del body_as_dict['DataSource']['asciiDelimiterChar']
elif self._datasource_type == 'None':
body_as_dict['DataSource'] = {
"Type": "None"
}
elif self._datasource_type == 'ODBC':
body_as_dict['DataSource'] = {
"Type": self._datasource_type,
"dataSourceNameForClient": self._datasource_data_source_name_for_client,
"dataSourceNameForServer": self._datasource_data_source_name_for_server,
"userName": self._datasource_user_name,
"password": self._datasource_password,
"query": self._datasource_query,
"usesUnicode": self._datasource_uses_unicode
}
elif self._datasource_type == 'TM1CubeView':
body_as_dict['DataSource'] = {
"Type": self._datasource_type,
"dataSourceNameForClient": self._datasource_data_source_name_for_server,
"dataSourceNameForServer": self._datasource_data_source_name_for_server,
"view": self._datasource_view
}
elif self._datasource_type == 'TM1DimensionSubset':
body_as_dict['DataSource'] = {
"Type": self._datasource_type,
"dataSourceNameForClient": self._datasource_data_source_name_for_server,
"dataSourceNameForServer": self._datasource_data_source_name_for_server,
"subset": self._datasource_subset
}
return json.dumps(body_as_dict, ensure_ascii=False)
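# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal example of building a process definition with the helpers above;
# the process name, parameter and variable are invented for illustration.
if __name__ == '__main__':
    process = Process(name='import.actuals')
    process.add_parameter(name='pRegion', prompt='Region to load', value='Europe')
    process.add_variable(name='vValue', variable_type='Numeric')
    # body is the JSON payload that would be sent to the TM1 REST API
    print(process.body)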
| 41.528384
| 122
| 0.660673
|
31bb9d4d2dc19de248be9f248d996c214009c0a1
| 2,228
|
py
|
Python
|
cloudly/cache.py
|
ooda/cloudly
|
42f28ebe1ec732ddd801f204cb33201a8ce192e9
|
[
"MIT"
] | null | null | null |
cloudly/cache.py
|
ooda/cloudly
|
42f28ebe1ec732ddd801f204cb33201a8ce192e9
|
[
"MIT"
] | null | null | null |
cloudly/cache.py
|
ooda/cloudly
|
42f28ebe1ec732ddd801f204cb33201a8ce192e9
|
[
"MIT"
] | null | null | null |
"""This module provides access to Redis and Memcache servers with some sugar
coating.
"""
import os
import json
import memcache
import redis
from cloudly.aws import ec2
from cloudly.decorators import Memoized
import cloudly.logger as logger
log = logger.init(__name__)
@Memoized
def get_redis_connection(hostname=None, port=None):
""" Get a connection to a Redis server. The priority is:
- look for an environment variable REDISTOGO_URL (Heroku), else
- look for an environment variable REDIS_HOST, else
- look for an EC2 hosted server offering the service 'redis', else
- use localhost, 127.0.0.1.
"""
host = (
hostname or
os.environ.get("REDIS_HOST") or
ec2.get_hostname("redis") or
"127.0.0.1"
)
port = port or os.environ.get("REDIS_PORT", 6379)
url = os.environ.get('REDISTOGO_URL', # Set when on Heroku.
'redis://{}:{}'.format(host, port))
log.info("Connecting to Redis server at {}".format(url))
server = redis.from_url(url)
    # Add some utility functions. These functions first
# serialize to/from JSON the given object, then call redis get/set.
def redis_jget(key):
value = server.get(key)
return json.loads(value) if value else None
server.jset = lambda key, obj: server.set(key, json.dumps(obj))
server.jget = redis_jget
return server
@Memoized
def get_memcache_connection():
return memcache.Client(['127.0.0.1:11211'], debug=0)
class MemProxy(object):
def __init__(self):
self.cache = get_memcache_connection()
def set(self, key, obj, time=0):
if key.find(" ") > -1:
raise ValueError("A memcached key cannot contain spaces.")
return self.cache.set(key.encode("utf-8"), obj, time=time,
min_compress_len=1024)
def get(self, key):
return self.cache.get(key.encode("utf-8"))
def set_multi(self, mapping, time=0):
return self.cache.set_multi(mapping, time, min_compress_len=1024)
def get_multi(self, keys):
return self.cache.get_multi(keys)
def delete(self, key):
self.cache.delete(key)
memcache = MemProxy() # noqa
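# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal, hedged example of the JSON helpers attached to the Redis client
# above. It needs a reachable Redis server, so it is left commented out; the
# key name and payload are invented for illustration.
#
#   conn = get_redis_connection()
#   conn.jset('user:42', {'name': 'Ada'})
#   print(conn.jget('user:42'))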
| 27.85
| 75
| 0.647666
|
ffcce65f2e710b49bafff39157801551b410f55b
| 2,671
|
py
|
Python
|
basics of python/loops.py
|
devnetlearning/python
|
6f03027bb8e4eb1003ac766a72922f29ed03fec4
|
[
"MIT"
] | null | null | null |
basics of python/loops.py
|
devnetlearning/python
|
6f03027bb8e4eb1003ac766a72922f29ed03fec4
|
[
"MIT"
] | null | null | null |
basics of python/loops.py
|
devnetlearning/python
|
6f03027bb8e4eb1003ac766a72922f29ed03fec4
|
[
"MIT"
] | null | null | null |
"""
for - a loop that repeats an action a certain number of times.
range - produces that sequence of numbers; it lets you choose the starting
number and how long the loop runs.
"""
#for example1 - print every number in range(200) that is divisible
#by 5 but not by 7 (no remainder)
for i in range(200):
if ( i % 5 == 0 and i % 7 != 0):
print (i)
#for example2 - multiply together all numbers between the two provided values (inclusive)
a = int(input(" 1st number: "))
b = int(input(" 2nd number: "))
multiply = 1
for i in range (a,b+1):
alist =[i]
multiply = multiply * i
print(multiply)
#for example3 - arithmetic average (compare with the same example using while)
values = 0
i = 0
for i in range(5):
value = int(input(" Input value: "))
if value > 0:
values = values + value
i += 1
else:
        print("Value must be a positive number")
continue
print(values/i)
"""
while - a loop that repeats as long as a condition is true
break - exits the loop early
"""
#while example1 - same behaviour as the plain if statement, but shorter and easier to extend
test = input(" What's your name?\n")
tries = 0
if test == "mark":
print("Access Granted")
elif test != "mark":
while(test != "mark"):
        print("Access denied. Type your name again.", 2 - tries, "tries left")
test = input(" What's your name?\n")
if (tries >= 1 and test!="mark"):
            print("Access denied. Contact your admin")
break
tries = tries + 1
else:
print("Access granted")
#while example2 - arithmetic average using a while loop
values = 0
i = 0
while i < 5:
value = int(input(" Input value"))
if value > 0:
values = values + value
i += 1
else:
        print("Value must be a positive number")
continue
print("arithmetic average: ", values/i)
#while example3 - add 3 positive even numbers; inform the user if a number is not positive or even
summ = 0
i = 0
while i < 3:
number = int(input("Number: "))
if number % 2 == 0 and number > 0:
summ = summ + number
i += 1
else:
        print(" This number is not even or positive. Try again")
print(" Sum: ", summ)
#while example4 - guess the correct number; 3 tries to guess it
number = 17
i = 0
print("You have 3 tries to guess the number")
while i <= 2:
guess = int(input("Your number: "))
i+=1
if guess > number:
print(" Wrong number. Correct number is lower!")
elif guess < number:
print(" Wrong number. Correct number is higher!")
else:
print(" Great! You guessed it")
if guess != number:
    print("Out of tries. You lose!")
| 20.867188
| 92
| 0.610258
|
79740ed0c103e841b6e280af920f8be65e3d1d0d
| 9,893
|
py
|
Python
|
charmhelpers/contrib/openstack/audits/openstack_security_guide.py
|
AurelienLourot/charm-helpers
|
b5725ac546372e7d4004d15095f79cdd5e7da687
|
[
"Apache-2.0"
] | 19
|
2016-04-17T04:00:53.000Z
|
2020-05-06T14:18:16.000Z
|
charmhelpers/contrib/openstack/audits/openstack_security_guide.py
|
AurelienLourot/charm-helpers
|
b5725ac546372e7d4004d15095f79cdd5e7da687
|
[
"Apache-2.0"
] | 313
|
2017-09-15T13:22:58.000Z
|
2022-02-25T17:55:01.000Z
|
charmhelpers/contrib/openstack/audits/openstack_security_guide.py
|
AurelienLourot/charm-helpers
|
b5725ac546372e7d4004d15095f79cdd5e7da687
|
[
"Apache-2.0"
] | 136
|
2017-09-19T13:37:33.000Z
|
2022-03-29T11:08:00.000Z
|
# Copyright 2019 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import configparser
import glob
import os.path
import subprocess
from charmhelpers.contrib.openstack.audits import (
audit,
AuditType,
# filters
is_audit_type,
it_has_config,
)
from charmhelpers.core.hookenv import (
cached,
)
"""
The Security Guide suggests that a specific list of files inside each
service's config directory should have mode 640; by instead ensuring the
containing directory is 750, only the owner can write, and only the
group can read, files within the directory.
By restricting access to the containing directory, we can more
effectively ensure that there is no accidental leakage if a new
file is added to the service without being added to the security
guide, and to this check.
"""
FILE_ASSERTIONS = {
'barbican': {
'/etc/barbican': {'group': 'barbican', 'mode': '750'},
},
'ceph-mon': {
'/var/lib/charm/ceph-mon/ceph.conf':
{'owner': 'root', 'group': 'root', 'mode': '644'},
'/etc/ceph/ceph.client.admin.keyring':
{'owner': 'ceph', 'group': 'ceph'},
'/etc/ceph/rbdmap': {'mode': '644'},
'/var/lib/ceph': {'owner': 'ceph', 'group': 'ceph', 'mode': '750'},
'/var/lib/ceph/bootstrap-*/ceph.keyring':
{'owner': 'ceph', 'group': 'ceph', 'mode': '600'}
},
'ceph-osd': {
'/var/lib/charm/ceph-osd/ceph.conf':
{'owner': 'ceph', 'group': 'ceph', 'mode': '644'},
'/var/lib/ceph': {'owner': 'ceph', 'group': 'ceph', 'mode': '750'},
'/var/lib/ceph/*': {'owner': 'ceph', 'group': 'ceph', 'mode': '755'},
'/var/lib/ceph/bootstrap-*/ceph.keyring':
{'owner': 'ceph', 'group': 'ceph', 'mode': '600'},
'/var/lib/ceph/radosgw':
{'owner': 'ceph', 'group': 'ceph', 'mode': '755'},
},
'cinder': {
'/etc/cinder': {'group': 'cinder', 'mode': '750'},
},
'glance': {
'/etc/glance': {'group': 'glance', 'mode': '750'},
},
'keystone': {
'/etc/keystone':
{'owner': 'keystone', 'group': 'keystone', 'mode': '750'},
},
'manilla': {
'/etc/manila': {'group': 'manilla', 'mode': '750'},
},
'neutron-gateway': {
'/etc/neutron': {'group': 'neutron', 'mode': '750'},
},
'neutron-api': {
'/etc/neutron/': {'group': 'neutron', 'mode': '750'},
},
'nova-cloud-controller': {
'/etc/nova': {'group': 'nova', 'mode': '750'},
},
'nova-compute': {
'/etc/nova/': {'group': 'nova', 'mode': '750'},
},
'openstack-dashboard': {
# From security guide
'/etc/openstack-dashboard/local_settings.py':
{'group': 'horizon', 'mode': '640'},
},
}
Ownership = collections.namedtuple('Ownership', 'owner group mode')
@cached
def _stat(file):
"""
Get the Ownership information from a file.
:param file: The path to a file to stat
:type file: str
:returns: owner, group, and mode of the specified file
:rtype: Ownership
:raises subprocess.CalledProcessError: If the underlying stat fails
"""
out = subprocess.check_output(
['stat', '-c', '%U %G %a', file]).decode('utf-8')
return Ownership(*out.strip().split(' '))
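# Illustrative sketch added for clarity (not part of upstream charm-helpers):
# `stat -c '%U %G %a' <file>` prints a line such as "root root 640", which
# the helper above splits into the Ownership namedtuple:
#
#     Ownership(*"root root 640".split(' '))
#     # -> Ownership(owner='root', group='root', mode='640')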
@cached
def _config_ini(path):
"""
Parse an ini file
:param path: The path to a file to parse
:type file: str
:returns: Configuration contained in path
:rtype: Dict
"""
# When strict is enabled, duplicate options are not allowed in the
# parsed INI; however, Oslo allows duplicate values. This change
# causes us to ignore the duplicate values which is acceptable as
# long as we don't validate any multi-value options
conf = configparser.ConfigParser(strict=False)
conf.read(path)
return dict(conf)
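# Illustrative sketch added for clarity (not part of upstream charm-helpers):
# with strict=False a duplicated option does not raise DuplicateOptionError;
# the last value read wins, mirroring Oslo's behaviour:
#
#     demo = configparser.ConfigParser(strict=False)
#     demo.read_string("[DEFAULT]\nkey = 1\nkey = 2\n")
#     demo['DEFAULT']['key']  # -> '2'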
def _validate_file_ownership(owner, group, file_name, optional=False):
"""
Validate that a specified file is owned by `owner:group`.
:param owner: Name of the owner
:type owner: str
:param group: Name of the group
:type group: str
:param file_name: Path to the file to verify
:type file_name: str
:param optional: Is this file optional,
ie: Should this test fail when it's missing
:type optional: bool
"""
try:
ownership = _stat(file_name)
except subprocess.CalledProcessError as e:
print("Error reading file: {}".format(e))
if not optional:
assert False, "Specified file does not exist: {}".format(file_name)
assert owner == ownership.owner, \
"{} has an incorrect owner: {} should be {}".format(
file_name, ownership.owner, owner)
assert group == ownership.group, \
"{} has an incorrect group: {} should be {}".format(
file_name, ownership.group, group)
print("Validate ownership of {}: PASS".format(file_name))
def _validate_file_mode(mode, file_name, optional=False):
"""
Validate that a specified file has the specified permissions.
    :param mode: file mode that is desired
    :type mode: str
:param file_name: Path to the file to verify
:type file_name: str
:param optional: Is this file optional,
ie: Should this test fail when it's missing
:type optional: bool
"""
try:
ownership = _stat(file_name)
except subprocess.CalledProcessError as e:
print("Error reading file: {}".format(e))
if not optional:
assert False, "Specified file does not exist: {}".format(file_name)
assert mode == ownership.mode, \
"{} has an incorrect mode: {} should be {}".format(
file_name, ownership.mode, mode)
print("Validate mode of {}: PASS".format(file_name))
@cached
def _config_section(config, section):
"""Read the configuration file and return a section."""
path = os.path.join(config.get('config_path'), config.get('config_file'))
conf = _config_ini(path)
return conf.get(section)
@audit(is_audit_type(AuditType.OpenStackSecurityGuide),
it_has_config('files'))
def validate_file_ownership(config):
"""Verify that configuration files are owned by the correct user/group."""
files = config.get('files', {})
for file_name, options in files.items():
for key in options.keys():
if key not in ["owner", "group", "mode"]:
raise RuntimeError(
"Invalid ownership configuration: {}".format(key))
owner = options.get('owner', config.get('owner', 'root'))
group = options.get('group', config.get('group', 'root'))
optional = options.get('optional', config.get('optional', False))
if '*' in file_name:
for file in glob.glob(file_name):
if file not in files.keys():
if os.path.isfile(file):
_validate_file_ownership(owner, group, file, optional)
else:
if os.path.isfile(file_name):
_validate_file_ownership(owner, group, file_name, optional)
@audit(is_audit_type(AuditType.OpenStackSecurityGuide),
it_has_config('files'))
def validate_file_permissions(config):
"""Verify that permissions on configuration files are secure enough."""
files = config.get('files', {})
for file_name, options in files.items():
for key in options.keys():
if key not in ["owner", "group", "mode"]:
raise RuntimeError(
"Invalid ownership configuration: {}".format(key))
mode = options.get('mode', config.get('permissions', '600'))
optional = options.get('optional', config.get('optional', False))
if '*' in file_name:
for file in glob.glob(file_name):
if file not in files.keys():
if os.path.isfile(file):
_validate_file_mode(mode, file, optional)
else:
if os.path.isfile(file_name):
_validate_file_mode(mode, file_name, optional)
@audit(is_audit_type(AuditType.OpenStackSecurityGuide))
def validate_uses_keystone(audit_options):
"""Validate that the service uses Keystone for authentication."""
section = _config_section(audit_options, 'api') or _config_section(audit_options, 'DEFAULT')
assert section is not None, "Missing section 'api / DEFAULT'"
assert section.get('auth_strategy') == "keystone", \
"Application is not using Keystone"
@audit(is_audit_type(AuditType.OpenStackSecurityGuide))
def validate_uses_tls_for_keystone(audit_options):
"""Verify that TLS is used to communicate with Keystone."""
section = _config_section(audit_options, 'keystone_authtoken')
assert section is not None, "Missing section 'keystone_authtoken'"
assert not section.get('insecure') and \
"https://" in section.get("auth_uri"), \
"TLS is not used for Keystone"
@audit(is_audit_type(AuditType.OpenStackSecurityGuide))
def validate_uses_tls_for_glance(audit_options):
"""Verify that TLS is used to communicate with Glance."""
section = _config_section(audit_options, 'glance')
assert section is not None, "Missing section 'glance'"
assert not section.get('insecure') and \
"https://" in section.get("api_servers"), \
"TLS is not used for Glance"
| 36.505535
| 96
| 0.625998
|
1f7b4d18a8a4632a54cb377bdeaa0a6ad36a7ad6
| 2,251
|
py
|
Python
|
jogo_da_forca.py
|
gialencar/jogo-da-forca
|
0e6d9f4298b069ed59c2070ed499c6275e04267f
|
[
"MIT"
] | null | null | null |
jogo_da_forca.py
|
gialencar/jogo-da-forca
|
0e6d9f4298b069ed59c2070ed499c6275e04267f
|
[
"MIT"
] | null | null | null |
jogo_da_forca.py
|
gialencar/jogo-da-forca
|
0e6d9f4298b069ed59c2070ed499c6275e04267f
|
[
"MIT"
] | null | null | null |
from random import randint
from unicodedata import normalize
def remover_acentos(string):
"""Recebe uma string e retorna a versรฃo dela sem acentos ortogrรกficos e em lowercase."""
normalizado = normalize('NFD', string)
return normalizado.encode('ascii', 'ignore').decode('utf8').lower()
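# Illustrative example added for clarity (not part of the original game):
# NFD decomposition splits each letter from its accent, and the ASCII
# encode/ignore step then drops the accents.
assert remover_acentos("Ação") == "acao"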
def validar_entrada(string):
"""Recebe uma string e retorna True se ela tiver 1 caractere."""
return len(string) == 1
def obter_palavra():
"""Abre um arquivo com as palavras, armazena em uma lista; obtรฉm um nรบmero aleatรณrio de 0 a x, onde x รฉ o nรบmero
de itens da lista; e usa esse nรบmero para escolher e retornar uma palavra aleatรณria da lista."""
arquivo = open('palavras_faceis.txt', 'r', encoding='UTF-8')
lista_de_palavras = arquivo.read().split('\n')
arquivo.close()
    sorteio = randint(0, len(lista_de_palavras) - 1)
return lista_de_palavras[sorteio]
def main():
palavra = obter_palavra()
parcial = "_" * len(palavra) # cria uma string de "_" do tamanho da palavra
n_tentativas = 0
erros = ""
while True:
entrada = input("-- Entre com uma letra --> ").lower()
        if validar_entrada(entrada):  # if the input is valid
n_tentativas += 1
palavra_normalizada = remover_acentos(palavra)
print(f"A palavra รฉ: {palavra}") # Debug
print(f"A letra aparece {palavra_normalizada.count(entrada)} vezes")
print(f"Tentativas = {n_tentativas}")
if remover_acentos(entrada) not in palavra.lower():
erros += entrada + " "
contador = 0
for letra in palavra:
                if remover_acentos(letra) == entrada:  # if the player guessed this letter
                    parcial_lista = list(parcial)  # convert parcial to a list
                    parcial_lista[contador] = letra  # substitute the letter in the list
                    parcial = "".join(parcial_lista)  # convert the list back to a string
                contador += 1  # add 1 to the counter
print('\n')
print(parcial)
print(f"Erros: {erros.upper()}")
if "_" not in parcial:
print("Game Over\nParabรฉns!")
break
if __name__ == "__main__":
main()
| 35.730159
| 116
| 0.620613
|
75aa73384faa9a15573f12b1b56df11c73f8ba49
| 1,778
|
py
|
Python
|
ament_copyright/setup.py
|
emersonknapp/ament_lint
|
92badcaa44aa0a67a55bbf32dba9ae6adab62597
|
[
"Apache-2.0"
] | null | null | null |
ament_copyright/setup.py
|
emersonknapp/ament_lint
|
92badcaa44aa0a67a55bbf32dba9ae6adab62597
|
[
"Apache-2.0"
] | null | null | null |
ament_copyright/setup.py
|
emersonknapp/ament_lint
|
92badcaa44aa0a67a55bbf32dba9ae6adab62597
|
[
"Apache-2.0"
] | null | null | null |
from setuptools import find_packages
from setuptools import setup
package_name = 'ament_copyright'
setup(
name=package_name,
version='0.7.4',
packages=find_packages(exclude=['test']),
data_files=[
('share/' + package_name, ['package.xml']),
],
install_requires=['setuptools'],
package_data={'': [
'template/*',
]},
zip_safe=False,
author='Dirk Thomas',
author_email='dthomas@osrfoundation.org',
maintainer='Dirk Thomas',
maintainer_email='dthomas@osrfoundation.org',
url='https://github.com/ament/ament_lint',
download_url='https://github.com/ament/ament_lint/releases',
keywords=['ROS'],
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Topic :: Software Development',
],
description='Check source files for copyright reference.',
long_description="""\
The ability to check sources file for copyright and license information.""",
license='Apache License, Version 2.0',
tests_require=['pytest'],
entry_points={
'ament_copyright.copyright_name': [
'osrf = ament_copyright.copyright_names:osrf',
],
'ament_copyright.license': [
'apache2 = ament_copyright.licenses:apache2',
'bsd2 = ament_copyright.licenses:bsd2',
'mit = ament_copyright.licenses:mit',
'gplv3 = ament_copyright.licenses:gplv3',
'lgplv3 = ament_copyright.licenses:lgplv3',
],
'console_scripts': [
'ament_copyright = ament_copyright.main:main',
],
'pytest11': [
'ament_copyright = ament_copyright.pytest_marker',
],
},
)
| 32.327273
| 76
| 0.626547
|
07074b8d72e617e7f5eebc1f905a5756f4d7ccab
| 3,161
|
py
|
Python
|
FindMyNews/settings.py
|
StarkDevHouse/FindMyNews
|
896e3fe19720252b224574dccf12a7ded9ef0ef0
|
[
"MIT"
] | null | null | null |
FindMyNews/settings.py
|
StarkDevHouse/FindMyNews
|
896e3fe19720252b224574dccf12a7ded9ef0ef0
|
[
"MIT"
] | null | null | null |
FindMyNews/settings.py
|
StarkDevHouse/FindMyNews
|
896e3fe19720252b224574dccf12a7ded9ef0ef0
|
[
"MIT"
] | null | null | null |
"""
Django settings for FindMyNews project.
Generated by 'django-admin startproject' using Django 1.10.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'vvp&u)jnd6l#omn(oi*u+2jk!*ehnc2#$9gx+0808u7ht$l+6f'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'FindMyNews.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'FindMyNews.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| 26.123967
| 91
| 0.698829
|
78b547a7d1aee3a6fee9f6d83049364269911468
| 10,990
|
py
|
Python
|
tests/scripts/thread-cert/Cert_9_2_03_ActiveDatasetGet.py
|
paragdixit-g/openthread
|
a41a1f3fcae3351c9216e4ba02e44c8f9d3ea0d3
|
[
"BSD-3-Clause"
] | null | null | null |
tests/scripts/thread-cert/Cert_9_2_03_ActiveDatasetGet.py
|
paragdixit-g/openthread
|
a41a1f3fcae3351c9216e4ba02e44c8f9d3ea0d3
|
[
"BSD-3-Clause"
] | null | null | null |
tests/scripts/thread-cert/Cert_9_2_03_ActiveDatasetGet.py
|
paragdixit-g/openthread
|
a41a1f3fcae3351c9216e4ba02e44c8f9d3ea0d3
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
#
# Copyright (c) 2020, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
import mesh_cop
import thread_cert
from pktverify.consts import MLE_DATA_RESPONSE, MGMT_ACTIVE_GET_URI, NM_CHANNEL_TLV, NM_COMMISSIONER_ID_TLV, NM_COMMISSIONER_SESSION_ID_TLV, NM_STEERING_DATA_TLV, NM_BORDER_AGENT_LOCATOR_TLV, NM_PAN_ID_TLV, NM_NETWORK_NAME_TLV, NM_NETWORK_MESH_LOCAL_PREFIX_TLV, NM_PSKC_TLV, NM_SCAN_DURATION, NM_ENERGY_LIST_TLV, NM_ACTIVE_TIMESTAMP_TLV, NM_CHANNEL_MASK_TLV, NM_EXTENDED_PAN_ID_TLV, NM_NETWORK_MASTER_KEY_TLV, NM_SECURITY_POLICY_TLV, LEADER_ALOC
from pktverify.packet_verifier import PacketVerifier
from pktverify.null_field import nullField
COMMISSIONER = 1
LEADER = 2
# Test Purpose and Description:
# -----------------------------
# The purpose of this test case is to verify Leader's and active Commissioner's behavior via
# MGMT_ACTIVE_GET request and response
#
# Test Topology:
# -------------
# Commissioner
# |
# Leader
#
# DUT Types:
# ----------
# Leader
# Commissioner
class Cert_9_2_03_ActiveDatasetGet(thread_cert.TestCase):
SUPPORT_NCP = False
TOPOLOGY = {
COMMISSIONER: {
'name': 'COMMISSIONER',
'mode': 'rdn',
'panid': 0xface,
'router_selection_jitter': 1,
'allowlist': [LEADER]
},
LEADER: {
'name': 'LEADER',
'mode': 'rdn',
'panid': 0xface,
'router_selection_jitter': 1,
'allowlist': [COMMISSIONER]
},
}
def test(self):
self.nodes[LEADER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.nodes[COMMISSIONER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[COMMISSIONER].get_state(), 'router')
self.simulator.get_messages_sent_by(LEADER)
self.collect_rlocs()
self.collect_rloc16s()
leader_rloc = self.nodes[LEADER].get_rloc()
self.nodes[COMMISSIONER].commissioner_start()
self.simulator.go(3)
self.nodes[COMMISSIONER].send_mgmt_active_get()
self.simulator.go(5)
self.nodes[COMMISSIONER].send_mgmt_active_get(
leader_rloc,
[mesh_cop.TlvType.CHANNEL_MASK, mesh_cop.TlvType.NETWORK_MESH_LOCAL_PREFIX, mesh_cop.TlvType.NETWORK_NAME])
self.simulator.go(5)
self.nodes[COMMISSIONER].send_mgmt_active_get(leader_rloc, [
mesh_cop.TlvType.CHANNEL, mesh_cop.TlvType.NETWORK_MESH_LOCAL_PREFIX, mesh_cop.TlvType.NETWORK_NAME,
mesh_cop.TlvType.SCAN_DURATION, mesh_cop.TlvType.ENERGY_LIST
])
self.simulator.go(5)
commissioner_rloc = self.nodes[COMMISSIONER].get_rloc()
self.assertTrue(self.nodes[COMMISSIONER].ping(leader_rloc))
self.simulator.go(1)
self.assertTrue(self.nodes[LEADER].ping(commissioner_rloc))
def verify(self, pv):
pkts = pv.pkts
pv.summary.show()
LEADER = pv.vars['LEADER']
LEADER_RLOC = pv.vars['LEADER_RLOC']
COMMISSIONER = pv.vars['COMMISSIONER']
COMMISSIONER_RLOC = pv.vars['COMMISSIONER_RLOC']
# Step 1: Ensure topology is formed correctly
pv.verify_attached('COMMISSIONER', 'LEADER')
# Step 2: Commissioner sends a MGMT_ACTIVE_GET.req to Leader Anycast
# or Routing Locator:
# CoAP Request URI
# CON POST coap://<L>:MM/c/ag
# CoAP Payload
# <empty> - get all Active Operational Dataset parameters
_pkt = pkts.filter_wpan_src64(COMMISSIONER).\
filter_ipv6_2dsts(LEADER_ALOC, LEADER_RLOC).\
filter_coap_request(MGMT_ACTIVE_GET_URI).\
filter(lambda p: p.coap.tlv.type is nullField).\
must_next()
# Step 3: Leader sends a MGMT_ACTIVE_GET.rsp to Commissioner with
# the following format:
# CoAP Response Code
# 2.04 Changed
# CoAP Payload
# (entire Active Operational Dataset)
# Active Timestamp TLV
# Channel TLV
# Channel Mask TLV
# Extended PAN ID TLV
# Network Mesh-Local Prefix TLV
# Network Master Key TLV
# Network Name TLV
# PAN ID TLV
# PSKc TLV
# Security Policy TLV
pkts.filter_ipv6_src_dst(LEADER_RLOC, COMMISSIONER_RLOC).\
filter_coap_ack(MGMT_ACTIVE_GET_URI).\
filter(lambda p: {
NM_ACTIVE_TIMESTAMP_TLV,
NM_CHANNEL_TLV,
NM_CHANNEL_MASK_TLV,
NM_EXTENDED_PAN_ID_TLV,
NM_NETWORK_MESH_LOCAL_PREFIX_TLV,
NM_NETWORK_MASTER_KEY_TLV,
NM_NETWORK_NAME_TLV,
NM_PAN_ID_TLV,
NM_PSKC_TLV,
NM_SECURITY_POLICY_TLV
} == set(p.thread_meshcop.tlv.type)
).\
must_next()
# Step 4: Commissioner sends a MGMT_ACTIVE_GET.req to Leader Anycast
# or Routing Locator:
# CoAP Request URI
# CON POST coap://<L>:MM/c/ag
# CoAP Payload
# Channel Mask TLV
# Network Mesh-Local Prefix TLV
# Network Name TLV
pkts.filter_wpan_src64(COMMISSIONER).\
filter_ipv6_2dsts(LEADER_ALOC, LEADER_RLOC).\
filter_coap_request(MGMT_ACTIVE_GET_URI).\
filter(lambda p: {
NM_CHANNEL_MASK_TLV,
NM_NETWORK_MESH_LOCAL_PREFIX_TLV,
NM_NETWORK_NAME_TLV
} <= set(p.thread_meshcop.tlv.type)
).\
must_next()
# Step 5: Leader sends a MGMT_ACTIVE_GET.rsp to Commissioner with
# the following format:
# CoAP Response Code
# 2.04 Changed
# CoAP Payload
# Channel Mask TLV
# Network Mesh-Local Prefix TLV
# Network Name TLV
pkts.filter_ipv6_src_dst(LEADER_RLOC, COMMISSIONER_RLOC).\
filter_coap_ack(MGMT_ACTIVE_GET_URI).\
filter(lambda p: {
NM_CHANNEL_MASK_TLV,
NM_NETWORK_MESH_LOCAL_PREFIX_TLV,
NM_NETWORK_NAME_TLV
} == set(p.thread_meshcop.tlv.type)
).\
must_next()
# Step 6: Commissioner sends a MGMT_ACTIVE_GET.req to Leader Anycast
# or Routing Locator:
# CoAP Request URI
# CON POST coap://<L>:MM/c/ag
# CoAP Payload
# Channel TLV
# Network Mesh-Local Prefix TLV
# Network Name TLV
# Scan Duration TLV (not allowed TLV)
# Energy List TLV (not allowed TLV)
pkts.filter_wpan_src64(COMMISSIONER).\
filter_ipv6_2dsts(LEADER_ALOC, LEADER_RLOC).\
filter_coap_request(MGMT_ACTIVE_GET_URI).\
filter(lambda p: {
NM_CHANNEL_TLV,
NM_NETWORK_MESH_LOCAL_PREFIX_TLV,
NM_NETWORK_NAME_TLV,
NM_SCAN_DURATION,
NM_ENERGY_LIST_TLV
} <= set(p.thread_meshcop.tlv.type)
).\
must_next()
# Step 7: Leader sends a MGMT_ACTIVE_GET.rsp to Commissioner with
# the following format:
# CoAP Response Code
# 2.04 Changed
# CoAP Payload
# Channel TLV
# Network Mesh-Local Prefix TLV
# Network Name TLV
pkts.filter_ipv6_src_dst(LEADER_RLOC, COMMISSIONER_RLOC).\
filter_coap_ack(MGMT_ACTIVE_GET_URI).\
filter(lambda p: {
NM_CHANNEL_TLV,
NM_NETWORK_MESH_LOCAL_PREFIX_TLV,
NM_NETWORK_NAME_TLV,
} == set(p.thread_meshcop.tlv.type)
).\
must_next()
# Step 8: Verify connectivity by sending an ICMPv6 Echo Request to the
# DUT mesh local address
_pkt = pkts.filter_ping_request().\
filter_ipv6_src_dst(COMMISSIONER_RLOC, LEADER_RLOC).\
must_next()
pkts.filter_ping_reply(identifier=_pkt.icmpv6.echo.identifier).\
filter_ipv6_src_dst(LEADER_RLOC, COMMISSIONER_RLOC).\
must_next()
_pkt = pkts.filter_ping_request().\
filter_ipv6_src_dst(LEADER_RLOC, COMMISSIONER_RLOC).\
must_next()
pkts.filter_ping_reply(identifier=_pkt.icmpv6.echo.identifier).\
filter_ipv6_src_dst(COMMISSIONER_RLOC, LEADER_RLOC).\
must_next()
if __name__ == '__main__':
unittest.main()
| 40.855019
| 445
| 0.582348
|
cab1abd1382b47693300375333fe7426f0112266
| 1,468
|
py
|
Python
|
qa/rpc-tests/dao/given/iHaveAnAcceptedProposal.py
|
garretlaxton/navcoin-core
|
7a2919b5ac14ba3b8a1dbf5aad08524db8d5ce08
|
[
"MIT"
] | 1
|
2020-08-28T02:32:47.000Z
|
2020-08-28T02:32:47.000Z
|
qa/rpc-tests/dao/given/iHaveAnAcceptedProposal.py
|
garretlaxton/navcoin-core
|
7a2919b5ac14ba3b8a1dbf5aad08524db8d5ce08
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/dao/given/iHaveAnAcceptedProposal.py
|
garretlaxton/navcoin-core
|
7a2919b5ac14ba3b8a1dbf5aad08524db8d5ce08
|
[
"MIT"
] | 1
|
2020-08-26T22:35:06.000Z
|
2020-08-26T22:35:06.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2019 The NavCoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Expanded helper routines for regression testing of the NAV Coin community fund
#
import sys, os #include the parent folder so the test_framework is available
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..')))
from test_framework.util import *
from dao.given import (givenIHaveActivatedTheCFund,
givenIHaveDonatedToTheCFund,
givenIHaveCreatedANewAddress,
givenIHaveCreatedAProposal,
givenIHaveVotedOnTheProposal)
from dao.when import *
from dao.then import *
def givenIHaveAnAcceptedProposal(node=None,
address=None,
amount=None,
duration=None,
description=None,
dump=False):
if (node is None
or address is None
or amount is None
or duration is None
or description is None):
print('givenIHaveAnAcceptedProposal: invalid parameters')
assert(False)
givenIHaveActivatedTheCFund(node)
givenIHaveDonatedToTheCFund(node, amount)
if (address == False):
address = givenIHaveCreatedANewAddress(node)["pubkey"]
hash = givenIHaveCreatedAProposal(node, address, amount, duration, description)
givenIHaveVotedOnTheProposal(node, hash, 'yes')
whenTheVotingCycleEnds(node, 2)
thenTheProposalShouldBeAccepted(node, hash)
return {
"hash": hash,
"address": address
}
| 27.185185
| 84
| 0.76703
|
2896382dc93d57a8c3caab3f6cd689f5dd043b05
| 1,515
|
py
|
Python
|
pythonlearn/travelList.py
|
kuljotbiring/Python
|
743c93b91c5e4a4bf5066cf50e72e5a51d98d1ad
|
[
"MIT"
] | null | null | null |
pythonlearn/travelList.py
|
kuljotbiring/Python
|
743c93b91c5e4a4bf5066cf50e72e5a51d98d1ad
|
[
"MIT"
] | null | null | null |
pythonlearn/travelList.py
|
kuljotbiring/Python
|
743c93b91c5e4a4bf5066cf50e72e5a51d98d1ad
|
[
"MIT"
] | null | null | null |
# Think of at least five places in the world you'd like to
# visit.
# • Store the locations in a list. Make sure the list is not in alphabetical order.
travel_list = ['italy', 'greece', 'switzerland', 'japan', 'thailand', 'new zealand', 'brazil', 'peru', 'hong kong']
# • Print your list in its original order. Don't worry about printing the list neatly,
# just print it as a raw Python list.
print(travel_list)
# • Use sorted() to print your list in alphabetical order without modifying the
# actual list.
print(sorted(travel_list))
# • Show that your list is still in its original order by printing it again.
print(travel_list)
# • Use sorted() to print your list in reverse alphabetical order without changing
# the order of the original list.
print(sorted(travel_list, reverse=True))
# • Show that your list is still in its original order by printing it again.
print(travel_list)
# • Use reverse() to change the order of your list. Print the list to show that its
# order has changed.
travel_list.reverse()
print(travel_list)
# • Use reverse() to change the order of your list again. Print the list to show
# it's back to its original order.
travel_list.reverse()
print(travel_list)
# • Use sort() to change your list so it's stored in alphabetical order. Print the
# list to show that its order has been changed.
travel_list.sort()
print(travel_list)
# • Use sort() to change your list so it's stored in reverse alphabetical order.
# Print the list to show that its order has changed.
travel_list.sort(reverse=True)
print(travel_list)
| 33.666667
| 115
| 0.744554
|
a1044e7a33bd8d90b881d3c3fcc88b0f177c63db
| 4,383
|
py
|
Python
|
sdks/python/apache_beam/runners/direct/consumer_tracking_pipeline_visitor_test.py
|
rodrigob/beam
|
e2ce4037f85619f946b3d6a3a90955cdf1c19b4a
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2018-07-13T02:57:48.000Z
|
2018-07-13T02:57:48.000Z
|
sdks/python/apache_beam/runners/direct/consumer_tracking_pipeline_visitor_test.py
|
rodrigob/beam
|
e2ce4037f85619f946b3d6a3a90955cdf1c19b4a
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
sdks/python/apache_beam/runners/direct/consumer_tracking_pipeline_visitor_test.py
|
rodrigob/beam
|
e2ce4037f85619f946b3d6a3a90955cdf1c19b4a
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2019-09-23T08:45:00.000Z
|
2019-09-23T08:45:00.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for consumer_tracking_pipeline_visitor."""
from __future__ import absolute_import
import logging
import unittest
from apache_beam import pvalue
from apache_beam.io import Read
from apache_beam.io import iobase
from apache_beam.pipeline import Pipeline
from apache_beam.pvalue import AsList
from apache_beam.runners.direct import DirectRunner
from apache_beam.runners.direct.consumer_tracking_pipeline_visitor import ConsumerTrackingPipelineVisitor
from apache_beam.transforms import CoGroupByKey
from apache_beam.transforms import Create
from apache_beam.transforms import DoFn
from apache_beam.transforms import FlatMap
from apache_beam.transforms import Flatten
from apache_beam.transforms import ParDo
# Disable frequent lint warning due to pipe operator for chaining transforms.
# pylint: disable=expression-not-assigned
# pylint: disable=pointless-statement
class ConsumerTrackingPipelineVisitorTest(unittest.TestCase):
def setUp(self):
self.pipeline = Pipeline(DirectRunner())
self.visitor = ConsumerTrackingPipelineVisitor()
def test_root_transforms(self):
class DummySource(iobase.BoundedSource):
pass
root_read = Read(DummySource())
root_flatten = Flatten(pipeline=self.pipeline)
pbegin = pvalue.PBegin(self.pipeline)
pcoll_read = pbegin | 'read' >> root_read
pcoll_read | FlatMap(lambda x: x)
[] | 'flatten' >> root_flatten
self.pipeline.visit(self.visitor)
root_transforms = sorted(
[t.transform for t in self.visitor.root_transforms])
self.assertEqual(root_transforms, sorted(
[root_read, root_flatten]))
pbegin_consumers = sorted(
[c.transform for c in self.visitor.value_to_consumers[pbegin]])
self.assertEqual(pbegin_consumers, sorted([root_read]))
self.assertEqual(len(self.visitor.step_names), 3)
def test_side_inputs(self):
class SplitNumbersFn(DoFn):
def process(self, element):
if element < 0:
yield pvalue.TaggedOutput('tag_negative', element)
else:
yield element
class ProcessNumbersFn(DoFn):
def process(self, element, negatives):
yield element
class DummySource(iobase.BoundedSource):
pass
root_read = Read(DummySource())
result = (self.pipeline
| 'read' >> root_read
| ParDo(SplitNumbersFn()).with_outputs('tag_negative',
main='positive'))
positive, negative = result
positive | ParDo(ProcessNumbersFn(), AsList(negative))
self.pipeline.visit(self.visitor)
root_transforms = sorted(
[t.transform for t in self.visitor.root_transforms])
self.assertEqual(root_transforms, sorted([root_read]))
self.assertEqual(len(self.visitor.step_names), 3)
self.assertEqual(len(self.visitor.views), 1)
self.assertTrue(isinstance(self.visitor.views[0],
pvalue.AsList))
def test_co_group_by_key(self):
emails = self.pipeline | 'email' >> Create([('joe', 'joe@example.com')])
phones = self.pipeline | 'phone' >> Create([('mary', '111-222-3333')])
{'emails': emails, 'phones': phones} | CoGroupByKey()
self.pipeline.visit(self.visitor)
root_transforms = sorted(
[t.transform for t in self.visitor.root_transforms])
self.assertEqual(len(root_transforms), 2)
self.assertGreater(
len(self.visitor.step_names), 3) # 2 creates + expanded CoGBK
self.assertEqual(len(self.visitor.views), 0)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
unittest.main()
| 33.976744
| 105
| 0.722336
|
7abce4f0e182cacef57e4d8921d6c65fb9d21b16
| 8,003
|
py
|
Python
|
distances/q_image.py
|
npielawski/py_alpha_amd_release
|
6fb5b3cdef65ba8902daea050785dd73970002c2
|
[
"MIT"
] | 14
|
2019-02-12T20:30:23.000Z
|
2021-11-04T01:10:34.000Z
|
distances/q_image.py
|
npielawski/py_alpha_amd_release
|
6fb5b3cdef65ba8902daea050785dd73970002c2
|
[
"MIT"
] | null | null | null |
distances/q_image.py
|
npielawski/py_alpha_amd_release
|
6fb5b3cdef65ba8902daea050785dd73970002c2
|
[
"MIT"
] | 7
|
2019-02-20T12:19:28.000Z
|
2021-02-09T10:12:06.000Z
|
#
# Py-Alpha-AMD Registration Framework
# Author: Johan Ofverstedt
# Reference: Fast and Robust Symmetric Image Registration Based on Distances Combining Intensity and Spatial Information
#
# Copyright 2019 Johan Ofverstedt
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
#
# Quantized image representation.
#
import numpy as np
import math
import sys
### class: QuantizedImage
### Represents an image quantized to a given number of equally sized levels
### with related point-sets (with a weight for each point) partitioned by
### level.
###
### Random sampling (with replacement) which preserves the partitioning
### so that a list of randomly selected points for each distinct level
### is obtained.
class QuantizedImage:
def __init__(self, A, alpha_levels, weights, spacing, remove_zero_weight_pnts = True, center_point = None, contraction_factor=1):
if A.dtype == 'int32':
self.im = A
else:
self.im = (1 + np.floor(A * (alpha_levels) - 0.5)).astype('int')
self.alpha_levels = alpha_levels
self.contraction_factor = contraction_factor
self.distance_shape = tuple([int(A.shape[i] / contraction_factor) for i in range(A.ndim)])
self.spacing = spacing
if remove_zero_weight_pnts == True:
self.im[weights <= 0.0] = -1
#linspaces = [np.linspace(0, A.shape[i]*self.spacing[i], A.shape[i], endpoint=False) for i in range(A.ndim)]
linspaces = [make_space_1d(A.shape[i], contraction_factor, self.spacing[i]) for i in range(A.ndim)]
#self.spacing = self.spacing * contraction_factor
if center_point is None:
center_point = self.spacing * (np.array(self.get_image_shape())-1.0) / 2.0
# Store the center point as a member, and
# bake the contraction offset into the stored offset
self.center_point = center_point + get_contraction_offset(contraction_factor, spacing)
grid1 = np.array(np.meshgrid(*linspaces,indexing='ij'))
grid = np.zeros(A.shape + (A.ndim+1,), dtype = 'float64')
for i in range(A.ndim):
grid[..., i] = grid1[i, ...] - center_point[i]
#grid[..., :-1] = grid[..., :-1] - center_point
grid[..., A.ndim] = weights
self.weights = weights
self.dense_point_count = np.prod(A.shape)
self.point_count = 0
self.freq = np.zeros((alpha_levels+1,), dtype = 'int')
self.pnts = []
all_indices = np.arange(np.prod(A.shape)).reshape(self.get_image_shape())
self.indices = []#np.arange(np.prod(A.shape[i]))
self.grid = grid[..., :]
for i in range(alpha_levels+1):
filt = (self.im == i)
if remove_zero_weight_pnts:
filt[weights <= 0.0] = False
cnt = np.count_nonzero(filt)
self.freq[i] = cnt
self.point_count = self.point_count + cnt
filt_pnts = grid[filt]
filt_indices = all_indices[filt]
self.pnts.append(filt_pnts)
self.indices.append(filt_indices)
self.freq = self.freq * (1.0 / self.point_count)
self.grid = self.grid.reshape((self.dense_point_count, A.ndim+1))
def print_image(self):
print(self.im)
def print_point_sets(self):
print(self.pnts)
def get_distance_shape(self):
return self.distance_shape
def get_alpha_levels(self):
return self.alpha_levels
def get_weights(self):
return self.weights
def get_dense_point_count(self):
return self.dense_point_count
def get_image_dim(self):
return self.im.ndim
def get_image_shape(self):
return self.im.shape
def get_image(self):
return self.im
def get_center_point(self):
return self.center_point
def get_sampling_fraction_count(self, f):
return np.int(np.round(f * self.point_count))
def get_grid(self):
return self.grid
def get_indices(self):
return self.indices
def random_from_level(self, n, level):
arr = self.pnts[level]
m = arr.shape[0]
if m == 0:
return arr
else:
return arr[np.random.random_integers(0, m-1, n), :]
def random_integers(self, m, n):
if m > 0 and n > 0:
return np.random.random_integers(0, m, n)
else:
return np.zeros((0, self.pnts[0].shape[1]), dtype='int')
def random_sample(self, n):
if n == self.point_count:
return self.pnts
else:
cnt = np.random.multinomial(n, self.freq)
return [self.pnts[i][self.random_integers(len(self.pnts[i])-1, cnt[i]), :] for i in range(self.alpha_levels+1)]
### Helper functions ###
# Generate a space of n pixels, with given spacing and contraction_factor
# contraction_factor means compressing a number of pixels into the center
# of the super-pixel.
#
# 0, 1, 2, 3, 4, 5, 6 -> 1, 1, 1, 4, 4, 4, 7 (n=7, spacing=1, contraction_factor=3;
# the leftover pixel is mapped to the centre of the next, incomplete super-pixel)
def make_space_1d(n, contraction_factor, spacing):
assert(n > 0)
assert(contraction_factor >= 1)
assert(spacing > 0.0)
if contraction_factor == 1:
return np.linspace(0, n*spacing, n, endpoint=False)
superpix_spacing = spacing * contraction_factor
midp = np.mean(np.arange(contraction_factor)*spacing)
whole_pix = int(n / contraction_factor)
rem = n - whole_pix * contraction_factor
seq_whole_pix = midp + (np.arange(whole_pix) * superpix_spacing)
exp_seq = np.repeat(seq_whole_pix, contraction_factor)
if rem > 0:
out = np.empty(shape=[n])
# Fill the first part with the expanded sequence
out[0:exp_seq.size] = exp_seq[:]
# Fill the remainder with constant value
out[exp_seq.size:] = (midp + (whole_pix * superpix_spacing))
return out
else:
return exp_seq
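# Illustrative check added for clarity (not part of the original module): the
# mapping in the comment above can be verified directly, e.g.
# np.allclose(make_space_1d(7, 3, 1.0), [1, 1, 1, 4, 4, 4, 7]) is True.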
# Compute the offset required for look-ups due to contraction/fusing
# of pixels, which is dependent on contraction_factor and spacing.
def get_contraction_offset(contraction_factor, spacing=None):
if spacing is None:
return -(contraction_factor-1.0) / 2.0
else:
return -(spacing * (contraction_factor-1.0)) / 2.0
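# Illustrative example added for clarity (not part of the original module):
# fusing 3 pixels with spacing 1.0 shifts look-ups by half the fused span,
# so get_contraction_offset(3, 1.0) == -1.0, and get_contraction_offset(1)
# is 0.0 (no contraction, no offset).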
# A few tests to exercise the code
def main():
    # make_space_1d(n, contraction_factor, spacing)
    test_space_1d_a = make_space_1d(11, 1, 2.0)
    test_space_1d_b = make_space_1d(11, 2, 2.0)
    test_space_1d_c = make_space_1d(11, 3, 2.0)
    test_space_1d_d = make_space_1d(11, 4, 2.0)
    test_space_1d_e = make_space_1d(12, 4, 2.0)
print(test_space_1d_a)
print(test_space_1d_b)
print(test_space_1d_c)
print(test_space_1d_d)
print(test_space_1d_e)
return
im = np.zeros((3, 4, 5))
w = np.ones((3, 4, 5))
im[1, 1, 3] = 1.0
im[1, 2, 3] = 1.0
q_im = QuantizedImage(im, 1, w, [2, 2, 2], True)
np.random.seed(1000)
sampling = q_im.random_sample(13)
print(sampling)
if __name__ == '__main__':
main()
| 35.255507
| 150
| 0.649381
|
a26c35e2837b16fe883d6f59f1b07a63d942add6
| 26,450
|
py
|
Python
|
model/lib/sklearn/decomposition/online_lda.py
|
nelango/ViralityAnalysis
|
1ddbaccb4ea04fc908a01b4964ab080f1d2b2f60
|
[
"MIT"
] | 4
|
2018-01-21T21:18:35.000Z
|
2022-03-23T21:57:56.000Z
|
model/lib/sklearn/decomposition/online_lda.py
|
nelango/ViralityAnalysis
|
1ddbaccb4ea04fc908a01b4964ab080f1d2b2f60
|
[
"MIT"
] | null | null | null |
model/lib/sklearn/decomposition/online_lda.py
|
nelango/ViralityAnalysis
|
1ddbaccb4ea04fc908a01b4964ab080f1d2b2f60
|
[
"MIT"
] | null | null | null |
"""
=============================================================
Online Latent Dirichlet Allocation with variational inference
=============================================================
This implementation is modified from Matthew D. Hoffman's onlineldavb code
Link: http://www.cs.princeton.edu/~mdhoffma/code/onlineldavb.tar
"""
# Author: Chyi-Kwei Yau
# Author: Matthew D. Hoffman (original onlineldavb implementation)
import numpy as np
import scipy.sparse as sp
from scipy.special import gammaln
from ..base import BaseEstimator, TransformerMixin
from ..utils import (check_random_state, check_array,
gen_batches, gen_even_slices, _get_n_jobs)
from ..utils.validation import NotFittedError, check_non_negative
from ..utils.extmath import logsumexp
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
from ._online_lda import (mean_change, _dirichlet_expectation_1d,
_dirichlet_expectation_2d)
EPS = np.finfo(np.float).eps
def _update_doc_distribution(X, exp_topic_word_distr, doc_topic_prior,
max_iters,
mean_change_tol, cal_sstats, random_state):
"""E-step: update document-topic distribution.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
exp_topic_word_distr : dense matrix, shape=(n_topics, n_features)
        Exponential value of expectation of log topic word distribution.
In the literature, this is `exp(E[log(beta)])`.
doc_topic_prior : float
Prior of document topic distribution `theta`.
max_iters : int
Max number of iterations for updating document topic distribution in
the E-step.
mean_change_tol : float
        Stopping tolerance for updating document topic distribution in the E-step.
cal_sstats : boolean
        Parameter that indicates whether to calculate sufficient statistics.
        Set `cal_sstats` to `True` when we need to run the M-step.
random_state : RandomState instance or None
        Parameter that indicates how to initialize the document topic distribution.
        Setting `random_state` to None will initialize the document topic
        distribution to a constant number.
Returns
-------
(doc_topic_distr, suff_stats) :
`doc_topic_distr` is unnormalized topic distribution for each document.
        In the literature, this is `gamma`. We can calculate `E[log(theta)]`
from it.
`suff_stats` is expected sufficient statistics for the M-step.
When `cal_sstats == False`, this will be None.
"""
is_sparse_x = sp.issparse(X)
n_samples, n_features = X.shape
n_topics = exp_topic_word_distr.shape[0]
if random_state:
doc_topic_distr = random_state.gamma(100., 0.01, (n_samples, n_topics))
else:
doc_topic_distr = np.ones((n_samples, n_topics))
# In the literature, this is `exp(E[log(theta)])`
exp_doc_topic = np.exp(_dirichlet_expectation_2d(doc_topic_distr))
# diff on `component_` (only calculate it when `cal_diff` is True)
suff_stats = np.zeros(exp_topic_word_distr.shape) if cal_sstats else None
if is_sparse_x:
X_data = X.data
X_indices = X.indices
X_indptr = X.indptr
for idx_d in xrange(n_samples):
if is_sparse_x:
ids = X_indices[X_indptr[idx_d]:X_indptr[idx_d + 1]]
cnts = X_data[X_indptr[idx_d]:X_indptr[idx_d + 1]]
else:
ids = np.nonzero(X[idx_d, :])[0]
cnts = X[idx_d, ids]
doc_topic_d = doc_topic_distr[idx_d, :]
# The next one is a copy, since the inner loop overwrites it.
exp_doc_topic_d = exp_doc_topic[idx_d, :].copy()
exp_topic_word_d = exp_topic_word_distr[:, ids]
# Iterate between `doc_topic_d` and `norm_phi` until convergence
for _ in xrange(0, max_iters):
last_d = doc_topic_d
# The optimal phi_{dwk} is proportional to
# exp(E[log(theta_{dk})]) * exp(E[log(beta_{dw})]).
norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + EPS
doc_topic_d = (exp_doc_topic_d *
np.dot(cnts / norm_phi, exp_topic_word_d.T))
# Note: adds doc_topic_prior to doc_topic_d, in-place.
_dirichlet_expectation_1d(doc_topic_d, doc_topic_prior,
exp_doc_topic_d)
if mean_change(last_d, doc_topic_d) < mean_change_tol:
break
doc_topic_distr[idx_d, :] = doc_topic_d
# Contribution of document d to the expected sufficient
# statistics for the M step.
if cal_sstats:
norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + EPS
suff_stats[:, ids] += np.outer(exp_doc_topic_d, cnts / norm_phi)
return (doc_topic_distr, suff_stats)
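# Note added for clarity (not part of the scikit-learn source): for a
# Dirichlet parameter vector gamma, E[log(theta_k)] = psi(gamma_k) -
# psi(sum_j gamma_j), where psi is the digamma function. The
# _dirichlet_expectation_1d/_2d helpers compute exactly this, and exp() of
# the result gives the exp(E[log(theta)]) quantities used in the update above.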
class LatentDirichletAllocation(BaseEstimator, TransformerMixin):
"""Latent Dirichlet Allocation with online variational Bayes algorithm
.. versionadded:: 0.17
Parameters
----------
n_topics : int, optional (default=10)
Number of topics.
doc_topic_prior : float, optional (default=None)
Prior of document topic distribution `theta`. If the value is None,
defaults to `1 / n_topics`.
In the literature, this is called `alpha`.
topic_word_prior : float, optional (default=None)
Prior of topic word distribution `beta`. If the value is None, defaults
to `1 / n_topics`.
In the literature, this is called `eta`.
learning_method : 'batch' | 'online', default='online'
Method used to update `_component`. Only used in `fit` method.
In general, if the data size is large, the online update will be much
faster than the batch update.
Valid options::
'batch': Batch variational Bayes method. Use all training data in
each EM update.
Old `components_` will be overwritten in each iteration.
'online': Online variational Bayes method. In each EM update, use
mini-batch of training data to update the ``components_``
variable incrementally. The learning rate is controlled by the
``learning_decay`` and the ``learning_offset`` parameters.
learning_decay : float, optional (default=0.7)
It is a parameter that control learning rate in the online learning
method. The value should be set between (0.5, 1.0] to guarantee
asymptotic convergence. When the value is 0.0 and batch_size is
``n_samples``, the update method is same as batch learning. In the
literature, this is called kappa.
learning_offset : float, optional (default=10.)
A (positive) parameter that downweights early iterations in online
learning. It should be greater than 1.0. In the literature, this is
called tau_0.
max_iter : integer, optional (default=10)
The maximum number of iterations.
total_samples : int, optional (default=1e6)
Total number of documents. Only used in the `partial_fit` method.
batch_size : int, optional (default=128)
Number of documents to use in each EM iteration. Only used in online
learning.
    evaluate_every : int, optional (default=-1)
        How often to evaluate perplexity. Only used in the `fit` method.
        Set it to 0 or a negative number to not evaluate perplexity in
        training at all. Evaluating perplexity can help you check convergence
        in the training process, but it will also increase total training time.
Evaluating perplexity in every iteration might increase training time
up to two-fold.
perp_tol : float, optional (default=1e-1)
Perplexity tolerance in batch learning. Only used when
``evaluate_every`` is greater than 0.
mean_change_tol : float, optional (default=1e-3)
Stopping tolerance for updating document topic distribution in E-step.
max_doc_update_iter : int (default=100)
Max number of iterations for updating document topic distribution in
the E-step.
n_jobs : int, optional (default=1)
The number of jobs to use in the E-step. If -1, all CPUs are used. For
``n_jobs`` below -1, (n_cpus + 1 + n_jobs) are used.
verbose : int, optional (default=0)
Verbosity level.
random_state : int or RandomState instance or None, optional (default=None)
Pseudo-random number generator seed control.
Attributes
----------
components_ : array, [n_topics, n_features]
Topic word distribution. ``components_[i, j]`` represents word j in
topic `i`. In the literature, this is called lambda.
n_batch_iter_ : int
Number of iterations of the EM step.
n_iter_ : int
Number of passes over the dataset.
References
----------
[1] "Online Learning for Latent Dirichlet Allocation", Matthew D. Hoffman,
David M. Blei, Francis Bach, 2010
[2] "Stochastic Variational Inference", Matthew D. Hoffman, David M. Blei,
Chong Wang, John Paisley, 2013
[3] Matthew D. Hoffman's onlineldavb code. Link:
http://www.cs.princeton.edu/~mdhoffma/code/onlineldavb.tar
"""
def __init__(self, n_topics=10, doc_topic_prior=None,
topic_word_prior=None, learning_method='online',
learning_decay=.7, learning_offset=10., max_iter=10,
batch_size=128, evaluate_every=-1, total_samples=1e6,
perp_tol=1e-1, mean_change_tol=1e-3, max_doc_update_iter=100,
n_jobs=1, verbose=0, random_state=None):
self.n_topics = n_topics
self.doc_topic_prior = doc_topic_prior
self.topic_word_prior = topic_word_prior
self.learning_method = learning_method
self.learning_decay = learning_decay
self.learning_offset = learning_offset
self.max_iter = max_iter
self.batch_size = batch_size
self.evaluate_every = evaluate_every
self.total_samples = total_samples
self.perp_tol = perp_tol
self.mean_change_tol = mean_change_tol
self.max_doc_update_iter = max_doc_update_iter
self.n_jobs = n_jobs
self.verbose = verbose
self.random_state = random_state
def _check_params(self):
"""Check model parameters."""
if self.n_topics <= 0:
raise ValueError("Invalid 'n_topics' parameter: %r"
% self.n_topics)
if self.total_samples <= 0:
raise ValueError("Invalid 'total_samples' parameter: %r"
% self.total_samples)
if self.learning_offset < 0:
raise ValueError("Invalid 'learning_offset' parameter: %r"
% self.learning_offset)
if self.learning_method not in ("batch", "online"):
raise ValueError("Invalid 'learning_method' parameter: %r"
% self.learning_method)
def _init_latent_vars(self, n_features):
"""Initialize latent variables."""
self.random_state_ = check_random_state(self.random_state)
self.n_batch_iter_ = 1
self.n_iter_ = 0
if self.doc_topic_prior is None:
self.doc_topic_prior_ = 1. / self.n_topics
else:
self.doc_topic_prior_ = self.doc_topic_prior
if self.topic_word_prior is None:
self.topic_word_prior_ = 1. / self.n_topics
else:
self.topic_word_prior_ = self.topic_word_prior
init_gamma = 100.
init_var = 1. / init_gamma
# In the literature, this is called `lambda`
self.components_ = self.random_state_.gamma(
init_gamma, init_var, (self.n_topics, n_features))
# In the literature, this is `exp(E[log(beta)])`
self.exp_dirichlet_component_ = np.exp(
_dirichlet_expectation_2d(self.components_))
def _e_step(self, X, cal_sstats, random_init, parallel=None):
"""E-step in EM update.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
cal_sstats : boolean
            Parameter that indicates whether to calculate sufficient statistics
or not. Set ``cal_sstats`` to True when we need to run M-step.
random_init : boolean
            Parameter that indicates whether to initialize document topic
distribution randomly in the E-step. Set it to True in training
steps.
parallel : joblib.Parallel (optional)
Pre-initialized instance of joblib.Parallel.
Returns
-------
(doc_topic_distr, suff_stats) :
            `doc_topic_distr` is unnormalized topic distribution for each
document. In the literature, this is called `gamma`.
`suff_stats` is expected sufficient statistics for the M-step.
When `cal_sstats == False`, it will be None.
"""
# Run e-step in parallel
random_state = self.random_state_ if random_init else None
# TODO: make Parallel._effective_n_jobs public instead?
n_jobs = _get_n_jobs(self.n_jobs)
if parallel is None:
parallel = Parallel(n_jobs=n_jobs, verbose=self.verbose)
results = parallel(
delayed(_update_doc_distribution)(X[idx_slice, :],
self.exp_dirichlet_component_,
self.doc_topic_prior_,
self.max_doc_update_iter,
self.mean_change_tol, cal_sstats,
random_state)
for idx_slice in gen_even_slices(X.shape[0], n_jobs))
# merge result
doc_topics, sstats_list = zip(*results)
doc_topic_distr = np.vstack(doc_topics)
if cal_sstats:
# This step finishes computing the sufficient statistics for the
# M-step.
suff_stats = np.zeros(self.components_.shape)
for sstats in sstats_list:
suff_stats += sstats
suff_stats *= self.exp_dirichlet_component_
else:
suff_stats = None
return (doc_topic_distr, suff_stats)
def _em_step(self, X, total_samples, batch_update, parallel=None):
"""EM update for 1 iteration.
update `_component` by batch VB or online VB.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
total_samples : integer
            Total number of documents. It is only used when
batch_update is `False`.
batch_update : boolean
Parameter that controls updating method.
`True` for batch learning, `False` for online learning.
parallel : joblib.Parallel
Pre-initialized instance of joblib.Parallel
Returns
-------
doc_topic_distr : array, shape=(n_samples, n_topics)
Unnormalized document topic distribution.
"""
# E-step
_, suff_stats = self._e_step(X, cal_sstats=True, random_init=True,
parallel=parallel)
# M-step
if batch_update:
self.components_ = self.topic_word_prior_ + suff_stats
else:
# online update
# In the literature, the weight is `rho`
weight = np.power(self.learning_offset + self.n_batch_iter_,
-self.learning_decay)
doc_ratio = float(total_samples) / X.shape[0]
self.components_ *= (1 - weight)
self.components_ += (weight * (self.topic_word_prior_
+ doc_ratio * suff_stats))
# update `component_` related variables
self.exp_dirichlet_component_ = np.exp(
_dirichlet_expectation_2d(self.components_))
self.n_batch_iter_ += 1
return
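    # Note added for clarity (not part of the scikit-learn source): the online
    # update weight above is rho_t = (learning_offset + t) ** (-learning_decay),
    # with t = n_batch_iter_. With the defaults learning_offset=10 and
    # learning_decay=0.7 this is roughly 0.187 at t=1 and 0.037 at t=100, so
    # later mini-batches move `components_` less and less.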
def _check_non_neg_array(self, X, whom):
"""check X format
check X format and make sure no negative value in X.
Parameters
----------
X : array-like or sparse matrix
"""
X = check_array(X, accept_sparse='csr')
check_non_negative(X, whom)
return X
def partial_fit(self, X, y=None):
"""Online VB with Mini-Batch update.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
self
"""
self._check_params()
X = self._check_non_neg_array(X,
"LatentDirichletAllocation.partial_fit")
n_samples, n_features = X.shape
batch_size = self.batch_size
# initialize parameters or check
if not hasattr(self, 'components_'):
self._init_latent_vars(n_features)
if n_features != self.components_.shape[1]:
raise ValueError(
"The provided data has %d dimensions while "
"the model was trained with feature size %d." %
(n_features, self.components_.shape[1]))
n_jobs = _get_n_jobs(self.n_jobs)
with Parallel(n_jobs=n_jobs, verbose=self.verbose) as parallel:
for idx_slice in gen_batches(n_samples, batch_size):
self._em_step(X[idx_slice, :],
total_samples=self.total_samples,
batch_update=False,
parallel=parallel)
return self
def fit(self, X, y=None):
"""Learn model for the data X with variational Bayes method.
When `learning_method` is 'online', use mini-batch update.
Otherwise, use batch update.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
self
"""
self._check_params()
X = self._check_non_neg_array(X, "LatentDirichletAllocation.fit")
n_samples, n_features = X.shape
max_iter = self.max_iter
evaluate_every = self.evaluate_every
learning_method = self.learning_method
batch_size = self.batch_size
# initialize parameters
self._init_latent_vars(n_features)
# change to perplexity later
last_bound = None
n_jobs = _get_n_jobs(self.n_jobs)
with Parallel(n_jobs=n_jobs, verbose=self.verbose) as parallel:
for i in xrange(max_iter):
if learning_method == 'online':
for idx_slice in gen_batches(n_samples, batch_size):
self._em_step(X[idx_slice, :], total_samples=n_samples,
batch_update=False, parallel=parallel)
else:
# batch update
self._em_step(X, total_samples=n_samples,
batch_update=True, parallel=parallel)
# check perplexity
if evaluate_every > 0 and (i + 1) % evaluate_every == 0:
doc_topics_distr, _ = self._e_step(X, cal_sstats=False,
random_init=False)
bound = self.perplexity(X, doc_topics_distr,
sub_sampling=False)
if self.verbose:
print('iteration: %d, perplexity: %.4f'
% (i + 1, bound))
if last_bound and abs(last_bound - bound) < self.perp_tol:
break
last_bound = bound
self.n_iter_ += 1
return self
def transform(self, X):
"""Transform data X according to the fitted model.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
doc_topic_distr : shape=(n_samples, n_topics)
Document topic distribution for X.
"""
if not hasattr(self, 'components_'):
raise NotFittedError("no 'components_' attribute in model."
" Please fit model first.")
# make sure feature size is the same in fitted model and in X
X = self._check_non_neg_array(X, "LatentDirichletAllocation.transform")
n_samples, n_features = X.shape
if n_features != self.components_.shape[1]:
raise ValueError(
"The provided data has %d dimensions while "
"the model was trained with feature size %d." %
(n_features, self.components_.shape[1]))
doc_topic_distr, _ = self._e_step(X, cal_sstats=False,
random_init=False)
return doc_topic_distr
def _approx_bound(self, X, doc_topic_distr, sub_sampling):
"""Estimate the variational bound.
Estimate the variational bound over "all documents" using only the
documents passed in as X. Since log-likelihood of each word cannot
be computed directly, we use this bound to estimate it.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
doc_topic_distr : array, shape=(n_samples, n_topics)
Document topic distribution. In the literature, this is called
gamma.
sub_sampling : boolean, optional, (default=False)
Compensate for subsampling of documents.
            It is used to calculate the bound in online learning.
Returns
-------
score : float
"""
def _loglikelihood(prior, distr, dirichlet_distr, size):
# calculate log-likelihood
score = np.sum((prior - distr) * dirichlet_distr)
score += np.sum(gammaln(distr) - gammaln(prior))
score += np.sum(gammaln(prior * size) - gammaln(np.sum(distr, 1)))
return score
is_sparse_x = sp.issparse(X)
n_samples, n_topics = doc_topic_distr.shape
n_features = self.components_.shape[1]
score = 0
dirichlet_doc_topic = _dirichlet_expectation_2d(doc_topic_distr)
dirichlet_component_ = _dirichlet_expectation_2d(self.components_)
doc_topic_prior = self.doc_topic_prior_
topic_word_prior = self.topic_word_prior_
if is_sparse_x:
X_data = X.data
X_indices = X.indices
X_indptr = X.indptr
# E[log p(docs | theta, beta)]
for idx_d in xrange(0, n_samples):
if is_sparse_x:
ids = X_indices[X_indptr[idx_d]:X_indptr[idx_d + 1]]
cnts = X_data[X_indptr[idx_d]:X_indptr[idx_d + 1]]
else:
ids = np.nonzero(X[idx_d, :])[0]
cnts = X[idx_d, ids]
temp = (dirichlet_doc_topic[idx_d, :, np.newaxis]
+ dirichlet_component_[:, ids])
norm_phi = logsumexp(temp)
score += np.dot(cnts, norm_phi)
# compute E[log p(theta | alpha) - log q(theta | gamma)]
score += _loglikelihood(doc_topic_prior, doc_topic_distr,
dirichlet_doc_topic, self.n_topics)
# Compensate for the subsampling of the population of documents
if sub_sampling:
doc_ratio = float(self.total_samples) / n_samples
score *= doc_ratio
# E[log p(beta | eta) - log q (beta | lambda)]
score += _loglikelihood(topic_word_prior, self.components_,
dirichlet_component_, n_features)
return score
def score(self, X, y=None):
"""Calculate approximate log-likelihood as score.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
score : float
Use approximate bound as score.
"""
X = self._check_non_neg_array(X, "LatentDirichletAllocation.score")
doc_topic_distr = self.transform(X)
score = self._approx_bound(X, doc_topic_distr, sub_sampling=False)
return score
def perplexity(self, X, doc_topic_distr=None, sub_sampling=False):
"""Calculate approximate perplexity for data X.
Perplexity is defined as exp(-1. * log-likelihood per word)
Parameters
----------
X : array-like or sparse matrix, [n_samples, n_features]
Document word matrix.
doc_topic_distr : None or array, shape=(n_samples, n_topics)
Document topic distribution.
If it is None, it will be generated by applying transform on X.
Returns
-------
score : float
Perplexity score.
"""
if not hasattr(self, 'components_'):
raise NotFittedError("no 'components_' attribute in model."
" Please fit model first.")
X = self._check_non_neg_array(X,
"LatentDirichletAllocation.perplexity")
if doc_topic_distr is None:
doc_topic_distr = self.transform(X)
else:
n_samples, n_topics = doc_topic_distr.shape
if n_samples != X.shape[0]:
raise ValueError("Number of samples in X and doc_topic_distr"
" do not match.")
if n_topics != self.n_topics:
raise ValueError("Number of topics does not match.")
current_samples = X.shape[0]
bound = self._approx_bound(X, doc_topic_distr, sub_sampling)
if sub_sampling:
word_cnt = X.sum() * (float(self.total_samples) / current_samples)
else:
word_cnt = X.sum()
perword_bound = bound / word_cnt
return np.exp(-1.0 * perword_bound)
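# ---------------------------------------------------------------------------
# Illustrative usage sketch appended for clarity; it is not part of the
# original module. The constructor arguments (n_topics, learning_method,
# random_state) are inferred from the attributes used above and may differ in
# other versions of this class.
if __name__ == "__main__":
    import numpy as np
    rng = np.random.RandomState(0)
    X_demo = rng.randint(0, 5, size=(50, 40))        # toy document-word counts
    lda_demo = LatentDirichletAllocation(n_topics=5, learning_method='batch',
                                         random_state=0)
    lda_demo.fit(X_demo)                             # batch variational Bayes
    gamma = lda_demo.transform(X_demo)               # unnormalized doc-topic matrix
    print("perplexity: %.4f" % lda_demo.perplexity(X_demo))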
| 37.411598
| 79
| 0.600076
|
8d1f794ad8f00047b23c05e6a1f3e378a06a4f3a
| 3,866
|
py
|
Python
|
commaai_drive.py
|
alanswx/udacity-hw-cloning
|
c5b8f888eeea4368d8ced2ce30a1ea860a7ed872
|
[
"MIT"
] | null | null | null |
commaai_drive.py
|
alanswx/udacity-hw-cloning
|
c5b8f888eeea4368d8ced2ce30a1ea860a7ed872
|
[
"MIT"
] | null | null | null |
commaai_drive.py
|
alanswx/udacity-hw-cloning
|
c5b8f888eeea4368d8ced2ce30a1ea860a7ed872
|
[
"MIT"
] | null | null | null |
import argparse
import base64
import json
import cv2
import numpy as np
import socketio
import eventlet
import eventlet.wsgi
import time
from PIL import Image
from PIL import ImageOps
from flask import Flask, render_template
from io import BytesIO
import scipy.misc
from keras.models import model_from_json
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array
from keras.optimizers import Adam
import driving_data
# Fix error with Keras and TensorFlow
import tensorflow as tf
tf.python.control_flow_ops = tf
sio = socketio.Server()
app = Flask(__name__)
model = None
prev_image_array = None
@sio.on('telemetry')
def telemetry(sid, data):
# The current steering angle of the car
steering_angle = data["steering_angle"]
# The current throttle of the car
throttle = data["throttle"]
# The current speed of the car
speed = data["speed"]
# The current image from the center camera of the car
imgString = data["image"]
image = Image.open(BytesIO(base64.b64decode(imgString)))
image_array = np.asarray(image)
#print(image_array.shape)
#transformed_image_array=driving_data.process_image_comma_pixels(image_array)
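    # Reorder the HWC image into a single-item NCHW batch (batch, channels,
    # height, width), which is the layout the model below is fed with.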
b = image_array[None, :, :, :].transpose(0, 3, 1, 2)
transformed_image_array = b
#print(transformed_image_array.shape)
'''
image_array = image_array[55:135, :, :]
mean=0
image_array=cv2.copyMakeBorder(image_array, top=55, bottom=25 , left=0, right=0, borderType= cv2.BORDER_CONSTANT, value=[mean,mean,mean] )
#cv2.resize(image, (160,320))
b = image_array[None, :, :, :].transpose(0, 3, 1, 2)
print(b.shape)
transformed_image_array = b
'''
#transformed_image_array = image_array[None, :, :, :]
# This model currently assumes that the features of the model are just the images. Feel free to change this.
#print("about to call predict")
steering_angle = float(model.predict(transformed_image_array, batch_size=1))
#print("after predict")
steering_angle = steering_angle * scipy.pi / 180
#steering_angle = steering_angle * scipy.pi / 180
# steering_angle = steering_angle / 2
#print("steering angle"+str(steering_angle))
# The driving model currently just outputs a constant throttle. Feel free to edit this.
speed = float(speed)
# TODO - change this
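    # Hand-tuned throttle schedule: apply more throttle at low speed and ease
    # off as the reported speed approaches 22 (simulator speed units).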
if speed < 10.0:
throttle = 0.7
elif speed < 15.0:
throttle = 0.4
elif speed < 22.0:
throttle = 0.18
else:
throttle = 0.15
#throttle = 0.2
#print(steering_angle, throttle)
send_control(steering_angle, throttle)
@sio.on('connect')
def connect(sid, environ):
print("connect ", sid)
send_control(0, 0)
def send_control(steering_angle, throttle):
sio.emit("steer", data={
'steering_angle': steering_angle.__str__(),
'throttle': throttle.__str__()
}, skip_sid=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Remote Driving')
parser.add_argument('model', type=str,
help='Path to model definition json. Model weights should be on the same path.')
args = parser.parse_args()
with open(args.model, 'r') as jfile:
# NOTE: if you saved the file by calling json.dump(model.to_json(), ...)
# then you will have to call:
#
#model = model_from_json(json.loads(jfile.read()))
#model.summary()
#
# instead.
model = model_from_json(jfile.read())
model.summary()
learning_rate=0.0001
model.compile(Adam(lr=learning_rate), "mse")
weights_file = args.model.replace('json', 'h5')
model.load_weights(weights_file)
# wrap Flask application with engineio's middleware
app = socketio.Middleware(sio, app)
# deploy as an eventlet WSGI server
eventlet.wsgi.server(eventlet.listen(('', 4567)), app)
| 30.203125
| 142
| 0.688826
|
d51345ad361b2eb08899e336f924e6455d290fc3
| 1,141
|
py
|
Python
|
test/gst-msdk/decode/12bit/vp9.py
|
Bin-CI/vaapi-fits
|
9c43ea65f7fe99f11fa3c49bb14d30f5de09f010
|
[
"BSD-3-Clause"
] | 19
|
2019-03-05T01:59:05.000Z
|
2022-01-11T15:31:49.000Z
|
test/gst-msdk/decode/12bit/vp9.py
|
Bin-CI/vaapi-fits
|
9c43ea65f7fe99f11fa3c49bb14d30f5de09f010
|
[
"BSD-3-Clause"
] | 213
|
2019-01-29T18:44:05.000Z
|
2022-03-30T05:57:04.000Z
|
test/gst-msdk/decode/12bit/vp9.py
|
Bin-CI/vaapi-fits
|
9c43ea65f7fe99f11fa3c49bb14d30f5de09f010
|
[
"BSD-3-Clause"
] | 26
|
2019-01-29T05:21:22.000Z
|
2022-02-09T00:57:35.000Z
|
###
### Copyright (C) 2018-2020 Intel Corporation
###
### SPDX-License-Identifier: BSD-3-Clause
###
from .....lib import *
from .....lib.gstreamer.msdk.util import *
from .....lib.gstreamer.msdk.decoder import DecoderTest
spec = load_test_spec("vp9", "decode", "12bit")
@slash.requires(*platform.have_caps("decode", "vp9_12"))
@slash.requires(*have_gst_element("msdkvp9dec"))
class default(DecoderTest):
def before(self):
super().before()
vars(self).update(
# default metric
metric = dict(type = "ssim", miny = 1.0, minu = 1.0, minv = 1.0),
caps = platform.get_caps("decode", "vp9_12"),
gstdecoder = "msdkvp9dec",
gstparser = "vp9parse",
)
@slash.parametrize(("case"), sorted(spec.keys()))
def test(self, case):
vars(self).update(spec[case].copy())
dxmap = {".ivf" : "ivfparse", ".webm" : "matroskademux", ".mkv" : "matroskademux"}
ext = os.path.splitext(self.source)[1]
assert ext in dxmap.keys(), "Unrecognized source file extension {}".format(ext)
vars(self).update(
case = case,
gstdemuxer = dxmap[ext],
)
self.decode()
| 29.25641
| 86
| 0.618755
|
e05c38a52bb00617c05e399722c2ef1df42a4922
| 4,827
|
py
|
Python
|
adet/modeling/MEInst/pca/pca_valid.py
|
maxpark/ABCNet_Chinese
|
7c7d2f322411ebf933a91b4e9e476562e5d3a012
|
[
"BSD-2-Clause"
] | 27
|
2021-08-05T06:56:17.000Z
|
2022-03-20T06:47:34.000Z
|
adet/modeling/MEInst/pca/pca_valid.py
|
maxpark/ABCNet_Chinese
|
7c7d2f322411ebf933a91b4e9e476562e5d3a012
|
[
"BSD-2-Clause"
] | 6
|
2021-08-09T06:50:30.000Z
|
2021-11-05T09:09:24.000Z
|
adet/modeling/MEInst/pca/pca_valid.py
|
maxpark/ABCNet_Chinese
|
7c7d2f322411ebf933a91b4e9e476562e5d3a012
|
[
"BSD-2-Clause"
] | 8
|
2021-08-05T14:31:30.000Z
|
2022-03-29T07:29:31.000Z
|
# coding:utf-8
import argparse
import os
import time
import numpy as np
from .pca_labels import parse_json
from .pca_utils import transform, inverse_transform, IOUMetric, direct_sigmoid, inverse_sigmoid
VALUE_MAX = 0.05
VALUE_MIN = 0.01
def pca_valid(masks, components_c,
explained_variance_c, mean_c=None,
n_components=60, class_agnostic=True, whiten=True, sigmoid=True, mask_size=28):
mIoU = []
if class_agnostic:
masks = np.concatenate([np.array(mask).astype(np.float32).reshape((-1, mask_size**2)) for mask in masks])
components_c = np.squeeze(components_c)
mean_c = np.squeeze(mean_c)
explained_variance_c = np.squeeze(explained_variance_c)
        assert n_components == components_c.shape[0], (
            "n_components must equal the number of rows in components_c.")
# generate the reconstruction mask.
if sigmoid:
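            # Nudge mask values away from exact 0/1 with a small random floor so
            # the inverse sigmoid (logit) below stays finite.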
value_random = VALUE_MAX * np.random.rand(masks.shape[0], masks.shape[1])
value_random = np.maximum(value_random, VALUE_MIN)
masks_random = np.where(masks > value_random, 1 - value_random, value_random)
masks_random = inverse_sigmoid(masks_random)
else:
masks_random = masks
mask_rc = transform(masks_random, components_=components_c, explained_variance_=explained_variance_c, mean_=mean_c, whiten=whiten)
mask_rc = inverse_transform(mask_rc, components_=components_c, explained_variance_=explained_variance_c, mean_=mean_c, whiten=whiten)
if sigmoid:
mask_rc = direct_sigmoid(mask_rc)
mask_rc = np.where(mask_rc >= 0.5, 1, 0)
IoUevaluate = IOUMetric(2)
IoUevaluate.add_batch(mask_rc, masks)
_, _, _, mean_iu, _ = IoUevaluate.evaluate()
mIoU.append(mean_iu)
else:
# TODO: We have not achieve the function in class-specific.
raise NotImplementedError
return np.mean(mIoU)
if __name__ == '__main__':
# ArgumentParser.
parser = argparse.ArgumentParser()
parser.add_argument('--gt_set', default='/mnt/cephfs_new_wj/mlnlp/zhangrufeng/projects/'
'adet/datasets/28x28/coco_2017_train/coco_2017_train_%s.json', type=str)
parser.add_argument('--output_dir', default='/mnt/cephfs_new_wj/mlnlp/zhangrufeng/projects/'
'adet/datasets/28x28/components/', type=str)
parser.add_argument('--n_split', default=4, type=int)
parser.add_argument('--mask_size', default=28, type=int)
parser.add_argument('--n_components', default=60, type=int)
parser.add_argument('--class_agnostic', default=True, type=bool)
parser.add_argument('--whiten', default=True, type=bool)
parser.add_argument('--sigmoid', default=True, type=bool)
parser.add_argument('--on_val', default=True, type=bool)
args = parser.parse_args()
gt_set = args.gt_set
output_dir = args.output_dir
n_split = args.n_split
mask_size = args.mask_size
n_components = args.n_components
class_agnostic = args.class_agnostic
whiten = args.whiten
sigmoid = args.sigmoid
on_val = args.on_val
# load the parameters
output_path = os.path.join(output_dir, os.path.basename(gt_set).split('.')[0][:-3] + '_class_agnostic' +
str(class_agnostic) + '_whiten' + str(whiten) + '_sigmoid' + str(sigmoid) +
'_' + str(n_components) + '.npz')
print("Load the pca parameters: " + output_path)
tic = time.time()
parameters = np.load(output_path)
components_c = parameters['components_c']
mean_c = parameters['mean_c']
ratio_c = parameters['ratio_c']
explained_variance_c = parameters['explained_variance_c']
toc = time.time() - tic
print("Finish the load in %2fs."%toc)
if on_val:
gt_set = gt_set.replace('train', 'val')
else:
pass
mIoU = []
for split in range(n_split):
print("Start to Load the Segmentation Masks Split %d." % (split + 1))
gt_set_split = gt_set % str(split + 1)
tic = time.time()
_, _, masks = parse_json(gt_set_split)
toc = time.time() - tic
print("Finish the load of split-%d in %2fs." % (split + 1, toc))
print("Start to valid pca of split-%d..."% (split + 1))
mIoU_split = pca_valid(masks=masks, components_c=components_c,
explained_variance_c=explained_variance_c, mean_c=mean_c, n_components=n_components,
class_agnostic=class_agnostic, whiten=whiten, sigmoid=sigmoid, mask_size=mask_size)
mIoU.append(mIoU_split)
print("Finish the valid pca of split-%d"% (split + 1))
print("The mIoU for %s is %f"%(output_path, np.mean(mIoU)))
| 43.098214
| 141
| 0.650715
|
9c8abf7ade4a7ae05d9afbc442dcef180f7f01e4
| 1,954
|
py
|
Python
|
problems/three_qubits_yz/three_q_yz_generators.py
|
JammyL/BayesianOptimization
|
e8deec2136379af875692c455321fbf47f20fb7d
|
[
"MIT"
] | null | null | null |
problems/three_qubits_yz/three_q_yz_generators.py
|
JammyL/BayesianOptimization
|
e8deec2136379af875692c455321fbf47f20fb7d
|
[
"MIT"
] | null | null | null |
problems/three_qubits_yz/three_q_yz_generators.py
|
JammyL/BayesianOptimization
|
e8deec2136379af875692c455321fbf47f20fb7d
|
[
"MIT"
] | null | null | null |
import qutip as qt
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from qutip.qip.circuit import QubitCircuit, Gate
from qutip.qip.operations import gate_sequence_product
from qutip.tensor import tensor
def calcStateFidelity(finalState, targetState):
finalState = finalState.dag()
return abs(targetState.overlap(finalState))**2
def calcGateFidelityN(finalGate, targetGate, N):
product = finalGate * targetGate.dag()
trace = np.trace(product)
return (abs(trace)**2)/(4**N)
def N3qubitGateFunc(targetGate):
def testGateParams(a1, a2, a3, a4, a5, a6):
QC = QubitCircuit(3)
QC.add_gate("RY", targets=0, arg_value=a1)
QC.add_gate("RZ", targets=1, arg_value=a2)
QC.add_gate("RY", targets=2, arg_value=a3)
QC.add_gate("CNOT", targets=1, controls=0)
QC.add_gate("CNOT", targets=2, controls=0)
QC.add_gate("RZ", targets=0, arg_value=a4)
QC.add_gate("RY", targets=1, arg_value=a5)
QC.add_gate("RZ", targets=2, arg_value=a6)
U_list = QC.propagators()
finalGate = gate_sequence_product(U_list)
return calcGateFidelityN(finalGate, targetGate, 3)
return testGateParams
def N3qubitStateFunc(initialState, targetState):
def testStateParams(a1, a2, a3, a4, a5, a6):
QC = QubitCircuit(3)
QC.add_gate("RY", targets=0, arg_value=a1)
QC.add_gate("RZ", targets=1, arg_value=a2)
QC.add_gate("RY", targets=2, arg_value=a3)
QC.add_gate("CNOT", targets=1, controls=0)
QC.add_gate("CNOT", targets=2, controls=0)
QC.add_gate("RZ", targets=0, arg_value=a4)
QC.add_gate("RY", targets=1, arg_value=a5)
QC.add_gate("RZ", targets=2, arg_value=a6)
U_list = QC.propagators()
finalGate = gate_sequence_product(U_list)
finalState = finalGate * initialState
return calcStateFidelity(targetState, finalState)
return testStateParams
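# Illustrative check appended for clarity (not part of the original module);
# the initial/target state and the angles below are arbitrary demo values.
if __name__ == '__main__':
    zero3 = tensor(qt.basis(2, 0), qt.basis(2, 0), qt.basis(2, 0))
    state_fidelity = N3qubitStateFunc(zero3, zero3)
    print("fidelity:", state_fidelity(0.1, 0.2, 0.3, 0.4, 0.5, 0.6))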
| 39.08
| 58
| 0.676049
|
b2b839d4b016dcac14d2ab825d02bd98bab236c1
| 12,280
|
py
|
Python
|
qiskit/backends/local/qasm_simulator_cpp.py
|
nonhermitian/arrogant_seahorse
|
2be1ff60857c75fcbbb0c23aa594f41e1a33c89c
|
[
"Apache-2.0"
] | null | null | null |
qiskit/backends/local/qasm_simulator_cpp.py
|
nonhermitian/arrogant_seahorse
|
2be1ff60857c75fcbbb0c23aa594f41e1a33c89c
|
[
"Apache-2.0"
] | 1
|
2018-08-08T17:56:06.000Z
|
2018-08-08T17:56:06.000Z
|
qiskit/backends/local/qasm_simulator_cpp.py
|
nonhermitian/arrogant_seahorse
|
2be1ff60857c75fcbbb0c23aa594f41e1a33c89c
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2017 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
Interface to C++ quantum circuit simulator with realistic noise.
"""
import json
import logging
import os
import subprocess
from subprocess import PIPE
import platform
import warnings
import numpy as np
from qiskit._result import Result
from qiskit.backends import BaseBackend
from qiskit.backends.local.localjob import LocalJob
from qiskit.cython.qasm_simulator import SimulatorWrapper
logger = logging.getLogger(__name__)
EXTENSION = '.exe' if platform.system() == 'Windows' else ''
# Add path to compiled qasm simulator
DEFAULT_SIMULATOR_PATHS = [
# This is the path where Makefile creates the simulator by default
os.path.abspath(os.path.join(os.path.dirname(__file__),
'../../../out/src/qasm-simulator-cpp/qasm_simulator_cpp'
+ EXTENSION)),
# This is the path where PIP installs the simulator
os.path.abspath(os.path.join(os.path.dirname(__file__),
'qasm_simulator_cpp' + EXTENSION)),
]
class QasmSimulatorCpp(BaseBackend):
"""C++ quantum circuit simulator with realistic noise"""
DEFAULT_CONFIGURATION = {
'name': 'local_qasm_simulator_cpp',
'url': 'https://github.com/QISKit/qiskit-sdk-py/src/qasm-simulator-cpp',
'simulator': True,
'local': True,
'description': 'A C++ realistic noise simulator for qobj files',
'coupling_map': 'all-to-all',
"basis_gates": 'u0,u1,u2,u3,cx,cz,id,x,y,z,h,s,sdg,t,tdg,rzz,' +
'snapshot,wait,noise,save,load'
}
def __init__(self, configuration=None):
super().__init__(configuration or self.DEFAULT_CONFIGURATION.copy())
# Try to use the default executable if not specified.
def run(self, q_job):
"""Run a QuantumJob on the the backend."""
return LocalJob(self._run_job, q_job)
def _run_job(self, q_job):
qobj = q_job.qobj
self._validate(qobj)
simulator = SimulatorWrapper()
result = simulator.run(json.dumps(qobj, cls=QASMSimulatorEncoder))
result = json.loads(result, cls=QASMSimulatorDecoder)
return Result(result, qobj)
def _validate(self, qobj):
if qobj['config']['shots'] == 1:
warnings.warn('The behavior of getting statevector from simulators '
'by setting shots=1 is deprecated and will be removed. '
'Use the local_statevector_simulator instead, or place '
'explicit snapshot instructions.',
DeprecationWarning)
for circ in qobj['circuits']:
if 'measure' not in [op['name'] for
op in circ['compiled_circuit']['operations']]:
logger.warning("no measurements in circuit '%s', "
"classical register will remain all zeros.", circ['name'])
return
class CliffordSimulatorCpp(BaseBackend):
""""C++ Clifford circuit simulator with realistic noise."""
DEFAULT_CONFIGURATION = {
'name': 'local_clifford_simulator_cpp',
'url': 'https://github.com/QISKit/qiskit-sdk-py/src/qasm-simulator-cpp',
'simulator': True,
'local': True,
'description': 'A C++ Clifford simulator with approximate noise',
'coupling_map': 'all-to-all',
'basis_gates': 'cx,id,x,y,z,h,s,sdg,snapshot,wait,noise,save,load'
}
def __init__(self, configuration=None):
super().__init__(configuration or self.DEFAULT_CONFIGURATION.copy())
def run(self, q_job):
"""Run a QuantumJob on the the backend.
Args:
q_job (QuantumJob): QuantumJob object
Returns:
LocalJob: derived from BaseJob
"""
return LocalJob(self._run_job, q_job)
def _run_job(self, q_job):
qobj = q_job.qobj
self._validate()
# set backend to Clifford simulator
if 'config' in qobj:
qobj['config']['simulator'] = 'clifford'
else:
qobj['config'] = {'simulator': 'clifford'}
simulator = SimulatorWrapper()
result = simulator.run(json.dumps(qobj, cls=QASMSimulatorEncoder))
result = json.loads(result, cls=QASMSimulatorDecoder)
return Result(result, qobj)
def _validate(self):
return
class QASMSimulatorEncoder(json.JSONEncoder):
"""
JSON encoder for NumPy arrays and complex numbers.
This functions as the standard JSON Encoder but adds support
for encoding:
complex numbers z as lists [z.real, z.imag]
ndarrays as nested lists.
"""
# pylint: disable=method-hidden,arguments-differ
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
if isinstance(obj, complex):
return [obj.real, obj.imag]
return json.JSONEncoder.default(self, obj)
class QASMSimulatorDecoder(json.JSONDecoder):
"""
JSON decoder for the output from C++ qasm_simulator.
This converts complex vectors and matrices into numpy arrays
for the following keys.
"""
def __init__(self, *args, **kwargs):
json.JSONDecoder.__init__(self, object_hook=self.object_hook, *args, **kwargs)
# pylint: disable=method-hidden
def object_hook(self, obj):
"""Special decoding rules for simulator output."""
for key in ['U_error', 'density_matrix']:
# JSON is a complex matrix
if key in obj and isinstance(obj[key], list):
tmp = np.array(obj[key])
obj[key] = tmp[::, ::, 0] + 1j * tmp[::, ::, 1]
for key in ['statevector', 'inner_products']:
# JSON is a list of complex vectors
if key in obj:
for j in range(len(obj[key])):
if isinstance(obj[key][j], list):
tmp = np.array(obj[key][j])
obj[key][j] = tmp[::, 0] + 1j * tmp[::, 1]
return obj
def run(qobj, executable):
"""
Run simulation on C++ simulator inside a subprocess.
Args:
qobj (dict): qobj dictionary defining the simulation to run
executable (string): filename (with path) of the simulator executable
Returns:
dict: A dict of simulation results
"""
# Open subprocess and execute external command
try:
with subprocess.Popen([executable, '-'],
stdin=PIPE, stdout=PIPE, stderr=PIPE) as proc:
cin = json.dumps(qobj, cls=QASMSimulatorEncoder).encode()
cout, cerr = proc.communicate(cin)
if cerr:
logger.error('ERROR: Simulator encountered a runtime error: %s',
cerr.decode())
sim_output = cout.decode()
return json.loads(sim_output, cls=QASMSimulatorDecoder)
except FileNotFoundError:
msg = "ERROR: Simulator exe not found at: %s" % executable
logger.error(msg)
return {"status": msg, "success": False}
def cx_error_matrix(cal_error, zz_error):
"""
Return the coherent error matrix for CR error model of a CNOT gate.
Args:
cal_error (double): calibration error of rotation
zz_error (double): ZZ interaction term error
Returns:
numpy.ndarray: A coherent error matrix U_error for the CNOT gate.
Details:
        The ideal cross-resonance (CR) gate corresponds to a 2-qubit rotation
U_CR_ideal = exp(-1j * (pi/2) * XZ/2)
where qubit-0 is the control, and qubit-1 is the target. This can be
converted to a CNOT gate by single-qubit rotations
U_CX = U_L * U_CR_ideal * U_R.
The noisy rotation is implemented as
U_CR_noise = exp(-1j * (pi/2 + cal_error) * (XZ + zz_error ZZ)/2)
        The returned error matrix is given by
U_error = U_L * U_CR_noise * U_R * U_CX^dagger
"""
# pylint: disable=invalid-name
if cal_error == 0 and zz_error == 0:
return np.eye(4)
cx_ideal = np.array([[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0]])
b = np.sqrt(1.0 + zz_error * zz_error)
a = b * (np.pi / 2.0 + cal_error) / 2.0
sp = (1.0 + 1j * zz_error) * np.sin(a) / b
sm = (1.0 - 1j * zz_error) * np.sin(a) / b
c = np.cos(a)
cx_noise = np.array([[c + sm, 0, -1j * (c - sm), 0],
[0, 1j * (c - sm), 0, c + sm],
[-1j * (c - sp), 0, c + sp, 0],
[0, c + sp, 0, 1j * (c - sp)]]) / np.sqrt(2)
return cx_noise.dot(cx_ideal.conj().T)
def x90_error_matrix(cal_error, detuning_error):
"""
Return the coherent error matrix for a X90 rotation gate.
Args:
cal_error (double): calibration error of rotation
detuning_error (double): detuning amount for rotation axis error
Returns:
numpy.ndarray: A coherent error matrix U_error for the X90 gate.
Details:
The ideal X90 rotation is a pi/2 rotation about the X-axis:
U_X90_ideal = exp(-1j (pi/2) X/2)
The noisy rotation is implemented as
U_X90_noise = exp(-1j (pi/2 + cal_error) (cos(d) X + sin(d) Y)/2)
where d is the detuning_error.
        The returned error matrix is given by
U_error = U_X90_noise * U_X90_ideal^dagger
"""
# pylint: disable=invalid-name
if cal_error == 0 and detuning_error == 0:
return np.eye(2)
else:
x90_ideal = np.array([[1., -1.j], [-1.j, 1]]) / np.sqrt(2)
c = np.cos(0.5 * cal_error)
s = np.sin(0.5 * cal_error)
gamma = np.exp(-1j * detuning_error)
x90_noise = np.array([[c - s, -1j * (c + s) * gamma],
[-1j * (c + s) * np.conj(gamma), c - s]]) / np.sqrt(2)
return x90_noise.dot(x90_ideal.conj().T)
def _generate_coherent_error_matrix(config):
"""
Generate U_error matrix for CX and X90 gates.
Args:
config (dict): the config of a qobj circuit
This parses the config for the following noise parameter keys and returns a
coherent error matrix for simulation coherent noise.
'CX' gate: 'calibration_error', 'zz_error'
'X90' gate: 'calibration_error', 'detuning_error'
"""
# pylint: disable=invalid-name
if 'noise_params' in config:
# Check for CR coherent error parameters
if 'CX' in config['noise_params']:
noise_cx = config['noise_params']['CX']
cal_error = noise_cx.pop('calibration_error', 0)
zz_error = noise_cx.pop('zz_error', 0)
# Add to current coherent error matrix
if not cal_error == 0 or not zz_error == 0:
u_error = noise_cx.get('U_error', np.eye(4))
u_error = u_error.dot(cx_error_matrix(cal_error, zz_error))
config['noise_params']['CX']['U_error'] = u_error
# Check for X90 coherent error parameters
if 'X90' in config['noise_params']:
noise_x90 = config['noise_params']['X90']
cal_error = noise_x90.pop('calibration_error', 0)
detuning_error = noise_x90.pop('detuning_error', 0)
# Add to current coherent error matrix
if not cal_error == 0 or not detuning_error == 0:
u_error = noise_x90.get('U_error', np.eye(2))
u_error = u_error.dot(x90_error_matrix(cal_error,
detuning_error))
config['noise_params']['X90']['U_error'] = u_error
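# Illustrative check of the coherent-error helpers, appended for clarity (not
# part of the original module); the error magnitudes are arbitrary demo values.
if __name__ == "__main__":
    print(cx_error_matrix(0.0, 0.0))                   # no error -> 4x4 identity
    print(np.round(x90_error_matrix(0.05, 0.02), 4))   # small calibration/detuning error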
| 36.656716
| 89
| 0.597883
|
f1b853b4daa6a362a86d83730bfb8ccd32203d31
| 6,668
|
py
|
Python
|
chemprop/features/features_generators.py
|
jasperhyp/Chemprop4SE
|
c02b604b63b6766464db829fea0b306c67302e82
|
[
"MIT"
] | 1
|
2021-12-15T05:18:07.000Z
|
2021-12-15T05:18:07.000Z
|
chemprop/features/features_generators.py
|
jasperhyp/chemprop4SE
|
c02b604b63b6766464db829fea0b306c67302e82
|
[
"MIT"
] | null | null | null |
chemprop/features/features_generators.py
|
jasperhyp/chemprop4SE
|
c02b604b63b6766464db829fea0b306c67302e82
|
[
"MIT"
] | null | null | null |
from typing import Callable, List, Union
import pandas as pd
import numpy as np
from rdkit import Chem, DataStructs
from rdkit.Chem import AllChem
Molecule = Union[str, Chem.Mol]
FeaturesGenerator = Callable[[Molecule], np.ndarray]
FEATURES_GENERATOR_REGISTRY = {}
def register_features_generator(features_generator_name: str) -> Callable[[FeaturesGenerator], FeaturesGenerator]:
"""
Creates a decorator which registers a features generator in a global dictionary to enable access by name.
:param features_generator_name: The name to use to access the features generator.
:return: A decorator which will add a features generator to the registry using the specified name.
"""
def decorator(features_generator: FeaturesGenerator) -> FeaturesGenerator:
FEATURES_GENERATOR_REGISTRY[features_generator_name] = features_generator
return features_generator
return decorator
def get_features_generator(features_generator_name: str) -> FeaturesGenerator:
"""
Gets a registered features generator by name.
:param features_generator_name: The name of the features generator.
:return: The desired features generator.
"""
if features_generator_name not in FEATURES_GENERATOR_REGISTRY:
raise ValueError(f'Features generator "{features_generator_name}" could not be found. '
f'If this generator relies on rdkit features, you may need to install descriptastorus.')
return FEATURES_GENERATOR_REGISTRY[features_generator_name]
def get_available_features_generators() -> List[str]:
"""Returns a list of names of available features generators."""
return list(FEATURES_GENERATOR_REGISTRY.keys())
MORGAN_RADIUS = 2
MORGAN_NUM_BITS = 2048
@register_features_generator('morgan')
def morgan_binary_features_generator(mol: Molecule,
radius: int = MORGAN_RADIUS,
num_bits: int = MORGAN_NUM_BITS) -> np.ndarray:
"""
Generates a binary Morgan fingerprint for a molecule.
:param mol: A molecule (i.e., either a SMILES or an RDKit molecule).
:param radius: Morgan fingerprint radius.
:param num_bits: Number of bits in Morgan fingerprint.
:return: A 1D numpy array containing the binary Morgan fingerprint.
"""
mol = Chem.MolFromSmiles(mol) if type(mol) == str else mol
features_vec = AllChem.GetMorganFingerprintAsBitVect(mol, radius, nBits=num_bits)
features = np.zeros((1,))
DataStructs.ConvertToNumpyArray(features_vec, features)
return features
@register_features_generator('morgan_count')
def morgan_counts_features_generator(mol: Molecule,
radius: int = MORGAN_RADIUS,
num_bits: int = MORGAN_NUM_BITS) -> np.ndarray:
"""
Generates a counts-based Morgan fingerprint for a molecule.
:param mol: A molecule (i.e., either a SMILES or an RDKit molecule).
:param radius: Morgan fingerprint radius.
:param num_bits: Number of bits in Morgan fingerprint.
:return: A 1D numpy array containing the counts-based Morgan fingerprint.
"""
mol = Chem.MolFromSmiles(mol) if type(mol) == str else mol
features_vec = AllChem.GetHashedMorganFingerprint(mol, radius, nBits=num_bits)
features = np.zeros((1,))
DataStructs.ConvertToNumpyArray(features_vec, features)
return features
try:
from descriptastorus.descriptors import rdDescriptors, rdNormalizedDescriptors
@register_features_generator('rdkit_2d')
def rdkit_2d_features_generator(mol: Molecule) -> np.ndarray:
"""
Generates RDKit 2D features for a molecule.
:param mol: A molecule (i.e., either a SMILES or an RDKit molecule).
:return: A 1D numpy array containing the RDKit 2D features.
"""
smiles = Chem.MolToSmiles(mol, isomericSmiles=True) if type(mol) != str else mol
generator = rdDescriptors.RDKit2D()
features = generator.process(smiles)[1:]
return features
@register_features_generator('rdkit_2d_normalized')
def rdkit_2d_normalized_features_generator(mol: Molecule) -> np.ndarray:
"""
Generates RDKit 2D normalized features for a molecule.
:param mol: A molecule (i.e., either a SMILES or an RDKit molecule).
:return: A 1D numpy array containing the RDKit 2D normalized features.
"""
smiles = Chem.MolToSmiles(mol, isomericSmiles=True) if type(mol) != str else mol
generator = rdNormalizedDescriptors.RDKit2DNormalized()
features = generator.process(smiles)[1:]
return features
except ImportError:
@register_features_generator('rdkit_2d')
def rdkit_2d_features_generator(mol: Molecule) -> np.ndarray:
"""Mock implementation raising an ImportError if descriptastorus cannot be imported."""
raise ImportError('Failed to import descriptastorus. Please install descriptastorus '
'(https://github.com/bp-kelley/descriptastorus) to use RDKit 2D features.')
@register_features_generator('rdkit_2d_normalized')
def rdkit_2d_normalized_features_generator(mol: Molecule) -> np.ndarray:
"""Mock implementation raising an ImportError if descriptastorus cannot be imported."""
raise ImportError('Failed to import descriptastorus. Please install descriptastorus '
'(https://github.com/bp-kelley/descriptastorus) to use RDKit 2D normalized features.')
"""
Custom features generator template.
Note: The name you use to register the features generator is the name
you will specify on the command line when using the --features_generator <name> flag.
Ex. python train.py ... --features_generator custom ...
"""
@register_features_generator('mono')
def custom_features_generator(smile: str,
monoSE_dict: dict) -> np.ndarray:
"""
Generates a mono side effect fingerprint for a molecule.
    :param smile: A SMILES string for the molecule.
:param monoSE_dict: monoSE dictionary.
:return: A 1D numpy array containing the mono side effect fingerprint.
"""
# If you want to use the SMILES string
# smiles = Chem.MolToSmiles(mol, isomericSmiles=True) if type(mol) != str else mol
try:
features = monoSE_dict[smile].astype(float)
except:
features = np.linspace(0, 0, len([*monoSE_dict.values()][0]))
# If you want to use the RDKit molecule
# mol = Chem.MolFromSmiles(mol) if type(mol) == str else mol
# Replace this with code which generates features from the molecule
return features
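# Illustrative usage appended for clarity (not part of the original module);
# it requires rdkit, and the caffeine SMILES below is just demo input.
if __name__ == "__main__":
    morgan_fn = get_features_generator('morgan')
    fp = morgan_fn('CN1C=NC2=C1C(=O)N(C(=O)N2C)C')   # caffeine
    print(fp.shape, int(fp.sum()))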
| 39.928144
| 114
| 0.705609
|
ae402ebeab93bdcf524e1dd06d0e20e09af403c2
| 11,254
|
py
|
Python
|
classify/train/dataset/download_and_convert_17flowers.py
|
NobuoTsukamoto/edge_tpu
|
bfde6ba34b11113eb7db28032be74b1eb491efaa
|
[
"MIT"
] | 56
|
2019-05-23T05:05:29.000Z
|
2021-04-26T05:53:17.000Z
|
classify/train/dataset/download_and_convert_17flowers.py
|
PINTO0309/edge_tpu
|
ee82324f810ce64b6f98f40f5679dfbfbd1a4233
|
[
"MIT"
] | 4
|
2020-04-26T15:43:46.000Z
|
2021-04-19T12:41:17.000Z
|
classify/train/dataset/download_and_convert_17flowers.py
|
PINTO0309/edge_tpu
|
ee82324f810ce64b6f98f40f5679dfbfbd1a4233
|
[
"MIT"
] | 10
|
2020-02-01T18:18:35.000Z
|
2021-04-28T17:23:02.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Copyright (c) 2018 Nobuo Tsukamoto
This software is released under the MIT License.
See the LICENSE file in the project root for more information.
Downloads and converts Oxford 17 Category Flower dataset to TF-Example protos.
This module downloads the Oxford 17 Category Flower Dataset, uncompresses it,
reads the files that make up the Flowers data and creates two TFRecord datasets:
one for train and one for test. Each TFRecord dataset is comprised of a set of
TF-Example protocol buffers, each of which contain a single image and label.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import random
import sys
import argparse
import tarfile
import glob
import shutil
from PIL import Image, ImageOps
from six.moves import urllib
import tensorflow as tf
# The URL where the Flowers data can be downloaded.
_DATA_URL = 'http://www.robots.ox.ac.uk/~vgg/data/flowers/17/17flowers.tgz'
# The number of images in the validation set.
_NUM_VALIDATION = 136
# Seed for repeatability.
_RANDOM_SEED = 0
# The number of shards per dataset split.
_NUM_SHARDS = 5
# The Class labels
CLASS_NAMES = ['Tulip', 'Snowdrop', 'LilyValley', 'Bluebell', 'Crocus', 'Iris',\
'Tigerlily', 'Daffodil', 'Fritillary', 'Sunflower', 'Daisy', 'ColtsFoot',\
'Dandelion', 'Cowslip', 'Buttercup', 'Widnflower', 'Pansy']
class ImageReader(object):
    Helper class that provides TensorFlow image coding utilities.
"""
def __init__(self):
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def read_image_dims(self, sess, image_data):
image = self.decode_jpeg(sess, image_data)
return image.shape[0], image.shape[1]
def decode_jpeg(self, sess, image_data):
image = sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def _get_filenames_and_classes(dataset_dir, is_data_augmentation=False):
""" Returns a dictionary of file names and inferred class names.
Args:
dataset_dir: A directory containing a JPG encoded images.
is_data_augmantation: If True, Image data augmantation. flip image horizontal
Returns:
A dictionary of class names and file paths and representing class names.
"""
flower_root = os.path.join(dataset_dir, 'jpg')
# Move files by category.
files = glob.glob(os.path.join(flower_root, '*.jpg'))
files.sort()
par_category = 80
photo_filename = []
for i, class_name in enumerate(CLASS_NAMES):
directory = os.path.join(dataset_dir, class_name)
os.mkdir(directory)
for j in range(par_category):
index = (par_category * i) + j
shutil.move(files[index], directory)
            # Data augmentation.
if is_data_augmentation:
file_path = os.path.join(directory, os.path.basename(files[index]))
im = Image.open(file_path)
mirror_im = ImageOps.mirror(im)
flip_path = os.path.join(directory,
os.path.splitext(os.path.basename(files[index]))[0] + '_flip.jpg')
mirror_im.save(flip_path)
# Add photo filename list.
for class_name in CLASS_NAMES:
directory = os.path.join(dataset_dir, class_name)
for file_name in os.listdir(directory):
file_path = os.path.join(directory, file_name)
photo_filename.append(file_path)
return photo_filename, sorted(CLASS_NAMES)
def _get_dataset_filename(dataset_dir, split_name, shard_id):
""" Return TF-Recode file name.
Args:
dataset_dir: dataset directory.
split_name: train or validation.
shard_id: id.
Returns:
      The TF-Record file path.
"""
output_filename = 'flowers_%s_%05d-of-%05d.tfrecord' % (
split_name, shard_id, _NUM_SHARDS)
return os.path.join(dataset_dir, output_filename)
def int64_feature(values):
"""Returns a TF-Feature of int64s.
Args:
values: A scalar or list of values.
Returns:
A TF-Feature.
"""
if not isinstance(values, (tuple, list)):
values = [values]
return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def bytes_feature(values):
"""Returns a TF-Feature of bytes.
Args:
values: A string.
Returns:
A TF-Feature.
"""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values]))
def _image_to_tfexample(image_data, image_format, height, width, class_id):
return tf.train.Example(features=tf.train.Features(feature={
'image/encoded': bytes_feature(image_data),
'image/format': bytes_feature(image_format),
'image/class/label': int64_feature(class_id),
'image/height': int64_feature(height),
'image/width': int64_feature(width),
}))
def _conver_dataset(split_name, file_names, class_names_to_ids, dataset_dir):
"""Converts the given file names to a TF-Recode dataset.
Args:
      split_name: The name of the dataset, either 'train' or 'validation'.
file_names: A List of absolute paths to jpg images.
class_names_to_ids: A dictionary from class names (strings) to ids (integers).
dataset_dir: The directory where the converted datasets are stored.
"""
assert split_name in ['train', 'validation']
num_per_shard = int(math.ceil(len(file_names) / float(_NUM_SHARDS)))
print(num_per_shard)
with tf.Graph().as_default():
image_reader = ImageReader()
with tf.Session('') as sess:
for shard_id in range(_NUM_SHARDS):
output_file_name = _get_dataset_filename(dataset_dir,
split_name, shard_id)
with tf.python_io.TFRecordWriter(output_file_name) as tfrecord_writer:
start_ndx = shard_id * num_per_shard
end_ndx = min((shard_id+1) * num_per_shard, len(file_names))
for i in range(start_ndx, end_ndx):
sys.stdout.write('\r>> Converting image %d/%d shard %d' %
(i + 1, len(file_names), shard_id))
sys.stdout.flush()
# Read the file name.
image_data = tf.gfile.GFile(file_names[i], 'rb').read()
height, width = image_reader.read_image_dims(sess, image_data)
class_name = os.path.basename(os.path.dirname(file_names[i]))
class_id = class_names_to_ids[class_name]
example = _image_to_tfexample(image_data, b'jpg',
height, width, class_id)
tfrecord_writer.write(example.SerializeToString())
sys.stdout.write('\n')
sys.stdout.flush()
def _clean_up_temporary_files(dataset_dir):
"""Removes temporary files used to create the dataset.
Args:
      dataset_dir: The directory where the temporary files are stored.
"""
file_name = _DATA_URL.split('/')[-1]
file_path = os.path.join(dataset_dir, file_name)
tf.gfile.Remove(file_path)
tmp_dir = os.path.join(dataset_dir, 'jpg')
tf.gfile.DeleteRecursively(tmp_dir)
for sub_dir in CLASS_NAMES:
tf.gfile.DeleteRecursively(os.path.join(dataset_dir, sub_dir))
def _dataset_exists(dataset_dir):
""" Check if TF-Recoed exists.
Args:
dataset_dir: dataset directory.
Returns:
      True: The TF-Record files exist.
      False: The TF-Record files do not exist.
"""
for split_name in ['train', 'validation']:
for shard_id in range(_NUM_SHARDS):
output_file_name = _get_dataset_filename(dataset_dir, split_name, shard_id)
if not tf.gfile.Exists(output_file_name):
return False
return True
def _download_and_uncompress_tarball(tarball_url, dataset_dir):
""" Downloads the 'tarball_url' and uncompresses it locally.
Args:
tarball_url: The URL of a tarball file.
dataset_dir: The directory where the temporary files are stored.
"""
file_name = tarball_url.split('/')[-1]
file_path = os.path.join(dataset_dir, file_name)
def _progress(count, block_size, total_size):
sys.stdout.write('\r >> Downloading %s %.1f%%' % (file_name,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
file_path, _ = urllib.request.urlretrieve(tarball_url, file_path, _progress)
print()
statinfo = os.stat(file_path)
    print('Successfully downloaded', file_name, statinfo.st_size, 'bytes.')
tarfile.open(file_path, 'r:gz').extractall(dataset_dir)
def _write_label_file(labels_to_class_names, dataset_dir, filename='labels.txt'):
"""Writes a file with the list of class names.
Args:
labels_to_class_names: A map of (integer) labels to class names.
dataset_dir: The directory in which the labels file should be written.
filename: The filename where the class names are written.
"""
labels_file_name = os.path.join(dataset_dir, filename)
with tf.gfile.Open(labels_file_name, 'w') as f:
for label in labels_to_class_names:
class_name = labels_to_class_names[label]
f.write('%d:%s\n' % (label, class_name))
def main():
parser = argparse.ArgumentParser()
parser.add_argument('dataset_dir', help='Path to dataset dir.')
parser.add_argument('--flip', action='store_true')
args = parser.parse_args()
if not tf.gfile.Exists(args.dataset_dir):
tf.gfile.MakeDirs(args.dataset_dir)
if _dataset_exists(args.dataset_dir):
print('Dataset files already exist. Exiting without re-creating them.')
return
_download_and_uncompress_tarball(_DATA_URL, args.dataset_dir)
photo_file_names, CLASS_NAMES = _get_filenames_and_classes(args.dataset_dir, args.flip)
class_names_to_ids = dict(zip(CLASS_NAMES, range(len(CLASS_NAMES))))
# Divide into train and test.
random.seed(_RANDOM_SEED)
random.shuffle(photo_file_names)
num_split = _NUM_VALIDATION
if args.flip:
num_split = _NUM_VALIDATION * 2
training_file_names = photo_file_names[num_split:]
validation_file_names = photo_file_names[:num_split]
# First, convert the training and validation sets.
_conver_dataset('train', training_file_names, class_names_to_ids,
args.dataset_dir)
_conver_dataset('validation', validation_file_names, class_names_to_ids,
args.dataset_dir)
# Finally, write the labels file.
labels_to_class_names = dict(zip(range(len(CLASS_NAMES)), CLASS_NAMES))
_write_label_file(labels_to_class_names, args.dataset_dir)
_clean_up_temporary_files(args.dataset_dir)
print('\nFinished converting the 17 Flowers dataset!')
if __name__ == '__main__':
main()
| 36.186495
| 91
| 0.666252
|
41c055cdba3e07431e85d8a26dd36bfac10cf7c2
| 10,416
|
py
|
Python
|
cumulus/storage.py
|
wearespindle/django-cumulus
|
1c6fc1691ff459b38a3103c020213eb1a6b6f8e5
|
[
"BSD-3-Clause"
] | null | null | null |
cumulus/storage.py
|
wearespindle/django-cumulus
|
1c6fc1691ff459b38a3103c020213eb1a6b6f8e5
|
[
"BSD-3-Clause"
] | null | null | null |
cumulus/storage.py
|
wearespindle/django-cumulus
|
1c6fc1691ff459b38a3103c020213eb1a6b6f8e5
|
[
"BSD-3-Clause"
] | null | null | null |
import mimetypes
import pyrax
import re
import warnings
from gzip import GzipFile
try:
from cStringIO import StringIO
except ImportError:
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from django.core.files.storage import Storage
from django.core.files.base import File, ContentFile
try:
from django.utils.deconstruct import deconstructible
except ImportError:
# Make a no-op decorator to avoid errors
def deconstructible(*args, **kwargs):
def decorator(klass):
return klass
if not args:
return decorator
return decorator(*args, **kwargs)
from cumulus.authentication import Auth
from cumulus.settings import CUMULUS
HEADER_PATTERNS = tuple((re.compile(p), h) for p, h in CUMULUS.get("HEADERS", {}))
def get_content_type(name, content):
"""
Checks if the content_type is already set.
Otherwise uses the mimetypes library to guess.
"""
if hasattr(content, "content_type"):
content_type = content.content_type
else:
mime_type, encoding = mimetypes.guess_type(name)
content_type = mime_type
return content_type
def get_headers(name, content_type):
headers = {"Content-Type": content_type}
    # gzip the file if it is of the right content type
if content_type in CUMULUS.get("GZIP_CONTENT_TYPES", []):
headers["Content-Encoding"] = "gzip"
if CUMULUS["HEADERS"]:
for pattern, pattern_headers in HEADER_PATTERNS:
if pattern.match(name):
headers.update(pattern_headers.copy())
return headers
def sync_headers(cloud_obj, headers=None, header_patterns=HEADER_PATTERNS):
"""
    Overwrites the given cloud_obj's headers with the ones given as ``headers``
and adds additional headers as defined in the HEADERS setting depending on
the cloud_obj's file name.
"""
if headers is None:
headers = {}
# don't set headers on directories
content_type = getattr(cloud_obj, "content_type", None)
if content_type == "application/directory":
return
matched_headers = {}
for pattern, pattern_headers in header_patterns:
if pattern.match(cloud_obj.name):
matched_headers.update(pattern_headers.copy())
# preserve headers already set
matched_headers.update(cloud_obj.headers)
# explicitly set headers overwrite matches and already set headers
matched_headers.update(headers)
if matched_headers != cloud_obj.headers:
cloud_obj.headers = matched_headers
cloud_obj.sync_metadata()
def get_gzipped_contents(input_file):
"""
Returns a gzipped version of a previously opened file's buffer.
"""
zbuf = StringIO()
zfile = GzipFile(mode="wb", compresslevel=6, fileobj=zbuf)
zfile.write(input_file.read())
zfile.close()
return ContentFile(zbuf.getvalue())
@deconstructible
class CumulusStorage(Auth, Storage):
"""
Custom storage for Cumulus.
"""
default_quick_listdir = True
container_name = CUMULUS["CONTAINER"]
container_uri = CUMULUS["CONTAINER_URI"]
container_ssl_uri = CUMULUS["CONTAINER_SSL_URI"]
ttl = CUMULUS["TTL"]
file_ttl = CUMULUS["FILE_TTL"]
use_ssl = CUMULUS["USE_SSL"]
def _open(self, name, mode="rb"):
"""
        Returns a ContentFile with the remote object's contents.
"""
return ContentFile(self._get_object(name).get())
def _save(self, name, content):
"""
Uses the Cumulus service to write ``content`` to a remote
file (called ``name``).
"""
content_type = get_content_type(name, content.file)
headers = get_headers(name, content_type)
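        # Two upload paths: pyrax stores the object and then syncs its metadata,
        # while the plain swift client falls back to a single put_object call.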
if self.use_pyrax:
if headers.get("Content-Encoding") == "gzip":
content = get_gzipped_contents(content)
self.connection.store_object(container=self.container_name,
obj_name=name,
data=content.read(),
content_type=content_type,
content_encoding=headers.get("Content-Encoding", None),
ttl=self.file_ttl,
etag=None)
# set headers/object metadata
self.connection.set_object_metadata(container=self.container_name,
obj=name,
metadata=headers,
prefix='',
clear=True)
else:
# TODO gzipped content when using swift client
self.connection.put_object(self.container_name, name,
content, headers=headers)
return name
def delete(self, name):
"""
Deletes the specified file from the storage system.
Deleting a model doesn't delete associated files: bit.ly/12s6Oox
"""
try:
self.connection.delete_object(self.container_name, name)
except pyrax.exceptions.ClientException as exc:
if exc.http_status == 404:
pass
else:
raise
except pyrax.exceptions.NoSuchObject:
pass
def exists(self, name):
"""
Returns True if a file referenced by the given name already
exists in the storage system, or False if the name is
available for a new file.
"""
return bool(self._get_object(name))
def size(self, name):
"""
Returns the total size, in bytes, of the file specified by name.
"""
file_object = self._get_object(name)
if file_object:
return file_object.total_bytes
else:
return 0
def url(self, name):
"""
Returns an absolute URL where the content of each file can be
accessed directly by a web browser.
"""
return u"{0}/{1}".format(self.container_url, name)
def listdir(self, path):
"""
Lists the contents of the specified path, returning a 2-tuple;
the first being an empty list of directories (not available
for quick-listing), the second being a list of filenames.
If the list of directories is required, use the full_listdir method.
"""
files = []
if path and not path.endswith("/"):
path = u"{0}/".format(path)
path_len = len(path)
for name in [x["name"] for x in
self.connection.get_container(self.container_name, full_listing=True)[1]]:
files.append(name[path_len:])
return ([], files)
def full_listdir(self, path):
"""
Lists the contents of the specified path, returning a 2-tuple
of lists; the first item being directories, the second item
being files.
"""
dirs = set()
files = []
if path and not path.endswith("/"):
path = u"{0}/".format(path)
path_len = len(path)
for name in [x["name"] for x in
self.connection.get_container(self.container_name, full_listing=True)[1]]:
name = name[path_len:]
slash = name[1:-1].find("/") + 1
if slash:
dirs.add(name[:slash])
elif name:
files.append(name)
dirs = list(dirs)
dirs.sort()
return (dirs, files)
class CumulusStaticStorage(CumulusStorage):
"""
Subclasses CumulusStorage to automatically set the container
to the one specified in CUMULUS["STATIC_CONTAINER"]. This provides
the ability to specify a separate storage backend for Django's
collectstatic command.
To use, make sure CUMULUS["STATIC_CONTAINER"] is set to something other
than CUMULUS["CONTAINER"]. Then, tell Django's staticfiles app by setting
STATICFILES_STORAGE = "cumulus.storage.CumulusStaticStorage".
"""
container_name = CUMULUS["STATIC_CONTAINER"]
container_uri = CUMULUS["STATIC_CONTAINER_URI"]
container_ssl_uri = CUMULUS["STATIC_CONTAINER_SSL_URI"]
class ThreadSafeCumulusStorage(CumulusStorage):
"""
Extends CumulusStorage to make it mostly thread safe.
As long as you do not pass container or cloud objects between
threads, you will be thread safe.
Uses one connection/container per thread.
"""
def __init__(self, *args, **kwargs):
super(ThreadSafeCumulusStorage, self).__init__(*args, **kwargs)
import threading
self.local_cache = threading.local()
def _get_connection(self):
if not hasattr(self.local_cache, "connection"):
            connection = super(ThreadSafeCumulusStorage, self)._get_connection()
            self.local_cache.connection = connection
return self.local_cache.connection
connection = property(_get_connection, CumulusStorage._set_connection)
def _get_container(self):
if not hasattr(self.local_cache, "container"):
container = self.connection.create_container(self.container_name)
self.local_cache.container = container
return self.local_cache.container
container = property(_get_container, CumulusStorage._set_container)
class SwiftclientStorage(CumulusStorage):
def __init__(self, *args, **kwargs):
warnings.warn("SwiftclientStorage is deprecated and will be removed in django-cumulus==1.3: \
Use CumulusStorage instead.", DeprecationWarning)
super(SwiftclientStorage, self).__init__()
class SwiftclientStaticStorage(CumulusStaticStorage):
def __init__(self, *args, **kwargs):
warnings.warn("SwiftclientStaticStorage is deprecated and will be removed in django-cumulus==1.3: \
Use CumulusStaticStorage instead.", DeprecationWarning)
super(SwiftclientStaticStorage, self).__init__()
class ThreadSafeSwiftclientStorage(ThreadSafeCumulusStorage):
def __init__(self, *args, **kwargs):
warnings.warn("ThreadSafeSwiftclientStorage is deprecated and will be removed in django-cumulus==1.3: \
Use ThreadSafeCumulusStorage instead.", DeprecationWarning)
super(ThreadSafeSwiftclientStorage, self).__init__()
| 34.72
| 111
| 0.628168
|
5457a1983edba1eceaeb6934cf39fff9fad03f32
| 2,578
|
py
|
Python
|
lib/googlecloudsdk/command_lib/compute/http_health_checks/flags.py
|
bopopescu/Google-Cloud-SDK-1
|
c4683bacb2f6192d8a816932e438a0493085469b
|
[
"Apache-2.0"
] | null | null | null |
lib/googlecloudsdk/command_lib/compute/http_health_checks/flags.py
|
bopopescu/Google-Cloud-SDK-1
|
c4683bacb2f6192d8a816932e438a0493085469b
|
[
"Apache-2.0"
] | null | null | null |
lib/googlecloudsdk/command_lib/compute/http_health_checks/flags.py
|
bopopescu/Google-Cloud-SDK-1
|
c4683bacb2f6192d8a816932e438a0493085469b
|
[
"Apache-2.0"
] | 1
|
2020-07-24T20:13:29.000Z
|
2020-07-24T20:13:29.000Z
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Flags and helpers for the compute http-health-checks commands."""
from googlecloudsdk.command_lib.compute import completers as compute_completers
from googlecloudsdk.command_lib.compute import flags as compute_flags
DEFAULT_LIST_FORMAT = """\
table(
name,
host,
port,
requestPath
)"""
def HttpHealthCheckArgument(required=True, plural=False):
return compute_flags.ResourceArgument(
resource_name='HTTP health check',
completer=compute_completers.HttpHealthChecksCompleter,
plural=plural,
required=required,
global_collection='compute.httpHealthChecks')
def HttpHealthCheckArgumentForTargetPool(action, required=True):
return compute_flags.ResourceArgument(
resource_name='http health check',
name='--http-health-check',
completer=compute_completers.HttpHealthChecksCompleter,
plural=False,
required=required,
global_collection='compute.httpHealthChecks',
short_help=('Specifies an HTTP health check object to {0} the '
'target pool.'.format(action)))
def HttpHealthCheckArgumentForTargetPoolCreate(required=True):
return compute_flags.ResourceArgument(
resource_name='http health check',
name='--http-health-check',
completer=compute_completers.HttpHealthChecksCompleter,
plural=False,
required=required,
global_collection='compute.httpHealthChecks',
short_help=(
'Specifies HttpHealthCheck to determine the health of instances '
'in the pool.'),
detailed_help="""\
Specifies an HTTP health check resource to use to determine the health
of instances in this pool. If no health check is specified, traffic will
be sent to all instances in this target pool as if the instances
were healthy, but the health status of this pool will appear as
unhealthy as a warning that this target pool does not have a health
check.
""")
| 37.362319
| 80
| 0.726532
|
f3363c2d8952dca6c59f93b5240d050900b1322a
| 989
|
py
|
Python
|
selfdrive/controls/lib/fingerprints.py
|
skynetera/openpilot
|
a7e099c946800c7a8b60c47678801d9a95f95549
|
[
"MIT"
] | 4
|
2017-02-17T07:02:37.000Z
|
2022-03-17T13:50:00.000Z
|
selfdrive/controls/lib/fingerprints.py
|
mjiujiang/openpilot
|
a7e099c946800c7a8b60c47678801d9a95f95549
|
[
"MIT"
] | null | null | null |
selfdrive/controls/lib/fingerprints.py
|
mjiujiang/openpilot
|
a7e099c946800c7a8b60c47678801d9a95f95549
|
[
"MIT"
] | 3
|
2017-10-12T23:31:24.000Z
|
2022-01-26T09:59:28.000Z
|
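# Each car entry maps CAN message IDs to their expected message lengths, used to fingerprint the vehicle.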
fingerprints = {
"ACURA ILX 2016 ACURAWATCH PLUS": {
1024L: 5, 513L: 5, 1027L: 5, 1029L: 8, 929L: 4, 1057L: 5, 777L: 8, 1034L: 5, 1036L: 8, 398L: 3, 399L: 7, 145L: 8, 660L: 8, 985L: 3, 923L: 2, 542L: 7, 773L: 7, 800L: 8, 432L: 7, 419L: 8, 420L: 8, 1030L: 5, 422L: 8, 808L: 8, 428L: 8, 304L: 8, 819L: 7, 821L: 5, 57L: 3, 316L: 8, 545L: 4, 464L: 8, 1108L: 8, 597L: 8, 342L: 6, 983L: 8, 344L: 8, 804L: 8, 1039L: 8, 476L: 4, 892L: 8, 490L: 8, 1064L: 7, 882L: 2, 884L: 7, 887L: 8, 888L: 8, 380L: 8, 1365L: 5
},
"HONDA CIVIC 2016 TOURING": {
1024L: 5, 513L: 5, 1027L: 5, 1029L: 8, 777L: 8, 1036L: 8, 1039L: 8, 1424L: 5, 401L: 8, 148L: 8, 662L: 4, 985L: 3, 795L: 8, 773L: 7, 800L: 8, 545L: 6, 420L: 8, 806L: 8, 808L: 8, 1322L: 5, 427L: 3, 428L: 8, 304L: 8, 432L: 7, 57L: 3, 450L: 8, 929L: 8, 330L: 8, 1302L: 8, 464L: 8, 1361L: 5, 1108L: 8, 597L: 8, 470L: 2, 344L: 8, 804L: 8, 399L: 7, 476L: 7, 1633L: 8, 487L: 4, 892L: 8, 490L: 8, 493L: 5, 884L: 8, 891L: 8, 380L: 8, 1365L: 5
}
}
| 109.888889
| 453
| 0.566229
|
0fa2be26d83071c739a5fbf971bf3b5a373527b5
| 493
|
py
|
Python
|
flask_app_8_machine_learning_plots_dataframes/forms.py
|
tagler/Data_Science_Flask_Tutorials_Python_HTML_CSS
|
3cc0aae5825048d7b84cfb636add3270f0f410a1
|
[
"MIT"
] | null | null | null |
flask_app_8_machine_learning_plots_dataframes/forms.py
|
tagler/Data_Science_Flask_Tutorials_Python_HTML_CSS
|
3cc0aae5825048d7b84cfb636add3270f0f410a1
|
[
"MIT"
] | null | null | null |
flask_app_8_machine_learning_plots_dataframes/forms.py
|
tagler/Data_Science_Flask_Tutorials_Python_HTML_CSS
|
3cc0aae5825048d7b84cfb636add3270f0f410a1
|
[
"MIT"
] | null | null | null |
from flask_wtf import FlaskForm
from wtforms import StringField, FloatField, SubmitField
from wtforms.validators import DataRequired
class MyForm(FlaskForm):
sepal_length = FloatField('Sepal Length:', validators=[DataRequired()])
sepal_width = FloatField('Sepal Width:', validators=[DataRequired()])
petal_length = FloatField('Petal Length:', validators=[DataRequired()])
petal_width = FloatField('Petal Width:', validators=[DataRequired()])
submit = SubmitField('Submit')
| 49.3
| 75
| 0.760649
|
dec1d25dd0aa7b1bf32300c92a8600b0fb7be6ec
| 6,893
|
py
|
Python
|
tests/migrations/test_multidb.py
|
spapas/django
|
d6ecce683682eb67ebd7f2c6766195131ea30158
|
[
"BSD-3-Clause"
] | null | null | null |
tests/migrations/test_multidb.py
|
spapas/django
|
d6ecce683682eb67ebd7f2c6766195131ea30158
|
[
"BSD-3-Clause"
] | null | null | null |
tests/migrations/test_multidb.py
|
spapas/django
|
d6ecce683682eb67ebd7f2c6766195131ea30158
|
[
"BSD-3-Clause"
] | 1
|
2020-05-25T08:55:19.000Z
|
2020-05-25T08:55:19.000Z
|
import unittest
try:
import sqlparse
except ImportError:
sqlparse = None
from django.db import migrations, models, connection
from django.db.migrations.state import ProjectState
from django.test import override_settings
from .test_operations import OperationTestBase
class AgnosticRouter(object):
"""
A router that doesn't have an opinion regarding migrating.
"""
def allow_migrate(self, db, model, **hints):
return None
class MigrateNothingRouter(object):
"""
A router that doesn't allow migrating.
"""
def allow_migrate(self, db, model, **hints):
return False
class MigrateEverythingRouter(object):
"""
A router that always allows migrating.
"""
def allow_migrate(self, db, model, **hints):
return True
class MigrateWhenFooRouter(object):
"""
A router that allows migrating depending on a hint.
"""
def allow_migrate(self, db, model, **hints):
return hints.get('foo', False)
class MultiDBOperationTests(OperationTestBase):
multi_db = True
def _test_create_model(self, app_label, should_run):
"""
Tests that CreateModel honours multi-db settings.
"""
operation = migrations.CreateModel(
"Pony",
[("id", models.AutoField(primary_key=True))],
)
# Test the state alteration
project_state = ProjectState()
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
# Test the database alteration
self.assertTableNotExists("%s_pony" % app_label)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
if should_run:
self.assertTableExists("%s_pony" % app_label)
else:
self.assertTableNotExists("%s_pony" % app_label)
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
self.assertTableNotExists("%s_pony" % app_label)
@override_settings(DATABASE_ROUTERS=[AgnosticRouter()])
def test_create_model(self):
"""
Test when router doesn't have an opinion (i.e. CreateModel should run).
"""
self._test_create_model("test_mltdb_crmo", should_run=True)
@override_settings(DATABASE_ROUTERS=[MigrateNothingRouter()])
def test_create_model2(self):
"""
Test when router returns False (i.e. CreateModel shouldn't run).
"""
self._test_create_model("test_mltdb_crmo2", should_run=False)
@override_settings(DATABASE_ROUTERS=[MigrateEverythingRouter()])
def test_create_model3(self):
"""
Test when router returns True (i.e. CreateModel should run).
"""
self._test_create_model("test_mltdb_crmo3", should_run=True)
def test_create_model4(self):
"""
Test multiple routers.
"""
with override_settings(DATABASE_ROUTERS=[AgnosticRouter(), AgnosticRouter()]):
self._test_create_model("test_mltdb_crmo4", should_run=True)
with override_settings(DATABASE_ROUTERS=[MigrateNothingRouter(), MigrateEverythingRouter()]):
self._test_create_model("test_mltdb_crmo4", should_run=False)
with override_settings(DATABASE_ROUTERS=[MigrateEverythingRouter(), MigrateNothingRouter()]):
self._test_create_model("test_mltdb_crmo4", should_run=True)
def _test_run_sql(self, app_label, should_run, hints=None):
with override_settings(DATABASE_ROUTERS=[MigrateEverythingRouter()]):
project_state = self.set_up_test_model(app_label)
sql = """
INSERT INTO {0}_pony (pink, weight) VALUES (1, 3.55);
INSERT INTO {0}_pony (pink, weight) VALUES (3, 5.0);
""".format(app_label)
operation = migrations.RunSQL(sql, hints=hints or {})
# Test the state alteration does nothing
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
self.assertEqual(new_state, project_state)
# Test the database alteration
self.assertEqual(project_state.apps.get_model(app_label, "Pony").objects.count(), 0)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
Pony = project_state.apps.get_model(app_label, "Pony")
if should_run:
self.assertEqual(Pony.objects.count(), 2)
else:
self.assertEqual(Pony.objects.count(), 0)
@unittest.skipIf(sqlparse is None and connection.features.requires_sqlparse_for_splitting, "Missing sqlparse")
@override_settings(DATABASE_ROUTERS=[MigrateNothingRouter()])
def test_run_sql(self):
self._test_run_sql("test_mltdb_runsql", should_run=False)
@unittest.skipIf(sqlparse is None and connection.features.requires_sqlparse_for_splitting, "Missing sqlparse")
@override_settings(DATABASE_ROUTERS=[MigrateWhenFooRouter()])
def test_run_sql2(self):
self._test_run_sql("test_mltdb_runsql2", should_run=False)
self._test_run_sql("test_mltdb_runsql2", should_run=True, hints={'foo': True})
def _test_run_python(self, app_label, should_run, hints=None):
with override_settings(DATABASE_ROUTERS=[MigrateEverythingRouter()]):
project_state = self.set_up_test_model(app_label)
# Create the operation
def inner_method(models, schema_editor):
Pony = models.get_model(app_label, "Pony")
Pony.objects.create(pink=1, weight=3.55)
Pony.objects.create(weight=5)
operation = migrations.RunPython(inner_method, hints=hints or {})
# Test the state alteration does nothing
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
self.assertEqual(new_state, project_state)
# Test the database alteration
self.assertEqual(project_state.apps.get_model(app_label, "Pony").objects.count(), 0)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
Pony = project_state.apps.get_model(app_label, "Pony")
if should_run:
self.assertEqual(Pony.objects.count(), 2)
else:
self.assertEqual(Pony.objects.count(), 0)
@override_settings(DATABASE_ROUTERS=[MigrateNothingRouter()])
def test_run_python(self):
self._test_run_python("test_mltdb_runpython", should_run=False)
@override_settings(DATABASE_ROUTERS=[MigrateWhenFooRouter()])
def test_run_python2(self):
self._test_run_python("test_mltdb_runpython2", should_run=False)
self._test_run_python("test_mltdb_runpython2", should_run=True, hints={'foo': True})
| 39.388571
| 114
| 0.685623
|
8dc94c07aabb8be5e6887621fb9bb027204f8bca
| 3,950
|
py
|
Python
|
test/functional/feature_minchainwork.py
|
Cobra-Technologies-GmbH/PIVX
|
9c45238847a9afe4d6bb3fca4af5a876ed52b72f
|
[
"MIT"
] | null | null | null |
test/functional/feature_minchainwork.py
|
Cobra-Technologies-GmbH/PIVX
|
9c45238847a9afe4d6bb3fca4af5a876ed52b72f
|
[
"MIT"
] | null | null | null |
test/functional/feature_minchainwork.py
|
Cobra-Technologies-GmbH/PIVX
|
9c45238847a9afe4d6bb3fca4af5a876ed52b72f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test logic for setting nMinimumChainWork on command line.
Nodes don't consider themselves out of "initial block download" until
their active chain has more work than nMinimumChainWork.
Nodes don't download blocks from a peer unless the peer's best known block
has more work than nMinimumChainWork.
While in initial block download, nodes won't relay blocks to their peers, so
test that this parameter functions as intended by verifying that block relay
only succeeds past a given node once its nMinimumChainWork has been exceeded.
"""
import time
from test_framework.test_framework import OmegacoinTestFramework
from test_framework.util import connect_nodes, assert_equal
# 2 hashes required per regtest block (with no difficulty adjustment)
REGTEST_WORK_PER_BLOCK = 2
class MinimumChainWorkTest(OmegacoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [[], ["-minimumchainwork=0x65"], ["-minimumchainwork=0x65"]]
self.node_min_work = [0, 101, 101]
def setup_network(self):
# This test relies on the chain setup being:
# node0 <- node1 <- node2
# Before leaving IBD, nodes prefer to download blocks from outbound
# peers, so ensure that we're mining on an outbound peer and testing
# block relay to inbound peers.
self.setup_nodes()
for i in range(self.num_nodes-1):
connect_nodes(self.nodes[i+1], i)
def run_test(self):
# Start building a chain on node0. node2 shouldn't be able to sync until node1's
# minchainwork is exceeded
starting_chain_work = REGTEST_WORK_PER_BLOCK # Genesis block's work
self.log.info("Testing relay across node %d (minChainWork = %d)", 1, self.node_min_work[1])
starting_blockcount = self.nodes[2].getblockcount()
num_blocks_to_generate = int((self.node_min_work[1] - starting_chain_work) / REGTEST_WORK_PER_BLOCK)
self.log.info("Generating %d blocks on node0", num_blocks_to_generate)
hashes = self.nodes[0].generate(num_blocks_to_generate)
self.log.info("Node0 current chain work: %s", self.nodes[0].getblockheader(hashes[-1])['chainwork'])
# Sleep a few seconds and verify that node2 didn't get any new blocks
# or headers. We sleep, rather than sync_blocks(node0, node1) because
# it's reasonable either way for node1 to get the blocks, or not get
# them (since they're below node1's minchainwork).
time.sleep(3)
self.log.info("Verifying node 2 has no more blocks than before")
self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes])
# Node2 shouldn't have any new headers yet, because node1 should not
# have relayed anything.
assert_equal(len(self.nodes[2].getchaintips()), 1)
assert_equal(self.nodes[2].getchaintips()[0]['height'], 0)
assert self.nodes[1].getbestblockhash() != self.nodes[0].getbestblockhash()
assert_equal(self.nodes[2].getblockcount(), starting_blockcount)
self.log.info("Generating one more block")
self.nodes[0].generate(1)
self.log.info("Verifying nodes are all synced")
# Because nodes in regtest are all manual connections (eg using
# addnode), node1 should not have disconnected node0. If not for that,
# we'd expect node1 to have disconnected node0 for serving an
# insufficient work chain, in which case we'd need to reconnect them to
# continue the test.
self.sync_all()
self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes])
if __name__ == '__main__':
MinimumChainWorkTest().main()
| 43.888889
| 108
| 0.702278
|
ba7c08914b15e1050e4ab7d6e3c20aea0b22fbec
| 306
|
py
|
Python
|
handlers/users/help.py
|
YoshlikMedia/Dssinnercircle
|
3811ebccedea8e47091d05509ce2867a57675285
|
[
"MIT"
] | null | null | null |
handlers/users/help.py
|
YoshlikMedia/Dssinnercircle
|
3811ebccedea8e47091d05509ce2867a57675285
|
[
"MIT"
] | null | null | null |
handlers/users/help.py
|
YoshlikMedia/Dssinnercircle
|
3811ebccedea8e47091d05509ce2867a57675285
|
[
"MIT"
] | null | null | null |
from aiogram import types
from aiogram.dispatcher.filters.builtin import CommandHelp
from data.text import text
from filters import IsPrivate
from loader import dp
@dp.message_handler(IsPrivate(), CommandHelp())
async def bot_help(message: types.Message):
await message.answer(text['help_button'])
| 23.538462
| 58
| 0.800654
|
9aa315921fac95865e5446d962323cfd750ca2bd
| 2,298
|
py
|
Python
|
sirepo/pkcli/elegant.py
|
njsmith/sirepo
|
82bdc848899a8d7bcd5e1ae73f91ca37d88f647c
|
[
"Apache-2.0"
] | 1
|
2019-06-06T08:08:11.000Z
|
2019-06-06T08:08:11.000Z
|
sirepo/pkcli/elegant.py
|
yeeon/sirepo
|
081595df256d40fbc7959614689d64ad2bc745d4
|
[
"Apache-2.0"
] | null | null | null |
sirepo/pkcli/elegant.py
|
yeeon/sirepo
|
081595df256d40fbc7959614689d64ad2bc745d4
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Wrapper to run elegant from the command line.
:copyright: Copyright (c) 2015 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern import pkio
from pykern import pksubprocess
from pykern.pkdebug import pkdp, pkdc, pkdlog
from sirepo import mpi
from sirepo import simulation_db
from sirepo.template import elegant_common
from sirepo.template import template_common
from sirepo.template.elegant import save_report_data, parse_elegant_log, ELEGANT_LOG_FILE
import py.path
def run(cfg_dir):
"""Run elegant in ``cfg_dir``
The files in ``cfg_dir`` must be configured properly.
Args:
cfg_dir (str): directory to run elegant in
"""
with pkio.save_chdir(cfg_dir):
try:
_run_elegant(bunch_report=True)
except Exception as e:
err = parse_elegant_log(py.path.local(cfg_dir))
if not err:
err = ['A server error occurred']
simulation_db.write_result({
'error': err[0],
})
save_report_data(simulation_db.read_json(template_common.INPUT_BASE_NAME), py.path.local(cfg_dir))
def run_background(cfg_dir):
"""Run elegant as a background task
Args:
cfg_dir (str): directory to run elegant in
"""
with pkio.save_chdir(cfg_dir):
        _run_elegant(with_mpi=True)
simulation_db.write_result({})
def _run_elegant(bunch_report=False, with_mpi=False):
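    # Note: the exec'd parameters file is expected to define lattice_file, elegant_file
    # and execution_mode, which are referenced below.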
exec(pkio.read_text(template_common.PARAMETERS_PYTHON_FILE), locals(), locals())
pkio.write_text('elegant.lte', lattice_file)
ele = 'elegant.ele'
pkio.write_text(ele, elegant_file)
kwargs = {
'output': ELEGANT_LOG_FILE,
'env': elegant_common.subprocess_env(),
}
try:
#TODO(robnagler) Need to handle this specially, b/c different binary
if execution_mode == 'parallel' and with_mpi and mpi.cfg.cores > 1:
mpi.run_program(['Pelegant', ele], **kwargs)
else:
pksubprocess.check_call_with_signals(['elegant', ele], msg=pkdlog, **kwargs)
except Exception as e:
# ignore elegant failures - errors will be parsed from the log
pass
| 33.304348
| 106
| 0.678416
|
bf01b909dc94bfe1569e71c6e30585f70c50a1be
| 229
|
py
|
Python
|
examples/pup/hub_technichub/system_shutdown.py
|
TheVinhLuong102/pybricks-api
|
1259d5d33acb41b383445a4b1776b38084efb481
|
[
"MIT"
] | 51
|
2020-04-02T10:03:45.000Z
|
2022-03-27T23:49:39.000Z
|
examples/pup/hub_technichub/system_shutdown.py
|
LEGO-Robotics/Pybricks-API
|
1259d5d33acb41b383445a4b1776b38084efb481
|
[
"MIT"
] | 77
|
2020-03-22T17:32:14.000Z
|
2022-03-28T18:02:43.000Z
|
examples/pup/hub_technichub/system_shutdown.py
|
LEGO-Robotics/Pybricks-API
|
1259d5d33acb41b383445a4b1776b38084efb481
|
[
"MIT"
] | 25
|
2020-03-18T23:35:17.000Z
|
2022-01-01T12:52:01.000Z
|
from pybricks.hubs import TechnicHub
from pybricks.tools import wait
# Initialize the hub.
hub = TechnicHub()
# Say goodbye and give some time to send it.
print("Goodbye!")
wait(100)
# Shut the hub down.
hub.system.shutdown()
| 17.615385
| 44
| 0.746725
|
62387c6d699147752416f40b344c61ef00f39e46
| 6,992
|
py
|
Python
|
dohq_teamcity/models/investigation.py
|
DenKoren/teamcity
|
69acb4d1402c316129b4602882a9cce2d55cf926
|
[
"MIT"
] | 23
|
2018-10-19T07:28:45.000Z
|
2021-11-12T12:46:09.000Z
|
dohq_teamcity/models/investigation.py
|
DenKoren/teamcity
|
69acb4d1402c316129b4602882a9cce2d55cf926
|
[
"MIT"
] | 31
|
2018-10-16T05:53:11.000Z
|
2021-09-09T14:44:14.000Z
|
dohq_teamcity/models/investigation.py
|
DenKoren/teamcity
|
69acb4d1402c316129b4602882a9cce2d55cf926
|
[
"MIT"
] | 12
|
2018-10-28T23:00:17.000Z
|
2021-09-07T12:07:13.000Z
|
# coding: utf-8
from dohq_teamcity.custom.base_model import TeamCityObject
# from dohq_teamcity.models.comment import Comment # noqa: F401,E501
# from dohq_teamcity.models.problem_scope import ProblemScope # noqa: F401,E501
# from dohq_teamcity.models.problem_target import ProblemTarget # noqa: F401,E501
# from dohq_teamcity.models.resolution import Resolution # noqa: F401,E501
# from dohq_teamcity.models.user import User # noqa: F401,E501
class Investigation(TeamCityObject):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'state': 'str',
'href': 'str',
'assignee': 'User',
'assignment': 'Comment',
'scope': 'ProblemScope',
'target': 'ProblemTarget',
'resolution': 'Resolution',
'responsible': 'User'
}
attribute_map = {
'id': 'id',
'state': 'state',
'href': 'href',
'assignee': 'assignee',
'assignment': 'assignment',
'scope': 'scope',
'target': 'target',
'resolution': 'resolution',
'responsible': 'responsible'
}
def __init__(self, id=None, state=None, href=None, assignee=None, assignment=None, scope=None, target=None, resolution=None, responsible=None, teamcity=None): # noqa: E501
"""Investigation - a model defined in Swagger""" # noqa: E501
self._id = None
self._state = None
self._href = None
self._assignee = None
self._assignment = None
self._scope = None
self._target = None
self._resolution = None
self._responsible = None
self.discriminator = None
if id is not None:
self.id = id
if state is not None:
self.state = state
if href is not None:
self.href = href
if assignee is not None:
self.assignee = assignee
if assignment is not None:
self.assignment = assignment
if scope is not None:
self.scope = scope
if target is not None:
self.target = target
if resolution is not None:
self.resolution = resolution
if responsible is not None:
self.responsible = responsible
super(Investigation, self).__init__(teamcity=teamcity)
@property
def id(self):
"""Gets the id of this Investigation. # noqa: E501
:return: The id of this Investigation. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Investigation.
:param id: The id of this Investigation. # noqa: E501
:type: str
"""
self._id = id
@property
def state(self):
"""Gets the state of this Investigation. # noqa: E501
:return: The state of this Investigation. # noqa: E501
:rtype: str
"""
return self._state
@state.setter
def state(self, state):
"""Sets the state of this Investigation.
:param state: The state of this Investigation. # noqa: E501
:type: str
"""
self._state = state
@property
def href(self):
"""Gets the href of this Investigation. # noqa: E501
:return: The href of this Investigation. # noqa: E501
:rtype: str
"""
return self._href
@href.setter
def href(self, href):
"""Sets the href of this Investigation.
:param href: The href of this Investigation. # noqa: E501
:type: str
"""
self._href = href
@property
def assignee(self):
"""Gets the assignee of this Investigation. # noqa: E501
:return: The assignee of this Investigation. # noqa: E501
:rtype: User
"""
return self._assignee
@assignee.setter
def assignee(self, assignee):
"""Sets the assignee of this Investigation.
:param assignee: The assignee of this Investigation. # noqa: E501
:type: User
"""
self._assignee = assignee
@property
def assignment(self):
"""Gets the assignment of this Investigation. # noqa: E501
:return: The assignment of this Investigation. # noqa: E501
:rtype: Comment
"""
return self._assignment
@assignment.setter
def assignment(self, assignment):
"""Sets the assignment of this Investigation.
:param assignment: The assignment of this Investigation. # noqa: E501
:type: Comment
"""
self._assignment = assignment
@property
def scope(self):
"""Gets the scope of this Investigation. # noqa: E501
:return: The scope of this Investigation. # noqa: E501
:rtype: ProblemScope
"""
return self._scope
@scope.setter
def scope(self, scope):
"""Sets the scope of this Investigation.
:param scope: The scope of this Investigation. # noqa: E501
:type: ProblemScope
"""
self._scope = scope
@property
def target(self):
"""Gets the target of this Investigation. # noqa: E501
:return: The target of this Investigation. # noqa: E501
:rtype: ProblemTarget
"""
return self._target
@target.setter
def target(self, target):
"""Sets the target of this Investigation.
:param target: The target of this Investigation. # noqa: E501
:type: ProblemTarget
"""
self._target = target
@property
def resolution(self):
"""Gets the resolution of this Investigation. # noqa: E501
:return: The resolution of this Investigation. # noqa: E501
:rtype: Resolution
"""
return self._resolution
@resolution.setter
def resolution(self, resolution):
"""Sets the resolution of this Investigation.
:param resolution: The resolution of this Investigation. # noqa: E501
:type: Resolution
"""
self._resolution = resolution
@property
def responsible(self):
"""Gets the responsible of this Investigation. # noqa: E501
:return: The responsible of this Investigation. # noqa: E501
:rtype: User
"""
return self._responsible
@responsible.setter
def responsible(self, responsible):
"""Sets the responsible of this Investigation.
:param responsible: The responsible of this Investigation. # noqa: E501
:type: User
"""
self._responsible = responsible
| 25.705882
| 176
| 0.586241
|
a139ae98e8a97536dd827986a1f05ca730dc5251
| 963
|
py
|
Python
|
knownly/console/migrations/0007_auto_20150713_1558.py
|
dwightgunning/knownly
|
55a3f82887dca1ff94723e3272ef79ed5f2d0eb2
|
[
"MIT"
] | 2
|
2017-11-21T20:24:01.000Z
|
2018-12-24T04:32:31.000Z
|
knownly/console/migrations/0007_auto_20150713_1558.py
|
dwightgunning/knownly
|
55a3f82887dca1ff94723e3272ef79ed5f2d0eb2
|
[
"MIT"
] | 2
|
2020-06-05T18:05:19.000Z
|
2021-06-10T20:04:02.000Z
|
knownly/console/migrations/0007_auto_20150713_1558.py
|
dwightgunning/knownly
|
55a3f82887dca1ff94723e3272ef79ed5f2d0eb2
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('console', '0006_auto_20150713_1343'),
]
operations = [
migrations.AddField(
model_name='archiveddropboxsite',
name='date_modified',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='archiveddropboxsite',
name='dropbox_hash',
field=models.CharField(max_length=64, blank=True),
),
migrations.AddField(
model_name='dropboxsite',
name='date_modified',
field=models.DateTimeField(null=True, blank=True),
),
migrations.AddField(
model_name='dropboxsite',
name='dropbox_hash',
field=models.CharField(max_length=64, blank=True),
),
]
| 27.514286
| 62
| 0.588785
|
fedd7825f644bda85bb32e6222ea8a436928d3de
| 949
|
py
|
Python
|
photo_booth.py
|
nasreddino/faceAttendance
|
baecad8bc7e6ae98003552aea02fd05fa21017f3
|
[
"MIT"
] | null | null | null |
photo_booth.py
|
nasreddino/faceAttendance
|
baecad8bc7e6ae98003552aea02fd05fa21017f3
|
[
"MIT"
] | null | null | null |
photo_booth.py
|
nasreddino/faceAttendance
|
baecad8bc7e6ae98003552aea02fd05fa21017f3
|
[
"MIT"
] | null | null | null |
# USAGE
# python photo_booth.py --output output
# import the necessary packages
from __future__ import print_function
from pyimagesearch.photoboothapp import PhotoBoothApp
from imutils.video import VideoStream
import argparse
import time
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-o", "--output", required=True,
help="path to output directory to store snapshots")
ap.add_argument("-p", "--picamera", type=int, default=-1,
help="whether or not the Raspberry Pi camera should be used")
ap.add_argument("-n", "--name", required=True,
help="Fill FirstNmae Entry to label image ")
args = vars(ap.parse_args())
# initialize the video stream and allow the camera sensor to warmup
print("[INFO] warming up camera...")
vs = VideoStream(usePiCamera=args["picamera"] > 0).start()
time.sleep(2.0)
# start the app
pba = PhotoBoothApp(vs, args["output"], args["name"])
pba.root.mainloop()
| 29.65625
| 67
| 0.748156
|
3340d8324072f679b4ac570eb34ade269e661621
| 414
|
py
|
Python
|
saas/system/api/resource/backend-framework/webpy/tesla-faas/examples/hello/handlers/base_test_handler.py
|
harry-xiaomi/SREWorks
|
e85c723ff15d2c9739d4d240be449b00b6db096d
|
[
"Apache-2.0"
] | 1
|
2022-03-22T01:09:10.000Z
|
2022-03-22T01:09:10.000Z
|
saas/system/api/resource/backend-framework/webpy/tesla-faas/examples/hello/handlers/base_test_handler.py
|
Kwafoor/SREWorks
|
37a64a0a84b29c65cf6b77424bd2acd0c7b42e2b
|
[
"Apache-2.0"
] | null | null | null |
saas/system/api/resource/backend-framework/webpy/tesla-faas/examples/hello/handlers/base_test_handler.py
|
Kwafoor/SREWorks
|
37a64a0a84b29c65cf6b77424bd2acd0c7b42e2b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
""" """
from container.webpy.common import BaseHandler
__author__ = 'adonis'
class TestBaseHandler(BaseHandler):
def test(self, params):
return params
def echo(self, params):
return {
'params': params,
'body': self.body,
'json_body': self.json_body,
}
def test_model(self, params):
pass
| 17.25
| 46
| 0.57971
|
41b6c2de179a1e9dd433f3c4ebf238032e4ec929
| 20,105
|
py
|
Python
|
niteshade/attack.py
|
oskarfernlund/data-poisoning-attacks
|
f45a90fe6057e1cf158ce85a824626acd5f34b64
|
[
"MIT"
] | null | null | null |
niteshade/attack.py
|
oskarfernlund/data-poisoning-attacks
|
f45a90fe6057e1cf158ce85a824626acd5f34b64
|
[
"MIT"
] | null | null | null |
niteshade/attack.py
|
oskarfernlund/data-poisoning-attacks
|
f45a90fe6057e1cf158ce85a824626acd5f34b64
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Data poisoning attack strategy classes following a logical hierarchy.
"""
# =============================================================================
# IMPORTS AND DEPENDENCIES
# =============================================================================
import math
import random
import numpy as np
from sklearn.utils import shuffle
from sklearn.preprocessing import OneHotEncoder
from sklearn.datasets import load_iris
import torch
import niteshade.utils as utils
# =============================================================================
# CLASSES
# =============================================================================
class Attacker():
""" General abstract Attacker class
"""
def __init__(self):
pass
def attack(self):
""" Abstract attack method
"""
raise NotImplementedError("attack method needs to be implemented")
class AddPointsAttacker(Attacker):
""" Abstract class for attackers that add points to the batch of data.
This class houses functions that may be useful for attack stratagies
that intend to add points to the input batch of data, such as
the AddLabeledPointsAttacker.
Args:
aggressiveness (float) : decides how many points to add
one_hot (bool) : tells if labels are one_hot encoded or not
"""
def __init__(self, aggressiveness, one_hot=False):
super().__init__()
self.aggressiveness = aggressiveness
self.one_hot = one_hot
def num_pts_to_add(self, x):
""" Calculates the number of points to add to the databatch.
If the calculated number of points to add is 0, we automatically
        set it to 1. This helps when testing with a batch size of 1: in
        practice the batch size will be much larger, so a small
        aggressiveness value is workable, but with a batch size of 1 any
        aggressiveness value below 1 would otherwise give 0 points to add.
Args:
x (array) : data
Returns:
num_to_add (int) : number of points to add
"""
num_points = len(x)
num_to_add = math.floor(num_points * self.aggressiveness)
if num_to_add == 0:
num_to_add = 1
return num_to_add
def pick_data_to_add(self, x, n, point=None):
""" Add n points of data.
        n points will be added, where n can be determined by num_pts_to_add.
If point!=None, then point will be added n times. Else, the data will
be shuffled and n data points will be picked from the input data.
Args:
x (array) : data
n (int) : number of data points to add
point (datapoint) : (optional) a specific point to add
Returns:
rows (array) : data to add
"""
        if point is None:
x = shuffle(x)
rows = x[:n,]
return rows
class ChangeLabelAttacker(Attacker):
""" Abstract class for attacker that can change labels.
Args:
aggressiveness (float) : decides how many points labels to change
one_hot (bool) : tells if labels are one_hot encoded or not
"""
def __init__(self, aggressiveness, one_hot=False):
super().__init__()
self.aggressiveness = aggressiveness
self.one_hot = one_hot
def num_pts_to_change(self, x):
""" Calculate the number of points to change labels for.
Args:
x (array) : data
Returns:
num_to_change (int) : number of points to change labels for
"""
num_points = len(x)
num_to_change = math.floor(num_points * self.aggressiveness)
if num_to_change == 0:
num_to_change = 1
return num_to_change
class PerturbPointsAttacker(Attacker):
""" Abstract class for attacker that can change the input data.
Args:
aggressiveness (float) : decides how many points to perturb
one_hot (bool) : tells if labels are one_hot encoded or not
"""
def __init__(self, aggressiveness, one_hot=False):
super().__init__()
self.aggressiveness = aggressiveness
self.one_hot = one_hot
class RandomAttacker(ChangeLabelAttacker):
""" Randomly change the labels of points.
Given an input batch of data and corresponding labels, use aggressiveness
    to calculate how many points in the batch to poison. Then, given the set
of labels for the batch of data, obtain a new set of unique labels of
the data. Then, for the number of points to poison, pick a datapoint
and change its label to a random label in the unique set of labels.
This is a strategy that flips labels, and is inspired by ideas in the
following paper: "On Defending Against Label Flipping Attacks on Malware
Detection Systems", https://arxiv.org/abs/1908.04473.
Args:
aggressiveness (float) : decides how many points to perturb
one_hot (bool) : tells if labels are one_hot encoded or not
"""
def __init__(self, aggressiveness, one_hot=False):
super().__init__(aggressiveness, one_hot)
def attack(self, X, y):
"""Attack the input batch of data.
Args:
X (array) : data
y (array/list) : labels
Returns:
X (array) : data
y (array/list) : random labels
"""
is_tensor = False
if [type(X), type(y)] == [torch.Tensor,torch.Tensor]:
is_tensor = True
X = X.numpy()
y = y.numpy()
        og_y = y # remember original y
if self.one_hot:
y = utils.decode_one_hot(y)
unique_labels = np.unique(y)
num_to_change = super().num_pts_to_change(X)
for i in range(num_to_change):
y[i] = np.random.choice(unique_labels, 1)
if self.one_hot:
num_classes = utils.check_num_of_classes(og_y)
y = utils.one_hot_encoding(y, num_classes)
if is_tensor:
X = torch.tensor(X)
y = torch.tensor(y)
return X, y
class AddLabeledPointsAttacker(AddPointsAttacker):
""" Adds points with a specified label.
Args:
aggressiveness (float) : decides how many points to add
label (any) : label for added points
one_hot (bool) : tells if labels are one_hot encoded or not
"""
def __init__(self, aggressiveness, label, one_hot=False):
super().__init__(aggressiveness, one_hot)
self.label = label
def attack(self, x, y):
""" Adds points to the minibatch
Add a certain number of points (based on the aggressiveness) to
the minibatch, with the y lable being as specified by the user.
Args:
x (array) : data
y (list/array) : labels
label : label attached to new points added
Returns:
x (array) : new data with added points
y (list/array) : labels of new data
"""
is_tensor = False
if [type(x), type(y)] == [torch.Tensor,torch.Tensor]:
is_tensor = True
x = x.numpy()
y = y.numpy()
        og_y = y # remember original y
if self.one_hot:
y = utils.decode_one_hot(y)
num_to_add = super().num_pts_to_add(x)
x_add = super().pick_data_to_add(x, num_to_add)
x = np.append(x, x_add, axis=0)
y_add = np.full((num_to_add, 1), self.label)
y = np.append(y, y_add)
x, y = shuffle(x, y)
if self.one_hot:
num_classes = utils.check_num_of_classes(og_y)
y = utils.one_hot_encoding(y, num_classes)
if is_tensor:
x = torch.tensor(x)
y = torch.tensor(y)
return x, y
class LabelFlipperAttacker(ChangeLabelAttacker):
""" Flip labels based on a dictionary of information.
This is a strategy that flips labels, and is inspired by ideas in the
following paper: "On Defending Against Label Flipping Attacks on Malware
Detection Systems", https://arxiv.org/abs/1908.04473.
Args:
aggressiveness (float) : decides how many points labels to change
label_flips (dict) : defines how to flip labels
one_hot (bool) : tells if labels are one_hot encoded or not
"""
def __init__(self, aggressiveness, label_flips, one_hot=False):
super().__init__(aggressiveness, one_hot)
self.label_flips = label_flips
def attack(self, x, y):
""" Method to change labels of points.
For given minibatch of data x and associated labels y, the labels in y
will be flipped based on the label_flips dict that will be specified by
the user.
Args:
x (array) : data
y (array/list) : labels
Returns:
x (array) : data
y (array/list) : flipped labels
"""
is_tensor = False
if [type(x), type(y)] == [torch.Tensor,torch.Tensor]:
is_tensor = True
x = x.numpy()
y = y.numpy()
og_y = y
if self.one_hot:
y = utils.decode_one_hot(y)
if random.random() < self.aggressiveness:
for i in range(len(y)):
element = y[i]
if self.one_hot:
element = element[0]
if element in self.label_flips:
y[i] = self.label_flips[element]
if self.one_hot:
num_classes = utils.check_num_of_classes(og_y)
y = utils.one_hot_encoding(y, num_classes)
if is_tensor:
x = torch.tensor(x)
y = torch.tensor(y)
return x, y
class BrewPoison(PerturbPointsAttacker):
"""Perturb points while minimising detectability.
Given a batch of input data and corresponding labels, the user chooses
which label to target. Lets take the example of MNIST, and say the user
targets the label 1. Then, all points in the batch with label 1 will be
identified. Aggressiveness helps determine the maximum number of points
that can be perturbed, ie, poison_budget. So, poison_budget number of
points are identified from the set of points with label 1.
A random perturbation is initialised in the range (0,1). However, the
data probably is not normalised to this range. For image data, the data
is likely to be in the range of (1, 255). So, after initialising a
perturbation, it is multiplied by the max of input data to scale it up.The
perturbation is applied to the datapoints that are to bne poisoned. Then,
using the model, a prediction is made. If the perturbed points are able to
cause a misclassification, ie the model predicts the label to not be 1,
then the infinity norm of the perturbation is calculated, and a new,
'smaller' perturbation is initialised by sampling between
(0, alpha*inf_norm), where inf_norm is the infinity norm of the previous
    perturbation. The perturbation is then applied to the original points to be
    poisoned, ie, now we have a set of perturbed points which is more
similar to the unperturbed points, and we use the model to predict again.
If instead, there is no misclassification, ie, the predicted label is 1,
then we return the unperturbed set or previously successful perturbed set
that was able to cause a misclassification.
This is repeated for either M optimization steps or until the perturbation
is unable to cause a misclassification. The perturbed points then replace
    the original points in the batch.
For such an attacker which makes use of a model and its predictions to
poison, it would make sense to be using a model that has already been
pre-trained. The user may use a pretrained or an untrained model. In the
case of an untrained model (or otherwise), the user has the ability to
implement a delay to BrewPoison, so as to allow the model to train for
a few episodes without the attacker intervening, thus simulating a
pretrained model. This is done by passing in the total_eps and start_ep
parameters. Here, for a 20 episode run where the attacker should poison
in the last 10 episodes, the user should set total_eps=20 and start_ep=10.
This strategy is not a direct implementation, but it is inspired by the
following paper: "Witches' Brew: Industrial Scale Data Poisoning via
Gradient Matching", https://arxiv.org/abs/2009.02276.
Args:
target (label) : label to use as a target for misclassification
M (int) : number of optimization steps for perturbation
aggressiveness (float) : determine max number of points to poison
alpha (float) : perturbation reduction parameter
start_ep (int) : number of episode after which attacker will poison
total_eps (int) : total number of eps in the simulation
one_hot (bool) : tells if labels are one_hot encoded or not
"""
def __init__(self, target, M=10, aggressiveness=0.1, alpha = 0.8,
start_ep=10, total_eps=20, one_hot=False):
self.target = target
self.M = M
self.aggressiveness = aggressiveness
self.alpha = alpha
self.start_ep = start_ep
self.total_eps = total_eps
self.one_hot = one_hot
self.curr_ep = 0
def apply_pert(self, selected_X, pert):
"""Apply the perturbation to a list of inputs.
Args:
selected_X (list) : list of tensors to perturb
pert (torch.tensor) : tensor used to perturb
Returns:
perturbed_X (list) : list of perturbed tensors
"""
perturbed_X = []
for tensor in selected_X:
perturbed_X.append(tensor + pert)
return perturbed_X
def get_new_pert(self, pert, alpha, X):
"""Initialise a new perturbation using the previous perturbation.
Given a perturbation, calculate the infinity norm of the perturbation,
then sample a new perturbation, with the maximum value being
alpha*infinity norm.
Args:
pert (tensor) : tensor to determine infinity norm
alpha (float) : Used to limit inf norm for max of new_pert
X (tensor) : tensor to use for shaping the pert
Returns:
new_pert (tensor) : new pert tensor limited by alpha and pert
"""
inf_norm = torch.max(torch.abs(pert.reshape(-1,1)))
init_pert_shape = torch.FloatTensor(X.shape[2:])
sample_pert = init_pert_shape.uniform_(0, alpha*inf_norm)
new_pert = sample_pert.repeat(X.shape[1], 1, 1)
return new_pert
def inc_reset_ep(self, curr_ep, total_eps):
"""Increase or reset the current episode number back to 0.
Increase the current episode number by 1 or reset it.
Reset needed since the attacker is initialised only once, and
so when we add to the attribute curr_ep, it carries ahead
through simulations. So, when running two simulations, this function
will reset the attribute to 0 before the next simulation starts.
Args:
curr_ep (int) : current episode number
total_eps (int) : total number of episodes
Returns:
curr_ep (int) : current episode number
"""
curr_ep += 1
if curr_ep == total_eps:
curr_ep = 0
return curr_ep
def attack(self, X, y, model):
"""Attacks batch of input data by perturbing.
Args:
X (array) : data
y (array/list) : labels
Returns:
X (array) : data
y (array/list) : flipped labels
"""
if self.curr_ep < self.start_ep:
# increase current episode
self.curr_ep = self.inc_reset_ep(self.curr_ep, self.total_eps)
return X, y
else:
            # keep track of original labels for encoding
og_y = y
# decode if needed
if self.one_hot:
y = utils.decode_one_hot(y)
# convert to tensors if needed
was_ndarray = False
if [type(X), type(y)] != [torch.Tensor,torch.Tensor]:
X = torch.tensor(X)
y = torch.tensor(y)
was_ndarray = True
# initialise points to be poisoned
poison_budget = int(len(X) * self.aggressiveness)
idxs = []
for i in range(len(y)):
if y[i] == self.target:
idxs.append(i)
poison_budget = min(poison_budget, len(idxs))
attacked_idxs = random.sample(idxs, poison_budget)
selected_y = [y[i] for i in attacked_idxs]
selected_X = [X[i] for i in attacked_idxs]
# initialise perturbation
perturbation = torch.rand(X.shape[2:]).repeat(X.shape[1], 1, 1)
# rescale to range of X
perturbation = torch.mul(perturbation, torch.max(X))
# optimization loop
i = 0
new_pert = perturbation
old_pert = torch.zeros(X.shape[2:]).repeat(X.shape[1], 1, 1)
perturbed_X = self.apply_pert(selected_X, new_pert)
while i<self.M:
# test result
point = perturbed_X[0]
# reshape into 4d tensor with batchsize = 1
test_point = point.reshape(1, point.shape[0], point.shape[1], point.shape[2])
model.eval()
with torch.no_grad():
result = torch.argmax(model.forward(test_point))
if result == selected_y[0]:
perturbed_X = self.apply_pert(selected_X, old_pert)
break
else:
old_pert = new_pert
new_pert = self.get_new_pert(old_pert, self.alpha, X)
i += 1
perturbed_X = self.apply_pert(selected_X, new_pert)
# replace points in X with points in perturbed_X
nth_point = 0
for index in attacked_idxs:
X[index] = perturbed_X[nth_point]
nth_point += 1
# convert to ndarray if needed
if was_ndarray:
X = X.numpy()
y = y.numpy()
# encode if needed
if self.one_hot:
num_classes = utils.check_num_of_classes(og_y)
y = utils.one_hot_encoding(y, num_classes)
# increase current episode
self.curr_ep = self.inc_reset_ep(self.curr_ep, self.total_eps)
return X, y
# =============================================================================
# MAIN ENTRY POINT
# =============================================================================
if __name__ == "__main__":
pass
| 35.584071
| 93
| 0.566178
|
3a55c7e2a76cf29d964e39b9d75f450e0b7a62e1
| 1,272
|
py
|
Python
|
setup.py
|
campenr/FileExplorer
|
49b268c0996de287319f5fbe9ac8ce2ef31b0ed5
|
[
"BSD-3-Clause"
] | 1
|
2015-07-06T17:37:59.000Z
|
2015-07-06T17:37:59.000Z
|
setup.py
|
campenr/dirbrowser
|
49b268c0996de287319f5fbe9ac8ce2ef31b0ed5
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
campenr/dirbrowser
|
49b268c0996de287319f5fbe9ac8ce2ef31b0ed5
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
"""Setup.py for dirbrowser."""
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name="dirbrowser",
version="1.0b2",
description="Command line based directory browser",
long_description=long_description,
url="https://github.com/campenr/dirbrowser",
author="Richard Campen",
author_email="richard@campen.co",
license="BSD License",
# TODO add more classifiers (e.g. platform)
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Topic :: Software Development :: User Interfaces",
"License :: OSI Approved :: BSD License",
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
keywords="directory browser interface",
packages=find_packages(),
include_package_data=True
# TODO add entry into scripts folder
)
| 27.06383
| 63
| 0.664308
|
eeadd8debf7b07deb2c6ccbcdc03e70311764ec8
| 4,688
|
py
|
Python
|
genomic_data_service/rnaseq/tests/test_rnaseq_gene.py
|
ENCODE-DCC/genomic-data-service
|
954017a5bcc5f448fbe2867768186df5e066c67c
|
[
"MIT"
] | 3
|
2020-10-26T02:15:55.000Z
|
2022-01-26T18:39:09.000Z
|
genomic_data_service/rnaseq/tests/test_rnaseq_gene.py
|
ENCODE-DCC/genomic-data-service
|
954017a5bcc5f448fbe2867768186df5e066c67c
|
[
"MIT"
] | 3
|
2021-08-17T02:01:54.000Z
|
2022-03-30T17:14:02.000Z
|
genomic_data_service/rnaseq/tests/test_rnaseq_gene.py
|
ENCODE-DCC/genomic-data-service
|
954017a5bcc5f448fbe2867768186df5e066c67c
|
[
"MIT"
] | 1
|
2022-03-24T21:15:34.000Z
|
2022-03-24T21:15:34.000Z
|
import pytest
def test_rnaseq_gene_init():
from genomic_data_service.rnaseq.domain.gene import Gene
gene = Gene({})
assert gene.props == {}
assert isinstance(gene, Gene)
def test_rnaseq_gene_extract_ensembl_ids(raw_human_genes):
from genomic_data_service.rnaseq.domain.gene import Gene
gene = Gene(raw_human_genes[0])
gene._extract_ensembl_ids()
assert gene._ensembl_ids == [
'ENSG00000224939',
]
gene.props['dbxrefs'].append('ENSEMBL:ENSG00000221650')
gene._extract_ensembl_ids()
assert gene._ensembl_ids == [
'ENSG00000224939',
'ENSG00000221650',
]
def test_rnaseq_gene_extract_gene_properties(raw_human_genes):
from genomic_data_service.rnaseq.domain.gene import Gene
gene = Gene(raw_human_genes[0])
gene._extract_gene_properties()
assert gene._gene_properties == {
'geneid': '100302691',
'name': 'long intergenic non-protein coding RNA 184',
'symbol': 'LINC00184',
'synonyms': ['HANC', 'NCRNA00184'],
'title': 'LINC00184 (Homo sapiens)',
'@id': '/genes/100302691/'
}
def test_rnaseq_gene_by_ensembl_ids(raw_human_genes):
from genomic_data_service.rnaseq.domain.gene import Gene
gene = Gene(raw_human_genes[0])
gene_by_ensembl_ids = list(gene.by_ensembl_ids())
assert gene_by_ensembl_ids == [
(
'ENSG00000224939',
{
'geneid': '100302691',
'name': 'long intergenic non-protein coding RNA 184',
'symbol': 'LINC00184',
'synonyms': ['HANC', 'NCRNA00184'],
'title': 'LINC00184 (Homo sapiens)',
'@id': '/genes/100302691/'
}
)
]
gene.props['dbxrefs'].append('ENSEMBL:ENSG00000221650')
gene_by_ensembl_ids = list(gene.by_ensembl_ids())
assert gene_by_ensembl_ids == [
(
'ENSG00000224939',
{
'geneid': '100302691',
'name': 'long intergenic non-protein coding RNA 184',
'symbol': 'LINC00184',
'synonyms': ['HANC', 'NCRNA00184'],
'title': 'LINC00184 (Homo sapiens)',
'@id': '/genes/100302691/',
}
),
(
'ENSG00000221650',
{
'geneid': '100302691',
'name': 'long intergenic non-protein coding RNA 184',
'symbol': 'LINC00184',
'synonyms': ['HANC', 'NCRNA00184'],
'title': 'LINC00184 (Homo sapiens)',
'@id': '/genes/100302691/',
}
)
]
def test_rnaseq_gene_get_genes_by_ensembl_id(human_genes):
from genomic_data_service.rnaseq.domain.gene import get_genes_by_ensembl_id
genes_by_ensembl_id = get_genes_by_ensembl_id(human_genes)
assert genes_by_ensembl_id == {
'ENSG00000224939': {
'@id': '/genes/100302691/',
'geneid': '100302691',
'name': 'long intergenic non-protein coding RNA 184',
'symbol': 'LINC00184',
'synonyms': ['HANC', 'NCRNA00184'],
'title': 'LINC00184 (Homo sapiens)'
},
'ENSG00000283857': {
'@id': '/genes/100302145/',
'geneid': '100302145',
'name': 'microRNA 1247',
'symbol': 'MIR1247',
'synonyms': ['MIRN1247', 'hsa-mir-1247', 'mir-1247'],
'title': 'MIR1247 (Homo sapiens)'
},
'ENSG00000260442': {
'@id': '/genes/100289092/',
'geneid': '100289092',
'name': 'ATP2A1 antisense RNA 1',
'symbol': 'ATP2A1-AS1',
'title': 'ATP2A1-AS1 (Homo sapiens)'
},
'ENSG00000221650': {
'@id': '/genes/100302286/',
'geneid': '100302286',
'name': 'microRNA 1267',
'symbol': 'MIR1267',
'synonyms': ['MIRN1267', 'hsa-mir-1267'],
'title': 'MIR1267 (Homo sapiens)'
},
'ENSG00000034677': {
'geneid': '25897',
'symbol': 'RNF19A',
'name': 'ring finger protein 19A, RBR E3 ubiquitin protein ligase',
'synonyms': ['DKFZp566B1346', 'RNF19', 'dorfin'],
'@id': '/genes/25897/',
'title': 'RNF19A (Homo sapiens)'
}
}
assert len(genes_by_ensembl_id) == 5
human_genes[0].props['dbxrefs'].append('ENSEMBL:ENSG00000221444')
genes_by_ensembl_id = get_genes_by_ensembl_id(human_genes)
assert len(genes_by_ensembl_id) == 6
assert (
genes_by_ensembl_id['ENSG00000224939'] == genes_by_ensembl_id['ENSG00000221444']
)
| 34.470588
| 88
| 0.556527
|
1b37d6b65c0bb339a666fdcfec4bce78a5a6d9ea
| 9,243
|
py
|
Python
|
utils/utils_activation_maps.py
|
GlowingHorse/Class-Discriminative-Vis
|
c4dec263f13225eed8598544b46c984784953c50
|
[
"MIT"
] | 2
|
2020-06-25T15:35:19.000Z
|
2020-07-08T11:14:46.000Z
|
utils/utils_activation_maps.py
|
GlowingHorse/class-discriminative-vis
|
c4dec263f13225eed8598544b46c984784953c50
|
[
"MIT"
] | 1
|
2020-06-25T15:59:54.000Z
|
2020-10-23T06:09:39.000Z
|
utils/utils_activation_maps.py
|
GlowingHorse/Class-Discriminative-Vis
|
c4dec263f13225eed8598544b46c984784953c50
|
[
"MIT"
] | null | null | null |
"""
The utils_activation_maps.py module
provides utilities for bringing activation maps and attributions
into the proper size and format before visualization.
"""
import numpy as np
from skimage.transform import resize
import os
from utils.utils import save_imgs, plot, resize_show, \
MatrixDecomposer, SklearnCluster
from operator import itemgetter
import umap
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
def create_root_dir(img_name, attr_class, flag1):
name_str = os.path.splitext(os.path.basename(img_name))[-2]
root_directory = './' + name_str + '/' + attr_class + '_' + flag1
if not os.path.exists(root_directory):
os.makedirs(root_directory)
return root_directory
def create_factorization_dir(root_directory, factorization_method,
no_slash_layer_name, reverse_suffix,
n_groups):
save_directory = root_directory + '/' + factorization_method + '/' \
+ no_slash_layer_name + reverse_suffix + str(n_groups)
if not os.path.exists(save_directory):
os.makedirs(save_directory)
return save_directory
def print_result_from_logit(logit_list, labels):
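    # Print the ten highest-scoring labels and their logit values.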
sorted_logit = logit_list.argsort()
pred_index = sorted_logit[-10:][::-1]
# np.argmax(logit, axis=1)
print(itemgetter(*pred_index)(labels))
print(logit_list[sorted_logit[-10:][::-1]])
def debug_show_AM_plus_img(grad_cam_list, img, model):
# Visualize the activation maps on original image
for grad_cams_backup in grad_cam_list:
sort_grad_cams_idx = np.argsort(-(grad_cams_backup.sum(0).sum(0)))
grad_cams_backup_trans = np.transpose(grad_cams_backup, (2, 0, 1))
for i_sort_grad_cams in range(sort_grad_cams_idx.shape[0]):
grad_cam_backup = grad_cams_backup[..., sort_grad_cams_idx[i_sort_grad_cams]]
grad_cam_backup = grad_cam_backup.reshape([grad_cam_backup.shape[-1], grad_cam_backup.shape[-1]])
grad_cam_backup = resize(grad_cam_backup, (model.image_shape[0], model.image_shape[1]), order=1,
mode='constant', anti_aliasing=False)
grad_cam_backup = grad_cam_backup / grad_cam_backup.max() * 255
resize_show(grad_cam_backup, xi=img)
print("image {} in all {} images".format(i_sort_grad_cams, sort_grad_cams_idx.shape[0]))
def decompose_AM_get_group_num(factorization_method, AM, thres_explained_var):
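    # Grow the number of groups until the decomposition's explained-variance score
    # exceeds thres_explained_var, then return the spatial and channel factors.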
for i_n_groups in range(3, 999):
factor_model = MatrixDecomposer(i_n_groups, factorization_method)
if factorization_method == 'FactorAnalysis':
_ = factor_model.fit(AM)
spatial_factors = factor_model.transform(AM)
else:
spatial_factors = factor_model.fit_transform(AM)
score = factor_model.get_score(AM, spatial_factors)
if score > thres_explained_var:
spatial_factors = spatial_factors.transpose(2, 0, 1).astype("float32")
channel_factors = factor_model._decomposer.components_.astype("float32")
n_groups = i_n_groups
return spatial_factors, channel_factors, n_groups
def cluster_AM_get_group_num(cluster_method, AM, **kwargs):
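    # Pick the cluster count with an elbow heuristic: fit 1..26 clusters and choose
    # the count where the second derivative of the inertia curve is largest.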
scaler = MinMaxScaler()
AM_scaled = scaler.fit_transform(np.reshape(AM, (-1, AM.shape[-1])))
cluster_max_num = 26
inertia = np.empty(cluster_max_num)
secondDerivative = np.empty((cluster_max_num - 5))
for i_n_groups in range(cluster_max_num):
cluster_model = SklearnCluster(i_n_groups+1, cluster_method, **kwargs)
_ = cluster_model.fit_predict(AM_scaled)
inertia[i_n_groups] = cluster_model._decomposer.inertia_
for i_n_groups in range(4, cluster_max_num-1):
secondDerivative[i_n_groups-4] = inertia[i_n_groups+1] + inertia[i_n_groups-1] - 2*inertia[i_n_groups]
# plt.figure()
# plt.plot(range(cluster_max_num - 3), inertia)
n_groups = np.argmax(secondDerivative)+4
cluster_model = SklearnCluster(n_groups, cluster_method)
labels = cluster_model.fit_predict(AM)
return labels, n_groups
def decompose_AM_with_UMAP(AM, n_groups):
umap_reducer = umap.umap_.UMAP(n_components=n_groups)
grad_cam_flat = AM.reshape([-1, AM.shape[-1]])
spatial_factors = umap_reducer.fit_transform(grad_cam_flat)
spatial_factors = spatial_factors.reshape(AM.shape)
    spatial_factors = spatial_factors.transpose(2, 0, 1).astype("float32")
    return spatial_factors
def map_shap_attr_to_long(channel_factors, channel_attr, kept_channel_index):
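    # Assign each kept channel to the factor with its largest loading, then scatter the
    # per-channel attributions back into the full (long) channel index space per group.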
channel_factors_max_index = channel_factors.argmax(axis=0)
channel_shap = np.zeros((channel_factors.shape[0], channel_attr.shape[0]))
short_index = []
long_index = []
n_groups = channel_factors.shape[0]
for channel_factors_i in range(n_groups):
short_index.append(
np.squeeze(np.argwhere(channel_factors_max_index == channel_factors_i), axis=1))
map_short_to_long_idx = \
kept_channel_index[short_index[channel_factors_i]]
long_index.append(map_short_to_long_idx)
channel_shap[channel_factors_i, map_short_to_long_idx] = \
channel_attr[map_short_to_long_idx]
return channel_factors_max_index, channel_shap, short_index, long_index, n_groups
def map_cluster_label_to_long(labels, channel_attr, kept_channel_index):
channel_factors_max_index = labels
n_groups = labels.max() + 1
channel_shap = np.zeros((n_groups, channel_attr.shape[0]))
short_index = []
long_index = []
for channel_factors_i in range(n_groups):
short_index.append(
np.squeeze(np.argwhere(channel_factors_max_index == channel_factors_i), axis=1))
map_short_to_long_idx = \
kept_channel_index[short_index[channel_factors_i]]
long_index.append(map_short_to_long_idx)
channel_shap[channel_factors_i, map_short_to_long_idx] = \
channel_attr[map_short_to_long_idx]
return channel_factors_max_index, channel_shap, short_index, long_index, n_groups
def weight_AM2spatial_factor(AM, spatial_factors, n_groups,
short_index, kept_channel_index,
channel_attr, i_grad_cam_list_L):
"""
    Weight activation maps using feature attributions.
'''
# Alternatives
AM = np.squeeze(AM)
spatial_factors = np.zeros_like(spatial_factors)
for channel_factors_i in range(n_groups):
if len(short_index[channel_factors_i]) == 0:
continue
temp = np.squeeze(AM[..., short_index[channel_factors_i]])
if len(temp.shape) == 3:
spatial_factors[channel_factors_i, ...] = np.sum(temp, axis=-1)
else:
spatial_factors[channel_factors_i, ...] = temp
'''
"""
spatial_factors = np.zeros_like(spatial_factors)
for channel_factors_i in range(n_groups):
if len(short_index[channel_factors_i]) == 0:
continue
map_short_to_long_idx = \
kept_channel_index[short_index[channel_factors_i]]
temp = np.squeeze(AM[..., short_index[channel_factors_i]])
temp = temp * channel_attr[map_short_to_long_idx]
if i_grad_cam_list_L > 0:
temp *= -1
if len(temp.shape) == 3:
spatial_factors[channel_factors_i, ...] = np.sum(temp, axis=-1)
else:
spatial_factors[channel_factors_i, ...] = temp
return spatial_factors
def weight_AM2spatial_factor2(AM, spatial_factors, channel_factors, channel_attr,
n_groups, i_grad_cam_list_L):
"""
Using feature attributions and channel factors to weight activation maps
"""
spatial_factors = np.zeros_like(spatial_factors)
for i in range(n_groups):
temp = channel_factors[i] * channel_attr[i] * AM
if i_grad_cam_list_L > 0:
temp *= -1
if len(temp.shape) == 3:
spatial_factors[i, ...] = np.sum(temp, axis=-1)
else:
spatial_factors[i, ...] = temp
return spatial_factors
def weight_AM2spatial_factor_FGSM(AM, spatial_factors, n_groups,
short_index, kept_channel_index,
channel_shap, i_grad_cam_list_L):
"""
The same with weight_AM2spatial_factor,
but apply to adversarial samples
"""
spatial_factors = np.zeros_like(spatial_factors)
for channel_factors_i in range(n_groups):
if len(short_index[channel_factors_i]) == 0:
continue
map_short_to_long_idx = \
kept_channel_index[short_index[channel_factors_i]]
temp = AM[..., short_index[channel_factors_i]]
temp = temp * channel_shap[channel_factors_i, map_short_to_long_idx]
if i_grad_cam_list_L > 0:
temp *= -1
if len(temp.shape) == 3:
spatial_factors[channel_factors_i, ...] = np.sum(temp, axis=-1)
else:
spatial_factors[channel_factors_i, ...] = temp
return spatial_factors
def get_sort_groups_with_shap_scores(channel_shap):
every_group_attr = np.sum(channel_shap, axis=1)
ns_sorted = np.argsort(-every_group_attr)
return ns_sorted
def sort_groups_features(ns_sorted, spatial_factors, channel_shap, n_groups):
every_group_attr = np.sum(channel_shap, axis=1)
every_group_attr_sorted = every_group_attr[ns_sorted]
spatial_factors = spatial_factors[ns_sorted]
channel_shap = channel_shap[ns_sorted]
for i_remove_zero_attr in range(n_groups):
if every_group_attr_sorted[i_remove_zero_attr] == 0:
spatial_factors = np.delete(spatial_factors, -1, 0)
channel_shap = np.delete(channel_shap, -1, 0)
n_groups = channel_shap.shape[0]
return every_group_attr_sorted, spatial_factors, channel_shap, n_groups
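# Illustrative sketch (hypothetical 3-group example): groups are reordered by
# their total attribution and trailing all-zero groups are dropped.
def _demo_sort_groups_features():
    channel_shap = np.array([[0.0, 0.0], [0.3, 0.1], [0.2, 0.0]])
    spatial_factors = np.random.rand(3, 4, 4)
    order = get_sort_groups_with_shap_scores(channel_shap)  # -> [1, 2, 0]
    return sort_groups_features(order, spatial_factors, channel_shap, 3)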
| 38.836134
| 106
| 0.730066
|
b86e8d4480f28723e2ffd0c4669fc88f5914ab7e
| 11,788
|
py
|
Python
|
gooddata-metadata-client/gooddata_metadata_client/model/declarative_users.py
|
hkad98/gooddata-python-sdk
|
64942080ecb44c2d8e914e57f7a591daa6cca205
|
[
"MIT"
] | null | null | null |
gooddata-metadata-client/gooddata_metadata_client/model/declarative_users.py
|
hkad98/gooddata-python-sdk
|
64942080ecb44c2d8e914e57f7a591daa6cca205
|
[
"MIT"
] | null | null | null |
gooddata-metadata-client/gooddata_metadata_client/model/declarative_users.py
|
hkad98/gooddata-python-sdk
|
64942080ecb44c2d8e914e57f7a591daa6cca205
|
[
"MIT"
] | null | null | null |
"""
OpenAPI definition
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v0
Contact: support@gooddata.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from gooddata_metadata_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from gooddata_metadata_client.exceptions import ApiAttributeError
def lazy_import():
from gooddata_metadata_client.model.declarative_user import DeclarativeUser
globals()['DeclarativeUser'] = DeclarativeUser
class DeclarativeUsers(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'users': ([DeclarativeUser],), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'users': 'users', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, users, *args, **kwargs): # noqa: E501
"""DeclarativeUsers - a model defined in OpenAPI
Args:
users ([DeclarativeUser]):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', True)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.users = users
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, users, *args, **kwargs): # noqa: E501
"""DeclarativeUsers - a model defined in OpenAPI
Args:
users ([DeclarativeUser]):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.users = users
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 42.555957
| 124
| 0.563454
|
f237d654ebdecd8bddd9870468a65394c8886d87
| 1,570
|
py
|
Python
|
Python/bfs.py
|
radome/algorithms-and-data-structures
|
06b7dab6e5865e199a682cb52d1ed70aeb6dfcab
|
[
"Apache-2.0"
] | 1
|
2018-02-15T16:59:55.000Z
|
2018-02-15T16:59:55.000Z
|
Python/bfs.py
|
radome/algorithms_and_data_structures
|
06b7dab6e5865e199a682cb52d1ed70aeb6dfcab
|
[
"Apache-2.0"
] | null | null | null |
Python/bfs.py
|
radome/algorithms_and_data_structures
|
06b7dab6e5865e199a682cb52d1ed70aeb6dfcab
|
[
"Apache-2.0"
] | null | null | null |
"""Breadth First Search on a graph"""
from linked_list import Node  # local helper module providing the linked-list Node
class lightQueue(object):
"""Simple queue"""
def __init__(self):
self.head = None
self.tail = None
def enqueue(self, lst):
"""Enqueue a list of nodes"""
for obj in lst:
if self.head:
self.tail.nextn = Node(obj)
self.tail = self.tail.nextn
else:
self.tail = Node(obj)
self.head = self.tail
def dequeue(self):
"""Returns the head of the queue"""
if self.head:
popped = self.head
self.head = popped.nextn
return popped.data
else:
raise IndexError('Popping from empty queue')
    def isEmpty(self):
        """Returns True if the queue is empty, False otherwise"""
        return self.head is None
def bfsTraverse(graphDict, start):
"""Returns a tuple with the nodes traversed in BFS order"""
queue = lightQueue()
    notVisited = set(graphDict.keys())
queue.enqueue([start])
notVisited.remove(start)
result = [start]
while not queue.isEmpty():
curNode = queue.dequeue()
neighbours = graphDict[curNode]
toSort = []
for node in neighbours:
if node in notVisited:
toSort.append(node)
notVisited.remove(node)
toSort.sort()
queue.enqueue(toSort)
result.extend(toSort)
return tuple(result)
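# Minimal usage sketch with a hypothetical adjacency-list graph; neighbours are
# visited in sorted order level by level (requires the local linked_list module).
if __name__ == '__main__':
    sample_graph = {
        'A': ['B', 'C'],
        'B': ['A', 'D'],
        'C': ['A', 'D'],
        'D': ['B', 'C'],
    }
    assert bfsTraverse(sample_graph, 'A') == ('A', 'B', 'C', 'D')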
| 26.166667
| 69
| 0.555414
|
cf23f2575939dff2b1aed4a18eb0fbd3cc17bd0e
| 46,448
|
py
|
Python
|
visualizer.py
|
call518/virtual-network-visualizer
|
dfa10c249768a82125e901f2c3357e0278b8b4ea
|
[
"Apache-2.0"
] | 3
|
2019-04-02T14:22:26.000Z
|
2021-03-16T06:52:54.000Z
|
visualizer.py
|
call518/OpenStack-Network-Visualizer
|
dfa10c249768a82125e901f2c3357e0278b8b4ea
|
[
"Apache-2.0"
] | null | null | null |
visualizer.py
|
call518/OpenStack-Network-Visualizer
|
dfa10c249768a82125e901f2c3357e0278b8b4ea
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- mode:python; coding:utf-8 -*-
### Whole Topology
## ex) python visualizer.py
### Local-VM to Local-VM
## ex) python visualizer.py --src "T:tap94d54818-a5" --dst "T:tapeee4966d-68" [--onlypath]
### Local-VM to Remote-VM
## ex) python visualizer.py --src "T:tap94d54818-a5" --dst "T:tap708a8386-2f" [--onlypath]
### Local-VM to External
## ex) python visualizer.py --src "T:tap94d54818-a5" --dst "I:eth1(pub-compute-001)" [--onlypath]
import warnings
warnings.filterwarnings("ignore")
import paramiko
import time
import sys, getopt
import json
#import socket
import networkx as nx
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import pandas as pd
import random
import operator
import re
import time
import plotly
#plotly.tools.set_credentials_file(username='your-account', api_key='your-api-key')
import plotly.plotly as py
import plotly.graph_objs as go
def exec_ssh(ssh_hostname, ssh_cmd):
SSH_USERNAME = "root"
SSH_PASSWORD = "password"
SSH_KEY_FILE = "/root/.ssh/id_rsa"
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh_stdin = ssh_stdout = ssh_stderr = None
try:
#ssh.connect(SSH_ADDRESS, username=SSH_USERNAME, password=SSH_PASSWORD)
ssh.connect(hostname=ssh_hostname, port=22, username=SSH_USERNAME, key_filename=SSH_KEY_FILE)
ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(ssh_cmd, timeout=None, bufsize=-1, get_pty=False, environment=None)
except Exception as e:
sys.stderr.write("SSH connection error: {0}".format(e))
output = ssh_stdout.read()
return output
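# Minimal usage sketch (hypothetical host name; assumes passwordless root SSH
# with the key configured above): run one command and return its stdout.
def _demo_exec_ssh():
    return exec_ssh("dev-r2compute-001", "ovs-vsctl show")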
def removeDup(src_list):
return set([tuple(sorted(item)) for item in src_list])
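# Tiny illustrative check: pair order and duplicate pairs are both ignored.
def _demo_removeDup():
    return removeDup([("a", "b"), ("b", "a"), ("a", "c")])  # -> {("a", "b"), ("a", "c")}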
def xstr(value):
if value is None:
return 'NONE'
else:
return str(value)
def findDictValue(xstr, xdict):
if xstr in xdict:
return xdict[xstr]
else:
return "NONE"
def getArgs(argv):
src_node = None
dst_node = None
only_path = False
enable_fip = False
enable_plotly = False
    usage_str = " [{-s|--src} <src node> {-d|--dst} <dst node>] [-o|--onlypath] [-f|--fip] [-p|--plotly]"
try:
        opts, args = getopt.getopt(argv, "hs:d:ofp", ["help", "src=", "dst=", "onlypath", "fip", "plotly"])
except getopt.GetoptError:
print("# python " + sys.argv[0] + usage_str)
sys.exit(2)
for opt, arg in opts:
if opt in ('-h', '--help'):
print("# python " + sys.argv[0] + usage_str)
sys.exit()
elif opt in ("-s", "--src"):
src_node = arg
elif opt in ("-d", "--dst"):
dst_node = arg
elif opt in ("-o", "--onlypath"):
only_path = True
elif opt in ("-f", "--fip"):
enable_fip = True
elif opt in ("-p", "--plotly"):
enable_plotly = True
if (src_node == None and dst_node != None) or (src_node != None and dst_node == None):
print("# python " + sys.argv[0] + usage_str)
sys.exit(2)
elif (src_node == None and dst_node == None) and only_path:
print("# python " + sys.argv[0] + usage_str)
sys.exit(2)
return src_node, dst_node, only_path, enable_fip, enable_plotly
def getHostnameByOvsLocalIp(ip):
for hostname, ovs_local_ip in hostnames.items():
if ip == ovs_local_ip:
return hostname
return None
#####################################################################################
if __name__ == '__main__':
    ## Arguments selecting the source/destination nodes for the shortest-path lookup
src_node, dst_node, only_path, enable_fip, enable_plotly = getArgs(sys.argv[1:])
print("\nProcessing.............")
start_time = time.time()
    ## Check the argument combination.
isSP = False
if src_node != None and dst_node != None:
isSP = True
if isSP == False:
only_path = False
###############################################
    ### Collect raw data
###############################################
result = []
result_linux_bridge = []
result_dvr_fip = []
#hostnames = (
# #"dev-r2network-001",
# "dev-r2compute-001",
# #"dev-r2compute-002",
#)
    ## Pairs of hostname and OVS local IP.
#ovs_local_ip_by_hostname = {
hostnames = {
"dev-r2network-001": "10.0.42.18",
"dev-r2compute-001": "10.0.42.36",
"dev-r2compute-002": "10.0.42.37",
}
for hostname in hostnames:
        ## Check whether this host is a network host
        # Decided by the presence of a snat-xxx namespace:
        # if present it is a network host, otherwise a compute host.
isNetworkHost = False
return_chk_network_node = exec_ssh(hostname, "if `ip netns | grep -q snat`; then echo network-node; fi")
if return_chk_network_node.strip() == "network-node":
isNetworkHost = True
output_bridge = exec_ssh(hostname, "ovs-vsctl -f json list br")
output_port = exec_ssh(hostname, "ovs-vsctl -f json list port")
output_interface = exec_ssh(hostname, "ovs-vsctl -f json list interface")
json_data_bridge = json.loads(output_bridge)
json_data_interface = json.loads(output_interface)
json_data_port = json.loads(output_port)
for item_interface in json_data_interface['data']:
if_hostname = hostname
if_uuid = item_interface[0][1]
if_admin_state = item_interface[1]
#if_name = "I:" + item_interface[26] + "(" + if_hostname + ")"
if_name = "I:" + item_interface[26]
if if_name.startswith("I:eth"):
if_name = if_name + "(" + hostname + ")"
if_type = item_interface[33]
if if_type in ["vxlan", "patch", "internal"]:
if_name = if_name + "(" + hostname + ")"
if_external_ids = item_interface[13][1]
if_link_state = item_interface[20]
if type(item_interface[24]) is list:
if_mtu = None
else:
if_mtu = item_interface[24]
if_ofport = item_interface[27]
if_options = item_interface[29][1]
if_other_config = item_interface[30][1]
if_statistics = item_interface[31][1]
if_status = item_interface[32][1]
            ## Look up the OpenStack metadata (external_ids)
if_external_ids_attached_mac = if_external_ids_iface_id = if_external_ids_iface_status = if_external_ids_vm_uuid = None
if len(if_external_ids) > 0:
if_external_ids_attached_mac = if_external_ids[0][1]
if_external_ids_iface_id = if_external_ids[1][1]
if_external_ids_iface_status = if_external_ids[2][1]
if len(if_external_ids) > 3:
if_external_ids_vm_uuid = if_external_ids[3][1]
            ## Look up the Options attributes
if_options_patch_peer = if_options_vxlan_df_default = if_options_vxlan_in_key = if_options_vxlan_local_ip = if_options_vxlan_out_key = if_options_vxlan_remote_ip = None
if if_type == "patch":
if_options_patch_peer = if_options[0][1]
elif if_type == "vxlan":
if_options_vxlan_df_default = if_options[0][1]
if_options_vxlan_in_key = if_options[1][1]
if_options_vxlan_local_ip = if_options[2][1]
if_options_vxlan_out_key = if_options[3][1]
if_options_vxlan_remote_ip = if_options[4][1]
            ## Look up the Statistics attributes
if_statistics_collisions = if_statistics_rx_bytes = if_statistics_rx_crc_err = if_statistics_rx_dropped = if_statistics_rx_errors = if_statistics_rx_frame_err = if_statistics_rx_over_err = if_statistics_rx_packets = if_statistics_tx_bytes = if_statistics_tx_dropped = if_statistics_tx_errors = if_statistics_tx_packets = None
statistics_dict = {}
for k, v in if_statistics:
statistics_dict[k] = v
if_statistics_collisions = findDictValue("collisions", statistics_dict)
if_statistics_rx_bytes = findDictValue("rx_bytes", statistics_dict)
if_statistics_rx_crc_err = findDictValue("rx_crc_err", statistics_dict)
if_statistics_rx_dropped = findDictValue("rx_dropped", statistics_dict)
if_statistics_rx_errors = findDictValue("rx_dropped", statistics_dict)
if_statistics_rx_frame_err = findDictValue("rx_frame_err", statistics_dict)
if_statistics_rx_over_err = findDictValue("rx_over_err", statistics_dict)
if_statistics_rx_packets = findDictValue("rx_packets", statistics_dict)
if_statistics_tx_bytes = findDictValue("tx_bytes", statistics_dict)
if_statistics_tx_dropped = findDictValue("tx_dropped", statistics_dict)
if_statistics_tx_errors = findDictValue("tx_errors", statistics_dict)
if_statistics_tx_packets = findDictValue("tx_packets", statistics_dict)
            ## Find the Port that this Interface belongs to
if_port_uuid = if_port_name = tmp_port_interface_uuid = tmp_port_name = None
for item_port in json_data_port['data']:
                ## Attribute order differs between OVS versions, so handle both (workaround)
try:
tmp_port_interface_uuid = item_port[8][1]
except TypeError:
tmp_port_interface_uuid = item_port[9][1]
if if_uuid == tmp_port_interface_uuid:
if_port_uuid = item_port[0][1]
try:
if_port_name = "P:" + item_port[11] + "(" + hostname + ")"
except TypeError:
if_port_name = "P:" + item_port[12] + "(" + hostname + ")"
break
            ## Find the Bridge that this Port belongs to
if_br_uuid = if_br_name = None
if if_port_uuid:
for item_bridge in json_data_bridge['data']:
tmp_br_uuid = item_bridge[0][1]
tmp_br_name = item_bridge[13]
for port in item_bridge[16][1]:
if if_port_uuid == port[1]:
if_br_uuid = tmp_br_uuid
if_br_name = "B:" + tmp_br_name + "(" + hostname + ")"
break
result.append({
"isNetworkHost": isNetworkHost,
"if_hostname": if_hostname,
"if_uuid": if_uuid,
"if_name": if_name,
"if_admin_state": if_admin_state,
"if_name": if_name,
"if_type": if_type,
"if_external_ids_attached_mac": if_external_ids_attached_mac,
"if_external_ids_iface_id": if_external_ids_iface_id,
"if_external_ids_iface_status": if_external_ids_iface_status,
"if_external_ids_vm_uuid": if_external_ids_vm_uuid,
"if_link_state": if_link_state,
"if_mtu": if_mtu,
"if_ofport": if_ofport,
"if_options_patch_peer": if_options_patch_peer,
"if_options_vxlan_df_default": if_options_vxlan_df_default,
"if_options_vxlan_in_key": if_options_vxlan_in_key,
"if_options_vxlan_local_ip": if_options_vxlan_local_ip,
"if_options_vxlan_out_key": if_options_vxlan_out_key,
"if_options_vxlan_remote_ip": if_options_vxlan_remote_ip,
"if_other_config": if_other_config,
"if_statistics_collisions": if_statistics_collisions,
"if_statistics_rx_bytes": if_statistics_rx_bytes,
"if_statistics_rx_crc_err": if_statistics_rx_crc_err,
"if_statistics_rx_dropped": if_statistics_rx_dropped,
"if_statistics_rx_errors": if_statistics_rx_errors,
"if_statistics_rx_frame_err": if_statistics_rx_frame_err,
"if_statistics_rx_over_err": if_statistics_rx_over_err,
"if_statistics_rx_packets": if_statistics_rx_packets,
"if_statistics_tx_bytes": if_statistics_tx_bytes,
"if_statistics_tx_dropped": if_statistics_tx_dropped,
"if_statistics_tx_errors": if_statistics_tx_errors,
"if_statistics_tx_packets": if_statistics_tx_packets,
"if_status": if_status,
"if_port_uuid": if_port_uuid,
"if_port_name": if_port_name,
"if_br_uuid": if_br_uuid,
"if_br_name": if_br_name
})
        ## Collect Linux bridge information
cmd = "BR_ARRAY=(`ip link list type bridge | awk '/^[0-9]/ {print $2}' | sed -e 's/:$//g'`); num_br=1; for br in ${BR_ARRAY[@]}; do if [ $num_br -eq 1 ]; then echo '['; fi; echo '{\"'$br'\": ['; IF_ARRAY=(`ls -1 /sys/devices/virtual/net/$br/brif/`); num=1; for if in ${IF_ARRAY[@]}; do echo '\"'$if'\"'; if [ $num -lt ${#IF_ARRAY[@]} ]; then echo ','; fi; ((num++)); done; echo ']}'; if [ $num_br -lt ${#BR_ARRAY[@]} ]; then echo ','; else echo ']'; fi; ((num_br++)); done | tr '\n' ' '"
output_linux_bridge = exec_ssh(hostname, cmd)
if len(output_linux_bridge) > 0:
output_linux_bridge = "{ \"hostname\": \"" + hostname + "\", \"data\": " + output_linux_bridge + "}"
json_data_linux_bridge = json.loads(output_linux_bridge)
else:
json_data_linux_bridge = json.loads('{ "hostname": "%s", "data": [] }' % hostname)
        ## Build the Linux bridge list
for data in json_data_linux_bridge['data']:
for bridge, interfaces in data.items():
#print(bridge, interfaces)
result_linux_bridge.append({
"isNetworkHost": isNetworkHost,
"hostname": hostname,
"bridge_name": bridge,
"interfaces": interfaces,
})
        ## Only when FIP handling is enabled and this is a compute host (no snat-xxx namespace), collect FIP-connectivity metadata
if enable_fip and not isNetworkHost:
output_dvr_fip = exec_ssh(hostname, "(echo \"[\"; for NS in `ip netns | awk '{print $1}' | egrep \"qrouter|fip\"`; do IFs=`ip netns exec $NS ip -o link show | awk '{print $2\",\"$5\",\"$17 | \"sed -e 's/:,/,/g'\"}' | egrep \"^rfp|^fpr|^fg|^qr\"`; for IF in $IFs; do IF_ARRAY=(`echo $IF | awk -F',' '{print $1,$2,$3}'`); IF_NAME_ARRAY=(`echo ${IF_ARRAY[0]} | awk -F'@' '{print $1, $2}'`); IF_NAME=${IF_NAME_ARRAY[0]}; IF_ID=`ip netns exec $NS ip -o link show $IF_NAME | awk '{print $1 | \"sed -e 's/://g'\"}'`; IF_MTU=${IF_ARRAY[1]}; IF_MAC=${IF_ARRAY[2]}; IF_PAIR_ID=`echo ${IF_NAME_ARRAY[1]} | sed -e 's/^if//g'`; echo \"{\"; echo \"\\\"if_name\\\": \\\"$IF_NAME\\\",\"; echo \"\\\"if_namespace\\\": \\\"$NS\\\",\"; echo \"\\\"if_id\\\": \\\"$IF_ID\\\",\"; echo \"\\\"if_mtu\\\": \\\"$IF_MTU\\\",\"; echo \"\\\"if_mac\\\": \\\"$IF_MAC\\\",\"; echo \"\\\"if_pair_id\\\": \\\"$IF_PAIR_ID\\\"\"; echo \"},\"; done ;done | sed -e '$s/,//'; echo \"]\")")
for item in json.loads(output_dvr_fip):
item['if_hostname'] = hostname
if_name = item['if_name']
if if_name.startswith(("qr-","fg-")):
item['if_name'] = "I:" + if_name + "(" + hostname + ")"
else:
item['if_name'] = "VP:" + if_name + "(" + hostname + ")"
result_dvr_fip.append(item)
###############################################
    ### Start of image generation
###############################################
    plt.figure(figsize=(10,10)) ## enlarge the canvas
G = nx.Graph()
for interface in result:
#print("if_name: %s (%s)" % (interface['if_name'], interface['if_uuid']))
#print(" if_port_name: %s (%s)" % (interface['if_port_name'], interface['if_port_uuid']))
#print(" if_br_name: %s (%s)" % (interface['if_br_name'], interface['if_br_uuid']))
if_name = interface['if_name']
if_type = interface['if_type']
        ## Register every interface type as a graph node
G.add_node(if_name,
isNetworkHost = xstr(interface['isNetworkHost']),
if_name = xstr(interface['if_name']),
if_hostname = xstr(interface['if_hostname']),
if_uuid = xstr(interface['if_uuid']),
if_admin_state = xstr(interface['if_admin_state']),
if_type = xstr(if_type),
if_external_ids_attached_mac = xstr(interface['if_external_ids_attached_mac']),
if_external_ids_iface_id = xstr(interface['if_external_ids_iface_id']),
if_external_ids_iface_status = xstr(interface['if_external_ids_iface_status']),
if_external_ids_vm_uuid = xstr(interface['if_external_ids_vm_uuid']),
if_link_state = xstr(interface['if_link_state']),
if_mtu = xstr(interface['if_mtu']),
if_ofport = xstr(interface['if_ofport']),
if_options_patch_peer = xstr(interface['if_options_patch_peer']),
if_options_vxlan_df_default = xstr(interface['if_options_vxlan_df_default']),
if_options_vxlan_in_key = xstr(interface['if_options_vxlan_in_key']),
if_options_vxlan_local_ip = xstr(interface['if_options_vxlan_local_ip']),
if_options_vxlan_out_key = xstr(interface['if_options_vxlan_out_key']),
if_options_vxlan_remote_ip = xstr(interface['if_options_vxlan_remote_ip']),
if_other_config = xstr(interface['if_other_config']),
if_statistics_collisions = xstr(interface['if_statistics_collisions']),
if_statistics_rx_bytes = xstr(interface['if_statistics_rx_bytes']),
if_statistics_rx_crc_err = xstr(interface['if_statistics_rx_crc_err']),
if_statistics_rx_dropped = xstr(interface['if_statistics_rx_dropped']),
if_statistics_rx_errors = xstr(interface['if_statistics_rx_errors']),
if_statistics_rx_frame_err = xstr(interface['if_statistics_rx_frame_err']),
if_statistics_rx_over_err = xstr(interface['if_statistics_rx_over_err']),
if_statistics_rx_packets = xstr(interface['if_statistics_rx_packets']),
if_statistics_tx_bytes = xstr(interface['if_statistics_tx_bytes']),
if_statistics_tx_dropped = xstr(interface['if_statistics_tx_dropped']),
if_statistics_tx_errors = xstr(interface['if_statistics_tx_errors']),
if_statistics_tx_packets = xstr(interface['if_statistics_tx_packets']),
if_status = xstr(interface['if_status']),
if_port_uuid = xstr(interface['if_port_uuid']),
if_port_name = xstr(interface['if_port_name']),
if_br_uuid = xstr(interface['if_br_uuid']),
if_br_name = xstr(interface['if_br_name'])
)
        ## Add edge info for Interface <-> Port <-> Bridge
        # Registering the edges also implicitly adds the Port/Bridge nodes.
G.add_edge(interface['if_name'], interface['if_port_name'])
G.add_edge(interface['if_port_name'], interface['if_br_name'])
#print G.nodes(data=True)
#sys.exit(1)
        # Add isNetworkHost and if_type attributes to the auto-added Port/Bridge nodes
G.node[interface['if_port_name']]['isNetworkHost'] = interface['isNetworkHost']
G.node[interface['if_port_name']]['if_type'] = ''
G.node[interface['if_port_name']]['if_hostname'] = interface['if_hostname']
G.node[interface['if_br_name']]['isNetworkHost'] = interface['isNetworkHost']
G.node[interface['if_br_name']]['if_type'] = ''
G.node[interface['if_br_name']]['if_hostname'] = interface['if_hostname']
#print G.node['P:qr-47ac2c1a-b0(pub-compute-001)']['isNetworkHost']
#print G.nodes(data=True)
#sys.exit(1)
        ## Build VxLAN tunnel connections
if if_type == "vxlan":
vxlan_local_ip = interface['if_options_vxlan_local_ip']
vxlan_remote_ip = interface['if_options_vxlan_remote_ip']
vxlan_local_hostname = interface['if_options_vxlan_local_ip']
vxlan_remote_hostname = interface['if_options_vxlan_remote_ip']
#print(vxlan_local_ip, vxlan_remote_ip)
#G.add_edge(interface['if_name'], interface['if_port_name'])
#print(if_name, interface['if_options'])
    ## Add Linux bridge info (nodes/edges) to 'G'
edge_VP2LB = []
edge_I2VP = []
edge_T2LB = []
for item in result_linux_bridge:
isNetworkHost = item['isNetworkHost']
hostname = item['hostname']
br_name = "LB:" + item['bridge_name']
interfaces = item['interfaces']
G.add_node(br_name)
G.node[br_name]['isNetworkHost'] = isNetworkHost
G.node[br_name]['if_type'] = ''
for interface in interfaces:
if interface.startswith("qvb"):
if_name = "VP:" + interface
if_name_ovs_pair = re.sub(r'^VP:qvb', 'I:qvo', if_name)
G.add_node(if_name)
G.node[if_name]['isNetworkHost'] = isNetworkHost
G.node[if_name]['if_type'] = ''
G.node[if_name]['if_hostname'] = hostname
G.add_edge(if_name_ovs_pair, if_name)
G.add_edge(if_name, br_name)
edge_VP2LB.append((if_name, br_name))
edge_I2VP.append((if_name_ovs_pair, if_name))
elif interface.startswith("tap"):
if_name = "T:" + interface
G.add_node(if_name)
G.node[if_name]['isNetworkHost'] = isNetworkHost
G.node[if_name]['if_type'] = ''
G.node[if_name]['if_hostname'] = hostname
G.add_edge(if_name, br_name)
edge_T2LB.append((if_name, br_name))
    ## Build a dictionary of VxLAN tunnel links
vxlan_link_dict = {}
for node_data in G.nodes(data=True):
if_name = node_data[0]
data_dict = node_data[1]
#print if_name, data_dict
#sys.exit(1)
if len(data_dict) > 0:
if_type = data_dict['if_type']
if if_type == "vxlan":
vxlan_local_ip = data_dict['if_options_vxlan_local_ip']
vxlan_remote_ip = data_dict['if_options_vxlan_remote_ip']
#vxlan_local_hostname = socket.gethostbyaddr(vxlan_local_ip)[0]
vxlan_local_hostname = getHostnameByOvsLocalIp(vxlan_local_ip)
#vxlan_remote_hostname = socket.gethostbyaddr(vxlan_remote_ip)[0]
vxlan_remote_hostname = getHostnameByOvsLocalIp(vxlan_remote_ip)
vxlan_link_dict[vxlan_local_hostname + "---" + vxlan_remote_hostname] = if_name
    ## Build node and edge lists grouped by if_type
    ## Build the list of interface nodes whose if_link_state is down
nodes_if_type_patch = []
nodes_if_type_vxlan = []
nodes_if_type_internal = []
nodes_if_type_internal_fg = []
nodes_if_type_normal = []
edge_if_type_patch = []
edge_if_type_vxlan = []
#nodes_if_link_state_down = []
for node_data in G.nodes(data=True):
if_name = node_data[0]
data_dict = node_data[1]
if len(data_dict) > 0:
if_type = data_dict['if_type']
#if_link_state = data_dict['if_link_state']
if if_type == "patch":
nodes_if_type_patch.append(if_name)
peer_if_hostname = data_dict['if_hostname']
peer_if_name = "I:" + data_dict['if_options_patch_peer'] + "(" + peer_if_hostname + ")"
edge_if_type_patch.append((if_name, peer_if_name))
elif if_type == "vxlan":
nodes_if_type_vxlan.append(if_name)
vxlan_local_ip = data_dict['if_options_vxlan_local_ip']
#vxlan_local_hostname = socket.gethostbyaddr(vxlan_local_ip)[0]
vxlan_local_hostname = getHostnameByOvsLocalIp(vxlan_local_ip)
vxlan_remote_ip = data_dict['if_options_vxlan_remote_ip']
#vxlan_remote_hostname = socket.gethostbyaddr(vxlan_remote_ip)[0]
vxlan_remote_hostname = getHostnameByOvsLocalIp(vxlan_remote_ip)
if vxlan_remote_hostname in hostnames:
find_key = vxlan_remote_hostname + "---" + vxlan_local_hostname
remote_if_name = vxlan_link_dict[find_key]
edge_if_type_vxlan.append((if_name, remote_if_name))
elif if_type == "internal":
if if_name.startswith("I:fg-"):
nodes_if_type_internal_fg.append(if_name)
else:
nodes_if_type_internal.append(if_name)
else:
nodes_if_type_normal.append(if_name)
#if if_link_state == "down":
# nodes_if_link_state_down.append(if_name)
    ## Build Interface/Port/Bridge edge lists (duplicates possible)
edge_I2P = [(u, v) for (u, v) in G.edges() if (u.startswith("I:") and v.startswith("P:")) or (u.startswith("P:") and v.startswith("I:"))]
edge_P2B = [(u, v) for (u, v) in G.edges() if (u.startswith("P:") and v.startswith("B:")) or (u.startswith("B:") and v.startswith("P:"))]
if enable_fip:
        ## To add the FIP info, remove edges of the form (B:br-int, P:int-br-ex)
list_isNetworkHost = G.nodes(data='isNetworkHost')
for (u, v) in edge_P2B:
#print u
#sys.exit(1)
if not G.node[u]['isNetworkHost']:
if (u.startswith("B:br-int") and v.startswith("P:int-br-ex")) or (u.startswith("P:int-br-ex") and v.startswith("B:br-int")):
#print(u, v)
G.remove_edge(u, v)
## "namespace + hostname"๋ฅผ key๋ก ํด์, VP:rfp-* ์ธํฐํ์ด์ค ์ด๋ฆ ์กฐํ ๊ฐ๋ฅํ dict ์์ฑ
dvr_fip_ns_hostname_rfp_pair = {}
for item in result_dvr_fip:
if_name = item['if_name']
if_hostname = item['if_hostname']
if if_name.startswith("VP:rfp-"):
if_namespace = item['if_namespace']
key = if_namespace + "(" + if_hostname + ")"
dvr_fip_ns_hostname_rfp_pair[key] = if_name
        ## Create/add node and edge info for I:qr-* <-> VP:rfp-*
edge_SNAT = []
for item in result_dvr_fip:
if_name = item['if_name']
if if_name.startswith("I:qr-"):
if_namespace = item['if_namespace']
if_hostname = item['if_hostname']
key = item['if_namespace'] + "(" + if_hostname + ")"
if_rfp = dvr_fip_ns_hostname_rfp_pair[key]
#print if_namespace, if_name, if_rfp
G.add_node(if_rfp)
G.add_edge(if_name, if_rfp)
#print if_name, if_rfp
#edge_I2P.append((if_name, if_rfp))
edge_SNAT.append((if_name, if_rfp))
        ## Build a dict keyed by the VP:fpr-* interface index ID for looking up VP:fpr-* names
dvr_fip_id_hostname_fpr_pair = {}
for item in result_dvr_fip:
if_name = item['if_name']
if if_name.startswith("VP:fpr-"):
if_id = item['if_id']
if_hostname = item['if_hostname']
key = if_id + "(" + if_hostname + ")"
#print if_id, if_name
dvr_fip_id_hostname_fpr_pair[key] = if_name
        ## Create/add node and edge info for VP:rfp-* <-> VP:fpr-*
for item in result_dvr_fip:
if_name = item['if_name']
if if_name.startswith("VP:rfp-"):
if_pair_id = item['if_pair_id']
if_hostname = item['if_hostname']
key = if_pair_id + "(" + if_hostname + ")"
#print if_pair_id
if_pair_name = dvr_fip_id_hostname_fpr_pair[key]
#print if_name, if_pair_name
G.add_node(if_pair_name)
G.add_edge(if_name, if_pair_name)
edge_I2VP.append((if_name, if_pair_name))
        ## Build a dict keyed by hostname for looking up I:fg-* interface names
dvr_fip_hostname_fg_pair = {}
for item in result_dvr_fip:
if_name = item['if_name']
if_hostname = item['if_hostname']
if if_name.startswith("I:fg-"):
dvr_fip_hostname_fg_pair[if_hostname] = if_name
        ## Create/add edge info for VP:fpr-* <-> I:fg-*
edge_ROUTING = []
for item in result_dvr_fip:
if_name = item['if_name']
if if_name.startswith("VP:fpr-"):
if_hostname = item['if_hostname']
if_fg_name = dvr_fip_hostname_fg_pair[if_hostname]
#print if_name, if_hostname, if_fg_name
G.add_edge(if_name, if_fg_name)
edge_ROUTING.append((if_name, if_fg_name))
    ## Remove duplicate edges regardless of pair order
edge_I2P = removeDup(edge_I2P)
edge_P2B = removeDup(edge_P2B)
edge_VP2LB = removeDup(edge_VP2LB)
edge_I2VP = removeDup(edge_I2VP)
edge_T2LB = removeDup(edge_T2LB)
if enable_fip:
edge_SNAT = removeDup(edge_SNAT)
edge_ROUTING = removeDup(edge_ROUTING)
edge_if_type_patch = removeDup(edge_if_type_patch)
edge_if_type_vxlan = removeDup(edge_if_type_vxlan)
    ## Merge all edge lists into the graph object G
G.add_edges_from(edge_I2P)
G.add_edges_from(edge_P2B)
G.add_edges_from(edge_VP2LB)
G.add_edges_from(edge_I2VP)
G.add_edges_from(edge_T2LB)
if enable_fip:
G.add_edges_from(edge_SNAT)
G.add_edges_from(edge_ROUTING)
G.add_edges_from(edge_if_type_patch)
G.add_edges_from(edge_if_type_vxlan)
    ## Build the shortest-path node list between the requested source and destination nodes
if isSP:
shortest_path_list = nx.astar_path(G, source=src_node, target=dst_node)
#shortest_path_list = nx.shortest_path(G, source=src_node, target=dst_node)
#shortest_path_list = nx.all_shortest_paths(G, source=src_node, target=dst_node)
#shortest_path_list = nx.shortest_path_length(G, source=src_node, target=dst_node)
#shortest_path_list = nx.average_shortest_path_length(G, source=src_node, target=dst_node)
#shortest_path_list = nx.has_path(G, source=src_node, target=dst_node)
#for p in shortest_path_list:
# print(p)
#sys.exit(1)
    ## Build node lists by kind (Interface/Port/Bridge)
nodes_interface = [node for node in G.nodes() if node.startswith("I:")]
nodes_port = [node for node in G.nodes() if node.startswith("P:")]
nodes_bridge = [node for node in G.nodes() if node.startswith("B:")]
nodes_linux_bridge = [node for node in G.nodes() if node.startswith("LB:")]
nodes_linux_interface_pair = [node for node in G.nodes() if node.startswith("VP:")]
nodes_linux_interface_tap = [node for node in G.nodes() if node.startswith("T:")]
if isSP:
nodes_sp_interface = [node for node in shortest_path_list if node.startswith("I:")]
nodes_sp_port = [node for node in shortest_path_list if node.startswith("P:")]
nodes_sp_bridge = [node for node in shortest_path_list if node.startswith("B:")]
nodes_sp_linux_bridge = [node for node in shortest_path_list if node.startswith("LB:")]
nodes_sp_linux_interface_pair = [node for node in shortest_path_list if node.startswith("VP:")]
nodes_sp_linux_interface_tap = [node for node in shortest_path_list if node.startswith("T:")]
nodes_sp_if_type_patch = [node for node in nodes_if_type_patch if node in shortest_path_list]
nodes_sp_if_type_vxlan = [node for node in nodes_if_type_vxlan if node in shortest_path_list]
nodes_sp_if_type_internal = [node for node in nodes_if_type_internal if node in shortest_path_list]
nodes_sp_if_type_internal_fg = [node for node in nodes_if_type_internal_fg if node in shortest_path_list]
#nodes_sp_if_link_state_down = [node for node in nodes_if_link_state_down if node in shortest_path_list]
    ## Build shortest-path node lists by kind (Interface/Port/Bridge)
if isSP:
        ## Build shortest-path edge lists
edge_I2P_sp = []
for edge in edge_I2P:
src = edge[0]
dst = edge[1]
if src in shortest_path_list and dst in shortest_path_list:
edge_I2P_sp.append(edge)
edge_P2B_sp = []
for edge in edge_P2B:
src = edge[0]
dst = edge[1]
if src in shortest_path_list and dst in shortest_path_list:
edge_P2B_sp.append(edge)
edge_VP2LB_sp = []
for edge in edge_VP2LB:
src = edge[0]
dst = edge[1]
if src in shortest_path_list and dst in shortest_path_list:
edge_VP2LB_sp.append(edge)
edge_I2VP_sp = []
for edge in edge_I2VP:
src = edge[0]
dst = edge[1]
if src in shortest_path_list and dst in shortest_path_list:
edge_I2VP_sp.append(edge)
edge_T2LB_sp = []
for edge in edge_T2LB:
src = edge[0]
dst = edge[1]
if src in shortest_path_list and dst in shortest_path_list:
edge_T2LB_sp.append(edge)
edge_if_type_patch_sp = []
for edge in edge_if_type_patch:
src = edge[0]
dst = edge[1]
if src in shortest_path_list and dst in shortest_path_list:
edge_if_type_patch_sp.append(edge)
edge_if_type_vxlan_sp = []
for edge in edge_if_type_vxlan:
src = edge[0]
dst = edge[1]
if src in shortest_path_list and dst in shortest_path_list:
edge_if_type_vxlan_sp.append(edge)
if only_path:
        ## Keep only the node info that is needed
nodes_sp_if_type_patch_tmp = []
for node in nodes_sp_if_type_patch:
if node in shortest_path_list:
nodes_sp_if_type_patch_tmp.append(node)
nodes_sp_if_type_patch = nodes_sp_if_type_patch_tmp
nodes_sp_if_type_vxlan_tmp = []
for node in nodes_sp_if_type_vxlan:
if node in shortest_path_list:
nodes_sp_if_type_vxlan_tmp.append(node)
nodes_sp_if_type_vxlan = nodes_sp_if_type_vxlan_tmp
nodes_sp_if_type_internal_tmp = []
for node in nodes_sp_if_type_internal:
if node in shortest_path_list:
nodes_sp_if_type_internal_tmp.append(node)
nodes_sp_if_type_internal = nodes_sp_if_type_internal_tmp
if enable_fip:
nodes_sp_if_type_internal_fg_tmp = []
for node in nodes_sp_if_type_internal_fg:
if node in shortest_path_list:
nodes_sp_if_type_internal_fg_tmp.append(node)
nodes_sp_if_type_internal_fg = nodes_sp_if_type_internal_fg_tmp
        ## Keep only the edge info that is needed
edge_I2P_sp_tmp = []
for edge in edge_I2P:
src = edge[0]
dst = edge[1]
if src in shortest_path_list and dst in shortest_path_list:
edge_I2P_sp_tmp.append(edge)
edge_I2P_sp = edge_I2P_sp_tmp
edge_P2B_sp_tmp = []
for edge in edge_P2B:
src = edge[0]
dst = edge[1]
if src in shortest_path_list and dst in shortest_path_list:
edge_P2B_sp_tmp.append(edge)
edge_P2B_sp = edge_P2B_sp_tmp
edge_VP2LB_sp_tmp = []
for edge in edge_VP2LB:
src = edge[0]
dst = edge[1]
if src in shortest_path_list and dst in shortest_path_list:
edge_VP2LB_sp_tmp.append(edge)
edge_VP2LB_sp = edge_VP2LB_sp_tmp
edge_I2VP_sp_tmp = []
for edge in edge_I2VP:
src = edge[0]
dst = edge[1]
if src in shortest_path_list and dst in shortest_path_list:
edge_I2VP_sp_tmp.append(edge)
edge_I2VP_sp = edge_I2VP_sp_tmp
edge_T2LB_sp_tmp = []
for edge in edge_T2LB:
src = edge[0]
dst = edge[1]
if src in shortest_path_list and dst in shortest_path_list:
edge_T2LB_sp_tmp.append(edge)
edge_T2LB_sp = edge_T2LB_sp_tmp
edge_if_type_patch_sp_tmp = []
for edge in edge_if_type_patch:
src = edge[0]
dst = edge[1]
if src in shortest_path_list and dst in shortest_path_list:
edge_if_type_patch_sp_tmp.append(edge)
edge_if_type_patch_sp = edge_if_type_patch_sp_tmp
edge_if_type_vxlan_sp_tmp = []
for edge in edge_if_type_vxlan:
src = edge[0]
dst = edge[1]
if src in shortest_path_list and dst in shortest_path_list:
edge_if_type_vxlan_sp_tmp.append(edge)
edge_if_type_vxlan_sp = edge_if_type_vxlan_sp_tmp
        ## Remove unrelated nodes from G
for node in list(G.nodes()):
if node not in shortest_path_list:
G.remove_node(node)
    ## Define the layout
if only_path == True:
pos = nx.spring_layout(G, k=0.05, iterations=70)
else:
pos = nx.kamada_kawai_layout(G, scale=1)
#pos = nx.shell_layout(G) # positions for all nodes
#pos = nx.spring_layout(G, k=0.05, iterations=50) # positions for all nodes
#pos = nx.spring_layout(G, iterations=50)
#pos = nx.spectral_layout(G, scale=2) # positions for all nodes
#pos = nx.circular_layout(G) # positions for all nodes
#pos = nx.random_layout(G) # positions for all nodes
    ## Node-overlap-avoiding layout: kamada_kawai (caution: slow when there are many nodes)
#df = pd.DataFrame(index=G.nodes(), columns=G.nodes())
#for row, data in nx.shortest_path_length(G):
# for col, dist in data.items():
# df.loc[row,col] = dist
#df = df.fillna(df.max().max())
#pos = nx.kamada_kawai_layout(G, dist=df.to_dict())
    ## Default node size
node_szie = 3
if isSP:
alpha_normal = 0.1
else:
alpha_normal = 0.5
alpha_sp = 0.9
    ### Define/draw the default node styles
if not only_path:
        ## Draw OVS interface nodes
nx.draw_networkx_nodes(G, pos, nodelist=nodes_interface, with_labels=True, node_size=node_szie, node_shape='o', node_color='#F972FF', alpha=alpha_normal, linewidths=1, label='OVS Interface')
        ## Draw OVS port nodes
nx.draw_networkx_nodes(G, pos, nodelist=nodes_port, with_labels=True, node_size=node_szie, node_shape='o', node_color='#72B2FF', alpha=alpha_normal, linewidths=1, label='OVS POrt')
        ## Draw OVS bridge nodes
nx.draw_networkx_nodes(G, pos, nodelist=nodes_bridge, with_labels=True, node_size=node_szie, node_shape='o', node_color='#FF5634', alpha=alpha_normal, linewidths=1, label='OVS Bridge')
        ## Draw Linux veth-pair interface nodes
nx.draw_networkx_nodes(G, pos, nodelist=nodes_linux_interface_pair, with_labels=True, node_size=node_szie, node_shape='o', node_color='#F972FF', alpha=alpha_normal, linewidths=1, label='Linux VETH (One of Pair)')
        ## Draw Linux tap interface nodes
nx.draw_networkx_nodes(G, pos, nodelist=nodes_linux_interface_tap, with_labels=True, node_size=node_szie, node_shape='o', node_color='#7E7E7E', alpha=alpha_normal, linewidths=1, label='Linux TAP')
        ## Draw Linux bridge nodes
nx.draw_networkx_nodes(G, pos, nodelist=nodes_linux_bridge, with_labels=True, node_size=node_szie, node_shape='o', node_color='#0C00A0', alpha=alpha_normal, linewidths=1, label='Linux Bridge')
        ## Recolor patch-type nodes
nx.draw_networkx_nodes(G, pos, nodelist=nodes_if_type_patch, with_labels=True, node_size=node_szie, node_shape='o', node_color='#279700', alpha=alpha_normal, linewidths=1, label='OVS Patch (One of Patch)')
        ## Recolor VxLAN-type nodes
nx.draw_networkx_nodes(G, pos, nodelist=nodes_if_type_vxlan, with_labels=True, node_size=node_szie, node_shape='o', node_color='#E9D000', alpha=alpha_normal, linewidths=1, label='OVS VxLAN')
        ## Recolor internal-type nodes
nx.draw_networkx_nodes(G, pos, nodelist=nodes_if_type_internal, with_labels=True, node_size=node_szie, node_shape='o', node_color='#382000', alpha=alpha_normal, linewidths=1, label='OVS Internal')
nx.draw_networkx_nodes(G, pos, nodelist=nodes_if_type_internal_fg, with_labels=True, node_size=node_szie, node_shape='o', node_color='#FF0000', alpha=alpha_normal, linewidths=1, label='OVS Internal(fg)')
        ## Recolor link-down nodes
        ## Unused (the OVS link_state value is not accurate; it apparently cannot check interfaces inside namespaces)
#nx.draw_networkx_nodes(G, pos, nodelist=nodes_if_link_state_down, with_labels=True, node_size=node_szie, node_shape='o', node_color='#FF0000', alpha=alpha_normal, linewidths=1, label='OVS Link-Down')
#nx.draw_networkx_nodes(G, pos, nodelist=nodes_sp_if_link_state_down, with_labels=True, node_size=node_szie_ap, node_shape='o', node_color='#FF0000', alpha=alpha_sp, linewidths=1, label='OVS Link-Down')
    ### Define/overwrite node styles for shortest-path mode
if isSP:
if only_path:
node_szie_sp = 300
else:
node_szie_sp = 20
        ## Draw OVS interface nodes
nx.draw_networkx_nodes(G, pos, nodelist=nodes_sp_interface, with_labels=True, node_size=node_szie_sp, node_shape='o', node_color='#F972FF', alpha=alpha_sp, linewidths=1, label='OVS Interface')
        ## Draw OVS port nodes
nx.draw_networkx_nodes(G, pos, nodelist=nodes_sp_port, with_labels=True, node_size=node_szie_sp, node_shape='o', node_color='#72B2FF', alpha=alpha_sp, linewidths=1, label='OVS POrt')
        ## Draw OVS bridge nodes
nx.draw_networkx_nodes(G, pos, nodelist=nodes_sp_bridge, with_labels=True, node_size=node_szie_sp, node_shape='o', node_color='#FF5634', alpha=alpha_sp, linewidths=1, label='OVS Bridge')
        ## Draw Linux veth-pair interface nodes
nx.draw_networkx_nodes(G, pos, nodelist=nodes_sp_linux_interface_pair, with_labels=True, node_size=node_szie_sp, node_shape='o', node_color='#F972FF', alpha=alpha_sp, linewidths=1, label='Linux VETH (One of Pair)')
        ## Draw Linux tap interface nodes
nx.draw_networkx_nodes(G, pos, nodelist=nodes_sp_linux_interface_tap, with_labels=True, node_size=node_szie_sp, node_shape='o', node_color='#7E7E7E', alpha=alpha_sp, linewidths=1, label='Linux TAP')
        ## Draw Linux bridge nodes
nx.draw_networkx_nodes(G, pos, nodelist=nodes_sp_linux_bridge, with_labels=True, node_size=node_szie_sp, node_shape='o', node_color='#0C00A0', alpha=alpha_sp, linewidths=1, label='Linux Bridge')
        ## Recolor patch-type nodes
nx.draw_networkx_nodes(G, pos, nodelist=nodes_sp_if_type_patch, with_labels=True, node_size=node_szie_sp, node_shape='o', node_color='#279700', alpha=alpha_sp, linewidths=1, label='OVS Patch (One of Patch)')
        ## Recolor VxLAN-type nodes
nx.draw_networkx_nodes(G, pos, nodelist=nodes_sp_if_type_vxlan, with_labels=True, node_size=node_szie_sp, node_shape='o', node_color='#E9D000', alpha=alpha_sp, linewidths=1, label='OVS VxLAN')
        ## Recolor internal-type nodes
nx.draw_networkx_nodes(G, pos, nodelist=nodes_sp_if_type_internal, with_labels=True, node_size=node_szie_sp, node_shape='o', node_color='#382000', alpha=alpha_sp, linewidths=1, label='OVS Internal')
nx.draw_networkx_nodes(G, pos, nodelist=nodes_sp_if_type_internal_fg, with_labels=True, node_size=node_szie_sp, node_shape='o', node_color='#FF0000', alpha=alpha_sp, linewidths=1, label='OVS Internal(fg)')
        ## Recolor link-down nodes
        ## Unused (the OVS link_state value is not accurate; it apparently cannot check interfaces inside namespaces)
#nx.draw_networkx_nodes(G, pos, nodelist=nodes_sp_if_link_state_down, with_labels=True, node_size=node_szie_ap, node_shape='o', node_color='#FF0000', alpha=alpha_sp, linewidths=1, label='OVS Link-Down')
    ## Draw node labels (Interface/Port/Bridge)
label_font_size = 1
label_font_size_sp = 2
label_font_size_sp_only = 5
labels = {}
labels_sp = {}
for node in G.nodes():
if isSP:
if node not in shortest_path_list:
labels[node] = node
else:
labels_sp[node] = node
else:
labels[node] = node
if not isSP:
nx.draw_networkx_labels(G, pos, labels, font_size=label_font_size, font_family='sans-serif', alpha=alpha_normal)
elif isSP and not only_path:
nx.draw_networkx_labels(G, pos, labels, font_size=label_font_size, font_family='sans-serif', alpha=alpha_normal)
nx.draw_networkx_labels(G, pos, labels_sp, font_size=label_font_size_sp, font_family='sans-serif', alpha=alpha_sp)
else:
nx.draw_networkx_labels(G, pos, labels_sp, font_size=label_font_size_sp_only, font_family='sans-serif', alpha=alpha_sp)
    ## Draw edges
    # Edge styles for the only-path case
if only_path:
nx.draw_networkx_edges(G, pos, edgelist=edge_I2P_sp, width=1, alpha=0.5, edge_color='#E67E22', label='OVS-Interface <--> OVS-Port')
nx.draw_networkx_edges(G, pos, edgelist=edge_P2B_sp, width=2, alpha=0.5, edge_color='#2ECC71', label='OVS-Port <--> OVS-Bridge')
nx.draw_networkx_edges(G, pos, edgelist=edge_if_type_patch_sp, width=5, alpha=0.5, edge_color='#00FFE8', label='OVS-Patch')
nx.draw_networkx_edges(G, pos, edgelist=edge_if_type_vxlan_sp, width=5, alpha=0.5, edge_color='#FFF818', label='VxLAN Tunnel')
nx.draw_networkx_edges(G, pos, edgelist=edge_I2VP_sp, width=0.8, alpha=0.5, edge_color='#68FF66', label='Linux VETH-Pair')
nx.draw_networkx_edges(G, pos, edgelist=edge_VP2LB_sp, width=0.2, alpha=0.5, edge_color='#16BFFF', label='Linux-VETH <--> Linux-Bridge')
nx.draw_networkx_edges(G, pos, edgelist=edge_T2LB_sp, width=0.2, alpha=0.5, edge_color='#6A6A6A', label='Linux-TAP <--> Linux-Bridge')
    # Otherwise, edge styles for the all-nodes display mode
else:
nx.draw_networkx_edges(G, pos, edgelist=edge_I2P, width=0.1, alpha=alpha_normal, edge_color='#E67E22', label='OVS-Interface <--> OVS-Port')
nx.draw_networkx_edges(G, pos, edgelist=edge_P2B, width=0.2, alpha=alpha_normal, edge_color='#2ECC71', label='OVS-Port <--> OVS-Bridge')
nx.draw_networkx_edges(G, pos, edgelist=edge_if_type_patch, width=0.8, alpha=alpha_normal, edge_color='#00FFE8', label='OVS-Patch')
nx.draw_networkx_edges(G, pos, edgelist=edge_if_type_vxlan, width=1, alpha=alpha_normal, edge_color='#FFF818', label='VxLAN Tunnel')
nx.draw_networkx_edges(G, pos, edgelist=edge_I2VP, width=0.8, alpha=alpha_normal, edge_color='#68FF66', label='Linux VETH-Pair')
nx.draw_networkx_edges(G, pos, edgelist=edge_VP2LB, width=0.2, alpha=alpha_normal, edge_color='#16BFFF', label='Linux-VETH <--> Linux-Bridge')
nx.draw_networkx_edges(G, pos, edgelist=edge_T2LB, width=0.2, alpha=alpha_normal, edge_color='#6A6A6A', label='Linux-TAP <--> Linux-Bridge')
if enable_fip:
nx.draw_networkx_edges(G, pos, edgelist=edge_SNAT, width=0.2, alpha=alpha_normal, edge_color='#FF0000', style='dashed', label='fg <--> fpr (SNAT in F-IP Namespace)')
nx.draw_networkx_edges(G, pos, edgelist=edge_ROUTING, width=0.2, alpha=alpha_normal, edge_color='#0000C7', style='dashed', label='rfp <--> qr (ROUTING in Dist-Router Namespace)')
    ## In all-nodes display mode, edge styles that highlight only the shortest path
if isSP and not only_path:
nx.draw_networkx_edges(G, pos, edgelist=edge_I2P_sp, width=0.1, alpha=alpha_sp, edge_color='#E67E22', label='OVS-Interface <--> OVS-Port')
nx.draw_networkx_edges(G, pos, edgelist=edge_P2B_sp, width=0.2, alpha=alpha_sp, edge_color='#2ECC71', label='OVS-Port <--> OVS-Bridge')
nx.draw_networkx_edges(G, pos, edgelist=edge_if_type_patch_sp, width=0.8, alpha=alpha_sp, edge_color='#00FFE8', label='OVS-Patch')
nx.draw_networkx_edges(G, pos, edgelist=edge_if_type_vxlan_sp, width=1, alpha=alpha_sp, edge_color='#FFF818', label='VxLAN Tunnel')
nx.draw_networkx_edges(G, pos, edgelist=edge_I2VP_sp, width=0.8, alpha=alpha_sp, edge_color='#68FF66', label='Linux VETH-Pair')
nx.draw_networkx_edges(G, pos, edgelist=edge_VP2LB_sp, width=0.2, alpha=alpha_sp, edge_color='#16BFFF', label='Linux-VETH <--> Linux-Bridge')
nx.draw_networkx_edges(G, pos, edgelist=edge_T2LB_sp, width=0.2, alpha=alpha_sp, edge_color='#6A6A6A', label='Linux-TAP <--> Linux-Bridge')
if enable_fip:
nx.draw_networkx_edges(G, pos, edgelist=edge_SNAT, width=0.2, alpha=alpha_normal, edge_color='#FF0000', style='dashed', label='fg <--> fpr (SNAT in F-IP Namespace)')
nx.draw_networkx_edges(G, pos, edgelist=edge_ROUTING, width=0.2, alpha=alpha_normal, edge_color='#0000C7', style='dashed', label='rfp <--> qr (ROUTING in Dist-Router Namespace)')
plt.axis('off')
#plt.figure(figsize = (10,9))
plt.title("OpenStack Network Connectivity")
print("Processed [elapsed time: %f sec]\n" % (time.time() - start_time))
print("Creating GEXF.........")
start_time = time.time()
nx.write_gexf(G, "/var/www/html/OpenStack-Network-Connectivity.gexf")
#nx.write_gexf(G, "/var/www/html/OpenStack-Network-Connectivity.gexf", version="1.1draft")
print("Created GEXF [elapsed time: %f sec]\n" % (time.time() - start_time))
print("Creating Image........")
start_time = time.time()
#plt.legend(numpoints = 1)
plt.legend(loc=2, prop={'size': 2})
plt.savefig("/var/www/html/OpenStack-Network-Connectivity.png", format = "png", dpi = 800)
print("Created Image [elapsed time: %f sec]\n" % (time.time() - start_time))
    ##### Build and upload the plot.ly graph ########################################################
if enable_plotly:
print("Creating Plotly........")
start_time = time.time()
#G=nx.random_geometric_graph(200,0.125)
#pos=nx.get_node_attributes(G,'pos')
        ## Add per-node position info to 'G'
nx.set_node_attributes(G, name='pos', values=pos)
        ## Pick the reference node (the node closest to the 0.5, 0.5 center)
dmin = 1
ncenter = 0
for n in pos:
x,y = pos[n]
d = (x - 0.5) ** 2 + (y - 0.5) ** 2
if d < dmin:
ncenter = n
dmin = d
p = nx.single_source_shortest_path_length(G,ncenter)
        ## Edge traces
edge_trace = go.Scatter(
x = [],
y = [],
line = dict(width=0.5, color='#888'),
hoverinfo = 'none',
mode = 'lines')
        ## Build the edge positions
for edge in G.edges():
x0, y0 = G.node[edge[0]]['pos']
x1, y1 = G.node[edge[1]]['pos']
edge_trace['x'] += tuple([x0, x1, None])
edge_trace['y'] += tuple([y0, y1, None])
        ## Node traces
node_trace = go.Scatter(
x = [],
y = [],
text = [],
mode = 'markers',
hoverinfo = 'text',
marker = dict(
showscale = True,
# colorscale options
#'Greys' | 'YlGnBu' | 'Greens' | 'YlOrRd' | 'Bluered' | 'RdBu' |
#'Reds' | 'Blues' | 'Picnic' | 'Rainbow' | 'Portland' | 'Jet' |
#'Hot' | 'Blackbody' | 'Earth' | 'Electric' | 'Viridis' |
colorscale = 'YlGnBu',
reversescale = True,
color = [],
size = 10,
colorbar = dict(
thickness = 15,
title = 'Node Connections',
xanchor = 'left',
titleside = 'right'
),
line = dict(width=2)))
        ## Build the node positions
for node in G.nodes():
x, y = G.node[node]['pos']
node_trace['x'] += tuple([x])
node_trace['y'] += tuple([y])
        ## Build the node hover info
for node, adjacencies in enumerate(G.adjacency()):
node_trace['marker']['color'] += tuple([len(adjacencies[1])])
#node_info = '# of connections: ' + str(len(adjacencies[1]))
node_info = adjacencies[0] + " (#" + str(len(adjacencies[1])) + ") "
node_trace['text'] += tuple([node_info])
        ## Build the figure
fig = go.Figure(
data = [edge_trace, node_trace],
layout = go.Layout(
title = '<br>Network graph made with Python',
titlefont = dict(size = 16),
showlegend = False,
hovermode = 'closest',
margin = dict(b=20, l=5, r=5, t=40),
annotations = [
dict(
text = "Python code: <a href='https://plot.ly/ipython-notebooks/network-graphs/'> https://plot.ly/ipython-notebooks/network-graphs/</a>",
showarrow = False,
xref = "paper",
yref="paper",
x=0.005,
y=-0.002
)
],
xaxis = dict(showgrid=False, zeroline=False, showticklabels=False),
yaxis = dict(showgrid=False, zeroline=False, showticklabels=False)
)
)
        ## Send the data to plot.ly
py.plot(fig, filename='networkx')
print("Created Plotly [elapsed time: %f sec]\n" % (time.time() - start_time))
    #### (For reference) ########################################################
print("======= Summary of Graph =======")
## Print graph info
print(nx.info(G))
## Print graph density (a value between 0 and 1, where 1 is maximum density)
#print("Network density:", nx.density(G))
## Shortest-path lookup example
#fell_whitehead_path = nx.shortest_path(G, source="I:qvoeee4966d-68", target="I:vxlan-0a00e8ae(pub-compute-001)")
#print("Shortest path between Fell and Whitehead:", fell_whitehead_path)
## Measure per-node degree (centrality)
#degree_dict = dict(G.degree(G.nodes()))
#sorted_degree = sorted(degree_dict.items(), key=operator.itemgetter(1), reverse=True)
#print("Top 20 nodes by degree:")
#for d in sorted_degree[:20]:
# print(d)
avg_line_length: 42.187103 | max_line_length: 953 | alphanum_fraction: 0.703561
hexsha: 7bcb7f613021b08a4b51ba9ae69a1765bb8a14ea | size: 2,062 | ext: py | lang: Python
max_stars_repo_path: orco/internals/browser.py | max_stars_repo_name: spirali/orco | max_stars_repo_head_hexsha: 32c839b4d691a3eb83cfa379a1ec429adcf7f1b0 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 3 | max_stars_repo_stars_event_min_datetime: 2019-08-15T08:06:59.000Z | max_stars_repo_stars_event_max_datetime: 2020-06-14T13:13:09.000Z
max_issues_repo_path: orco/internals/browser.py | max_issues_repo_name: spirali/orco | max_issues_repo_head_hexsha: 32c839b4d691a3eb83cfa379a1ec429adcf7f1b0 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 8 | max_issues_repo_issues_event_min_datetime: 2019-08-06T11:38:08.000Z | max_issues_repo_issues_event_max_datetime: 2020-03-01T21:44:12.000Z
max_forks_repo_path: orco/internals/browser.py | max_forks_repo_name: spirali/xstore | max_forks_repo_head_hexsha: 32c839b4d691a3eb83cfa379a1ec429adcf7f1b0 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 2 | max_forks_repo_forks_event_min_datetime: 2019-07-29T18:33:13.000Z | max_forks_repo_forks_event_max_datetime: 2019-08-30T07:54:43.000Z
import os
import threading
from flask import Flask, Response
from flask_cors import CORS
from flask_restful import Resource, Api
from .database import Database
STATIC_ROOT = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "static"
)
app = Flask(__name__, static_url_path=STATIC_ROOT)
cors = CORS(app)
api = Api(app, prefix="/rest")
thread_local_db = threading.local()
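# Thread-local slot for caching database handles; get_db() below still opens a new Database on every call (see the TODO)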
def get_db():
# TODO: Do something better than creating a DB each time
db = Database(app.db_url)
return db
class Builders(Resource):
def get(self):
return get_db().builder_summaries(app.builders)
api.add_resource(Builders, "/builders")
class Jobs(Resource):
def get(self, builder_name):
return get_db().job_summaries(builder_name)
api.add_resource(Jobs, "/jobs/<string:builder_name>")
class Blobs(Resource):
def get(self, job_id):
return get_db().blob_summaries(job_id)
api.add_resource(Blobs, "/blobs/<int:job_id>")
class Status(Resource):
def get(self):
return get_db().get_running_status()
api.add_resource(Status, "/status/")
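# Serve a pre-gzipped static asset with the matching Content-Encoding/Content-Length headers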
def from_gzipped_file(filename):
assert not os.path.isabs(filename)
filename = os.path.join(STATIC_ROOT, filename)
with open(filename, "rb") as f:
data = f.read()
headers = {"Content-Encoding": "gzip", "Content-Length": len(data)}
if filename.endswith("css.gz"):
headers["Content-Type"] = "text/css"
return Response(data, headers=headers)
# filename = os.path.join(STATIC_ROOT, "main.js.gz")
# return from_gzipped_file(filename)
@app.route("/static/<path:path>")
def static_serve(path):
return from_gzipped_file(path + ".gz")
@app.route("/manifest.json")
def static_manifest():
return from_gzipped_file("manifest.json.gz")
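# Catch-all route: any other path falls back to the gzipped index.html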
@app.route("/", defaults={"path": ""})
@app.route("/<path:path>")
def static_index(path):
return from_gzipped_file("index.html.gz")
def init_service(runtime):
app.builders = list(runtime._builders.values())
app.db_url = runtime.db.url
return app
avg_line_length: 22.172043 | max_line_length: 73 | alphanum_fraction: 0.695926
hexsha: 8c4aa16812c5ebf7093247baffbd6df2c18f808b | size: 102 | ext: py | lang: Python
max_stars_repo_path: lang/Python/loops-continue.py | max_stars_repo_name: ethansaxenian/RosettaDecode | max_stars_repo_head_hexsha: 8ea1a42a5f792280b50193ad47545d14ee371fb7 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: lang/Python/loops-continue.py | max_issues_repo_name: ethansaxenian/RosettaDecode | max_issues_repo_head_hexsha: 8ea1a42a5f792280b50193ad47545d14ee371fb7 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: lang/Python/loops-continue.py | max_forks_repo_name: ethansaxenian/RosettaDecode | max_forks_repo_head_hexsha: 8ea1a42a5f792280b50193ad47545d14ee371fb7 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
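# Prints 1-10: non-multiples of 5 are comma-separated on one line; each multiple of 5 is printed on its own and ends the line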
for i in range(1,11):
if i % 5 == 0:
print(i)
continue
print(i, ",", end=' ')
avg_line_length: 17 | max_line_length: 26 | alphanum_fraction: 0.411765