Dataset schema (⌀ = column may contain nulls):

| column | dtype | lengths / range |
|---|---|---|
| hexsha | string | lengths 40..40 |
| size | int64 | 3..1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 3..972 |
| max_stars_repo_name | string | lengths 6..130 |
| max_stars_repo_head_hexsha | string | lengths 40..78 |
| max_stars_repo_licenses | list | lengths 1..10 |
| max_stars_count | int64 | 1..191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | lengths 24..24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | lengths 24..24 ⌀ |
| max_issues_repo_path | string | lengths 3..972 |
| max_issues_repo_name | string | lengths 6..130 |
| max_issues_repo_head_hexsha | string | lengths 40..78 |
| max_issues_repo_licenses | list | lengths 1..10 |
| max_issues_count | int64 | 1..116k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | lengths 24..24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | lengths 24..24 ⌀ |
| max_forks_repo_path | string | lengths 3..972 |
| max_forks_repo_name | string | lengths 6..130 |
| max_forks_repo_head_hexsha | string | lengths 40..78 |
| max_forks_repo_licenses | list | lengths 1..10 |
| max_forks_count | int64 | 1..105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | lengths 24..24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | lengths 24..24 ⌀ |
| content | string | lengths 3..1.03M |
| avg_line_length | float64 | 1.13..941k |
| max_line_length | int64 | 2..941k |
| alphanum_fraction | float64 | 0..1 |
hexsha: 2b1515b9e65d57fbf2aa7118c59965ea71710713 | size: 2,978 | ext: py | lang: Python
max_stars:  XY/roc.py | andrewmaurer/FPM | 1c6ff2cee19a375111e43981496600ff3b8d2c80 | ["MIT"] | count: null | events: null / null
max_issues: XY/roc.py | andrewmaurer/FPM | 1c6ff2cee19a375111e43981496600ff3b8d2c80 | ["MIT"] | count: null | events: null / null
max_forks:  XY/roc.py | andrewmaurer/FPM | 1c6ff2cee19a375111e43981496600ff3b8d2c80 | ["MIT"] | count: null | events: null / null
content:
import numpy as np
import matplotlib.pyplot as plt
def tpr( y_pred, y_actual, threshold):
"""
y_pred : numpy array of predicted values between 0 and 1.
y_actual : numpy array of actual values, either 0 or 1.
    threshold : cutoff value; predictions strictly greater than this are rounded up to 1, the rest down to 0.
    Returns the true positive rate under that rounding.
"""
y_rounded = np.array([ pred > threshold for pred in y_pred ])
num_true_positive = np.logical_and(y_rounded, y_actual).sum()
num_total_positive = y_actual.sum()
return num_true_positive / num_total_positive
def fpr( y_pred, y_actual, threshold):
"""
y_pred : numpy array of predicted values between 0 and 1.
y_actual : numpy array of actual values, either 0 or 1.
    threshold : cutoff value; predictions strictly greater than this are rounded up to 1, the rest down to 0.
    Returns the false positive rate under that rounding.
"""
y_rounded = np.array([ pred > threshold for pred in y_pred ])
num_false_positive = np.logical_and(y_rounded, np.logical_not(y_actual)).sum()
    num_total_negative = np.logical_not(y_actual).sum()
    return num_false_positive / num_total_negative
def compute_auc(y_pred, y_actual):
"""
Approximates the Area Under the Curve of the Receiver Operating Characteristic.
    This is a coarse rectangle-rule estimate; roc_auc_score from sklearn.metrics gives a closer approximation.
"""
thresholds = np.arange(0,1,0.01)
fprs = [ fpr(y_pred, y_actual, threshold) for threshold in thresholds]
tprs = [tpr(y_pred, y_actual, threshold) for threshold in thresholds]
rectangle_bases = np.array([ np.abs(fprs[i+1] - fprs[i]) for i in range(len(fprs) - 1) ])
rectangle_heights = tprs[:-1]
rectangle_areas = rectangle_bases * rectangle_heights
return rectangle_areas.sum()
def plot_auc(y_pred, y_actual, include_chance=True):
"""
include_chance : if True, plots the straight line y=x which can be compared to ROC score.
Uses matplotlib.pyplot to plot the ROC curve.
"""
thresholds = np.arange(0,1,0.01)
fprs = [ fpr(y_pred, y_actual, threshold) for threshold in thresholds]
tprs = [tpr(y_pred, y_actual, threshold) for threshold in thresholds]
plt.plot(fprs, tprs)
if include_chance:
plt.plot([0,1], [0,1], color='orange')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic')
# y_actual = np.array([0] * 75 + [1] * 100)
# y_pred = np.concatenate([
# np.random.uniform(0, 0.7, 75 ),
# np.random.uniform(0.3, 1.0, 100 )
# ])
# tpr( y_pred, y_actual, 0.5)
# fpr( y_pred, y_actual, 0.5)
# compute_auc(y_pred, y_actual)
# plot_auc(y_pred, y_actual, include_chance=True)
avg_line_length: 40.243243 | max_line_length: 113 | alphanum_fraction: 0.648758
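A minimal usage sketch of the ROC helpers in XY/roc.py above. The `roc` module name and the synthetic labels/scores are assumptions (they mirror the commented-out example at the end of the file), not part of the original record.

import numpy as np
from roc import tpr, fpr, compute_auc, plot_auc  # module name assumed

rng = np.random.default_rng(0)
y_actual = np.array([0] * 75 + [1] * 100)                   # 75 negatives, 100 positives
y_pred = np.concatenate([rng.uniform(0.0, 0.7, 75),         # scores for the negatives
                         rng.uniform(0.3, 1.0, 100)])       # scores for the positives

print(tpr(y_pred, y_actual, 0.5))       # true positive rate at threshold 0.5
print(fpr(y_pred, y_actual, 0.5))       # false positive rate at threshold 0.5
print(compute_auc(y_pred, y_actual))    # rectangle-rule AUC estimate
plot_auc(y_pred, y_actual, include_chance=True)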
hexsha: c225c37ab0453fd3434b1b99a1226995f2e1260d | size: 1,193 | ext: py | lang: Python
max_stars:  nipype/interfaces/fsl/tests/test_auto_SwapDimensions.py | mfalkiewicz/nipype | 775e21b78fb1ffa2ff9cb12e6f052868bd44d052 | ["Apache-2.0"] | count: 1 | events: 2015-01-19T13:12:27.000Z / 2015-01-19T13:12:27.000Z
max_issues: nipype/interfaces/fsl/tests/test_auto_SwapDimensions.py | bpinsard/nipype | 373bdddba9f675ef153951afa368729e2d8950d2 | ["Apache-2.0"] | count: null | events: null / null
max_forks:  nipype/interfaces/fsl/tests/test_auto_SwapDimensions.py | bpinsard/nipype | 373bdddba9f675ef153951afa368729e2d8950d2 | ["Apache-2.0"] | count: null | events: null / null
content:
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..utils import SwapDimensions
def test_SwapDimensions_inputs():
input_map = dict(args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(deprecated='1.0.0',
nohash=True,
usedefault=True,
),
in_file=dict(argstr='%s',
mandatory=True,
position='1',
),
new_dims=dict(argstr='%s %s %s',
mandatory=True,
),
out_file=dict(argstr='%s',
genfile=True,
hash_files=False,
),
output_type=dict(),
terminal_output=dict(deprecated='1.0.0',
nohash=True,
),
)
inputs = SwapDimensions.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_SwapDimensions_outputs():
output_map = dict(out_file=dict(),
)
outputs = SwapDimensions.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
avg_line_length: 25.382979 | max_line_length: 67 | alphanum_fraction: 0.642917
hexsha: 3ec28d6dc5c53b963374a9f3b4e1b0db0c86fe44 | size: 3,589 | ext: py | lang: Python
max_stars:  spiders/git/git/middlewares.py | dingzhaohan/deep_research | 8d4373e32d75a348368d46eca2dd1b26d8d93c4b | ["Apache-2.0"] | count: 2 | events: 2020-02-20T01:57:20.000Z / 2020-09-12T12:43:46.000Z
max_issues: spiders/git/git/middlewares.py | dingzhaohan/deep_research | 8d4373e32d75a348368d46eca2dd1b26d8d93c4b | ["Apache-2.0"] | count: null | events: null / null
max_forks:  spiders/git/git/middlewares.py | dingzhaohan/deep_research | 8d4373e32d75a348368d46eca2dd1b26d8d93c4b | ["Apache-2.0"] | count: null | events: null / null
content:
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class GitSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Request, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class GitDownloaderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
        # Must either:
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
avg_line_length: 34.509615 | max_line_length: 78 | alphanum_fraction: 0.665366
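If these middlewares were to be activated, Scrapy's convention is to register them in the project's settings.py. A short sketch follows; the `git.middlewares` module path is taken from the file path above, and the priority value 543 is only the usual template default, not something specified in this record.

# settings.py (sketch): enable the spider and downloader middlewares defined above.
SPIDER_MIDDLEWARES = {
    "git.middlewares.GitSpiderMiddleware": 543,
}
DOWNLOADER_MIDDLEWARES = {
    "git.middlewares.GitDownloaderMiddleware": 543,
}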
hexsha: a5fe81eb9b396cdb94cfe8476bed9a44d8ff36d7 | size: 7,176 | ext: py | lang: Python
max_stars:  jira/resilientsession.py | zrhoffman/jira | bb99305f5c7c0d00348bdb75b1cfd37b652cce9d | ["BSD-2-Clause"] | count: null | events: null / null
max_issues: jira/resilientsession.py | zrhoffman/jira | bb99305f5c7c0d00348bdb75b1cfd37b652cce9d | ["BSD-2-Clause"] | count: 27 | events: 2021-05-19T06:56:51.000Z / 2022-03-18T14:24:52.000Z
max_forks:  jira/resilientsession.py | zrhoffman/jira | bb99305f5c7c0d00348bdb75b1cfd37b652cce9d | ["BSD-2-Clause"] | count: null | events: null / null
content:
# -*- coding: utf-8 -*-
import json
import logging
import random
from requests.exceptions import ConnectionError
from requests import Session
import time
from jira.exceptions import JIRAError
logging.getLogger("jira").addHandler(logging.NullHandler())
def raise_on_error(r, verb="???", **kwargs):
request = kwargs.get("request", None)
# headers = kwargs.get('headers', None)
if r is None:
raise JIRAError(None, **kwargs)
if r.status_code >= 400:
error = ""
if r.status_code == 403 and "x-authentication-denied-reason" in r.headers:
error = r.headers["x-authentication-denied-reason"]
elif r.text:
try:
response = json.loads(r.text)
if "message" in response:
# Jira 5.1 errors
error = response["message"]
elif "errorMessages" in response and len(response["errorMessages"]) > 0:
# Jira 5.0.x error messages sometimes come wrapped in this array
# Sometimes this is present but empty
errorMessages = response["errorMessages"]
if isinstance(errorMessages, (list, tuple)):
error = errorMessages[0]
else:
error = errorMessages
# Catching only 'errors' that are dict. See https://github.com/pycontribs/jira/issues/350
elif (
"errors" in response
and len(response["errors"]) > 0
and isinstance(response["errors"], dict)
):
# Jira 6.x error messages are found in this array.
error_list = response["errors"].values()
error = ", ".join(error_list)
else:
error = r.text
except ValueError:
error = r.text
raise JIRAError(
r.status_code, error, r.url, request=request, response=r, **kwargs
)
# for debugging weird errors on CI
if r.status_code not in [200, 201, 202, 204]:
raise JIRAError(r.status_code, request=request, response=r, **kwargs)
# testing for the bug exposed on
# https://answers.atlassian.com/questions/11457054/answers/11975162
if (
r.status_code == 200
and len(r.content) == 0
and "X-Seraph-LoginReason" in r.headers
and "AUTHENTICATED_FAILED" in r.headers["X-Seraph-LoginReason"]
):
pass
class ResilientSession(Session):
"""This class is supposed to retry requests that do return temporary errors.
At this moment it supports: 502, 503, 504
"""
def __init__(self, timeout=None):
self.max_retries = 3
self.timeout = timeout
super(ResilientSession, self).__init__()
# Indicate our preference for JSON to avoid https://bitbucket.org/bspeakmon/jira-python/issue/46 and https://jira.atlassian.com/browse/JRA-38551
self.headers.update({"Accept": "application/json,*.*;q=0.9"})
def __recoverable(self, response, url, request, counter=1):
msg = response
if isinstance(response, ConnectionError):
logging.warning(
"Got ConnectionError [%s] errno:%s on %s %s\n%s\n%s"
% (
response,
response.errno,
request,
url,
vars(response),
response.__dict__,
)
)
if hasattr(response, "status_code"):
if response.status_code in [502, 503, 504, 401]:
# 401 UNAUTHORIZED still randomly returned by Atlassian Cloud as of 2017-01-16
msg = "%s %s" % (response.status_code, response.reason)
# 2019-07-25: Disabled recovery for codes above^
return False
elif not (
response.status_code == 200
and len(response.content) == 0
and "X-Seraph-LoginReason" in response.headers
and "AUTHENTICATED_FAILED" in response.headers["X-Seraph-LoginReason"]
):
return False
else:
msg = "Atlassian's bug https://jira.atlassian.com/browse/JRA-41559"
# Exponential backoff with full jitter.
delay = min(60, 10 * 2 ** counter) * random.random()
logging.warning(
"Got recoverable error from %s %s, will retry [%s/%s] in %ss. Err: %s"
% (request, url, counter, self.max_retries, delay, msg)
)
logging.debug("response.headers: %s", response.headers)
logging.debug("response.body: %s", response.content)
time.sleep(delay)
return True
def __verb(self, verb, url, retry_data=None, **kwargs):
d = self.headers.copy()
d.update(kwargs.get("headers", {}))
kwargs["headers"] = d
# if we pass a dictionary as the 'data' we assume we want to send json
# data
data = kwargs.get("data", {})
if isinstance(data, dict):
data = json.dumps(data)
retry_number = 0
while retry_number <= self.max_retries:
response = None
exception = None
try:
method = getattr(super(ResilientSession, self), verb.lower())
response = method(url, timeout=self.timeout, **kwargs)
if response.status_code >= 200 and response.status_code <= 299:
return response
except ConnectionError as e:
logging.warning(
"%s while doing %s %s [%s]" % (e, verb.upper(), url, kwargs)
)
exception = e
retry_number += 1
if retry_number <= self.max_retries:
response_or_exception = response if response is not None else exception
if self.__recoverable(
response_or_exception, url, verb.upper(), retry_number
):
if retry_data:
# if data is a stream, we cannot just read again from it,
# retry_data() will give us a new stream with the data
kwargs["data"] = retry_data()
continue
else:
break
if exception is not None:
raise exception
raise_on_error(response, verb=verb, **kwargs)
return response
def get(self, url, **kwargs):
return self.__verb("GET", url, **kwargs)
def post(self, url, **kwargs):
return self.__verb("POST", url, **kwargs)
def put(self, url, **kwargs):
return self.__verb("PUT", url, **kwargs)
def delete(self, url, **kwargs):
return self.__verb("DELETE", url, **kwargs)
def head(self, url, **kwargs):
return self.__verb("HEAD", url, **kwargs)
def patch(self, url, **kwargs):
return self.__verb("PATCH", url, **kwargs)
def options(self, url, **kwargs):
return self.__verb("OPTIONS", url, **kwargs)
avg_line_length: 37.375 | max_line_length: 152 | alphanum_fraction: 0.545847
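A hedged sketch of how the ResilientSession above is typically used; the Jira base URL and REST path are placeholders, and the snippet assumes the jira package shown above is installed.

from jira.resilientsession import ResilientSession

session = ResilientSession(timeout=10)   # retries recoverable errors up to max_retries (3) times
response = session.get("https://jira.example.org/rest/api/2/serverInfo")  # placeholder URL
print(response.status_code)
print(response.json())                   # the session asks for JSON via its Accept header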
hexsha: c477802680d4fc51e49f3e9e751ef19794ad8c94 | size: 3,621 | ext: py | lang: Python
max_stars:  sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2017_12_01_preview/_monitor_management_client.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | ["MIT"] | count: 1 | events: 2022-02-01T18:50:12.000Z / 2022-02-01T18:50:12.000Z
max_issues: sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2017_12_01_preview/_monitor_management_client.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | ["MIT"] | count: null | events: null / null
max_forks:  sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2017_12_01_preview/_monitor_management_client.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | ["MIT"] | count: null | events: null / null
content:
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Optional, TYPE_CHECKING
from azure.core.rest import HttpRequest, HttpResponse
from azure.mgmt.core import ARMPipelineClient
from msrest import Deserializer, Serializer
from . import models
from ._configuration import MonitorManagementClientConfiguration
from .operations import MetricNamespacesOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class MonitorManagementClient:
"""Monitor Management Client.
:ivar metric_namespaces: MetricNamespacesOperations operations
:vartype metric_namespaces:
$(python-base-namespace).v2017_12_01_preview.operations.MetricNamespacesOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param base_url: Service URL. Default value is 'https://management.azure.com'.
:type base_url: str
"""
def __init__(
self,
credential: "TokenCredential",
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = MonitorManagementClientConfiguration(credential=credential, **kwargs)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.metric_namespaces = MetricNamespacesOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(
self,
request, # type: HttpRequest
**kwargs: Any
) -> HttpResponse:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = client._send_request(request)
<HttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.HttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> MonitorManagementClient
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
avg_line_length: 40.233333 | max_line_length: 123 | alphanum_fraction: 0.677437
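A minimal instantiation sketch for the client above. It assumes the azure-identity package is available to supply a TokenCredential, and the import path is an assumption inferred from the file's location in the SDK tree.

from azure.identity import DefaultAzureCredential  # any TokenCredential implementation works
from azure.mgmt.monitor.v2017_12_01_preview import MonitorManagementClient  # import path assumed

credential = DefaultAzureCredential()
with MonitorManagementClient(credential=credential) as client:
    # metric_namespaces is the only operation group wired up in this preview API version
    print(type(client.metric_namespaces))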
hexsha: 321701ae7bdf59e8821afe14c678069d40663db9 | size: 16,988 | ext: py | lang: Python
max_stars:  data_prep/visual_prep.py | pleonova/data-diary | e02ea27c82b87c0f5fb899191aea708de586c502 | ["MIT"] | count: 10 | events: 2020-06-04T17:49:06.000Z / 2022-01-02T17:03:18.000Z
max_issues: data_prep/visual_prep.py | pleonova/data-diary | e02ea27c82b87c0f5fb899191aea708de586c502 | ["MIT"] | count: null | events: null / null
max_forks:  data_prep/visual_prep.py | pleonova/data-diary | e02ea27c82b87c0f5fb899191aea708de586c502 | ["MIT"] | count: 1 | events: 2021-05-10T04:00:06.000Z / 2021-05-10T04:00:06.000Z
content:
import requests
import os
from datetime import date, datetime, timedelta as td
import matplotlib.dates as mdates
import pandas as pd
import numpy as np
import random
############ Data Munging ############
def time_dataframe_prep(df, start_date, end_date, start_date_column, end_date_column, category_column):
"""
Returns an exploded dataframe, with every minute labeled with the event name or 'no entry'.
Parameters
----------
df : dataframe
        A dataframe that contains tagged timestamps
start_date : str
Date of first entry
    end_date : str
Date of last entry
start_date_column : datetime
Column that contains when the event started
end_date_column : datetime
Column that contains when the event ended
category_column : str
Column that contains the event tag name
Returns
-------
df_minutes_se : dataframe
Table with every minute tagged
"""
########################
## Step 1: Create a dataframe of just the end dates
########################
df_end = df[[end_date_column]].copy()
# Add a column for 'no entry'
df_end[category_column] = 'no entry'
# If there is no gap in data (as in there is an entry immediately following the previous),
# remove the record from the df_end dataframe
start_date_pt_list = list(df[start_date_column].unique())
df_end = df_end[~df_end[end_date_column].isin(start_date_pt_list)]
########################
## Step 2: Combine End and Start Dates into single dataframe
########################
# Create a two column data frame with the start date and the category
df_start = df[[start_date_column, category_column]].copy()
# Update column names to match that of df_start
df_end.rename(columns = {end_date_column: start_date_column}, inplace = True)
# Append the df_end dataframe to the bottom
df_entries = pd.concat([df_start, df_end])
########################
## Step 3: Expand Dataset - Every Second
########################
    # Create a dataframe of second intervals between two dates
time_range = pd.date_range(start_date, end_date, freq= '1s')
time_range_df = pd.DataFrame(time_range).rename(columns = {0: 'date_time'})
# Convert to time
time_range_df['date_time'] = pd.to_datetime(time_range_df['date_time'])
########################
## Step 4: Add our time stamps to the expanded time dataframe
########################
df_seconds = pd.merge(time_range_df, df_entries, how = 'left',
left_on = 'date_time', right_on = start_date_column)
# Find the first date_time with a category entry
date_of_first_entry = df_seconds[(df_seconds[category_column] != 'no entry')
& (~df_seconds[category_column].isna())
]['date_time'].min()
# Find the index of the first entry
index_of_first_entry = df_seconds.index[df_seconds['date_time'] == date_of_first_entry][0]
# Reduce the dataframe to begin with the first entry
df_seconds2 = df_seconds[index_of_first_entry:].copy()
########################
## Step 5: Label every minute
########################
# Forward fill the category until next entry
df_seconds2[category_column] = df_seconds2[category_column].ffill()
df_seconds2[start_date_column] = df_seconds2[start_date_column].ffill()
########################
## Step 6: Pick the end of a minute entry (at 58 seconds)
########################
# Expand the time stamp into the relevant time components
# df_seconds2[['hour','minute','second']] = pd.to_timedelta(
# df_seconds2['date_time']).dt.components.iloc[:, 1:4]
df_seconds2['hour'] = df_seconds2['date_time'].dt.hour
df_seconds2['minute'] = df_seconds2['date_time'].dt.minute
df_seconds2['second'] = df_seconds2['date_time'].dt.second
# Select the entries at specified second interval (otherwise the frequency is too much for the chart)
df_minutes = df_seconds2[df_seconds2['second'] == 58].reset_index()
df_minutes['date_time_min'] = df_minutes['date_time'].values.astype('<M8[m]')
########################
## Step 7: Add duration columns
########################
df_minutes['duration_minutes'] = 1
# Find the index of the latest entry
latest_date = df_minutes[df_minutes[category_column] != 'no entry']['date_time'].max()
index_of_last_entry = df_minutes.index[df_minutes['date_time'] == latest_date][0]
# Reduce the dataframe to begin with the first entry
df_minutes_se = df_minutes[0:index_of_last_entry].copy()
return df_minutes_se
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(color_codes=True)
fsize = 18
params = {
'axes.labelsize': fsize,
'axes.titlesize':fsize,
'axes.titlepad': 20,
'xtick.labelsize':fsize,
'xtick.major.pad': 5,
'ytick.labelsize':fsize,
'axes.labelpad': 20,
'lines.linewidth' : 3,
'figure.titlesize': fsize *1.5,
'figure.figsize' : (16,8),
'legend.title_fontsize': fsize,
'legend.fontsize': fsize #*0.925,
}
plt.rcParams.update(params)
plt.close('all')
color_palette_p = [
"#e8bca7",
'#911eb4',
'#8b88cc',
'#d1952e',
"#88ccbf",
"#D2D179",
"#4084BF",
'#e6194b',
'#52965b',
'#fbcc11',
'#4363d8',
'#f58231',
"#5cb569",
"#88ccbf",
'#c45748',
'#b7b1b0',
'#8ba3dd',
'#b7a519',
'#b27c62',
'#e0c8a6'
]
my_color_schema = {'asleep': '#783f04ff',
'device': '#70a1f5',
'active': '#f1c232ff',
'movement': '#e58829',
'restless': '#ffccf2',
'awake': '#00ff3f', ##7fffd4',
'no entry': '#ffffff',
'other': '#e8e8e8',
'project': '#05A9CA',
'coursework': '#CBA54F',
'Software Development': '#295ce5',
'Communication & Scheduling': '#c87d93',
'Utilities': '#66ff99',
'Reference & Learning': '#66ccff',
'Social Networking': '#22db35',
'Entertainment': '#e52962',
'Uncategorized': '#0b0a0a',
'Design & Composition': '#b734f2',
'Shopping': '#d4ea20',
'News & Opinion': '#f9dfc9',
'Business': '#4f9618',
}
my_color_categories = [key for (key, value) in sorted(my_color_schema.items())]
my_color_palette = [value for (key, value) in sorted(my_color_schema.items())]
def organize_categories_colors(d, category_column, colors, specified_category_list):
### Colors & Categories
category_list = list(d[category_column].unique())
## Which categories have not yet been assigned a color in the my_color_schema
unknown_category_list = list(set(category_list) - set(my_color_categories))
    # Generate a color palette if no color list was provided
r = lambda: random.randint(0,255)
long_color_list = []
if colors == None:
for i in range(0, len(unknown_category_list)):
long_color_list.append('#%02X%02X%02X' % (r(),r(),r()))
color_list = long_color_list
else:
color_list = colors
# Zip colors
color_pairs_new = dict(zip(unknown_category_list, color_list))
# Add the category/color pairs already defined in my_color_schema dictionary
known_category_list = list(set(category_list) & set(my_color_categories))
modified_my_color_schema = {x: my_color_schema[x] for x in known_category_list}
# Combine new
color_pairs = {**color_pairs_new, **modified_my_color_schema}
# Focus only a subset of categories
if specified_category_list != None:
# Create a list where all but the specified entries are included
category_list_remaining = category_list.copy()
[category_list_remaining.remove(x) for x in specified_category_list]
# Convert all the not specified entries to the same color (make it easier to visually inspect for patterns)
color_pairs.update(dict.fromkeys(category_list_remaining, '#e8e8e8'))
# Ordered categories and colors
category_list_names_ordered = [key for (key, value) in sorted(color_pairs.items())]
color_palette = [value for (key, value) in sorted(color_pairs.items())]
return color_palette, category_list_names_ordered, color_pairs
def create_chart_xy_components(d, date_time_column, start_date, end_date, category_column):
d = d[(d[date_time_column] >= start_date) & (d[date_time_column] <= end_date)].copy()
### X & Y Axis
# x-axis time periods
d['Date Abr'] = d['date_time'].dt.date
# Add day of week
# d['day_of_week'] = d['date_time'].dt.dayofweek
# d['day_of_week'] = d['date_time'].dt.strftime('%a')
# d['date_week'] = d['Date Abr'].astype(str) +', ' + d['day_of_week'].astype(str)
d['date_week'] = d['date_time'].dt.strftime('%Y-%m-%d, %a')
# y-axis scaled for 24 hour period
d['time_from_day_start'] = (d[date_time_column] - d[date_time_column].dt.normalize()).dt.total_seconds().fillna(0)/(60*60)
return d
def daily_chart_24_hours(d, category_column, category_list_names_ordered, color_palette,
add_reference_lines, top_line, bottom_line,
legend_on, turn_xaxis_on,
new_yaxis_labels = False,
new_ref_line_text = False
):
plt.style.use('fivethirtyeight')
v_val = 0
h_val = 200
verts = list(zip([-h_val,h_val,h_val,-h_val],[-v_val,-v_val,v_val,v_val]))
fig, ax = plt.subplots()
for i in range(len(category_list_names_ordered)):
plt.scatter(d[d[category_column] == category_list_names_ordered[i]]['Date Abr'],
d[d[category_column] == category_list_names_ordered[i]]['time_from_day_start'],
s = 1800,
c = color_palette[i],
marker = (verts),
)
plt.yticks(np.arange(0, 25, step=6))
if new_yaxis_labels:
plt.yticks(np.arange(0, 25, step=6), new_yaxis_labels)
xstart = d['Date Abr'].min() - pd.DateOffset(days=1)
xend = d['Date Abr'].max() + pd.DateOffset(days=1)
plt.xlim(xstart, xend)
# Add labels with Day of the week at the end, ordered
plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=1))
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%b %-d, %a'))
# Remove the extra date at the front and end
locs, labels = plt.xticks()
date_label_list = list(d['date_time'].dt.strftime('%b %-d, %a').unique())
plt.xticks(np.arange(locs[0] - 1, locs[0] + len(date_label_list) + 1, step =1),
[""] + [""] + date_label_list,
rotation=90)
if turn_xaxis_on == False:
ax.tick_params(labelbottom=False)
if legend_on == True:
leg = plt.legend(category_list_names_ordered, bbox_to_anchor=(1.11,0.5),
loc="center", title = (r"$\bf{" + category_column + "}$"), fancybox=True)
for i in leg.legendHandles:
i.set_linewidth(7)
else:
plt.legend('')
plt.ylabel('Time of Day')
plt.gca().invert_yaxis()
plt.xlabel('Date')
plt.title(r"$\bf{" + 'Recorded' + "}$" + ' ' + r"$\bf{" + 'Daily' + "}$" + ' ' + r"$\bf{" + 'Minutes' + "}$" +
f"\nDate Range: {str(xstart.strftime('%Y-%m-%d'))} to {str(xend.strftime('%Y-%m-%d'))}")
# plt.title(f'Daily Activities \n Date Range: {str(xstart.strftime("%Y-%m-%d"))} to {str(xend.strftime("%Y-%m-%d"))}', fontweight = 'bold')
## Reference Lines
if add_reference_lines == True:
# Alternative titles
if new_ref_line_text:
top_line_text = new_ref_line_text[0]
bottom_line_text = new_ref_line_text[1]
else:
top_line_text = ' Start: {}'.format(top_line)
bottom_line_text = ' End: {}'.format(bottom_line)
plt.axhline(y=top_line, linewidth=2, color='black', linestyle = '--')
plt.text(x=xend, y=top_line, s=top_line_text, alpha=0.7, color='#334f8d')
plt.axhline(y=bottom_line, linewidth=2, color='black', linestyle = '--')
plt.text(x=xend, y=bottom_line, s=bottom_line_text, alpha=0.7, color='#334f8d')
plt.show()
def pivot_data_with_missing_days(data, specified_category_list, remove_no_entry_category,
values_column_name, values_hour_conversion, category_column):
# If you don't want the "no entry" in the list of categories, remove it
if remove_no_entry_category == 1:
## Using a set loses the order and switches the colors to no longer match original
# specified_category_entries = list(set(specified_category_list) - set(['no entry']))
specified_category_entries = specified_category_list.copy()
specified_category_entries.remove('no entry')
else:
specified_category_entries = specified_category_list
# List of all dates, ordered
date_list = list(data['Date Abr'].unique())
date_list.sort()
# Aggregate data
d2_alt = data[data[category_column].isin(specified_category_entries)].groupby([category_column, 'Date Abr'])[values_column_name].sum().reset_index()
d2_alt['hours'] = d2_alt[values_column_name]/(values_hour_conversion)
# total_time = round(d2_alt['hours'].sum(),2)
# Create a pivot table in order to create a stacked bar chart
pivot_d21 = d2_alt.pivot(index='Date Abr', columns=category_column, values='hours')
# Add any missing dates to the table
missing_dates = set(date_list) - set(list(pivot_d21.index))
pivot_d2 = pivot_d21.reindex(pivot_d21.index.tolist() + list(missing_dates)).sort_index()
return pivot_d2 #, total_time #, specified_category_entries
def stacked_bar_chart_categories(pivot_data, color_pairs, legend_on, ymax, ystep):
# List of dates
dates = pivot_data.reset_index()['Date Abr']
# date_list = list(pd.to_datetime(dates).dt.strftime('%Y-%m-%d, %a'))
date_list = list(pd.to_datetime(dates).dt.strftime('%b %-d, %a'))
# date_list.sort()
# Categories and the matching colors
specified_category_entries = list(pivot_data.T.index)
cat_color_pairs_alt = {item: color_pairs.get(item) for item in specified_category_entries}
colors_alt = list(cat_color_pairs_alt.values())
# Create x-axis names and tick positions
objects = date_list
pos = np.arange(len(date_list))
#### Plot ####
plt.style.use('fivethirtyeight')
pivot_data.plot.bar(stacked=True,
color = colors_alt,
figsize=(12,6),
edgecolor = 'black',
linewidth = 1)
plt.xticks(pos, objects )
# locs, labels = plt.xticks(pos, objects) # Get locations and labels
# plt.xticks(locs, [""] + list(d['date_time'].dt.strftime('%Y-%m-%d, %a').unique()) + [""], rotation=90)
plt.xticks(rotation=90)
plt.yticks(np.arange(0, ymax, step=ystep))
if legend_on == True:
plt.legend(specified_category_entries , bbox_to_anchor=(1.25,0.5), loc="center")
else:
plt.legend('')
plt.ylabel('Hours')
plt.xlabel('Week Start')
plt.title('Total Time Spent: ' + r"$\bf{" + str(round(pivot_data.sum().sum(),2)) + "}$" + ' Hours' )
plt.show()
def horizontal_bar_chart_totals(pivot_data, num_categories, color_pairs, category_column, ytick_labels_on):
color_df = pd.DataFrame.from_dict(color_pairs.items())
color_df.rename(columns = {0: category_column, 1: 'Color'}, inplace = True)
cat_totals_df = pivot_data.sum().reset_index()
cat_totals_df.rename(columns = {0: 'Total Time'}, inplace = True)
cat_totals_df['Total Time'] = round(cat_totals_df['Total Time'],2)
cat_totals_df = pd.merge(cat_totals_df, color_df, how = 'left', on = category_column)
cat_totals_df.sort_values('Total Time', ascending = False, inplace = True)
plt.style.use('fivethirtyeight')
cdata = cat_totals_df.head(num_categories)
ax = sns.barplot(x='Total Time', y=category_column, hue=category_column,
data=cdata, palette=list(cat_totals_df['Color']),
dodge=False, edgecolor=".2")
# ax.legend(bbox_to_anchor=(1.35,0.5), loc="center")
ax.legend_.remove()
# Add total sum values at the end of the bar
for i, v in enumerate(cdata['Total Time']):
ax.text(v, i, " " + str(v), color=list(cat_totals_df['Color'])[i], va='center', fontweight='bold')
plt.title('Total Time Spent: ' + r"$\bf{" + str(round(pivot_data.sum().sum(),2)) + "}$" + ' Hours' )
plt.xlabel('Hours')
if ytick_labels_on == False:
ax.tick_params(labelleft=False)
plt.show()
avg_line_length: 36.850325 | max_line_length: 152 | alphanum_fraction: 0.62191
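A small usage sketch for time_dataframe_prep above. The `visual_prep` module name, the column names, and the two sample events are all made up for illustration; they are not part of the original record.

import pandas as pd
from visual_prep import time_dataframe_prep  # module name assumed

events = pd.DataFrame({
    "start_time": pd.to_datetime(["2021-01-01 08:00:00", "2021-01-01 09:30:00"]),
    "end_time":   pd.to_datetime(["2021-01-01 09:00:00", "2021-01-01 10:00:00"]),
    "activity":   ["coursework", "project"],
})

# Explode the two events into one row per minute, tagged with the activity or 'no entry'.
per_minute = time_dataframe_prep(
    events,
    start_date="2021-01-01", end_date="2021-01-02",
    start_date_column="start_time", end_date_column="end_time",
    category_column="activity",
)
print(per_minute[["date_time_min", "activity", "duration_minutes"]].head())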
hexsha: ae2625dc5870ccd54054a96c2211c2cb15d0df81 | size: 391 | ext: py | lang: Python
max_stars:  src/easy/the-major-element/solutions/python/solution.py | rdtsc/codeeval-solutions | d5c06baf89125e9e9f4b163ee57e5a8f7e73e717 | ["MIT"] | count: null | events: null / null
max_issues: src/easy/the-major-element/solutions/python/solution.py | rdtsc/codeeval-solutions | d5c06baf89125e9e9f4b163ee57e5a8f7e73e717 | ["MIT"] | count: null | events: null / null
max_forks:  src/easy/the-major-element/solutions/python/solution.py | rdtsc/codeeval-solutions | d5c06baf89125e9e9f4b163ee57e5a8f7e73e717 | ["MIT"] | count: null | events: null / null
content:
#!/usr/bin/env python3
import sys
import collections
for line in (line.rstrip() for line in sys.stdin):
frequency = collections.defaultdict(int)
for value in line.split(','):
frequency[value] += 1
majorTerm = 'None'
threshold = (line.count(',') + 1) // 2
for key, count in frequency.items():
if count > threshold:
majorTerm = key
break
print(majorTerm)
avg_line_length: 18.619048 | max_line_length: 50 | alphanum_fraction: 0.647059
hexsha: 6296d8b900c7356fe0a6a8eb383288129a4a2b93 | size: 1,041 | ext: py | lang: Python
max_stars:  src/util/file_provider.py | fressive/koe-server | 5371fbaf66458b21f358bae7513c9c3ebb0c3da1 | ["MIT"] | count: null | events: null / null
max_issues: src/util/file_provider.py | fressive/koe-server | 5371fbaf66458b21f358bae7513c9c3ebb0c3da1 | ["MIT"] | count: null | events: null / null
max_forks:  src/util/file_provider.py | fressive/koe-server | 5371fbaf66458b21f358bae7513c9c3ebb0c3da1 | ["MIT"] | count: null | events: null / null
content:
from model.config import Config
import os
class FileProvider:
@staticmethod
def save(md5, data):
pass
@staticmethod
def get(md5):
pass
@staticmethod
def get_instance(provider):
if provider == "local_storage":
return LocalStorage()
elif provider == "gridfs_storage":
return GridFSStorage()
else:
return LocalStorage()
class LocalStorage(FileProvider):
@staticmethod
    def save(md5, data):  # named to match FileProvider.save so get_instance() callers reach this implementation
save_path = Config.get("data_save_path")
if not os.path.exists(save_path):
os.makedirs(save_path)
with open(os.path.join(save_path, md5), 'wb') as file:
file.write(data)
@staticmethod
    def get(md5):  # named to match FileProvider.get so get_instance() callers reach this implementation
save_path = Config.get("data_save_path")
with open(os.path.join(save_path, md5), 'rb') as file:
return file.read()
class GridFSStorage(FileProvider):
@staticmethod
def save(md5, data):
pass
@staticmethod
def get(md5):
pass
avg_line_length: 22.630435 | max_line_length: 62 | alphanum_fraction: 0.600384
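A hedged usage sketch of the provider factory above. The import path is inferred from src/util/file_provider.py, the md5 key is a placeholder, and it assumes Config supplies a valid "data_save_path" and that LocalStorage implements the save/get names declared on FileProvider.

from util.file_provider import FileProvider  # import path inferred from src/util/file_provider.py

provider = FileProvider.get_instance("local_storage")
md5 = "d41d8cd98f00b204e9800998ecf8427e"       # placeholder content hash
provider.save(md5, b"raw file bytes")          # writes <data_save_path>/<md5>
print(provider.get(md5))                       # reads the same file back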
hexsha: cfbfe96633dc1abc92ba66abe178a3c2965f9cbc | size: 811 | ext: py | lang: Python
max_stars:  CosmonautBlog/manage.py | WelcomeToTheRapture/CosmonautBlog | 7936a9e7efb71d733f2f281f91913c05d099e198 | ["MIT"] | count: null | events: null / null
max_issues: CosmonautBlog/manage.py | WelcomeToTheRapture/CosmonautBlog | 7936a9e7efb71d733f2f281f91913c05d099e198 | ["MIT"] | count: null | events: null / null
max_forks:  CosmonautBlog/manage.py | WelcomeToTheRapture/CosmonautBlog | 7936a9e7efb71d733f2f281f91913c05d099e198 | ["MIT"] | count: 1 | events: 2018-02-14T03:41:22.000Z / 2018-02-14T03:41:22.000Z
content:
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "CosmonautBlog.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
avg_line_length: 35.26087 | max_line_length: 77 | alphanum_fraction: 0.644883
hexsha: 95faf988d9ab488921c6764be4ea87352e154dff | size: 3,264 | ext: py | lang: Python
max_stars:  python/getting_started/aws_signing.py | ludokriss/getting-started | 5a95297ad27e1d5328d30d59807568e3c3890591 | ["MIT"] | count: null | events: null / null
max_issues: python/getting_started/aws_signing.py | ludokriss/getting-started | 5a95297ad27e1d5328d30d59807568e3c3890591 | ["MIT"] | count: null | events: null / null
max_forks:  python/getting_started/aws_signing.py | ludokriss/getting-started | 5a95297ad27e1d5328d30d59807568e3c3890591 | ["MIT"] | count: null | events: null / null
content:
import datetime
import hashlib
import hmac
def now():
return datetime.datetime.utcnow()
class AwsSigningV4(object):
__ALGORITHM = "AWS4-HMAC-SHA256"
def __init__(
self,
aws_access_key_id,
aws_secret_access_key,
aws_host,
aws_region="eu-west-1",
aws_service="execute-api",
):
self.__aws_access_key_id = aws_access_key_id
self.__aws_secret_access_key = aws_secret_access_key
self.__aws_host = aws_host
self.__aws_region = aws_region
self.__aws_service = aws_service
def __sign(self, key, msg):
return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()
def __get_signature_key(self, key, date_stamp, region_name, service_name):
k_date = self.__sign(("AWS4" + key).encode("utf-8"), date_stamp)
k_region = self.__sign(k_date, region_name)
k_service = self.__sign(k_region, service_name)
k_signing = self.__sign(k_service, "aws4_request")
return k_signing
def create_headers(self, path, method, querystring="", data=None):
# Create a date for headers and the credential string
timestamp = now()
amz_date = timestamp.strftime("%Y%m%dT%H%M%SZ")
# Date w/o time, used in credential scope
date_stamp = timestamp.strftime("%Y%m%d")
canonical_uri = path
canonical_querystring = querystring
canonical_headers = "host:" + self.__aws_host + "\n"
canonical_headers += "x-amz-date:" + amz_date + "\n"
signed_headers = "host;x-amz-date"
payload_hash = hashlib.sha256((data or "").encode("utf-8")).hexdigest()
canonical_request = method + "\n"
canonical_request += canonical_uri + "\n"
canonical_request += canonical_querystring + "\n"
canonical_request += canonical_headers + "\n"
canonical_request += signed_headers + "\n"
canonical_request += payload_hash
credential_scope = date_stamp + "/"
credential_scope += self.__aws_region + "/"
credential_scope += self.__aws_service + "/" + "aws4_request"
string_to_sign = self.__ALGORITHM + "\n"
string_to_sign += amz_date + "\n"
string_to_sign += credential_scope + "\n"
string_to_sign += hashlib.sha256(canonical_request.encode("utf-8")).hexdigest()
signing_key = self.__get_signature_key(
key=self.__aws_secret_access_key,
date_stamp=date_stamp,
region_name=self.__aws_region,
service_name=self.__aws_service,
)
signature = hmac.new(
signing_key, string_to_sign.encode("utf-8"), hashlib.sha256
).hexdigest()
authorization_header = self.__ALGORITHM + " "
credential_header = "Credential=" + self.__aws_access_key_id + "/"
credential_header += credential_scope
authorization_header += credential_header + ", "
authorization_header += "SignedHeaders=" + signed_headers + ", "
authorization_header += "Signature=" + signature
headers = {"x-amz-date": amz_date, "Authorization": authorization_header}
if data is not None:
headers["x-amz-content-sha256"] = payload_hash
return headers
avg_line_length: 37.517241 | max_line_length: 87 | alphanum_fraction: 0.640319
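A hedged usage sketch of AwsSigningV4 with the requests library; the host, path, and credentials are placeholders, and the import path is an assumption based on the file location.

import requests
from getting_started.aws_signing import AwsSigningV4  # import path assumed

host = "abc123.execute-api.eu-west-1.amazonaws.com"    # placeholder API Gateway host
signer = AwsSigningV4(
    aws_access_key_id="AKIA...",                       # placeholder credentials
    aws_secret_access_key="...",
    aws_host=host,
)
headers = signer.create_headers(path="/prod/hello", method="GET")
response = requests.get(f"https://{host}/prod/hello", headers=headers)
print(response.status_code)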
hexsha: 7b624264f23ed022c902db0d42f57441ce2621bf | size: 8,206 | ext: py | lang: Python
max_stars:  ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/bgplscommunitieslist_fdb216f1d4195f82ad738e19cb2b5d32.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | ["MIT"] | count: 20 | events: 2019-05-07T01:59:14.000Z / 2022-02-11T05:24:47.000Z
max_issues: ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/bgplscommunitieslist_fdb216f1d4195f82ad738e19cb2b5d32.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | ["MIT"] | count: 60 | events: 2019-04-03T18:59:35.000Z / 2022-02-22T12:05:05.000Z
max_forks:  ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/bgplscommunitieslist_fdb216f1d4195f82ad738e19cb2b5d32.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | ["MIT"] | count: 13 | events: 2019-05-20T10:48:31.000Z / 2021-10-06T07:45:44.000Z
content:
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
class BgpLsCommunitiesList(Base):
"""Bgp Non VPN RR Communities
The BgpLsCommunitiesList class encapsulates a list of bgpLsCommunitiesList resources that are managed by the system.
A list of resources can be retrieved from the server using the BgpLsCommunitiesList.find() method.
"""
__slots__ = ()
_SDM_NAME = 'bgpLsCommunitiesList'
_SDM_ATT_MAP = {
'AsNumber': 'asNumber',
'Count': 'count',
'DescriptiveName': 'descriptiveName',
'LastTwoOctets': 'lastTwoOctets',
'Name': 'name',
'Type': 'type',
}
_SDM_ENUM_MAP = {
}
def __init__(self, parent, list_op=False):
super(BgpLsCommunitiesList, self).__init__(parent, list_op)
@property
def AsNumber(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): AS #
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AsNumber']))
@property
def Count(self):
# type: () -> int
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP['Count'])
@property
def DescriptiveName(self):
# type: () -> str
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
@property
def LastTwoOctets(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Last Two Octets
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LastTwoOctets']))
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def Type(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Type
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Type']))
def update(self, Name=None):
# type: (str) -> BgpLsCommunitiesList
"""Updates bgpLsCommunitiesList resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, Name=None):
# type: (str) -> BgpLsCommunitiesList
"""Adds a new bgpLsCommunitiesList resource on the json, only valid with config assistant
Args
----
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Returns
-------
- self: This instance with all currently retrieved bgpLsCommunitiesList resources using find and the newly added bgpLsCommunitiesList resources available through an iterator or index
Raises
------
- Exception: if this function is not being used with config assistance
"""
return self._add_xpath(self._map_locals(self._SDM_ATT_MAP, locals()))
def find(self, Count=None, DescriptiveName=None, Name=None):
# type: (int, str, str) -> BgpLsCommunitiesList
"""Finds and retrieves bgpLsCommunitiesList resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve bgpLsCommunitiesList resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all bgpLsCommunitiesList resources from the server.
Args
----
- Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
- DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Returns
-------
- self: This instance with matching bgpLsCommunitiesList resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of bgpLsCommunitiesList data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the bgpLsCommunitiesList resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def get_device_ids(self, PortNames=None, AsNumber=None, LastTwoOctets=None, Type=None):
"""Base class infrastructure that gets a list of bgpLsCommunitiesList device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args
----
- PortNames (str): optional regex of port names
- AsNumber (str): optional regex of asNumber
- LastTwoOctets (str): optional regex of lastTwoOctets
- Type (str): optional regex of type
Returns
-------
- list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals())
avg_line_length: 38.345794 | max_line_length: 190 | alphanum_fraction: 0.659396
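A sketch of the find/update pattern the docstrings above describe. `parent` stands for whatever NGPF node exposes this system-managed list in a live ixnetwork_restpy session; obtaining it is outside this record, so treat the accessor itself as hypothetical.

# `parent` is assumed to be an NGPF node exposing a BgpLsCommunitiesList child accessor.
all_lists = parent.BgpLsCommunitiesList.find()                 # retrieve every resource in the list
one_list = parent.BgpLsCommunitiesList.find(Name='^comm-1$')   # exact match via regex anchors
one_list.update(Name='comm-renamed')                           # push the change back to the server
print(one_list.Count, one_list.DescriptiveName)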
hexsha: eacb3cae0b37cf147b73741fd472a6b338f66204 | size: 7,680 | ext: py | lang: Python
max_stars:  examples/s2s_trans/criterions/s2t_loss.py | fengpeng-yue/speech-to-speech-translation | 099aa326f29c51a882532952186e329a87d2c4d5 | ["MIT"] | count: 2 | events: 2022-03-30T08:20:16.000Z / 2022-03-30T08:25:48.000Z
max_issues: examples/s2s_trans/criterions/s2t_loss.py | fengpeng-yue/speech-to-speech-translation | 099aa326f29c51a882532952186e329a87d2c4d5 | ["MIT"] | count: null | events: null / null
max_forks:  examples/s2s_trans/criterions/s2t_loss.py | fengpeng-yue/speech-to-speech-translation | 099aa326f29c51a882532952186e329a87d2c4d5 | ["MIT"] | count: null | events: null / null
content:
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass, field
import torch
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from omegaconf import II
@dataclass
class LabelSmoothedCrossEntropyCriterionConfig(FairseqDataclass):
label_smoothing: float = field(
default=0.0,
metadata={"help": "epsilon for label smoothing, 0 means no label smoothing"},
)
report_accuracy: bool = field(
default=False,
metadata={"help": "report accuracy metric"},
)
ignore_prefix_size: int = field(
default=0,
metadata={"help": "Ignore first N tokens"},
)
test_type: str = field(
default="asr",
metadata={"help": "test asr or st"},
)
sentence_avg: bool = II("optimization.sentence_avg")
def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=None, reduce=True):
if target.dim() == lprobs.dim() - 1:
target = target.unsqueeze(-1)
nll_loss = -lprobs.gather(dim=-1, index=target)
smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
if ignore_index is not None:
pad_mask = target.eq(ignore_index)
nll_loss.masked_fill_(pad_mask, 0.0)
smooth_loss.masked_fill_(pad_mask, 0.0)
else:
nll_loss = nll_loss.squeeze(-1)
smooth_loss = smooth_loss.squeeze(-1)
if reduce:
nll_loss = nll_loss.sum()
smooth_loss = smooth_loss.sum()
eps_i = epsilon / (lprobs.size(-1) - 1)
loss = (1.0 - epsilon - eps_i) * nll_loss + eps_i * smooth_loss
return loss, nll_loss
@register_criterion(
"s2t_loss", dataclass=LabelSmoothedCrossEntropyCriterionConfig
)
class LabelSmoothedCrossEntropyCriterion(FairseqCriterion):
def __init__(
self,
task,
sentence_avg,
label_smoothing,
ignore_prefix_size=0,
report_accuracy=False,
test_type = "asr",
):
super().__init__(task)
self.sentence_avg = sentence_avg
self.task.args = task.args
self.eps = label_smoothing
self.ignore_prefix_size = ignore_prefix_size
self.report_accuracy = report_accuracy
self.test_type = test_type
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
src_tokens = sample["net_input"]["src_speech"]
src_lens = sample["net_input"]["src_speech_lens"]
if self.test_type == "asr":
prev_output_tokens = sample["net_input"]["prev_src_text_tokens"]
else:
prev_output_tokens = sample["net_input"]["prev_tgt_text_tokens"]
collated_audios = sample["net_input"]["collated_audios_orig"]
padding_mask = sample["net_input"]["padding_mask"]
net_output = model(src_tokens, src_lens,collated_audios,padding_mask,prev_output_tokens)
loss, nll_loss = self.compute_loss(model, net_output, sample, reduce=reduce)
if self.test_type == "asr":
sample_size = (
sample["src_text"].size(0) if self.sentence_avg else sample["src_txt_ntokens"]
)
logging_output = {
"loss": loss.data,
"nll_loss": nll_loss.data,
"ntokens": sample["src_txt_ntokens"],
"nsentences": sample["src_text"].size(0),
"sample_size": sample_size,
}
else:
sample_size = (
sample["tgt_text"].size(0) if self.sentence_avg else sample["tgt_txt_ntokens"]
)
logging_output = {
"loss": loss.data,
"nll_loss": nll_loss.data,
"ntokens": sample["tgt_txt_ntokens"],
"nsentences": sample["tgt_text"].size(0),
"sample_size": sample_size,
}
if self.report_accuracy:
n_correct, total = self.compute_accuracy(model, net_output, sample)
logging_output["n_correct"] = utils.item(n_correct.data)
logging_output["total"] = utils.item(total.data)
return loss, sample_size, logging_output
def get_lprobs_and_target(self, model, net_output, sample):
lprobs = model.get_normalized_probs(net_output, log_probs=True)
target = model.get_targets(sample, self.test_type, net_output)
if self.ignore_prefix_size > 0:
if getattr(lprobs, "batch_first", False):
lprobs = lprobs[:, self.ignore_prefix_size :, :].contiguous()
target = target[:, self.ignore_prefix_size :].contiguous()
else:
lprobs = lprobs[self.ignore_prefix_size :, :, :].contiguous()
target = target[self.ignore_prefix_size :, :].contiguous()
return lprobs.view(-1, lprobs.size(-1)), target.view(-1)
def compute_loss(self, model, net_output, sample, reduce=True):
lprobs, target = self.get_lprobs_and_target(model, net_output, sample)
loss, nll_loss = label_smoothed_nll_loss(
lprobs,
target,
self.eps,
ignore_index=self.padding_idx,
reduce=reduce,
)
return loss, nll_loss
def compute_accuracy(self, model, net_output, sample):
lprobs, target = self.get_lprobs_and_target(model, net_output, sample)
mask = target.ne(self.padding_idx)
n_correct = torch.sum(
lprobs.argmax(1).masked_select(mask).eq(target.masked_select(mask))
)
total = torch.sum(mask)
return n_correct, total
@classmethod
def reduce_metrics(cls, logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
nll_loss_sum = sum(log.get("nll_loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
metrics.log_scalar(
"nll_loss", nll_loss_sum / ntokens / math.log(2), ntokens, round=3
)
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg)
)
total = utils.item(sum(log.get("total", 0) for log in logging_outputs))
if total > 0:
metrics.log_scalar("total", total)
n_correct = utils.item(
sum(log.get("n_correct", 0) for log in logging_outputs)
)
metrics.log_scalar("n_correct", n_correct)
metrics.log_derived(
"accuracy",
lambda meters: round(
meters["n_correct"].sum * 100.0 / meters["total"].sum, 3
)
if meters["total"].sum > 0
else float("nan"),
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
avg_line_length: 38.592965 | max_line_length: 96 | alphanum_fraction: 0.617708
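A small self-contained check of label_smoothed_nll_loss above. The tensor shapes, vocabulary size, and padding index are illustrative only, and the import path is assumed from the file location.

import torch
import torch.nn.functional as F
from examples.s2s_trans.criterions.s2t_loss import label_smoothed_nll_loss  # import path assumed

logits = torch.randn(4, 7, 100)                  # (batch, time, vocab) dummy decoder outputs
lprobs = F.log_softmax(logits, dim=-1)
target = torch.randint(0, 100, (4, 7))           # dummy token ids; index 1 treated as padding below
loss, nll_loss = label_smoothed_nll_loss(lprobs, target, epsilon=0.1, ignore_index=1, reduce=True)
print(loss.item(), nll_loss.item())              # smoothed loss vs. plain NLL on the same batch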
hexsha: 5d5ae9fb1889ce013b5d71b5d377b8b7b97d7415 | size: 1,147 | ext: py | lang: Python
max_stars:  model_configs/ch_alpha/strnconcat_resnet50.py | kiyoon/PyVideoAI | c4d3ba7a69723aeae7da48245989ae11cbdb1f8b | ["MIT"] | count: 22 | events: 2021-06-01T07:40:01.000Z / 2022-03-14T07:09:01.000Z
max_issues: model_configs/ch_alpha/strnconcat_resnet50.py | kiyoon/PyVideoAI | c4d3ba7a69723aeae7da48245989ae11cbdb1f8b | ["MIT"] | count: null | events: null / null
max_forks:  model_configs/ch_alpha/strnconcat_resnet50.py | kiyoon/PyVideoAI | c4d3ba7a69723aeae7da48245989ae11cbdb1f8b | ["MIT"] | count: null | events: null / null
content:
import torch
from torch import optim
from ..models.epic.tsn import STRN, TRN
def load_model(num_classes, input_frame_length):
#class_counts = (num_classes,352)
class_counts = num_classes
segment_count = input_frame_length
base_model = 'resnet50rnconcat'
pretrained = 'imagenet'
# repo = 'epic-kitchens/action-models'
# model = torch.hub.load(repo, 'TRN', class_counts, segment_count, 'RGB',
# base_model = base_model,
# pretrained='epic-kitchens')
# Use TRN if you want to add an fc layer after the base model.
# Use STRN if you don't.
model = TRN(class_counts, segment_count, 'RGB',
base_model = base_model,
pretrained=pretrained)
return model
def load_pretrained(model):
return None
def get_optim_policies(model):
# return model.parameters() # no policies
return model.get_optim_policies()
dataloader_type = 'sparse_frames' # video_clip, sparse_video, sparse_frames
ddp_find_unused_parameters = True
# input configs
input_normalise = True
input_bgr = False
input_mean = [0.485, 0.456, 0.406]
input_std = [0.229, 0.224, 0.225]
| 25.488889
| 77
| 0.695728
|
957f4200738a6040e3c3d889da19865841b9db2c
| 4,056
|
py
|
Python
|
src/programy/clients/restful/flask/client.py
|
NeolithEra/program-y
|
8c2396611f30c8095e98ff02988223a641c1a3be
|
[
"MIT"
] | null | null | null |
src/programy/clients/restful/flask/client.py
|
NeolithEra/program-y
|
8c2396611f30c8095e98ff02988223a641c1a3be
|
[
"MIT"
] | null | null | null |
src/programy/clients/restful/flask/client.py
|
NeolithEra/program-y
|
8c2396611f30c8095e98ff02988223a641c1a3be
|
[
"MIT"
] | null | null | null |
"""
Copyright (c) 2016-2019 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from flask import Flask, jsonify, request, make_response, abort, Response
from programy.clients.restful.client import RestBotClient
class FlaskRestBotClient(RestBotClient):
def __init__(self, id, argument_parser=None):
RestBotClient.__init__(self, id, argument_parser)
self.initialise()
def server_abort(self, message, status_code):
abort(Response(message), status_code)
def create_response(self, response_data, status_code, version=1.0):
if self.configuration.client_configuration.debug is True:
self.dump_request(response_data)
if version == 1.0:
return make_response(jsonify(response_data, status_code))
elif version == 2.0:
return make_response(jsonify(response_data), status_code)
else:
return make_response('Invalid API version', 400)
def run(self, flask):
print("%s Client running on http://%s:%s" % (self.id, self.configuration.client_configuration.host,
self.configuration.client_configuration.port))
self.startup()
if self.configuration.client_configuration.debug is True:
print("%s Client running in debug mode" % self.id)
if self.configuration.client_configuration.ssl_cert_file is not None and \
self.configuration.client_configuration.ssl_key_file is not None:
context = (self.configuration.client_configuration.ssl_cert_file,
self.configuration.client_configuration.ssl_key_file)
print("%s Client running in https mode" % self.id)
flask.run(host=self.configuration.client_configuration.host,
port=self.configuration.client_configuration.port,
debug=self.configuration.client_configuration.debug,
ssl_context=context)
else:
print("%s Client running in http mode, careful now !" % self.id)
flask.run(host=self.configuration.client_configuration.host,
port=self.configuration.client_configuration.port,
debug=self.configuration.client_configuration.debug)
self.shutdown()
if __name__ == '__main__':
REST_CLIENT = None
print("Initiating Flask REST Service...")
APP = Flask(__name__)
@APP.route('/api/rest/v1.0/ask', methods=['GET', 'POST'])
def ask_v1_0():
response_data, status = REST_CLIENT.process_request(request, version=1.0)
return REST_CLIENT.create_response(response_data, status, version=1.0)
@APP.route('/api/rest/v2.0/ask', methods=['GET', 'POST'])
def ask_v2_0():
response_data, status = REST_CLIENT.process_request(request, version=2.0)
return REST_CLIENT.create_response(response_data, status, version=2.0)
print("Loading, please wait...")
REST_CLIENT = FlaskRestBotClient("flask")
REST_CLIENT.run(APP)
| 44.571429
| 120
| 0.698964
|
cc9a6f3175c34113ce09b3a73a7eed150e4fe5b7
| 10,455
|
py
|
Python
|
portal/portal/management/commands/rebuild_index.py
|
junjun315/PaddlePaddle.org
|
3feaa68376d8423e41d076814e901e6bf108c705
|
[
"Apache-2.0"
] | null | null | null |
portal/portal/management/commands/rebuild_index.py
|
junjun315/PaddlePaddle.org
|
3feaa68376d8423e41d076814e901e6bf108c705
|
[
"Apache-2.0"
] | null | null | null |
portal/portal/management/commands/rebuild_index.py
|
junjun315/PaddlePaddle.org
|
3feaa68376d8423e41d076814e901e6bf108c705
|
[
"Apache-2.0"
] | null | null | null |
import os
import re
import json
import math
from subprocess import check_output
import tempfile
import jieba
from bs4 import BeautifulSoup
from textblob import TextBlob as tb
from django.conf import settings
from django.core.management import BaseCommand
from .utils import sanitize_version
def get_section_for_api_title(title, depth=0):
"""
    Traverses the tree upwards from the title node, up to 3 levels, in search
    of a 'div' with the "section" class. Returns that div if found, else None.
"""
for parent in title.parents:
if parent and parent.has_attr('class') and 'section' in parent['class']:
return parent
else:
if depth == 2:
return None
return get_section_for_api_title(parent, depth+1)
return None
def jieba_zh_title(raw_title):
segments = jieba.cut_for_search(raw_title)
joined_segments = ''
for segment in segments:
if len(segment.strip()):
joined_segments += ' ' + segment
return joined_segments.strip()
def jieba_zh_content(token):
chinese_seg_list = [' '.join(jieba.cut_for_search(s)) for s in token]
return ', '.join(chinese_seg_list)
"""
Primarily done to reduce index size, may be reversed in future.
"""
def filter_insignificant_tokens(stripped_strings):
digits = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
# Reserving them for the API.
special_characters = ['=', '/', '_', '.']
filtered_string = ''
for stripped_string in stripped_strings:
filtered_string += ' '.join([token for token in stripped_string.split(' ') if token and not (
(token[0] in digits) or any(
special_character in token for special_character in special_characters
)
)])
return filtered_string
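# Editor's illustrative example (not part of the original command), showing the
# intended behaviour of filter_insignificant_tokens above:
#   filter_insignificant_tokens(['choose 2 epochs with paddle.fluid'])
#   -> 'choose epochs with'   # tokens starting with a digit or containing
#                             # '=', '/', '_' or '.' are dropped
# jieba_zh_title re-joins jieba's search-mode segments with spaces;
# jieba_zh_content does the same per string and joins the strings with ', '.
# The exact segmentation depends on the installed jieba dictionary.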
# The class must be named Command, and subclass BaseCommand
class Command(BaseCommand):
# Show this when the user types help
help = "Usage: python manage.py rebuild_index <language> <version> --content_id=<e.g. documentation>"
def get_docs_count(self):
return len(self.documents) + len(self.api_documents)
def add_arguments(self, parser):
parser.add_argument('language', nargs='+')
parser.add_argument('version', nargs='+')
parser.add_argument(
'--content_id', action='store', default=None, dest='content_id')
def build_api_document(self, source_dir, lang):
existing_docs_count = self.get_docs_count() + 1
for subdir, dirs, all_files in os.walk(source_dir):
for file in all_files:
subpath = os.path.join(subdir, file)
(name, extension) = os.path.splitext(file)
# We explicitly only want to look at HTML files which are not indexes.
if extension == '.html' and 'index_' not in file:
with open(os.path.join(settings.BASE_DIR, subpath)) as html_file:
soup = BeautifulSoup(html_file, 'lxml')
for api_call in soup.find_all(re.compile('^h(1|2|3)')):
parent_section = get_section_for_api_title(api_call)
title = next(api_call.stripped_strings)
content = parent_section.strings if parent_section else ''
if lang == 'zh':
content = jieba_zh_content(content) if content else content
elif content:
content = '. '.join(content)
try:
self.api_documents.append({
'id': existing_docs_count,
'path': '/' + subpath + (api_call.a['href'] if (api_call.a and api_call.a.has_attr('href')) else ''),
'title': str(title.encode('utf-8')),
'prefix': os.path.splitext(os.path.basename(name))[0] if '.' in name else '',
'content': content.encode('utf-8')
})
existing_docs_count += 1
except Exception as e:
print("Unable to parse the file at: %s" % subpath)
def build_document(self, source_dir, lang, version):
existing_docs_count = self.get_docs_count() + 1
apis_processed = False
for subdir, dirs, all_files in os.walk(source_dir):
for file in all_files:
subpath = os.path.join(subdir, file)
(name, extension) = os.path.splitext(file)
# HACK: After version 1.1, API docs are within "docs", unlike before.
# If we find this repo to contain an "api" directory under
# documentation/docs/<language>/<version>/, send it
                # to be processed as an API folder.
subpath_pieces = subpath.split('/')
if len(subpath_pieces) > 5 and subpath_pieces[1] == 'docs' and (
subpath_pieces[4] in ['api', 'api_cn'] and not apis_processed):
# This means that anything before 1.2 should be treated as English
# because there was no Chinese API before that.
self.build_api_document(subdir, lang if version >= '1.2' else 'en')
apis_processed = True
if extension == '.html':
document = {
'id': existing_docs_count,
'path': '/' + subpath
}
if document['path'] in self.unique_paths:
continue
# And extract their document content so that we can TFIDF
# their contents.
with open(
os.path.join(settings.BASE_DIR, subpath)) as html_file:
soup = BeautifulSoup(html_file, 'lxml')
# Find the first header 1 or h2.
title = soup.find('h1')
if not title:
title = soup.find('h2')
if title:
raw_title = next(title.stripped_strings)
if lang == 'zh':
document['title'] = jieba_zh_title(raw_title)
document['displayTitle'] = raw_title
else:
document['title'] = raw_title
else:
# No point trying to store a non-titled file
# because it is probably a nav or index of sorts.
continue
# Segment the Chinese sentence through jieba library
# Temporarily jieba-ing even content.
# if lang == 'zh':
document['content'] = jieba_zh_content(
filter_insignificant_tokens(soup.stripped_strings)
)
# else:
# document['content'] = ', '.join(soup.stripped_strings)
self.documents.append(document)
existing_docs_count += 1
self.unique_paths.append(document['path'])
print 'Indexing "%s"...' % document['title'].encode('utf-8')
def handle(self, *args, **options):
self.documents = []
self.api_documents = []
self.unique_paths = []
contents_to_build = []
if options['content_id']:
contents_to_build.append(options['content_id'])
else:
for maybe_dir in os.listdir(settings.WORKSPACE_DIR):
if os.path.isdir(
os.path.join(settings.WORKSPACE_DIR, maybe_dir)):
contents_to_build.append(maybe_dir)
# First we need to go through all the generated HTML documents.
version = sanitize_version(options['version'][0])
for content_to_build in contents_to_build:
source_dir = os.path.join(
settings.WORKSPACE_DIR, content_to_build,
options['language'][0], version
)
if content_to_build == 'api' and version not in ['0.10.0', '0.11.0']:
self.build_api_document(source_dir, 'en')
else:
self.build_document(source_dir, options['language'][0], version)
# And create an index JS file that we can import.
output_index_dir = os.path.join(
settings.INDEXES_DIR, 'indexes',
options['language'][0], version
)
if not os.path.exists(output_index_dir):
os.makedirs(output_index_dir)
output_index_js = os.path.join(output_index_dir, 'index.js')
output_toc_js = os.path.join(output_index_dir, 'toc.js')
tmp_documents_file = tempfile.NamedTemporaryFile(delete=False)
tmp_documents_file.write(json.dumps(self.documents + self.api_documents))
tmp_documents_file.close()
with open(output_index_js, 'w') as index_file:
index_file.write('var index = ' + check_output(['node',
os.path.join(settings.PROJECT_ROOT, 'management/commands/build-index.js'), tmp_documents_file.name]))
with open(output_toc_js, 'w') as toc_file:
content_less_toc = {}
for doc in self.documents + self.api_documents:
if doc['path'] not in content_less_toc:
serialized_doc = {
'id': doc['id'],
'path': doc['path'],
'title': doc['displayTitle'] if 'displayTitle' in doc else doc['title']
}
if 'prefix' in doc:
serialized_doc['prefix'] = doc['prefix']
content_less_toc[doc['id']] = serialized_doc
toc_file.write('var indexPathMap = ' + json.dumps(content_less_toc))
os.remove(tmp_documents_file.name)
# Gzip the index generated.
# NOTE: Will make NGINX do this on the fly.
# check_output(['gzip', output_index_js])
# check_output(['gzip', output_toc_js])
| 38.4375
| 137
| 0.536872
|
bdbdb9a5bbe9b00ab67ec7d38d7d9fb41520517b
| 377
|
py
|
Python
|
easy/1342.py
|
nkwib/leetcode
|
73f7492ba208417d8bf8340b6bf9dc68a6ded7f7
|
[
"MIT"
] | null | null | null |
easy/1342.py
|
nkwib/leetcode
|
73f7492ba208417d8bf8340b6bf9dc68a6ded7f7
|
[
"MIT"
] | null | null | null |
easy/1342.py
|
nkwib/leetcode
|
73f7492ba208417d8bf8340b6bf9dc68a6ded7f7
|
[
"MIT"
] | null | null | null |
class Solution:
    def numberOfSteps(self, num: int) -> int:
        # Count the steps to reduce num to zero: halve when even, else subtract 1.
        count = 0
        temp = num
        while temp != 0:
            if temp % 2 == 0:
                temp //= 2
            else:
                temp -= 1
            count += 1
        return count
s = Solution()
res = s.numberOfSteps(6)
print(res)
| 22.176471
| 46
| 0.419098
|
3c0321e1224173df2fbeebf26fa7197f377d90eb
| 122
|
py
|
Python
|
backend/app/admin.py
|
rattrayalex/django-graphene-react-funtimes
|
e0690d2965301a5100479dad329d509ece5dcac3
|
[
"MIT"
] | 3
|
2016-06-11T03:14:42.000Z
|
2018-03-16T11:08:56.000Z
|
backend/app/admin.py
|
rattrayalex/django-graphene-react-funtimes
|
e0690d2965301a5100479dad329d509ece5dcac3
|
[
"MIT"
] | null | null | null |
backend/app/admin.py
|
rattrayalex/django-graphene-react-funtimes
|
e0690d2965301a5100479dad329d509ece5dcac3
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Company, Job
admin.site.register(Company)
admin.site.register(Job)
| 17.428571
| 32
| 0.803279
|
527fe792a38b1b19478a0a785e04808d36b15927
| 1,698
|
py
|
Python
|
scripts/motors.py
|
kandahyon/pimouse_ros
|
3234c406bef138969cf56a823586b240943dadb2
|
[
"BSD-3-Clause"
] | 1
|
2021-02-27T19:00:46.000Z
|
2021-02-27T19:00:46.000Z
|
scripts/motors.py
|
kandahyon/pimouse_ros
|
3234c406bef138969cf56a823586b240943dadb2
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/motors.py
|
kandahyon/pimouse_ros
|
3234c406bef138969cf56a823586b240943dadb2
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
#encoding: utf8
import sys, rospy, math
from pimouse_ros.msg import MotorFreqs
from geometry_msgs.msg import Twist
class Motor():
def __init__(self):
if not self.set_power(True): sys.exit(1)
rospy.on_shutdown(self.set_power)
self.sub_raw = rospy.Subscriber('motor_raw', MotorFreqs, self.callback_raw_freq)
self.sub_cmd_vel = rospy.Subscriber('cmd_vel', Twist, self.callback_cmd_vel)
self.last_time = rospy.Time.now()
self.using_cmd_vel = False
def set_power(self,onoff=False):
en = "/dev/rtmotoren0"
try:
with open(en,'w') as f:
f.write("1\n" if onoff else "0\n")
self.is_on = onoff
return True
except:
rospy.logerr("cannot write to " + en)
return False
def set_raw_freq(self,left_hz,right_hz):
if not self.is_on:
rospy.logerr("not enpowered")
return
try:
with open("/dev/rtmotor_raw_l0",'w') as lf,\
open("/dev/rtmotor_raw_r0",'w') as rf:
lf.write(str(int(round(left_hz))) + "\n")
rf.write(str(int(round(right_hz))) + "\n")
except:
rospy.logerr("cannot write ti rtmotor_raw_*")
def callback_raw_freq(self,message):
self.set_raw_freq(message.left_hz,message.right_hz)
def callback_cmd_vel(self,message):
forward_hz = 80000.0*message.linear.x/(9*math.pi)
rot_hz = 400.0*message.angular.z/math.pi
self.set_raw_freq(forward_hz-rot_hz,forward_hz+rot_hz)
self.using_cmd_vel = True
self.last_time = rospy.Time.now()
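# Editor's worked example for callback_cmd_vel above (illustrative only; the
# constants encode the robot's wheel geometry and motor step resolution):
#   linear.x = 0.1 m/s, angular.z = 0     -> forward_hz = 80000*0.1/(9*pi) ~ 283 Hz
#                                            left = right ~ 283 Hz (drive straight)
#   linear.x = 0.1 m/s, angular.z = pi/4  -> rot_hz = 400*(pi/4)/pi = 100 Hz
#                                            left ~ 183 Hz, right ~ 383 Hz (turn left)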
if __name__ == '__main__':
rospy.init_node('motors')
m = Motor()
rate = rospy.Rate(10)
while not rospy.is_shutdown():
if m.using_cmd_vel and rospy.Time.now().to_sec() - m.last_time.to_sec() >= 1.0:
m.set_raw_freq(0,0)
m.using_cmd_vel = False
rate.sleep()
| 27.387097
| 82
| 0.703769
|
178a3e875d035354d05f81f1fd168e888fa83bbb
| 5,786
|
py
|
Python
|
JSONWireProtocol/AppiumVDC-SauceLabs.py
|
phillram/saucelabs-simple-python
|
b7ff7cbffff9ce041d30f9bb173aaf5f77b1cc6f
|
[
"MIT"
] | 2
|
2019-04-30T23:28:49.000Z
|
2019-10-04T01:10:16.000Z
|
JSONWireProtocol/AppiumVDC-SauceLabs.py
|
phillram/saucelabs-simple-python
|
b7ff7cbffff9ce041d30f9bb173aaf5f77b1cc6f
|
[
"MIT"
] | 1
|
2019-06-18T02:50:26.000Z
|
2019-06-18T02:50:26.000Z
|
JSONWireProtocol/AppiumVDC-SauceLabs.py
|
phillram/saucelabs-simple-python
|
b7ff7cbffff9ce041d30f9bb173aaf5f77b1cc6f
|
[
"MIT"
] | 2
|
2019-04-30T22:45:41.000Z
|
2019-06-14T21:29:55.000Z
|
####################################################################
# Skeleton for Appium Virtual Tests on Sauce Labs
####################################################################
###################################################################
# Imports that are good to use
###################################################################
from appium import webdriver
from time import sleep
import os
import urllib3
import json
import random
import sys
androidTest = False
iosTest = False
useApp = False
###################################################################
# Selenium with Python does not handle HTTPS verification cleanly and
# emits an "Unverified HTTPS request" warning.
# The following disables that warning to reduce clutter,
# but a properly verified request setup would be preferable.
###################################################################
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
###################################################################
# Pull a random Pokemon name to use as the test name
###################################################################
pokemon_names_url = urllib3.PoolManager().request('GET', 'https://raw.githubusercontent.com/sindresorhus/pokemon/master/data/en.json')
pokemon_names = json.loads(pokemon_names_url.data.decode('utf-8'))
random_pokemon = random.choice(pokemon_names)
###################################################################
# Choose whether you want Android or iOS capabilities
# Uncomment one of the two lines below
###################################################################
# androidTest = True
# iosTest = True
###################################################################
# Select Data Center
# Set region to 'US' or 'EU'
# Test will default to 'US' if left blank or set to anything other than 'US' or 'EU'
###################################################################
region = 'US'
###################################################################
# Uncomment if this is an app test
# Add in the location to the stored app too
###################################################################
# useApp = True
appLocation = 'sauce-storage:app.apk'
###################################################################
# Common parameters (desired capabilities)
###################################################################
projectParameters = {
'tags':['Case', 'NUM',],
# The following are not required
'name': random_pokemon,
# 'deviceOrientation' : 'portrait',
# 'appiumVersion': '1.16.0',
# 'autoAcceptAlerts':'true',
}
androidParameters = { # Define Android parameters here
'deviceName' : 'Android GoogleAPI Emulator',
'platformVersion' : '10.0',
'platformName' : 'Android',
}
iosParameters = { # Define iOS Parameters here
'deviceName' : 'iPhone X Simulator',
'platformVersion' : '13.0',
'platformName' : 'iOS',
# 'nativeWebTap': 'true',
# 'locationServicesEnabled':'true',
# 'locationServicesAuthorized':'true',
}
###################################################################
# Merge parameters into a single capability dictionary
###################################################################
sauceParameters = {}
sauceParameters.update(projectParameters)
sauceParameters.update({'build': '-'.join(projectParameters.get('tags'))}) # This concatenates the tags key above to add the build parameter
if androidTest != True and iosTest != True:
print('You need to specify a platform to test on!')
sys.exit()
elif androidTest == True and iosTest == True:
print('Don\'t be greedy! Only choose one platform!')
sys.exit()
elif androidTest:
sauceParameters.update(androidParameters)
if useApp:
sauceParameters['app'] = appLocation # Use app if it's specified
else:
sauceParameters['browserName'] = 'Chrome' # Otherwise use Chrome
#Note! Replace 'Chrome' with 'Browser' for older versions of Android to use the stock browser
elif iosTest:
sauceParameters.update(iosParameters)
if useApp:
sauceParameters['app'] = appLocation
else:
sauceParameters['browserName'] = 'safari'
###################################################################
# Connect to Sauce Labs
###################################################################
try:
region
except NameError:
region = 'US'
if region != 'EU':
print('You are using the US data center')
driver = webdriver.Remote(
command_executor='https://'+os.environ['SAUCE_USERNAME']+':'+os.environ['SAUCE_ACCESS_KEY']+'@ondemand.saucelabs.com:443/wd/hub',
desired_capabilities=sauceParameters)
elif region == 'EU':
print ('You are using the EU data center')
driver = webdriver.Remote(
command_executor='https://'+os.environ['SAUCE_USERNAME']+':'+os.environ['SAUCE_ACCESS_KEY']+'@ondemand.eu-central-1.saucelabs.com:443/wd/hub',
desired_capabilities=sauceParameters)
###################################################################
# Test logic goes here
###################################################################
# Navigating to a website
driver.get('https://www.google.com')
# Finding an element
interact = driver.find_element_by_name('q')
# Using the selected element
interact.send_keys('chupacabra')
interact.submit()
# interact.click()
# Saving an extra screenshot
# driver.save_screenshot('screenshot.png')
# Using Action chains
# ActionChains(driver).move_to_element(interact).perform()
# Sauce Labs specific executors
# driver.execute_script('sauce: break')
# driver.execute_script('sauce:context=Notes here')
# Setting the job status to passed
driver.execute_script('sauce:job-result=passed')
# Ending the test session
driver.quit()
| 36.389937
| 150
| 0.53664
|
e3291906efc42dc7baf459b6a7bf50a3c8833881
| 1,116
|
py
|
Python
|
DadosAbertosBrasil/_utils/get_data.py
|
GusFurtado/DadosAbertosBrasil
|
d8848f4c5c1107b3b67fd4ef73689541d06c3e28
|
[
"MIT"
] | 21
|
2020-06-08T22:58:33.000Z
|
2022-01-27T03:25:01.000Z
|
DadosAbertosBrasil/_utils/get_data.py
|
GusFurtado/DadosAbertosBrasil
|
d8848f4c5c1107b3b67fd4ef73689541d06c3e28
|
[
"MIT"
] | null | null | null |
DadosAbertosBrasil/_utils/get_data.py
|
GusFurtado/DadosAbertosBrasil
|
d8848f4c5c1107b3b67fd4ef73689541d06c3e28
|
[
"MIT"
] | 8
|
2021-02-19T19:28:01.000Z
|
2022-02-14T23:13:43.000Z
|
'''Helper that fetches data from the APIs.
It joins the endpoint, path and request parameters and returns the parsed JSON.
'''
from typing import Union
import requests
def get_data(
endpoint: str,
path: Union[str, list],
params: dict = None
) -> dict:
    '''Collects the requested data from the REST APIs.
    Parameters
    ----------
    endpoint : str
        Base URL of the desired API.
    path : list or str
        Path of parameters used to reach the desired function.
        It may be a string of parameters joined by slashes '/' or a list of
        strings in the correct order. Both forms produce the same result.
    params : dict (default=None)
        Dictionary of query parameters that will be sent with the request.
    Returns
    -------
    dict
        Raw data collected from the API.
    '''
if isinstance(path, list):
path = [str(p) for p in path]
path = '/'.join(path)
return requests.get(
url = endpoint + path,
headers = {'Accept':'application/json'},
params = params
).json()
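# Editor's illustrative usage (not part of the original module); the endpoint
# below is a placeholder, not an address guaranteed by this package:
#   data = get_data(
#       endpoint='https://api.example.gov.br/v1/',  # hypothetical base URL
#       path=['recursos', 123],                     # joined into 'recursos/123'
#       params={'ordem': 'ASC'},
#   )
#   # `data` is the decoded JSON response as a Python dict.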
| 24.26087
| 76
| 0.612007
|
eebad6c45b7bed279f8f306014d65dbcc832b79b
| 17,564
|
py
|
Python
|
tests/catalyst/callbacks/test_control_flow.py
|
sergunya17/catalyst
|
f98d71138c09cd1b5a69b788cb5006115f5c7fda
|
[
"Apache-2.0"
] | 2,693
|
2019-01-23T19:16:12.000Z
|
2022-03-31T02:12:42.000Z
|
tests/catalyst/callbacks/test_control_flow.py
|
Ran485/catalyst
|
84bc7576c981278f389279d87dda85dd66a758b6
|
[
"Apache-2.0"
] | 763
|
2019-01-22T20:12:56.000Z
|
2022-03-27T18:36:10.000Z
|
tests/catalyst/callbacks/test_control_flow.py
|
Ran485/catalyst
|
84bc7576c981278f389279d87dda85dd66a758b6
|
[
"Apache-2.0"
] | 445
|
2019-01-23T17:07:09.000Z
|
2022-03-30T05:38:45.000Z
|
# flake8: noqa
import random
import unittest
from unittest.mock import Mock
from catalyst.dl import Callback, CallbackOrder, ControlFlowCallback
class _Runner:
def __init__(self, stage, loader_key, global_epoch, epoch):
self.stage_key = stage
self.loader_key = loader_key
self.global_epoch_step = global_epoch
self.stage_epoch_step = epoch
class DummyCallback(Callback):
def __init__(self):
super().__init__(CallbackOrder.Internal)
class Dummy(Exception):
pass
def _raise(runner: "IRunner"):
raise Dummy()
class RaiserCallback(Callback):
def __init__(self, order, method_to_raise: str):
super().__init__(order)
setattr(self, method_to_raise, _raise)
def test_controll_flow_callback_filter_fn_periodical_epochs():
wraped = ControlFlowCallback(DummyCallback(), epochs=3)
mask = [i % 3 == 0 for i in range(1, 10 + 1)]
expected = {
"train": mask,
"valid": mask,
"another_loader": mask,
"like_valid": mask,
}
actual = {loader: [] for loader in expected.keys()}
for epoch in range(1, 10 + 1):
for loader in expected.keys():
runner = _Runner("stage", loader, epoch, epoch)
wraped.on_loader_start(runner)
actual[loader].append(wraped._is_enabled)
assert actual == expected
def test_controll_flow_callback_filter_fn_periodical_ignore_epochs():
wraped = ControlFlowCallback(DummyCallback(), ignore_epochs=4)
mask = [i % 4 != 0 for i in range(1, 10 + 1)]
expected = {
"train": mask,
"valid": mask,
"another_loader": mask,
"like_valid": mask,
}
actual = {loader: [] for loader in expected.keys()}
for epoch in range(1, 10 + 1):
for loader in expected.keys():
runner = _Runner("stage", loader, epoch, epoch)
wraped.on_loader_start(runner)
actual[loader].append(wraped._is_enabled)
assert actual == expected
def test_controll_flow_callback_filter_fn_epochs():
wraped = ControlFlowCallback(DummyCallback(), epochs=[3, 4, 6])
mask = [
False,
False,
True,
True,
False,
True,
False,
False,
False,
False,
]
expected = {
"train": mask,
"valid": mask,
}
actual = {loader: [] for loader in expected.keys()}
for epoch in range(1, 10 + 1):
for loader in expected.keys():
runner = _Runner("stage", loader, epoch, epoch)
wraped.on_loader_start(runner)
actual[loader].append(wraped._is_enabled)
assert actual == expected
def test_controll_flow_callback_filter_fn_global_epochs():
wraped = ControlFlowCallback(DummyCallback(), epochs=[3, 4, 7, 10], use_global_epochs=True)
mask = [
False,
False,
True,
True,
False,
False,
True,
False,
False,
True,
]
expected = {
"train": mask,
"valid": mask,
}
actual = {loader: [] for loader in expected.keys()}
for stage_num, stage in enumerate(["stage1", "stage2"]):
for epoch in range(1, 5 + 1):
for loader in expected.keys():
runner = _Runner(stage, loader, epoch + stage_num * 5, epoch)
wraped.on_loader_start(runner)
actual[loader].append(wraped._is_enabled)
assert actual == expected
def test_controll_flow_callback_filter_fn_ignore_epochs():
wraped = ControlFlowCallback(DummyCallback(), ignore_epochs=[3, 4, 6, 8])
mask = [
True,
True,
False,
False,
True,
False,
True,
False,
True,
True,
]
expected = {
"train": mask,
"valid": mask,
}
actual = {loader: [] for loader in expected.keys()}
for epoch in range(1, 10 + 1):
for loader in expected.keys():
runner = _Runner("stage", loader, epoch, epoch)
wraped.on_loader_start(runner)
actual[loader].append(wraped._is_enabled)
assert actual == expected
def test_controll_flow_callback_filter_fn_global_ignore_epochs():
wraped = ControlFlowCallback(
DummyCallback(), ignore_epochs=[3, 4, 7, 10], use_global_epochs=True
)
mask = [
True,
True,
False,
False,
True,
True,
False,
True,
True,
False,
]
expected = {
"train": mask,
"valid": mask,
}
actual = {loader: [] for loader in expected.keys()}
for stage_num, stage in enumerate(["stage1", "stage2"]):
for epoch in range(1, 5 + 1):
for loader in expected.keys():
runner = _Runner(stage, loader, epoch + stage_num * 5, epoch)
wraped.on_loader_start(runner)
actual[loader].append(wraped._is_enabled)
assert actual == expected
def test_control_flow_callback_filter_fn_loaders():
wraped = ControlFlowCallback(DummyCallback(), loaders=["valid"])
expected = {
"train": [False] * 5,
"valid": [True] * 5,
"another_loader": [False] * 5,
"like_valid": [False] * 5,
}
actual = {loader: [] for loader in expected.keys()}
for epoch in range(1, 5 + 1):
for loader in expected.keys():
runner = _Runner("stage", loader, epoch, epoch)
wraped.on_loader_start(runner)
actual[loader].append(wraped._is_enabled)
assert actual == expected
def test_control_flow_callback_filter_fn_ignore_loaders():
wraped = ControlFlowCallback(DummyCallback(), ignore_loaders=["valid", "another_loader"])
expected = {
"train": [True] * 5,
"valid": [False] * 5,
"another_loader": [False] * 5,
"like_valid": [True] * 5,
}
actual = {loader: [] for loader in expected.keys()}
for epoch in range(1, 5 + 1):
for loader in expected.keys():
runner = _Runner("stage", loader, epoch, epoch)
wraped.on_loader_start(runner)
actual[loader].append(wraped._is_enabled)
assert actual == expected
def test_control_flow_callback_filter_fn_multiple_epochs_loaders():
wraped = ControlFlowCallback(DummyCallback(), loaders={"valid": 3, "another_loader": [2, 4]})
expected = {
"train": [False] * 5,
"valid": [False, False, True, False, False],
"another_loader": [False, True, False, True, False],
"like_valid": [False] * 5,
}
actual = {loader: [] for loader in expected.keys()}
for epoch in range(1, 5 + 1):
for loader in expected.keys():
runner = _Runner("stage", loader, epoch, epoch)
wraped.on_loader_start(runner)
actual[loader].append(wraped._is_enabled)
assert actual == expected
def test_control_flow_callback_filter_fn_multiple_epochs_ignore_loaders():
wraped = ControlFlowCallback(
DummyCallback(), ignore_loaders={"valid": 3, "another_loader": [2, 4]}
)
expected = {
"train": [True] * 5,
"valid": [True, True, False, True, True],
"another_loader": [True, False, True, False, True],
"like_valid": [True] * 5,
}
actual = {loader: [] for loader in expected.keys()}
for epoch in range(1, 5 + 1):
for loader in expected.keys():
runner = _Runner("stage", loader, epoch, epoch)
wraped.on_loader_start(runner)
actual[loader].append(wraped._is_enabled)
assert actual == expected
def test_control_flow_callback_filter_fn_string_lambda():
wraped = ControlFlowCallback(
DummyCallback(), filter_fn="lambda stage, epoch, loader: 'valid' in loader"
)
expected = {
"train": [False] * 5,
"valid": [True] * 5,
"another_loader": [False] * 5,
"like_valid": [True] * 5,
}
actual = {loader: [] for loader in expected.keys()}
for epoch in range(1, 5 + 1):
for loader in expected.keys():
runner = _Runner("stage", loader, epoch, epoch)
wraped.on_loader_start(runner)
actual[loader].append(wraped._is_enabled)
assert actual == expected
def test_control_flow_callback_filter_fn_lambda():
wraped = ControlFlowCallback(
DummyCallback(), filter_fn=lambda stage, epoch, loader: "valid" not in loader
)
expected = {
"train": [True] * 5,
"valid": [False] * 5,
"another_loader": [True] * 5,
"like_valid": [False] * 5,
}
actual = {loader: [] for loader in expected.keys()}
for epoch in range(1, 5 + 1):
for loader in expected.keys():
runner = _Runner("stage", loader, epoch, epoch)
wraped.on_loader_start(runner)
actual[loader].append(wraped._is_enabled)
assert actual == expected
class TestControlFlowCallback(unittest.TestCase):
def test_with_missing_args(self):
orders = (
CallbackOrder.Internal,
CallbackOrder.Metric,
CallbackOrder.MetricAggregation,
CallbackOrder.Optimizer,
CallbackOrder.Scheduler,
CallbackOrder.External,
)
for order in orders:
callback = RaiserCallback(order, "on_epoch_start")
with self.assertRaises(ValueError):
ControlFlowCallback(callback)
def test_epochs_with_wrong_args(self):
orders = (
CallbackOrder.Internal,
CallbackOrder.Metric,
CallbackOrder.MetricAggregation,
CallbackOrder.Optimizer,
CallbackOrder.Scheduler,
CallbackOrder.External,
)
order = random.choice(orders)
callback = RaiserCallback(order, "on_epoch_start")
with self.assertRaises(ValueError):
ControlFlowCallback(callback, epochs=None)
with self.assertRaises(ValueError):
ControlFlowCallback(callback, epochs="123456")
def test_ignore_epochs_with_wrong_args(self):
orders = (
CallbackOrder.Internal,
CallbackOrder.Metric,
CallbackOrder.MetricAggregation,
CallbackOrder.Optimizer,
CallbackOrder.Scheduler,
CallbackOrder.External,
)
order = random.choice(orders)
callback = RaiserCallback(order, "on_epoch_start")
with self.assertRaises(ValueError):
ControlFlowCallback(callback, ignore_epochs=None)
with self.assertRaises(ValueError):
ControlFlowCallback(callback, ignore_epochs="123456")
def test_loaders_with_wrong_args(self):
orders = (
CallbackOrder.Internal,
CallbackOrder.Metric,
CallbackOrder.MetricAggregation,
CallbackOrder.Optimizer,
CallbackOrder.Scheduler,
CallbackOrder.External,
)
order = random.choice(orders)
callback = RaiserCallback(order, "on_epoch_start")
with self.assertRaises(ValueError):
ControlFlowCallback(callback, loaders=1234.56)
with self.assertRaises(ValueError):
ControlFlowCallback(callback, loaders=1234.56)
with self.assertRaises(ValueError):
ControlFlowCallback(callback, loaders={"train": ["", "fjdskjfdk", "1234"]})
def test_ignore_loaders_with_wrong_args(self):
orders = (
CallbackOrder.Internal,
CallbackOrder.Metric,
CallbackOrder.MetricAggregation,
CallbackOrder.Optimizer,
CallbackOrder.Scheduler,
CallbackOrder.External,
)
order = random.choice(orders)
callback = RaiserCallback(order, "on_epoch_start")
with self.assertRaises(ValueError):
ControlFlowCallback(callback, ignore_loaders=1234.56)
with self.assertRaises(ValueError):
ControlFlowCallback(callback, ignore_loaders=1234.56)
with self.assertRaises(ValueError):
ControlFlowCallback(callback, ignore_loaders={"train": ["", "fjdskjfdk", "1234"]})
def test_ignore_foo_with_wrong_args(self):
orders = (
CallbackOrder.Internal,
CallbackOrder.Metric,
CallbackOrder.MetricAggregation,
CallbackOrder.Optimizer,
CallbackOrder.Scheduler,
CallbackOrder.External,
)
order = random.choice(orders)
callback = RaiserCallback(order, "on_epoch_start")
with self.assertRaises(ValueError):
ControlFlowCallback(callback, filter_fn=12345)
with self.assertRaises(ValueError):
ControlFlowCallback(callback, filter_fn=lambda arg: True)
with self.assertRaises(ValueError):
ControlFlowCallback(callback, filter_fn=lambda *args: True)
with self.assertRaises(ValueError):
ControlFlowCallback(callback, filter_fn=lambda one, two, three, four: True)
with self.assertRaises(ValueError):
ControlFlowCallback(callback, filter_fn=lambda *args, **kwargs: True)
def test_filter_fn_with_wrong_args(self):
runner = Mock(stage="stage1", loader_key="train", epoch=1)
orders = (
CallbackOrder.Internal,
CallbackOrder.Metric,
CallbackOrder.MetricAggregation,
CallbackOrder.Optimizer,
CallbackOrder.Scheduler,
CallbackOrder.External,
)
def _ignore_foo(stage: str, epoch: int, loader: str) -> bool:
return False
def _raise_foo(stage: str, epoch: int, loader: str) -> bool:
return True
for order in orders:
callback = RaiserCallback(order, "on_loader_start")
wrapper = ControlFlowCallback(callback, filter_fn=_ignore_foo)
wrapper.on_loader_start(runner)
callback = RaiserCallback(order, "on_loader_start")
wrapper = ControlFlowCallback(callback, filter_fn=_raise_foo)
with self.assertRaises(Dummy):
wrapper.on_loader_start(runner)
events = (
"on_loader_end",
"on_stage_start",
"on_stage_end",
"on_epoch_start",
"on_epoch_end",
"on_batch_start",
"on_batch_end",
"on_exception",
)
for event in events:
for order in orders:
callback = RaiserCallback(order, event)
wrapper = ControlFlowCallback(callback, filter_fn=_ignore_foo)
wrapper.on_loader_start(runner)
wrapper.__getattribute__(event)(runner)
callback = RaiserCallback(order, event)
wrapper = ControlFlowCallback(callback, filter_fn=_raise_foo)
wrapper.on_loader_start(runner)
with self.assertRaises(Dummy):
wrapper.__getattribute__(event)(runner)
def test_filter_fn_with_eval(self):
runner = Mock(stage="stage1", loader_key="train", epoch=1)
orders = (
CallbackOrder.Internal,
CallbackOrder.Metric,
CallbackOrder.MetricAggregation,
CallbackOrder.Optimizer,
CallbackOrder.Scheduler,
CallbackOrder.External,
)
for order in orders:
callback = RaiserCallback(order, "on_loader_start")
wrapper = ControlFlowCallback(callback, filter_fn="lambda s, e, l: False")
wrapper.on_loader_start(runner)
callback = RaiserCallback(order, "on_loader_start")
wrapper = ControlFlowCallback(callback, filter_fn="lambda s, e, l: True")
with self.assertRaises(Dummy):
wrapper.on_loader_start(runner)
events = (
"on_loader_end",
"on_stage_start",
"on_stage_end",
"on_epoch_start",
"on_epoch_end",
"on_batch_start",
"on_batch_end",
"on_exception",
)
for event in events:
for order in orders:
callback = RaiserCallback(order, event)
wrapper = ControlFlowCallback(callback, filter_fn="lambda s, e, l: False")
wrapper.on_loader_start(runner)
wrapper.__getattribute__(event)(runner)
callback = RaiserCallback(order, event)
wrapper = ControlFlowCallback(callback, filter_fn="lambda s, e, l: True")
wrapper.on_loader_start(runner)
with self.assertRaises(Dummy):
wrapper.__getattribute__(event)(runner)
def test_filter_fn_with_err_in_eval(self):
orders = (
CallbackOrder.Internal,
CallbackOrder.Metric,
CallbackOrder.MetricAggregation,
CallbackOrder.Optimizer,
CallbackOrder.Scheduler,
CallbackOrder.External,
)
events = (
"on_loader_start",
"on_loader_end",
"on_stage_start",
"on_stage_end",
"on_epoch_start",
"on_epoch_end",
"on_batch_start",
"on_batch_end",
"on_exception",
)
for event in events:
for order in orders:
callback = RaiserCallback(order, event)
with self.assertRaises(ValueError):
ControlFlowCallback(callback, filter_fn="lambda s, e, l")
| 32.227523
| 97
| 0.59713
|
c94ca62d3a9fb49a50f8afdc718c75b9a5015405
| 1,605
|
py
|
Python
|
src/ssl/create_ssl.py
|
snikch/chia-blockchain
|
5f2000dbaf854deb7c0c7654d1ee6a84e06e233c
|
[
"Apache-2.0"
] | 1
|
2021-04-12T09:10:51.000Z
|
2021-04-12T09:10:51.000Z
|
src/ssl/create_ssl.py
|
snikch/chia-blockchain
|
5f2000dbaf854deb7c0c7654d1ee6a84e06e233c
|
[
"Apache-2.0"
] | 1
|
2022-03-25T19:11:21.000Z
|
2022-03-25T19:11:21.000Z
|
src/ssl/create_ssl.py
|
fakecoinbase/Chia-Networkslashchia-blockchain
|
84e6a4da18fb0a790a870cbd516f13c9bc7f0716
|
[
"Apache-2.0"
] | 1
|
2022-01-26T11:57:29.000Z
|
2022-01-26T11:57:29.000Z
|
from datetime import datetime, timedelta
from cryptography import x509
from cryptography.x509.oid import NameOID
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
def generate_selfsigned_cert(hostname="/CN=Chia Blockchain CA", key=None):
"""Generates self signed certificate for a hostname, and optional IP addresses."""
# Generate our key
if key is None:
key = rsa.generate_private_key(
public_exponent=65537, key_size=2048, backend=default_backend(),
)
name = x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, hostname)])
# path_len=0 means this cert can only sign itself, not other certs.
    basic_constraints = x509.BasicConstraints(ca=True, path_length=0)
now = datetime.utcnow()
cert = (
x509.CertificateBuilder()
.subject_name(name)
.issuer_name(name)
.public_key(key.public_key())
.serial_number(1000)
.not_valid_before(now)
.not_valid_after(now + timedelta(days=10 * 365))
        .add_extension(basic_constraints, False)
.sign(key, hashes.SHA256(), default_backend())
)
cert_pem = cert.public_bytes(encoding=serialization.Encoding.PEM)
key_pem = key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption(),
)
return cert_pem.decode(), key_pem.decode()
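# Editor's illustrative usage (not part of the original module); file names are
# arbitrary examples:
#   cert_pem, key_pem = generate_selfsigned_cert()
#   with open("ca.crt", "w") as f:
#       f.write(cert_pem)
#   with open("ca.key", "w") as f:
#       f.write(key_pem)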
| 37.325581
| 86
| 0.722741
|
d586bfd8e0f6f7f5703181913b97a8dc306e042a
| 783
|
py
|
Python
|
app/__init__.py
|
jonnygovish/pitch-hub
|
16fc71394cc0ff8edfebd760accb13a004507a8c
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
jonnygovish/pitch-hub
|
16fc71394cc0ff8edfebd760accb13a004507a8c
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
jonnygovish/pitch-hub
|
16fc71394cc0ff8edfebd760accb13a004507a8c
|
[
"MIT"
] | null | null | null |
from flask import Flask
from flask_bootstrap import Bootstrap
from config import config_options
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
bootstrap = Bootstrap()
db = SQLAlchemy()
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
def create_app(config_name):
app = Flask(__name__)
#creating app configuration
app.config.from_object(config_options[config_name])
#Initializing flask extensions
bootstrap.init_app(app)
db.init_app(app)
login_manager.init_app(app)
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
from .auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint,url_prefix ='/auth')
return app
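# Editor's illustrative usage (not part of the original module); the factory is
# expected to be called with one of the keys defined in config_options, e.g.:
#   app = create_app('development')  # 'development' is an assumed config key
#   app.run()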
| 25.258065
| 60
| 0.805875
|
aa412877d3124f1b364bf9931a469c9b0985d902
| 8,515
|
py
|
Python
|
resources/user.py
|
coulbyl/e-vent-hub
|
b65db41142e9153e11d721abfca4bc688bb654a5
|
[
"MIT"
] | null | null | null |
resources/user.py
|
coulbyl/e-vent-hub
|
b65db41142e9153e11d721abfca4bc688bb654a5
|
[
"MIT"
] | 1
|
2021-05-17T12:33:59.000Z
|
2021-05-17T12:33:59.000Z
|
resources/user.py
|
coulbyl/e-vent-hub
|
b65db41142e9153e11d721abfca4bc688bb654a5
|
[
"MIT"
] | 1
|
2021-05-27T12:24:57.000Z
|
2021-05-27T12:24:57.000Z
|
import functools
from flask_restful import Resource, abort
from flask_jwt_extended import (
get_jwt, jwt_required, create_access_token, create_refresh_token,
get_jwt_identity
)
from models.user import UserModel
from models.event import EventModel
from models.token import TokenBlockList
from parsers.user import post_parser, put_parser, reset_parser, login_parser
from parsers.event import active_parser
from werkzeug.security import check_password_hash, safe_str_cmp, generate_password_hash
from datetime import datetime
from .admin import admin_required
from utils import remove_file_upload, saveFileUploaded, UPLOAD_FOLDER
# Message
from resources import (
ACCOUNT_DOES_NOT_EXIST, ACCOUNT_ALREADY_EXISTS, ACCOUNT_SUCCESSFULLY_CREATED,
ACCOUNT_SUCCESSFULLY_DELETED, ACCOUNT_SUCCESSFULLY_UPDATED, EVENT_DOES_NOT_EXIST,
EXTENTION_ERROR, INVALIDCREDENTIALS, SERVER_ERROR)
def client_required(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
claims = get_jwt_identity()
if not claims['client']:
abort(401, message="Client privilege required.")
return func(*args, **kwargs)
return wrapper
class UserRegister(Resource):
""" /user/register - Register a new user."""
@classmethod
def post(cls):
data = post_parser.parse_args(strict=True)
if UserModel.find_by_email(email=data.email):
abort(400, message=ACCOUNT_ALREADY_EXISTS)
user = UserModel(**data)
if data['photo']:
response = saveFileUploaded(data['photo'], 'client')
print(response)
if response is None:
abort(400, message=EXTENTION_ERROR)
user.photo = response
try:
user.save()
access_token = create_access_token(identity=user._uuid, fresh=True)
refresh_token = create_refresh_token(identity=user._uuid)
return {
'user': user.json(),
'token': {'access_token': access_token, 'refresh_token': refresh_token},
'message': ACCOUNT_SUCCESSFULLY_CREATED
}, 201
except Exception as e:
abort(500, message=SERVER_ERROR.format(type(e).__name__))
class UserFavouriteEvent(Resource):
""" /user/favourite-event/<int:user_id>/<int:event_id> - """
@classmethod
@jwt_required()
@client_required
def post(cls, user_id: int, event_id: int):
user = UserModel.find_by_id(_id=user_id)
if user:
event = EventModel.find_by_id(_id=event_id)
if event:
try:
user.add_favourite(event)
return {"message": "Événement ajouté à votre liste de favoris."}, 201
except Exception as e:
abort(500, message=SERVER_ERROR.format(type(e).__name__))
abort(404, message=EVENT_DOES_NOT_EXIST)
abort(404, message=ACCOUNT_DOES_NOT_EXIST)
@classmethod
@jwt_required()
@client_required
def delete(cls, user_id: int, event_id: int):
user = UserModel.find_by_id(_id=user_id)
if user:
event = EventModel.find_by_id(_id=event_id)
if event:
try:
user.remove_favourite(event)
return {"message": "Événement retiré à votre liste de favoris."}, 201
except Exception as e:
abort(500, message=SERVER_ERROR.format(type(e).__name__))
abort(404, message=EVENT_DOES_NOT_EXIST)
abort(404, message=ACCOUNT_DOES_NOT_EXIST)
class User(Resource):
@classmethod
@jwt_required()
def get(cls, _id: int):
""" /user/<id> - Get a user."""
user = UserModel.find_by_id(_id=_id)
if not user:
abort(404, message=ACCOUNT_DOES_NOT_EXIST)
return user.json()
@classmethod
@jwt_required()
@client_required
def put(cls, _id: int):
""" /user/<id> - Update a user."""
user_found = UserModel.find_by_id(_id=_id)
if user_found:
existing_photo = user_found.photo
data = put_parser.parse_args(strict=True)
user_found.firstname = data.firstname
user_found.lastname = data.lastname
user_found.email = data.email
user_found.contacts = data.contacts
if data['photo']:
response = saveFileUploaded(data['photo'], 'client')
if response is None:
abort(400, message=EXTENTION_ERROR)
user_found.photo = response
remove_file_upload(f"{UPLOAD_FOLDER}/client/{existing_photo}")
user_found.updated_at = datetime.utcnow()
try:
user_found.save()
return {'message': ACCOUNT_SUCCESSFULLY_UPDATED}
except Exception as e:
abort(500, message=SERVER_ERROR.format(type(e).__name__))
abort(400, message=ACCOUNT_DOES_NOT_EXIST)
@classmethod
@jwt_required()
@client_required
def delete(cls, _id: int):
""" /user/<id> - Delete a user."""
user = UserModel.find_by_id(_id=_id)
if user:
try:
user.deleted = True
user.save()
return {'message': ACCOUNT_SUCCESSFULLY_DELETED}
except Exception as e:
abort(500, message=SERVER_ERROR.format(type(e).__name__))
abort(400, message=ACCOUNT_DOES_NOT_EXIST)
class UserList(Resource):
""" /users - Get all users - (superuser)"""
    @classmethod
# @jwt_required() # admin claims
def get(cls):
return {'users': [user.json() for user in UserModel.find_all()]}
class UserPasswordReset(Resource):
""" /user/reset-password/<_id> - Reset user password"""
@classmethod
@jwt_required()
@client_required
def put(cls, _id: int):
user_found = UserModel.find_by_id(_id=_id)
if user_found:
data = reset_parser.parse_args(strict=True)
is_same = check_password_hash(user_found.password, data.old_password)
if is_same and safe_str_cmp(data.new_password, data.confirm_password):
user_found.password = generate_password_hash(data.new_password)
user_found.updated_at = datetime.utcnow()
user_found.save()
return {'message': 'Mot de passe réinitialisé avec succès.'}
abort(400, message="Un problème est survenu. Vérifiez votre mot de passe.")
abort(400, message=ACCOUNT_DOES_NOT_EXIST)
class UserLogin(Resource):
""" /user/login - Login a user """
@classmethod
def post(cls):
data: dict = login_parser.parse_args()
user = UserModel.find_by_email(email=data.email)
if user and check_password_hash(user.password, data.password):
access_token = create_access_token(identity=user._uuid, fresh=True)
refresh_token = create_refresh_token(identity=user._uuid)
return {
"user": user.json(),
"token": {"access_token": access_token, "refresh_token": refresh_token}
}
abort(401, message=INVALIDCREDENTIALS)
class Logout(Resource):
""" /logout - Logout a user """
@classmethod
@jwt_required()
def delete(cls):
jti = get_jwt()['jti']
current_token = TokenBlockList(jti=jti)
current_token.save()
return {"message": "JWT révoqué et déconnexion de l'utilisateur réussie !"}
class TokenRefresh(Resource):
""" /refresh - Refresh a token """
@classmethod
@jwt_required(refresh=True)
def get(cls):
current_user = get_jwt_identity()
new_token = create_access_token(identity=current_user, fresh=False)
return {'access_token': new_token}
class UserActivation(Resource):
""" /user/activation/<id> - Activate or deactivate public user"""""
@classmethod
@jwt_required()
@admin_required
def put(cls, _id: int):
args = active_parser.parse_args(strict=True)
user = UserModel.find_without_active(_id)
if user:
user.active = args.active
user.updated_at = datetime.utcnow()
try:
user.save()
return {'message': ACCOUNT_SUCCESSFULLY_UPDATED}
except Exception as e:
abort(500, message=SERVER_ERROR.format(type(e).__name__))
abort(400, message=ACCOUNT_DOES_NOT_EXIST)
| 36.234043
| 89
| 0.627481
|
90153c20cbd9c7ef7e7219cfcf25dbb8f4be9541
| 15,955
|
py
|
Python
|
python2.7/site-packages/twisted/web/woven/model.py
|
84KaliPleXon3/sslstrip-hsts-openwrt
|
f875ded48078a3ed84bffef1e69dcbeaf2e77ae3
|
[
"MIT"
] | 19
|
2015-05-01T19:59:03.000Z
|
2021-12-09T08:03:16.000Z
|
python2.7/site-packages/twisted/web/woven/model.py
|
84KaliPleXon3/sslstrip-hsts-openwrt
|
f875ded48078a3ed84bffef1e69dcbeaf2e77ae3
|
[
"MIT"
] | 1
|
2018-01-03T15:26:49.000Z
|
2018-01-03T15:26:49.000Z
|
python2.7/site-packages/twisted/web/woven/model.py
|
84KaliPleXon3/sslstrip-hsts-openwrt
|
f875ded48078a3ed84bffef1e69dcbeaf2e77ae3
|
[
"MIT"
] | 30
|
2015-03-25T19:40:07.000Z
|
2021-05-28T22:59:26.000Z
|
# -*- test-case-name: twisted.web.test.test_woven -*-
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
__version__ = "$Revision: 1.53 $"[11:-2]
import types
import weakref
import warnings
from zope.interface import implements
from twisted.python import components, reflect
from twisted.internet import defer
from twisted.web.woven import interfaces
class _Nothing: pass
def adaptToIModel(m, parent=None, submodel=None):
adapted = interfaces.IModel(m, None)
if adapted is None:
adapted = Wrapper(m)
adapted.parent = parent
adapted.name = submodel
return adapted
class Model:
"""
A Model which keeps track of views which are looking at it in order
to notify them when the model changes.
"""
implements(interfaces.IModel)
def __init__(self, *args, **kwargs):
if len(args):
self.original = args[0]
else:
self.original = self
self.name = ''
self.parent = None
self.views = []
self.subviews = {}
self.submodels = {}
self._getter = kwargs.get('getter')
self._setter = kwargs.get('setter')
self.cachedFor = None
self.initialize(*args, **kwargs)
def __getstate__(self):
self.views = []
self.subviews = {}
self.submodels = {}
return self.__dict__
def invalidateCache(self):
"""Invalidate the cache for this object, so the next time
        getData is called, its getter method is called again.
"""
self.cachedFor = None
def initialize(self, *args, **kwargs):
"""
Hook for subclasses to initialize themselves without having to
mess with the __init__ chain.
"""
pass
def addView(self, view):
"""
Add a view for the model to keep track of.
"""
if view not in [ref() for ref in self.views]:
self.views.append(weakref.ref(view))
def addSubview(self, name, subview):
subviewList = self.subviews.get(name, [])
subviewList.append(weakref.ref(subview))
self.subviews[name] = subviewList
def removeView(self, view):
"""
Remove a view that the model no longer should keep track of.
"""
# AM: loop on a _copy_ of the list, since we're changing it!!!
for weakref in list(self.views):
ref = weakref()
if ref is view or ref is None:
self.views.remove(weakref)
def setGetter(self, getter):
self._getter = getter
def setSetter(self, setter):
self._setter = setter
def notify(self, changed=None):
"""
Notify all views that something was changed on me.
Passing a dictionary of {'attribute': 'new value'} in changed
will pass this dictionary to the view for increased performance.
If you don't want to do this, don't, and just use the traditional
MVC paradigm of querying the model for things you're interested
in.
"""
self.cachedFor = None
if changed is None: changed = {}
retVal = []
# AM: loop on a _copy_ of the list, since we're changing it!!!
for view in list(self.views):
ref = view()
if ref is not None:
retVal.append((ref, ref.modelChanged(changed)))
else:
self.views.remove(view)
for key, value in self.subviews.items():
if value.wantsAllNotifications or changed.has_key(key):
for item in list(value):
ref = item()
if ref is not None:
retVal.append((ref, ref.modelChanged(changed)))
else:
value.remove(item)
return retVal
protected_names = ['initialize', 'addView', 'addSubview', 'removeView', 'notify', 'getSubmodel', 'setSubmodel', 'getData', 'setData']
allowed_names = []
def lookupSubmodel(self, request, submodelName):
"""
Look up a full submodel name. I will split on `/' and call
L{getSubmodel} on each element in the 'path'.
Override me if you don't want 'traversing'-style lookup, but
would rather like to look up a model based on the entire model
name specified.
If you override me to return Deferreds, make sure I look up
values in a cache (created by L{setSubmodel}) before doing a
regular Deferred lookup.
XXX: Move bits of this docstring to interfaces.py
"""
if not submodelName:
return None
# Special case: If the first character is /
# Start at the bottom of the model stack
currentModel = self
if submodelName[0] == '/':
while currentModel.parent is not None:
currentModel = currentModel.parent
submodelName = submodelName[1:]
submodelList = submodelName.split('/') #[:-1]
# print "submodelList", submodelList
for element in submodelList:
if element == '.' or element == '':
continue
elif element == '..':
currentModel = currentModel.parent
else:
currentModel = currentModel.getSubmodel(request, element)
if currentModel is None:
return None
return currentModel
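    # Editor's note (illustrative): example submodel paths handled above --
    #   "a/b/c"  -> getSubmodel('a'), then 'b', then 'c' on each result
    #   "/x"     -> walk up self.parent to the root model, then getSubmodel('x')
    #   "../y"   -> one level up via self.parent, then getSubmodel('y')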
def submodelCheck(self, request, name):
"""Check if a submodel name is allowed. Subclass me to implement a
name security policy.
"""
if self.allowed_names:
return (name in self.allowed_names)
else:
return (name and name[0] != '_' and name not in self.protected_names)
def submodelFactory(self, request, name):
warnings.warn("Warning: default Model lookup strategy is changing:"
"use either AttributeModel or MethodModel for now.",
DeprecationWarning)
if hasattr(self, name):
return getattr(self, name)
else:
return None
def getSubmodel(self, request, name):
"""
Get the submodel `name' of this model. If I ever return a
Deferred, then I ought to check for cached values (created by
L{setSubmodel}) before doing a regular Deferred lookup.
"""
if self.submodels.has_key(name):
return self.submodels[name]
if not self.submodelCheck(request, name):
return None
m = self.submodelFactory(request, name)
if m is None:
return None
sm = adaptToIModel(m, self, name)
self.submodels[name] = sm
return sm
def setSubmodel(self, request=None, name=None, value=None):
"""
Set a submodel on this model. If getSubmodel or lookupSubmodel
ever return a Deferred, I ought to set this in a place that
lookupSubmodel/getSubmodel know about, so they can use it as a
cache.
"""
if self.submodelCheck(request, name):
if self.submodels.has_key(name):
del self.submodels[name]
setattr(self, name, value)
def dataWillChange(self):
pass
def getData(self, request):
if self.cachedFor != id(request) and self._getter is not None:
self.cachedFor = id(request)
self.dataWillChange()
self.orig = self.original = self._getter(request)
return self.original
def setData(self, request, data):
if self._setter is not None:
self.cachedFor = None
return self._setter(request, data)
else:
if hasattr(self, 'parent') and self.parent:
self.parent.setSubmodel(request, self.name, data)
self.orig = self.original = data
class MethodModel(Model):
"""Look up submodels with wmfactory_* methods.
"""
def submodelCheck(self, request, name):
"""Allow any submodel for which I have a submodel.
"""
return hasattr(self, "wmfactory_"+name)
def submodelFactory(self, request, name):
"""Call a wmfactory_name method on this model.
"""
meth = getattr(self, "wmfactory_"+name)
return meth(request)
def getSubmodel(self, request=None, name=None):
if name is None:
warnings.warn("Warning! getSubmodel should now take the request as the first argument")
name = request
request = None
cached = self.submodels.has_key(name)
sm = Model.getSubmodel(self, request, name)
if sm is not None:
if not cached:
sm.cachedFor = id(request)
sm._getter = getattr(self, "wmfactory_"+name)
return sm
class AttributeModel(Model):
"""Look up submodels as attributes with hosts.allow/deny-style security.
"""
def submodelFactory(self, request, name):
if hasattr(self, name):
return getattr(self, name)
else:
return None
#backwards compatibility
WModel = Model
class Wrapper(Model):
"""
I'm a generic wrapper to provide limited interaction with the
Woven models and submodels.
"""
parent = None
name = None
def __init__(self, orig):
Model.__init__(self)
self.orig = self.original = orig
def dataWillChange(self):
pass
def __repr__(self):
myLongName = reflect.qual(self.__class__)
return "<%s instance at 0x%x: wrapped data: %s>" % (myLongName,
id(self), self.original)
class ListModel(Wrapper):
"""
I wrap a Python list and allow it to interact with the Woven
models and submodels.
"""
def dataWillChange(self):
self.submodels = {}
def getSubmodel(self, request=None, name=None):
if name is None and type(request) is type(""):
warnings.warn("Warning!")
name = request
request = None
if self.submodels.has_key(name):
return self.submodels[name]
orig = self.original
try:
i = int(name)
except (ValueError, TypeError):
return None
if i >= len(orig):
return None
sm = adaptToIModel(orig[i], self, name)
self.submodels[name] = sm
return sm
def setSubmodel(self, request=None, name=None, value=None):
if value is None:
warnings.warn("Warning!")
value = name
name = request
request = None
self.original[int(name)] = value
def __len__(self):
return len(self.original)
def __getitem__(self, name):
return self.getSubmodel(None, str(name))
def __setitem__(self, name, value):
self.setSubmodel(None, str(name), value)
def __repr__(self):
myLongName = reflect.qual(self.__class__)
return "<%s instance at 0x%x: wrapped data: %s>" % (myLongName,
id(self), self.original)
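# Hedged illustrative sketch, not part of the original source: how ListModel's
# index-based submodel lookup behaves. The variable names are invented.
def _list_model_example():
    fruits = ListModel(["apple", "banana", "cherry"])
    # __getitem__ delegates to getSubmodel with the stringified index, so each
    # element comes back wrapped as an IModel (a StringModel for these strings).
    first = fruits[0]
    # __setitem__ writes straight through to the wrapped list via setSubmodel.
    fruits[1] = "blueberry"
    return first, len(fruits)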
class StringModel(ListModel):
""" I wrap a Python string and allow it to interact with the Woven models
and submodels. """
def setSubmodel(self, request=None, name=None, value=None):
raise ValueError("Strings are immutable.")
# pyPgSQL returns "PgResultSet" instances instead of lists, which look, act
# and breathe just like lists. pyPgSQL really shouldn't do this, but this works
try:
from pyPgSQL import PgSQL
components.registerAdapter(ListModel, PgSQL.PgResultSet, interfaces.IModel)
except (ImportError, ValueError):
pass
class DictionaryModel(Wrapper):
"""
I wrap a Python dictionary and allow it to interact with the Woven
models and submodels.
"""
def dataWillChange(self):
self.submodels = {}
def getSubmodel(self, request=None, name=None):
if name is None and type(request) is type(""):
warnings.warn("getSubmodel must get a request argument now")
name = request
request = None
if self.submodels.has_key(name):
return self.submodels[name]
orig = self.original
if name not in orig:
return None
sm = adaptToIModel(orig[name], self, name)
self.submodels[name] = sm
return sm
def setSubmodel(self, request=None, name=None, value=None):
if value is None:
warnings.warn("Warning!")
value = name
name = request
request = None
self.original[name] = value
class AttributeWrapper(Wrapper):
"""
I wrap an attribute named "name" of the given parent object.
"""
def __init__(self, parent, name):
self.original = None
parent = ObjectWrapper(parent)
Wrapper.__init__(self, parent.getSubmodel(None, name))
self.parent = parent
self.name = name
class ObjectWrapper(Wrapper):
"""
I may wrap an object and allow it to interact with the Woven models
and submodels. By default, I am not registered for use with anything.
"""
def getSubmodel(self, request=None, name=None):
if name is None and type(request) is type(""):
warnings.warn("Warning!")
name = request
request = None
if self.submodels.has_key(name):
return self.submodels[name]
sm = adaptToIModel(getattr(self.original, name), self, name)
self.submodels[name] = sm
return sm
def setSubmodel(self, request=None, name=None, value=None):
if value is None:
warnings.warn("Warning!")
value = name
name = request
request = None
setattr(self.original, name, value)
class UnsafeObjectWrapper(ObjectWrapper):
"""
I may wrap an object and allow it to interact with the Woven models
and submodels. By default, I am not registered for use with anything.
I am unsafe because I allow methods to be called. In fact, I am
dangerously unsafe. Be wary or I will kill your security model!
"""
def getSubmodel(self, request=None, name=None):
if name is None and type(request) is type(""):
warnings.warn("Warning!")
name = request
request = None
if self.submodels.has_key(name):
return self.submodels[name]
value = getattr(self.original, name)
if callable(value):
return value()
sm = adaptToIModel(value, self, name)
self.submodels[name] = sm
return sm
class DeferredWrapper(Wrapper):
def setData(self, request=None, data=_Nothing):
if data is _Nothing:
warnings.warn("setData should be called with request as first arg")
data = request
request = None
if isinstance(data, defer.Deferred):
self.original = data
else:
views, subviews = self.views, self.subviews
new = adaptToIModel(data, self.parent, self.name)
self.__class__ = new.__class__
self.__dict__ = new.__dict__
self.views, self.subviews = views, subviews
class Link(AttributeModel):
def __init__(self, href, text):
AttributeModel.__init__(self)
self.href = href
self.text = text
try:
components.registerAdapter(StringModel, types.StringType, interfaces.IModel)
components.registerAdapter(ListModel, types.ListType, interfaces.IModel)
components.registerAdapter(ListModel, types.TupleType, interfaces.IModel)
components.registerAdapter(DictionaryModel, types.DictionaryType, interfaces.IModel)
components.registerAdapter(DeferredWrapper, defer.Deferred, interfaces.IModel)
components.registerAdapter(DeferredWrapper, defer.DeferredList, interfaces.IModel)
except ValueError:
# The adapters were already registered
pass
| 32.694672
| 137
| 0.600752
|
9e27275a98face7d8fed9e80ae8333ef7012e31c
| 2,599
|
py
|
Python
|
selim/test.py
|
tilacyn/dsb2018_topcoders
|
e0f95ef70bc062d4dea321d2aa73231a9538cd63
|
[
"MIT"
] | null | null | null |
selim/test.py
|
tilacyn/dsb2018_topcoders
|
e0f95ef70bc062d4dea321d2aa73231a9538cd63
|
[
"MIT"
] | null | null | null |
selim/test.py
|
tilacyn/dsb2018_topcoders
|
e0f95ef70bc062d4dea321d2aa73231a9538cd63
|
[
"MIT"
] | null | null | null |
import cv2
from models.model_factory import make_model
from os.path import join as opjoin
import numpy as np
from matplotlib import pyplot as plt
def dice(y_true, y_pred):
intersection = np.sum(y_true * y_pred)
return 2 * intersection / (np.sum(y_true) + np.sum(y_pred))
def pad(image):
return np.pad(image, ((0, 0), (0, 2)), constant_values=1)
class Test:
def __init__(self, nn_models_dir, predict_threshold):
self.nn_models_dir = nn_models_dir
self.predict_threshold = predict_threshold
def calculate_metrics_for_model(self, model, batches):
    # Compute the mean Dice coefficient over all samples in the given batches.
    # Assumes each batch is an (x, y) pair shaped like the one consumed by
    # visualize_for_train_data below.
    dice_coefficients = []
    for x, y in batches:
        for i in range(len(x)):
            image, image_parts = x[i]
            mask, mask_parts = y[i]
            pred_parts = model.predict(image_parts, batch_size=len(image_parts))
            pred_parts = pred_parts.reshape(16, 16, 256, 256).swapaxes(1, 2).reshape(16 * 256, 16 * 256)
            pred_parts = cv2.resize(pred_parts, (256, 256))
            pred_mask = (pred_parts > self.predict_threshold * pred_parts.max()).astype(np.float64)
            dice_coefficients.append(dice(np.reshape(mask, (256, 256)), pred_mask))
    return np.mean(dice_coefficients)
def calculate_metrics_for_td(self, td, batches):
if td.metric_eval_timestamp != 'None':
return td
else:
model = make_model(td.model_name, (None, None, 3))
model.load_weights(opjoin(self.nn_models_dir, td.model_file_name))
td.add_metrics(self.calculate_metrics_for_model(model, batches))
return td
def visualize_for_train_data(self, td, batch, number_to_show=4):
model = make_model(td.model_name, (None, None, 3))
model.load_weights(opjoin(self.nn_models_dir, td.model_file_name))
x, y = batch
pred = model.predict(x, batch_size=16)
for i in range(number_to_show):
image, image_parts = x[i]
mask, mask_parts = y[i]
pred_parts = model.predict(image_parts, batch_size=len(image_parts))
pred_parts = pred_parts.reshape(16, 16, 256, 256).swapaxes(1, 2).reshape(16 * 256, 16 * 256)
pred_parts = cv2.resize(pred_parts, (256, 256))
show(image, mask, pred_parts, self.predict_threshold)
def show(x, y, pred, predict_threshold):
expected = np.reshape(y, (256, 256))
actual = np.reshape(pred, (256, 256))
x_to_show = x[:, :, 0]
x_to_show = x_to_show / x_to_show.max()
actual = actual > predict_threshold * actual.max()
to_show = np.array([pad(x_to_show), pad(actual), pad(expected)])
to_show = np.hstack(to_show)  # pad() already made each panel (256, 258); lay the three panels side by side
plt.imshow(to_show)
plt.show()
print('\n===============')
print('===============\n')
| 37.128571
| 104
| 0.637168
|
5f7865b8119c8191c5722ee49d9a3542cfc3d9d4
| 2,579
|
py
|
Python
|
app/user/views.py
|
gDoe24/recipe-app-api
|
d3eb24534062b61370da367416aeec6c98eed1fa
|
[
"MIT"
] | null | null | null |
app/user/views.py
|
gDoe24/recipe-app-api
|
d3eb24534062b61370da367416aeec6c98eed1fa
|
[
"MIT"
] | 7
|
2020-03-06T13:41:34.000Z
|
2022-02-13T05:23:39.000Z
|
app/user/views.py
|
gDoe24/recipe-app-api
|
d3eb24534062b61370da367416aeec6c98eed1fa
|
[
"MIT"
] | 1
|
2020-02-04T20:41:43.000Z
|
2020-02-04T20:41:43.000Z
|
from rest_framework import generics, authentication, permissions, status
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from rest_framework.response import Response
from rest_framework.authtoken.models import Token
from rest_framework.views import APIView
from core.models import Recipe, Tag
from django.views.generic import TemplateView
from django.core.exceptions import ObjectDoesNotExist
from django.core.paginator import Paginator
from django.shortcuts import render, get_object_or_404, redirect
from django.utils.translation import gettext_lazy as _
from user.serializers import UserSerializer, AuthTokenSerializer, LoginSerializer
from django.contrib.auth import logout as django_logout
import json
from django.http import Http404, HttpResponse
class CreateUserView(generics.CreateAPIView):
#Create a new user
serializer_class=UserSerializer
authentication_classes = []
permission_classes = []
class CreateTokenView(ObtainAuthToken):
#authenticate user
serializer_class= AuthTokenSerializer
renderer_classes= api_settings.DEFAULT_RENDERER_CLASSES
class ManageUserView(generics.RetrieveUpdateAPIView):
#View to manage the authenticated user
serializer_class = UserSerializer
authentication_classes = (authentication.TokenAuthentication,)
permission_classes = (permissions.IsAuthenticated,)
def get_object(self):
#retrieve and return the authenticated user
return self.request.user
#Logout API
class LogoutView(APIView):
authentication_classes = (authentication.TokenAuthentication,)
def post(self, request):
return self.logout(request)
def logout(self, request):
try:
request.user.auth_token.delete()
except (AttributeError, ObjectDoesNotExist):
pass
django_logout(request)
return Response({"success": _("Successfully logged out.")},
status=status.HTTP_200_OK)
#Login API
class LoginAPI(generics.GenericAPIView):
serializer_class = LoginSerializer
authentication_classes = (authentication.TokenAuthentication,)
permission_classes = (permissions.IsAuthenticated,)
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
user = serializer.validated_data
return Response({
"user": UserSerializer(user,
context = self.get_serializer_context()).data,
"token": AuthToken.objects.create(user)
})
def home(request):
tag=Tag.objects.all()
recipe_list = Recipe.objects.all().order_by('-creation_date')
paginator = Paginator(recipe_list, 3)
page = request.GET.get('page')
recipe = paginator.get_page(page)
return render(request, 'user/home.html', {'recipes':recipe, 'tags':tag})
| 28.977528
| 81
| 0.799922
|
8f370cee3b13ad0839c802bab2b5698c1cf5557f
| 263
|
py
|
Python
|
weird_fish/main.py
|
synchronizedsynecdoche/weird-fish
|
56ba5f46f95c9940d23f607415b8964e115f5f21
|
[
"MIT"
] | null | null | null |
weird_fish/main.py
|
synchronizedsynecdoche/weird-fish
|
56ba5f46f95c9940d23f607415b8964e115f5f21
|
[
"MIT"
] | null | null | null |
weird_fish/main.py
|
synchronizedsynecdoche/weird-fish
|
56ba5f46f95c9940d23f607415b8964e115f5f21
|
[
"MIT"
] | null | null | null |
import requests
from Accounts import Accounts
class weird_fish:
def __init__(self, url: str = "https://mastodon.social"):
self.url = url
server = Accounts(self.url)
user = server.get_account(1)
print(user.intro())
weird_fish()
| 23.909091
| 60
| 0.653992
|
f0d45896b4903a2c8e913fd2fad7f2a113ec47d3
| 5,991
|
py
|
Python
|
src/shared/utils.py
|
LukeSavefrogs/ICTSM-Maximo-Automation
|
5e6b776882cab1d05e3760667a6764d129a33765
|
[
"Apache-2.0"
] | null | null | null |
src/shared/utils.py
|
LukeSavefrogs/ICTSM-Maximo-Automation
|
5e6b776882cab1d05e3760667a6764d129a33765
|
[
"Apache-2.0"
] | null | null | null |
src/shared/utils.py
|
LukeSavefrogs/ICTSM-Maximo-Automation
|
5e6b776882cab1d05e3760667a6764d129a33765
|
[
"Apache-2.0"
] | null | null | null |
import os, sys
import inspect
import json
import logging
from deprecated import deprecated
from shared.cache import Cache
logger = logging.getLogger(__name__)
class Credentials (Cache):
FILENAME_TEMPLATE = "{product}_credentials.yaml"
PRODUCT_NAME = ""
def __init__(self, product_name:str, max_login_fails:int = 2, **kwds) -> None:
"""Initialize the Credentials for the application
Args:
product_name (str): Name of the product. Used to build the filename
max_login_fails (int, optional): Max number of failures allowed for the credentials. Defaults to 2.
"""
self.PRODUCT_NAME = product_name
file_name = self.FILENAME_TEMPLATE.format(product=self.PRODUCT_NAME.lower())
self.max_login_fails = max_login_fails if isinstance(max_login_fails, int) else 2
super().__init__(file_name, **kwds)
logger.info(f"Credential storage initialization completed successfully for product '{self.PRODUCT_NAME}' with {self.max_login_fails} MAX login failures")
def getCredentials(self):
if not self.exists():
print(f"File di configurazione '{self.getCacheFilename()}' non trovato.\n")
self.setCredentials()
conf = self.getRawContent()
if not self.isValid(conf):
print(f"File di configurazione '{self.getCacheFilename()}' non valido.\n")
self.setCredentials()
conf = self.getRawContent()
print(f"File di configurazione '{self.getCacheFilename()}' caricato.\n")
return conf
def setCredentials(self):
USERNAME = self.__single_input_cred(f"Inserisci lo USERNAME di {self.PRODUCT_NAME.strip()}: ")
PASSWORD = self.__single_input_cred(f"Inserisci la PASSWORD di {self.PRODUCT_NAME.strip()}: ")
data = {
"USERNAME": USERNAME,
"PASSWORD": PASSWORD,
"FAILED_LOGINS": 0
}
self.setCache(data)
print(f"\nHo salvato le credenziali nel file '{self.getCacheFilename()}'")
def isValid(self, config: dict):
# print("Configurazione: " + str(config))
if not super().isValid(config):
return False
# Additional checks
for key in ["FAILED_LOGINS", "USERNAME", "PASSWORD"]:
if key not in config["data"]:
print(f"Chiave necessaria non trovata: {key}")
return False
if config["data"]["FAILED_LOGINS"] >= self.max_login_fails:
print("\n\n------------------------------------------------------------------------------------------")
print("PASSWORD SCADUTA".center(90))
print("Cambiare la password e reimmetterla in questo script".center(90))
print("------------------------------------------------------------------------------------------\n\n")
return False
return True
def addFailedLoginAttempt(self):
config = self.getRawContent()["data"]
config["FAILED_LOGINS"] += 1
self.setCache(config)
def clearFailedLoginAttempts(self):
config = self.getRawContent()["data"]
config["FAILED_LOGINS"] = 0
self.setCache(config)
# Hidden method
def __single_input_cred(self, text:str = ""):
"""Utility method. Used internally to execute checks on user credential input
Args:
text (str, optional): The label text to show to the user. Defaults to "".
Returns:
str: The value provided by the user
"""
while True:
try:
value = str(input(text))
except ValueError:
print("ERRORE - Valore non valido. Deve essere una stringa\n")
continue
if value.strip() == "":
print("ERRORE - Il valore non puo' essere lasciato vuoto\n")
continue
else:
break
return value
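# Hedged usage sketch, not part of the original module: how the Credentials store
# above is typically driven. "Maximo" here is just an example product name.
def _credentials_example():
    creds = Credentials("Maximo", max_login_fails=3)
    conf = creds.getCredentials()            # prompts for and caches credentials on first run
    username = conf["data"]["USERNAME"]
    password = conf["data"]["PASSWORD"]
    creds.addFailedLoginAttempt()            # record a failed login against the stored credentials
    creds.clearFailedLoginAttempts()         # reset the counter after a successful login
    return username, password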
def getCorrectPath(filePath):
"""Returns the correct path (relative/absolute) wether is a frozen app or a script
Args:
filePath (str): The path to the resource you need
Returns:
str: Final resolved path
"""
# If the given path is absolute, leave it unchanged
if os.path.isabs(filePath):
return filePath
# If this is a PyInstaller application and the path is relative
if hasattr(sys, "_MEIPASS"):
file = os.path.join(sys._MEIPASS, filePath)
# If this is a plain script and the path is relative
else:
# Find the path of the calling file
frame = inspect.stack()[1]
caller_filename = frame[0].f_code.co_filename
# Take the parent folder of the calling file
caller_working_directory = os.path.dirname(os.path.realpath(caller_filename))
# Resolve paths relative to the folder containing the calling script
file = os.path.abspath(os.path.join(caller_working_directory, filePath))
# print(f"Caller: {caller_filename}")
# print(f"Caller WD: {caller_working_directory}")
# print(f"Final path: {file}\n")
return file
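# Hedged usage sketch, not part of the original module: resolving a bundled
# resource the same way whether the code runs as a plain script or as a frozen
# (PyInstaller) executable. The file names below are invented for illustration.
def _get_correct_path_example():
    # A relative path is resolved against the caller's directory (or sys._MEIPASS
    # when frozen); an absolute path is returned unchanged.
    bundled_config = getCorrectPath("config/settings.json")
    absolute_path = getCorrectPath("/etc/hosts")
    return bundled_config, absolute_path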
# Description:
# Returns the path of where the script (or executable) is ACTUALLY located.
# It even works for frozen applications (like executables created with `pyinstaller`)
#
# I tried `os.path.dirname(os.path.realpath(__file__))` but it returned the correct result only when
# the script was NOT frozen.
# A different but still working approach would have been `os.path.dirname(os.path.realpath(getEntryPoint()))`,
# in which getEntryPoint() checks if script is frozen.
#
# From:
# https://stackoverflow.com/a/4943474/8965861
#
def get_entry_point():
"""Returns the name of the script currently running.
It works both independent, launched from within a module or from a frozen script (with a
tool like pyinstaller)
Returns:
str: The absolute path of the script/executable
"""
return os.path.realpath(sys.argv[0])
def get_entry_point_dir():
"""Returns the directory of the script currently running.
It works both independent, launched from within a module or from a frozen script (with a
tool like pyinstaller)
Returns:
str: The absolute path of the directory the script/executable is placed in
"""
return os.path.dirname(get_entry_point())
@deprecated("This function should not be used. Use `get_entry_point()` or `get_entry_point_dir()` instead...")
def getEntryPoint():
is_executable = getattr(sys, 'frozen', False)
if is_executable:
# print("Program is an executable")
return sys.executable
# print("Program is a script")
return inspect.stack()[-1][1]
| 29.367647
| 155
| 0.703055
|
34dbbaa0581b39f11dbcf551a7be5cef9076ed13
| 13,665
|
py
|
Python
|
backend/edw/models/mixins/entity/add_date_terms_validation.py
|
MMotionMan/django-edw
|
0f686429d29e0f40409a3b2318664973b2844c08
|
[
"BSD-3-Clause"
] | 4
|
2019-09-18T05:51:12.000Z
|
2020-10-23T08:50:00.000Z
|
backend/edw/models/mixins/entity/add_date_terms_validation.py
|
Vvvnukova/django-edw
|
18397c2e6e2d7ddebad4d83ffee16425e7ac4e9f
|
[
"BSD-3-Clause"
] | 10
|
2020-04-29T11:46:44.000Z
|
2022-03-11T23:38:27.000Z
|
backend/edw/models/mixins/entity/add_date_terms_validation.py
|
Vvvnukova/django-edw
|
18397c2e6e2d7ddebad4d83ffee16425e7ac4e9f
|
[
"BSD-3-Clause"
] | 13
|
2020-04-09T07:49:48.000Z
|
2022-03-02T07:06:28.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import calendar
from django.db import transaction
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from edw.models.entity import EntityModel
from edw.models.term import TermModel
from edw.utils.dateutils import datetime_to_local
_default_system_flags_restriction = (TermModel.system_flags.delete_restriction |
TermModel.system_flags.change_parent_restriction |
TermModel.system_flags.change_slug_restriction |
TermModel.system_flags.change_semantic_rule_restriction |
TermModel.system_flags.has_child_restriction |
TermModel.system_flags.external_tagging_restriction)
class BaseAddedDateTermsValidationMixin(object):
"""
Mixin that manages the added-date terms.
"""
REQUIRED_FIELDS = ('created_at',)
@cached_property
def local_created_at(self):
return datetime_to_local(self.created_at)
def need_terms_validation_after_save(self, origin, **kwargs):
"""
Flags the Date terms for validation after the object is saved.
"""
if origin is None or origin.local_created_at != self.local_created_at:
do_validate = kwargs["context"]["validate_added_date"] = True
else:
do_validate = False
return super(BaseAddedDateTermsValidationMixin, self).need_terms_validation_after_save(
origin, **kwargs) or do_validate
class AddedDayTermsValidationMixin(BaseAddedDateTermsValidationMixin):
ADDED_DAY_ROOT_TERM_SLUG = "added-day"
ADDED_DAY_KEY = 'added-day-{:02d}'
ADDED_DAY_RANGE_KEY = 'added-day-{0:02d}-{1:02d}'
@classmethod
def validate_term_model(cls):
"""
Adds the day terms to the term model. Checks whether the day exists in TermModel
and, if it is missing, creates the date ranges and the per-day breakdown within those ranges.
"""
# validate only once
key = 'vldt:day_add'
need_validation = EntityModel._validate_term_model_cache.get(key, True)
if need_validation:
EntityModel._validate_term_model_cache[key] = False
system_flags = _default_system_flags_restriction
with transaction.atomic():
try: # added day
added_day = TermModel.objects.get(slug=cls.ADDED_DAY_ROOT_TERM_SLUG, parent=None)
except TermModel.DoesNotExist:
added_day = TermModel(
slug=cls.ADDED_DAY_ROOT_TERM_SLUG,
parent=None,
name=_('Added day'),
semantic_rule=TermModel.OR_RULE,
system_flags=system_flags
)
added_day.save()
day_ranges = ((1, 11), (11, 21), (21, 32))
for r in day_ranges: # added day range
day_range_key = cls.ADDED_DAY_RANGE_KEY.format(r[0], r[1] - 1)
with transaction.atomic():
try:
added_day_range = TermModel.objects.get(slug=day_range_key, parent=added_day)
except TermModel.DoesNotExist:
added_day_range = TermModel(
slug=day_range_key,
parent_id=added_day.id,
name="{} - {}".format(r[0], r[1] - 1),
semantic_rule=TermModel.OR_RULE,
system_flags=system_flags
)
added_day_range.save()
for i in range(r[0], r[1]): # added day
day_key = cls.ADDED_DAY_KEY.format(i)
with transaction.atomic():
try:
TermModel.objects.get(slug=day_key)
except TermModel.DoesNotExist:
day = TermModel(
slug=day_key,
parent_id=added_day_range.id,
name="{:02d}".format(i),
semantic_rule=TermModel.OR_RULE,
system_flags=system_flags
)
day.save()
super(AddedDayTermsValidationMixin, cls).validate_term_model()
def validate_terms(self, origin, **kwargs):
"""
Assigns the matching creation-day term to the given objects.
"""
context = kwargs["context"]
force_validate_terms = context.get("force_validate_terms", False)
if force_validate_terms or context.get("validate_added_date", False):
added_days = self.get_added_days()
if force_validate_terms:
self.terms.remove(*[x.id for x in added_days.values()])
elif origin is not None:
term = added_days[self.ADDED_DAY_KEY.format(origin.local_created_at.day)]
self.terms.remove(term)
term = added_days[self.ADDED_DAY_KEY.format(self.local_created_at.day)]
self.terms.add(term)
super(AddedDayTermsValidationMixin, self).validate_terms(origin, **kwargs)
@staticmethod
def get_added_days():
"""
Returns the day terms. If they are absent from TermModel, returns the days cached on EntityModel.
"""
added_days = getattr(EntityModel, "_added_days_cache", None)
if added_days is None:
added_days = {}
try:
root = TermModel.objects.get(slug=AddedDayTermsValidationMixin.ADDED_DAY_ROOT_TERM_SLUG, parent=None)
for term in root.get_descendants(include_self=True):
added_days[term.slug] = term
except TermModel.DoesNotExist:
pass
EntityModel._added_days_cache = added_days
return added_days
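# Hedged illustrative sketch, not part of the original module: the slugs that
# AddedDayTermsValidationMixin.validate_term_model() builds from its key templates,
# shown without touching the database.
def _added_day_slug_example():
    range_slugs = [
        AddedDayTermsValidationMixin.ADDED_DAY_RANGE_KEY.format(r[0], r[1] - 1)
        for r in ((1, 11), (11, 21), (21, 32))
    ]
    day_slugs = [AddedDayTermsValidationMixin.ADDED_DAY_KEY.format(i) for i in range(1, 32)]
    # range_slugs == ['added-day-01-10', 'added-day-11-20', 'added-day-21-31']
    # day_slugs[0] == 'added-day-01', day_slugs[-1] == 'added-day-31'
    return range_slugs, day_slugs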
class AddedMonthTermsValidationMixin(BaseAddedDateTermsValidationMixin):
ADDED_MONTH_ROOT_TERM_SLUG = "added-month"
ADDED_MONTH_KEY = 'added-month-{:02d}'
@classmethod
def validate_term_model(cls):
"""
Adds the month terms to TermModel. Checks whether the month exists in TermModel
and, if it is missing, creates the month range (1-12) and the per-month breakdown within it.
"""
# Set a timeout for the validation
key = 'vldt:mnth_add'
need_validation = EntityModel._validate_term_model_cache.get(key, True)
if need_validation:
EntityModel._validate_term_model_cache[key] = False
system_flags = _default_system_flags_restriction
with transaction.atomic():
try: # added month
added_month = TermModel.objects.get(slug=cls.ADDED_MONTH_ROOT_TERM_SLUG, parent=None)
except TermModel.DoesNotExist:
added_month = TermModel(
slug=cls.ADDED_MONTH_ROOT_TERM_SLUG,
parent=None,
name=_('Added month'),
semantic_rule=TermModel.OR_RULE,
system_flags=system_flags)
added_month.save()
for i in range(1, 13):
month_key = cls.ADDED_MONTH_KEY.format(i)
with transaction.atomic():
try:
TermModel.objects.get(slug=month_key, parent=added_month)
except TermModel.DoesNotExist:
month = TermModel(slug=month_key,
parent_id=added_month.id,
name=_(calendar.month_name[i]),
semantic_rule=TermModel.OR_RULE,
system_flags=system_flags)
month.save()
super(AddedMonthTermsValidationMixin, cls).validate_term_model()
def validate_terms(self, origin, **kwargs):
"""
Assigns the matching creation-month term to the given objects.
"""
context = kwargs["context"]
force_validate_terms = context.get("force_validate_terms", False)
if force_validate_terms or context.get("validate_added_date", False):
added_months = self.get_added_months()
if force_validate_terms:
self.terms.remove(*[x.id for x in added_months.values()])
elif origin is not None:
term = added_months[self.ADDED_MONTH_KEY.format(origin.local_created_at.month)]
self.terms.remove(term)
term = added_months[self.ADDED_MONTH_KEY.format(self.local_created_at.month)]
self.terms.add(term)
super(AddedMonthTermsValidationMixin, self).validate_terms(origin, **kwargs)
@staticmethod
def get_added_months():
"""
Adds the months to the EntityModel cache if they are missing.
"""
added_months = getattr(EntityModel, "_added_months_cache", None)
if added_months is None:
added_months = {}
try:
root = TermModel.objects.get(slug=AddedMonthTermsValidationMixin.ADDED_MONTH_ROOT_TERM_SLUG, parent=None)
for term in root.get_descendants(include_self=True):
added_months[term.slug] = term
except TermModel.DoesNotExist:
pass
else:
EntityModel._added_months_cache = added_months
return added_months
class AddedYearTermsValidationMixin(BaseAddedDateTermsValidationMixin):
ADDED_YEAR_ROOT_TERM_SLUG = "added-year"
ADDED_YEAR_KEY = 'added-year-{}'
@classmethod
def validate_term_model(cls):
"""
Adds the year term to TermModel.
"""
# Set a timeout for the validation
key = 'vldt:year_add'
need_validation = EntityModel._validate_term_model_cache.get(key, True)
if need_validation:
EntityModel._validate_term_model_cache[key] = False
system_flags = _default_system_flags_restriction
with transaction.atomic():
try: # added year
TermModel.objects.get(slug=cls.ADDED_YEAR_ROOT_TERM_SLUG)
except TermModel.DoesNotExist:
added_year = TermModel(
slug=cls.ADDED_YEAR_ROOT_TERM_SLUG,
parent=None,
name=_('Added year'),
semantic_rule=TermModel.XOR_RULE,
system_flags=system_flags)
added_year.save()
super(AddedYearTermsValidationMixin, cls).validate_term_model()
def validate_terms(self, origin, **kwargs):
"""
Assigns the matching creation-year term to the given objects.
"""
context = kwargs["context"]
force_validate_terms = context.get("force_validate_terms", False)
if force_validate_terms or context.get("validate_added_date", False):
added_year = self.local_created_at.year
added_years = self.get_added_years(added_year)
if force_validate_terms:
self.terms.remove(*[x.id for x in added_years.values()])
elif origin is not None:
term = added_years.get(self.ADDED_YEAR_KEY.format(origin.local_created_at.year), None)
if term is not None:
self.terms.remove(term)
term = added_years[self.ADDED_YEAR_KEY.format(added_year)]
self.terms.add(term)
super(AddedYearTermsValidationMixin, self).validate_terms(origin, **kwargs)
@staticmethod
def get_added_years(year):
"""
Returns the Year term; if it is not cached, it is looked up among the descendants in TermModel.
If the year is not present there, the default system flag restrictions are applied
and, if the term is not found in TermModel, it is created.
"""
year_key = EntityModel.ADDED_YEAR_KEY.format(year)
added_years = getattr(EntityModel, "_added_years_cache", {})
if year_key not in added_years:
try:
root = TermModel.objects.get(slug=AddedYearTermsValidationMixin.ADDED_YEAR_ROOT_TERM_SLUG, parent=None)
except TermModel.DoesNotExist:
pass
else:
if not added_years:
for term in root.get_descendants(include_self=False):
added_years[term.slug] = term
EntityModel._added_years_cache = added_years
if year_key not in added_years:
system_flags = _default_system_flags_restriction
(term, is_create) = TermModel.objects.get_or_create(
slug=year_key,
parent_id=root.id,
defaults={
'name': "{}".format(year),
'semantic_rule': TermModel.OR_RULE,
'system_flags': system_flags
}
)
added_years[year_key] = term
return added_years
class AddedDateTermsValidationMixin(AddedYearTermsValidationMixin, AddedMonthTermsValidationMixin,
AddedDayTermsValidationMixin):
pass
| 44.366883
| 121
| 0.586096
|
037cb54aac999a27c21c13f841feb80028eba68f
| 1,366
|
py
|
Python
|
ote_sdk/ote_sdk/utils/labels_utils.py
|
ntyukaev/training_extensions
|
c897d42e50828fea853ceda0795e1f0e7d6e9909
|
[
"Apache-2.0"
] | null | null | null |
ote_sdk/ote_sdk/utils/labels_utils.py
|
ntyukaev/training_extensions
|
c897d42e50828fea853ceda0795e1f0e7d6e9909
|
[
"Apache-2.0"
] | null | null | null |
ote_sdk/ote_sdk/utils/labels_utils.py
|
ntyukaev/training_extensions
|
c897d42e50828fea853ceda0795e1f0e7d6e9909
|
[
"Apache-2.0"
] | 1
|
2020-12-13T22:13:51.000Z
|
2020-12-13T22:13:51.000Z
|
"""
This module implements utilities for labels
"""
# Copyright (C) 2021-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
from typing import List, Optional
from ote_sdk.entities.label import LabelEntity
from ote_sdk.entities.label_schema import LabelSchemaEntity
from ote_sdk.entities.scored_label import ScoredLabel
def get_empty_label(label_schema: LabelSchemaEntity) -> Optional[LabelEntity]:
"""
Get first empty label from label_schema
"""
empty_candidates = list(
set(label_schema.get_labels(include_empty=True))
- set(label_schema.get_labels(include_empty=False))
)
if empty_candidates:
return empty_candidates[0]
return None
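# Hedged illustrative sketch, not part of the original module: the set difference
# used above, shown with plain strings instead of LabelEntity objects so it runs
# without a real label schema.
def _empty_label_set_difference_example():
    labels_including_empty = {"cat", "dog", "Empty"}
    labels_excluding_empty = {"cat", "dog"}
    empty_candidates = list(labels_including_empty - labels_excluding_empty)
    return empty_candidates[0] if empty_candidates else None  # -> "Empty"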
def get_leaf_labels(label_schema: LabelSchemaEntity) -> List[LabelEntity]:
"""
Get the leaf labels from the label tree
"""
leaf_labels = []
all_labels = label_schema.get_labels(False)
for lbl in all_labels:
if not label_schema.get_children(lbl):
leaf_labels.append(lbl)
return leaf_labels
def get_ancestors_by_prediction(
label_schema: LabelSchemaEntity, prediction: ScoredLabel
) -> List[ScoredLabel]:
"""
Get all the ancestors for a given label node
"""
ancestor_labels = label_schema.get_ancestors(prediction.get_label())
return [ScoredLabel(al, prediction.probability) for al in ancestor_labels]
| 27.32
| 78
| 0.7306
|
aa128f184c0616907b9128b6a5fe85fb6b3d9ed1
| 64
|
py
|
Python
|
1(Multiples of 3 of 5)/multiples.py
|
thatdeep/project_euler
|
acce3a2f9814c80e330dc63ef60cf51da9e236c4
|
[
"MIT"
] | null | null | null |
1(Multiples of 3 of 5)/multiples.py
|
thatdeep/project_euler
|
acce3a2f9814c80e330dc63ef60cf51da9e236c4
|
[
"MIT"
] | null | null | null |
1(Multiples of 3 of 5)/multiples.py
|
thatdeep/project_euler
|
acce3a2f9814c80e330dc63ef60cf51da9e236c4
|
[
"MIT"
] | null | null | null |
print(len(set(range(3, 1001, 3)).union(set(range(5, 1001, 5)))))
| 64
| 64
| 0.640625
|
5ba7558cb7b54cdb49000ea90db79e54ac7800f7
| 18,505
|
py
|
Python
|
gitmanager.py
|
gruberan/SmokeDetector
|
30bd05a942e384b082dccd2a20b3c18b1e8b0cd5
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
gitmanager.py
|
gruberan/SmokeDetector
|
30bd05a942e384b082dccd2a20b3c18b1e8b0cd5
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
gitmanager.py
|
gruberan/SmokeDetector
|
30bd05a942e384b082dccd2a20b3c18b1e8b0cd5
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
# coding=utf-8
import sys
import platform
import time
import json
from datetime import datetime
from threading import Lock
import regex
import requests
from requests.auth import HTTPBasicAuth
from urllib.parse import quote_plus
from urllib.parse import quote
from globalvars import GlobalVars
if GlobalVars.on_windows:
# noinspection PyPep8Naming
from classes._Git_Windows import git, GitError
else:
from sh.contrib import git
from sh import ErrorReturnCode as GitError
from helpers import log, log_exception, only_blacklists_changed
from blacklists import *
class GitHubManager:
auth = HTTPBasicAuth(GlobalVars.github_username, GlobalVars.github_password)
repo = GlobalVars.bot_repo_slug
@classmethod
def create_pull_request(cls, payload):
"""
Creates a pull request on GitHub, returns the json'd response
"""
if isinstance(payload, dict):
payload = json.dumps(payload)
response = requests.post("https://api.github.com/repos/{}/pulls".format(cls.repo),
auth=cls.auth, data=payload)
return response.json()
@classmethod
def comment_on_thread(cls, thread_id, body):
url = "https://api.github.com/repos/{}/issues/{}/comments".format(cls.repo, thread_id)
payload = json.dumps({'body': body})
response = requests.post(url, auth=cls.auth, data=payload)
return response.json()
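# Hedged usage sketch, not part of the original module: the payload shape
# create_pull_request() expects, mirroring the dict built later in
# GitManager.add_to_blacklist(). The branch name and title below are invented.
def _create_pull_request_example():
    payload = {
        "title": "someuser: Blacklist example\\.com",
        "body": "Requested from chat; see the MS search for details.",
        "head": "auto-blacklist-1234567890",   # branch that holds the change
        "base": "master",
    }
    # Returns the GitHub API response parsed as JSON (contains html_url, number, ...).
    return GitHubManager.create_pull_request(payload)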
# noinspection PyRedundantParentheses,PyClassHasNoInit,PyBroadException
class GitManager:
gitmanager_lock = Lock()
@staticmethod
def get_origin_or_auth():
git_url = git.config("--get", "remote.origin.url").strip()
if git_url[0:19] == "https://github.com/" and GlobalVars.github_username and GlobalVars.github_password:
preformat_url = ('https://{}:{}@github.com/' + git_url[19:])
return preformat_url.format(quote(GlobalVars.github_username), quote(GlobalVars.github_password))
else:
return "origin"
@classmethod
def add_to_blacklist(cls, blacklist='', item_to_blacklist='', username='', chat_profile_link='',
code_permissions=False, metasmoke_down=False):
if blacklist == "":
return (False, 'GitManager: blacklist is not defined. Blame a developer.')
if item_to_blacklist == "":
return (False, 'GitManager: item_to_blacklist is not defined. Blame a developer.')
# item_to_blacklist = item_to_blacklist.replace("\\s", " ")
if blacklist == "website":
blacklist_type = Blacklist.WEBSITES
ms_search_option = "&body_is_regex=1&body="
elif blacklist == "keyword":
blacklist_type = Blacklist.KEYWORDS
ms_search_option = "&body_is_regex=1&body="
elif blacklist == "username":
blacklist_type = Blacklist.USERNAMES
ms_search_option = "&username_is_regex=1&username="
elif blacklist == "number":
blacklist_type = Blacklist.NUMBERS
ms_search_option = "&body="
elif blacklist == "watch_keyword":
blacklist_type = Blacklist.WATCHED_KEYWORDS
ms_search_option = "&body_is_regex=1&body="
elif blacklist == "watch_number":
blacklist_type = Blacklist.WATCHED_NUMBERS
ms_search_option = "&body="
else:
return (False, 'GitManager: blacklist is not recognized. Blame a developer.')
blacklister = Blacklist(blacklist_type)
blacklist_file_name = blacklist_type[0]
try:
cls.gitmanager_lock.acquire()
status, message = cls.prepare_git_for_operation(blacklist_file_name)
if not status:
return (False, message)
now = str(int(time.time()))
if blacklist_type in {Blacklist.WATCHED_KEYWORDS, Blacklist.WATCHED_NUMBERS}:
op = 'watch'
item = item_to_blacklist
item_to_blacklist = "\t".join([now, username, item])
else:
op = 'blacklist'
item = item_to_blacklist
exists, line = blacklister.exists(item_to_blacklist)
if exists:
return (False, 'Already {}ed on line {} of {}'.format(op, line, blacklist_file_name))
watch_removed = False
if blacklist_type not in {Blacklist.WATCHED_KEYWORDS, Blacklist.WATCHED_NUMBERS}:
for watcher_type in {Blacklist.WATCHED_KEYWORDS, Blacklist.WATCHED_NUMBERS}:
watcher = Blacklist(watcher_type)
if watcher.exists(item_to_blacklist):
watch_removed = True
watcher.remove(item_to_blacklist)
blacklister.add(item_to_blacklist)
branch = "auto-blacklist-{0}".format(now)
git.checkout("-b", branch)
git.reset("HEAD")
git.add(blacklist_file_name)
if watch_removed:
git.add('watched_keywords.txt', 'watched_numbers.txt')
git("-c", "user.name=" + GlobalVars.git_name,
"-c", "user.email=" + GlobalVars.git_email,
"commit",
"--author={} <{}>".format(GlobalVars.git_name, GlobalVars.git_email),
"-m", "Auto {0} of `{1}` by {2}".format(op, item, username))
origin_or_auth = cls.get_origin_or_auth()
if code_permissions:
git.checkout("master")
git.merge(branch)
git.push(origin_or_auth, "master")
git.branch('-D', branch) # Delete the branch in the local git tree since we're done with it.
else:
git.push(origin_or_auth, branch)
git.checkout("master")
if GlobalVars.github_username is None or GlobalVars.github_password is None:
return (False, "Tell someone to set a GH password")
payload = {"title": "{0}: {1} {2}".format(username, op.title(), item),
"body": "[{0}]({1}) requests the {2} of the {3} `{4}`. See the MS search [here]"
"(https://metasmoke.erwaysoftware.com/search?utf8=%E2%9C%93{5}{6}) and the "
"Stack Exchange search [here](https://stackexchange.com/search?q=%22{7}%22).\n"
"<!-- METASMOKE-BLACKLIST-{8} {4} -->".format(
username, chat_profile_link, op, blacklist, # 0 1 2 3
item, ms_search_option, # 4 5
quote_plus(item), # 6
quote_plus(item.replace("\\W", " ").replace("\\.", ".")), # 7
blacklist.upper()), # 8
"head": branch,
"base": "master"}
response = GitHubManager.create_pull_request(payload)
log('debug', response)
try:
git.checkout("deploy") # Return to deploy, pending the accept of the PR in Master.
git.branch('-D', branch) # Delete the branch in the local git tree since we're done with it.
url, pr_num = response["html_url"], response["number"]
if metasmoke_down:
return (True,
"MS is not reachable, so I can't see if you have code privileges, but I've "
"[created PR#{1} for you]({0}).".format(
url, pr_num))
else:
return (True,
"You don't have code privileges, but I've [created PR#{1} for you]({0}).".format(
url, pr_num))
except KeyError:
git.checkout("deploy") # Return to deploy
try:
# Delete the branch in the local git tree, we'll create it again if the
# command is run again. This way, we keep things a little more clean in
# the local git tree
git.branch('-D', branch)
except GitError:
# It's OK if the branch doesn't get deleted, so long as we switch back to
# deploy, which we do in the finally block...
pass
# Error capture/checking for any "invalid" GH reply without an 'html_url' item,
# which will throw a KeyError.
if "bad credentials" in str(response['message']).lower():
# Capture the case when GH credentials are bad or invalid
return (False, "Something is wrong with the GH credentials, tell someone to check them.")
else:
# Capture any other invalid response cases.
return (False, "A bad or invalid reply was received from GH, the message was: %s" %
response['message'])
except Exception as err:
log_exception(*sys.exc_info())
return (False, "Git functions failed for unspecified reasons, details may be in error log.")
finally:
# Always return to `deploy` branch when done with anything.
git.checkout("deploy")
cls.gitmanager_lock.release()
if op == 'blacklist':
return (True, "Blacklisted `{0}`".format(item))
elif op == 'watch':
return (True, "Added `{0}` to watchlist".format(item))
@classmethod
def remove_from_blacklist(cls, item, username, blacklist_type="", code_privileged=False, metasmoke_down=False):
if not code_privileged:
if metasmoke_down:
return False, "MS is offline, and I can't determine if you are a blacklist manager or not. " \
"If you are a blacklist manager, then wait for MS to be back up before running " \
"this command."
else:
return False, "Ask a blacklist manager to run that for you. Use `!!/whois blacklister` to find " \
"out who's here."
try:
cls.gitmanager_lock.acquire()
git.checkout("master")
if blacklist_type == "watch":
blacklists = [Blacklist.WATCHED_KEYWORDS, Blacklist.WATCHED_NUMBERS]
list_type = "watchlist"
elif blacklist_type == "blacklist":
blacklists = [Blacklist.KEYWORDS, Blacklist.WEBSITES, Blacklist.USERNAMES, Blacklist.NUMBERS]
list_type = "blacklist"
else:
return False, "`blacklist_type` not set, blame a developer."
for blacklist in blacklists:
file_name = blacklist[0]
manager = Blacklist(blacklist)
exists, _line = manager.exists(item)
if exists:
break
if not exists:
return False, 'No such item `{}` in {}.'.format(item, list_type)
status, message = cls.prepare_git_for_operation(file_name)
if not status:
return False, message
branch = 'auto-un{}-{}'.format(blacklist_type, time.time())
git.checkout('-b', branch)
git.reset('HEAD')
manager.remove(item)
git.add(file_name)
git("-c", "user.name=" + GlobalVars.git_name,
"-c", "user.email=" + GlobalVars.git_email,
"commit",
"--author={} <{}>".format(GlobalVars.git_name, GlobalVars.git_email),
'-m', 'Auto un{} of `{}` by {}'.format(blacklist_type, item, username))
git.checkout('master')
git.merge(branch)
origin_or_auth = cls.get_origin_or_auth()
git.push(origin_or_auth, 'master')
try:
git.branch('-D', branch)
except GitError:
# It's OK if the branch doesn't get deleted, so long as we switch back to
# deploy, which we do in the finally block...
pass
except Exception as e:
log('error', '{}: {}'.format(type(e).__name__, e))
log_exception(*sys.exc_info())
return False, 'Git operations failed for unspecified reasons.'
finally:
git.checkout('deploy')
cls.gitmanager_lock.release()
# With no exception raised, list_type should be set
return True, 'Removed `{}` from {}'.format(item, list_type)
@classmethod
def merge_pull_request(cls, pr_id, comment=""):
response = requests.get("https://api.github.com/repos/{}/pulls/{}".format(GlobalVars.bot_repo_slug, pr_id))
if not response:
raise ConnectionError("Cannot connect to GitHub API")
pr_info = response.json()
if pr_info["user"]["login"] != "SmokeDetector":
raise ValueError("PR #{} is not created by me, so I can't approve it.".format(pr_id))
if "<!-- METASMOKE-BLACKLIST" not in pr_info["body"]:
raise ValueError("PR description is malformed. Blame a developer.")
if pr_info["state"] != "open":
raise ValueError("PR #{} is not currently open, so I won't merge it.".format(pr_id))
ref = pr_info['head']['ref']
if comment: # yay we have comments now
GitHubManager.comment_on_thread(pr_id, comment)
try:
# Remote checks passed, good to go here
cls.gitmanager_lock.acquire()
git.checkout('master')
origin_or_auth = cls.get_origin_or_auth()
git.fetch(origin_or_auth, '+refs/pull/{}/head'.format(pr_id))
git("-c", "user.name=" + GlobalVars.git_name,
"-c", "user.email=" + GlobalVars.git_email,
"merge",
'FETCH_HEAD', '--no-ff', '-m', 'Merge pull request #{} from {}/{}'.format(
pr_id, GlobalVars.bot_repo_slug.split("/")[0], ref))
git.push(origin_or_auth, 'master')
try:
git.push('-d', origin_or_auth, ref)
except GitError as e:
# TODO: PR merged, but branch deletion has something wrong, generate some text
pass
return "Merged pull request [#{0}](https://github.com/{1}/pull/{0}).".format(
pr_id, GlobalVars.bot_repo_slug)
finally:
git.checkout('deploy')
cls.gitmanager_lock.release()
@staticmethod
def prepare_git_for_operation(blacklist_file_name):
try:
git.checkout('master')
git.remote.update()
git.reset('--hard', 'origin/master')
except GitError as e:
if GlobalVars.on_windows:
return False, "Not doing this, we're on Windows."
log_exception(*sys.exc_info())
return False, "`git pull` has failed. This shouldn't happen. Details have been logged."
if GlobalVars.on_windows:
remote_ref = git.rev_parse("refs/remotes/origin/master").strip()
local_ref = git.rev_parse("master").strip()
else:
remote_ref = git("rev-parse", "refs/remotes/origin/master").strip()
local_ref = git("rev-parse", "master").strip()
if local_ref != remote_ref:
local_log = git.log(r"--pretty=`[%h]` *%cn*: %s", "-1", str(local_ref)).strip()
remote_log = git.log(r"--pretty=`[%h]` *%cn*: %s", "-1", str(remote_ref)).strip()
return False, "HEAD isn't at tip of origin's master branch (local {}, remote {})".format(
local_log, remote_log)
return True, None
@staticmethod
def current_git_status():
if GlobalVars.on_windows:
return git.status_stripped()
else:
return str(git.status())
@staticmethod
def current_branch():
return str(git('rev-parse', '--abbrev-ref', 'HEAD')).strip()
@staticmethod
def merge_abort():
git.merge("--abort")
@staticmethod
def reset_head():
git.reset("--hard", "HEAD")
git.clean("-f")
@staticmethod
def get_remote_diff():
git.fetch()
if GlobalVars.on_windows:
return git.diff_filenames("HEAD", "origin/deploy")
else:
return git.diff("--name-only", "HEAD", "origin/deploy")
@staticmethod
def get_local_diff():
if GlobalVars.on_windows:
return git.diff_filenames("HEAD", "master")
else:
return git.diff("--name-only", "HEAD", "master")
@staticmethod
def pull_remote():
git.pull()
@classmethod
def pull_local(cls):
diff = GitManager.get_local_diff()
if not only_blacklists_changed(diff):
return
try:
git.merge("--ff-only", "master")
origin_or_auth = cls.get_origin_or_auth()
git.push(origin_or_auth, "deploy")
except GitError:
return
@staticmethod
def sync_remote():
try:
git.fetch('--force')
git.checkout('master', '--force')
git.branch('--create-reflog', '-f', 'deploy', '-t', 'origin/deploy')
git.checkout('deploy', '--force')
git.branch('--create-reflog', '-f', 'master', '-t', 'origin/master')
return True, "Synced to origin/master and origin/deploy. You'll probably want to !!/reboot now."
except Exception as e:
return False, str(e)
@staticmethod
def sync_remote_hard():
try:
git.fetch('--force')
git.checkout('master', '--force')
git.reset('origin/master', '--hard')
git.checkout('deploy', '--force')
git.reset('origin/deploy', '--hard')
git.checkout('master', '--force')
git.checkout('deploy', '--force')
return True, "Synced hard to origin/master and origin/deploy."
except Exception as e:
return False, str(e)
| 42.345538
| 115
| 0.548879
|
6bef30d67ebacd187a126d8d3bb17a4b4e027472
| 3,256
|
py
|
Python
|
fastapi/maskrcnn.py
|
mkang30/ImageSegmentation
|
4180c0a3c4e68f242820887b56c10500670b00a9
|
[
"MIT"
] | 5
|
2020-07-21T06:14:23.000Z
|
2022-03-21T06:29:32.000Z
|
fastapi/maskrcnn.py
|
mkang30/ImageSegmentation
|
4180c0a3c4e68f242820887b56c10500670b00a9
|
[
"MIT"
] | 4
|
2021-06-08T22:01:54.000Z
|
2022-03-12T00:40:35.000Z
|
fastapi/maskrcnn.py
|
mkang30/ImageSegmentation
|
4180c0a3c4e68f242820887b56c10500670b00a9
|
[
"MIT"
] | 1
|
2021-06-04T03:36:30.000Z
|
2021-06-04T03:36:30.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 4 13:31:36 2020
@author: minseongkang
"""
import numpy as np
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog
from PIL import Image
import urllib.request
"""
This class represents the Instance Segmentation model Mask R-CNN
"""
class Segmentor:
def __init__(self):
#create a predictor
self._cfg = get_cfg()
self._predictor = self._makePredictor()
self._class = MetadataCatalog.get(self._cfg.DATASETS.TRAIN[0]).get("thing_classes")
"""
This method initializes the model and configuration
to return the predictor
"""
def _makePredictor(self):
self._cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
self._cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7
self._cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
return DefaultPredictor(self._cfg)
"""
This method takes an opencv image and performs instance segmentation
"""
def predict(self, image):
return self._predictor(image)
"""
This method takes an output of the model and returns the segmentation
map of the image
"""
def segMap(self, image,output):
v = Visualizer(image[:, :, ::-1], MetadataCatalog.get(self._cfg.DATASETS.TRAIN[0]), scale=1.2)
v = v.draw_instance_predictions(output["instances"].to("cpu"))
r = Image.fromarray(v.get_image()[:,:,::-1])
return r
"""
This method takes an output of the model and returns an array of images
of key objects in the input image.
"""
def decompose(self, image, output):
r = dict()
count = dict()
for i in range(len(output["instances"].pred_boxes)):
box = output["instances"].pred_boxes[i].tensor.numpy()[0]
dim = (box[0],box[1],box[2],box[3])
mask = output["instances"].pred_masks[i]
image2 = image[:,:,::-1].copy()
for j in range(len(image)):
for k in range(len(image[j])):
if mask[j][k]==False:
for l in range(len(image2[j][k])):
image2[j][k][l]=255
pic = Image.fromarray(image2)
pic = pic.crop(dim)
pic = self._transparent(pic)
cl = self._class[output["instances"].pred_classes[i]]
if cl in count:
count[cl]+=1
else:
count[cl]=1
r[cl+str(count[cl])]=pic
return r
"""
Input: PIL image
Output: PIL image with transparent background
"""
def _transparent(self,image):
r = image.convert("RGBA")
pixels = r.getdata()
newPixels = []
for i in pixels:
if i[0]==255 and i[1]==255 and i[2]==255:
newPixels.append((255,255,255,0))
else:
newPixels.append(i)
r.putdata(newPixels)
return r
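# Hedged usage sketch, not part of the original module: driving the Segmentor end
# to end on a local image. The file names below are invented for illustration.
if __name__ == "__main__":
    import cv2  # OpenCV is only needed for this demo; the class itself takes arrays
    segmentor = Segmentor()
    bgr_image = cv2.imread("input.jpg")               # OpenCV image (BGR, HxWx3)
    outputs = segmentor.predict(bgr_image)            # raw detectron2 predictions
    segmentor.segMap(bgr_image, outputs).save("segmentation.png")
    for name, crop in segmentor.decompose(bgr_image, outputs).items():
        crop.save("{}.png".format(name))              # e.g. person1.png, dog1.png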
| 31.61165
| 118
| 0.596437
|
05f1eaeda4caf29ffecb9990176a0d1ef0e91b31
| 7,742
|
py
|
Python
|
trax/layers/metrics_test.py
|
GeorgeDittmar/trax
|
a0483a12cb7ebece40b5e302e8e81fd9249c6ef6
|
[
"Apache-2.0"
] | 1
|
2020-11-20T22:29:41.000Z
|
2020-11-20T22:29:41.000Z
|
trax/layers/metrics_test.py
|
anilkeshwani/trax
|
327ca0999bcbc1794c263db6b09c3a202e82a065
|
[
"Apache-2.0"
] | null | null | null |
trax/layers/metrics_test.py
|
anilkeshwani/trax
|
327ca0999bcbc1794c263db6b09c3a202e82a065
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2020 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for metrics layers."""
from absl.testing import absltest
import numpy as np
import trax.layers as tl
class MetricsTest(absltest.TestCase):
def test_accuracy_even_weights(self):
layer = tl.Accuracy()
weights = np.array([1., 1., 1.])
targets = np.array([0, 1, 2])
model_outputs = np.array([[.7, .2, .1, 0.],
[.2, .7, .1, 0.],
[.2, .1, .7, 0.]])
accuracy = layer([model_outputs, targets, weights])
self.assertEqual(accuracy, 1.0)
model_outputs = np.array([[.2, .1, .7, 0.],
[.2, .1, .7, 0.],
[.2, .1, .7, 0.]])
accuracy = layer([model_outputs, targets, weights])
self.assertEqual(accuracy, 1 / 3)
def test_accuracy_uneven_weights(self):
layer = tl.Accuracy()
weights = np.array([1., 5., 2.])
targets = np.array([0, 1, 2])
model_outputs = np.array([[.7, .2, .1, 0.],
[.2, .7, .1, 0.],
[.2, .1, .7, 0.]])
accuracy = layer([model_outputs, targets, weights])
self.assertEqual(accuracy, 1.0)
model_outputs = np.array([[.2, .7, .1, 0.],
[.2, .7, .1, 0.],
[.2, .7, .1, 0.]])
accuracy = layer([model_outputs, targets, weights])
self.assertEqual(accuracy, .625)
model_outputs = np.array([[.7, .2, .1, 0.],
[.7, .2, .1, 0.],
[.7, .2, .1, 0.]])
accuracy = layer([model_outputs, targets, weights])
self.assertEqual(accuracy, .125)
def test_accuracy_binary_classifier(self):
layer = tl.Accuracy(classifier=tl.ThresholdToBinary())
targets = np.array([[0, 0, 1, 1],
[1, 1, 1, 0]])
weights = np.ones_like(targets)
model_outputs = np.array([[.499, .500, .501, .502],
[.503, .502, .501, .500]])
accuracy = layer([model_outputs, targets, weights])
self.assertEqual(accuracy, 1.0)
model_outputs = np.array([[.498, .499, .500, .501],
[.502, .501, .500, .499]])
accuracy = layer([model_outputs, targets, weights])
self.assertEqual(accuracy, .75)
def test_sequence_accuracy_weights_all_ones(self):
layer = tl.SequenceAccuracy()
targets = np.array([[0, 1, 0, 1],
[1, 0, 1, 1]])
weights = np.ones_like(targets)
# Model gets both sequences right; for each position in each sequence, the
# category (integer ID) selected by argmax matches the target category.
model_outputs = np.array([[[.9, .1], [.2, .8], [.7, .3], [.4, .6]],
[[.3, .7], [.8, .2], [.1, .9], [.4, .6]]])
accuracy = layer([model_outputs, targets, weights])
self.assertEqual(accuracy, 1.)
# Model gets the first element of the first sequence barely wrong.
model_outputs = np.array([[[.45, .55], [.2, .8], [.7, .3], [.4, .6]],
[[.3, .7], [.8, .2], [.1, .9], [.4, .6]]])
accuracy = layer([model_outputs, targets, weights])
self.assertEqual(accuracy, .5)
# Model gets the last element of each sequence barely wrong.
model_outputs = np.array([[[.9, .1], [.2, .8], [.7, .3], [.55, .45]],
[[.3, .7], [.8, .2], [.1, .9], [.52, .48]]])
accuracy = layer([model_outputs, targets, weights])
self.assertEqual(accuracy, 0.)
def test_sequence_accuracy_last_position_zero_weight(self):
layer = tl.SequenceAccuracy()
targets = np.array([[0, 1, 0, 0],
[1, 0, 1, 0]])
weights = np.array([[1., 1., 1., 0.],
[1., 1., 1., 0.]])
# Model gets both sequences right; output in final position would give
# wrong category but is ignored.
model_outputs = np.array([[[.9, .1], [.2, .8], [.7, .3], [.35, .65]],
[[.3, .7], [.8, .2], [.1, .9], [.35, .65]]])
accuracy = layer([model_outputs, targets, weights])
self.assertEqual(accuracy, 1.)
# Model gets the first element of the first sequence barely wrong.
model_outputs = np.array([[[.45, .55], [.2, .8], [.7, .3], [.6, .4]],
[[.3, .7], [.8, .2], [.1, .9], [.6, .4]]])
accuracy = layer([model_outputs, targets, weights])
self.assertEqual(accuracy, .5)
# Model gets second-to-last element of each sequence barely wrong.
model_outputs = np.array([[[.9, .1], [.2, .8], [.48, .52], [.6, .4]],
[[.3, .7], [.8, .2], [.51, .49], [.6, .4]]])
accuracy = layer([model_outputs, targets, weights])
self.assertEqual(accuracy, 0.)
def test_binary_cross_entropy_loss(self):
# TODO(jonni): Clarify desired semantics/naming, then test it.
layer = tl.BinaryCrossEntropyLoss()
xs = [np.ones((9, 1)),
np.ones((9, 1)),
np.ones((9, 1))]
y = layer(xs)
self.assertEqual(y.shape, ())
def test_cross_entropy_loss(self):
# TODO(jonni): Clarify desired semantics/naming, then test it.
layer = tl.CrossEntropyLoss()
xs = [np.ones((9, 4, 4, 20)),
np.ones((9, 4, 4)),
np.ones((9, 4, 4))]
y = layer(xs)
self.assertEqual(y.shape, ())
def test_l2_loss(self):
layer = tl.L2Loss()
model_outputs = np.array([[1., 1.], [1., 1.]])
targets = np.array([[1., 1.], [1., 0.]])
weights = np.array([[1., 1.], [1., 0.]])
loss = layer([model_outputs, targets, weights])
np.testing.assert_allclose(loss, 0.0)
weights = np.array([[1., 0.], [0., 1.]])
loss = layer([model_outputs, targets, weights])
np.testing.assert_allclose(loss, 0.5)
def test_smooth_l1_loss(self):
layer = tl.SmoothL1Loss()
model_outputs = np.array([[1., 1.], [1., 2.]])
targets = np.array([[1., 1.], [1., 0.]])
l1_dist = 2
weights = np.array([[1., 1.], [1., 0.]])
loss = layer([model_outputs, targets, weights])
np.testing.assert_allclose(loss, 0.0)
weights = np.array([[1., 0.], [0., 1.]])
sum_weights = 2
loss = layer([model_outputs, targets, weights])
np.testing.assert_allclose(loss, (l1_dist-0.5) / sum_weights)
model_outputs = np.array([[1., 1.], [1., 1.5]])
targets = np.array([[1., 1.], [1., 1.]])
l1_dist = 0.5
loss = layer([model_outputs, targets, weights])
np.testing.assert_allclose(loss, 0.5 * l1_dist**2 / sum_weights)
def test_names(self):
layer = tl.L2Loss()
self.assertEqual('L2Loss_in3', str(layer))
layer = tl.Accuracy()
self.assertEqual('Accuracy_in3', str(layer))
layer = tl.SequenceAccuracy()
self.assertEqual('SequenceAccuracy_in3', str(layer))
layer = tl.BinaryCrossEntropyLoss()
self.assertEqual('BinaryCrossEntropyLoss_in3', str(layer))
layer = tl.CrossEntropyLoss()
self.assertEqual('CrossEntropyLoss_in3', str(layer))
layer = tl.BinaryCrossEntropySum()
self.assertEqual('BinaryCrossEntropySum_in3', str(layer))
layer = tl.CrossEntropySum()
self.assertEqual('CrossEntropySum_in3', str(layer))
if __name__ == '__main__':
absltest.main()
| 37.400966
| 78
| 0.560191
|
2178251f19e06c7f3edbe8190e006900c282c6a7
| 416
|
py
|
Python
|
example/cluster_sample.py
|
sabi2345/ooda_flow_diagram
|
f6ccc7ea5987d30910479240379bd8976d3202fc
|
[
"MIT"
] | null | null | null |
example/cluster_sample.py
|
sabi2345/ooda_flow_diagram
|
f6ccc7ea5987d30910479240379bd8976d3202fc
|
[
"MIT"
] | null | null | null |
example/cluster_sample.py
|
sabi2345/ooda_flow_diagram
|
f6ccc7ea5987d30910479240379bd8976d3202fc
|
[
"MIT"
] | null | null | null |
from graphviz import Digraph
g = Digraph('G', filename='cluster_edge.gv')
g.attr(compound='true')
with g.subgraph(name='cluster0') as c:
c.edges(['ab', 'ac', 'bd', 'cd'])
with g.subgraph(name='cluster1') as c:
c.edges(['eg', 'ef'])
g.edge('b', 'f', lhead='cluster1')
g.edge('d', 'e')
g.edge('c', 'g', ltail='cluster0', lhead='cluster1')
g.edge('c', 'e', ltail='cluster0')
g.edge('d', 'h')
print(g.source)
| 24.470588
| 52
| 0.605769
|
72af09744564f0fcc83d1f43a8bd1a04d8a8fa38
| 7,603
|
py
|
Python
|
demo/quant/quant_embedding/train.py
|
jiansowa/PaddleSlim
|
a45431c99a775782b7fe5633f313d36ff582e797
|
[
"Apache-2.0"
] | null | null | null |
demo/quant/quant_embedding/train.py
|
jiansowa/PaddleSlim
|
a45431c99a775782b7fe5633f313d36ff582e797
|
[
"Apache-2.0"
] | 1
|
2020-07-14T09:50:51.000Z
|
2020-07-14T09:50:51.000Z
|
demo/quant/quant_embedding/train.py
|
jiansowa/PaddleSlim
|
a45431c99a775782b7fe5633f313d36ff582e797
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
import argparse
import logging
import os
import time
import math
import random
import numpy as np
import paddle
import paddle.fluid as fluid
import six
import reader
from net import skip_gram_word2vec
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger("fluid")
logger.setLevel(logging.INFO)
def parse_args():
parser = argparse.ArgumentParser(
description="PaddlePaddle Word2vec example")
parser.add_argument(
'--train_data_dir',
type=str,
default='./data/text',
help="The path of taining dataset")
parser.add_argument(
'--base_lr',
type=float,
default=0.01,
help="The number of learing rate (default: 0.01)")
parser.add_argument(
'--save_step',
type=int,
default=500000,
help="The number of step to save (default: 500000)")
parser.add_argument(
'--print_batch',
type=int,
default=10,
help="The number of print_batch (default: 10)")
parser.add_argument(
'--dict_path',
type=str,
default='./data/1-billion_dict',
help="The path of data dict")
parser.add_argument(
'--batch_size',
type=int,
default=500,
help="The size of mini-batch (default:500)")
parser.add_argument(
'--num_passes',
type=int,
default=10,
help="The number of passes to train (default: 10)")
parser.add_argument(
'--model_output_dir',
type=str,
default='models',
help='The path for model to store (default: models)')
parser.add_argument('--nce_num', type=int, default=5, help='nce_num')
parser.add_argument(
'--embedding_size',
type=int,
default=64,
help='sparse feature hashing space for index processing')
parser.add_argument(
'--is_sparse',
action='store_true',
required=False,
default=False,
        help='whether embedding and nce use sparse updates (default: False)')
parser.add_argument(
'--with_speed',
action='store_true',
required=False,
default=False,
        help='print speed or not (default: False)')
return parser.parse_args()
def convert_python_to_tensor(weight, batch_size, sample_reader):
def __reader__():
cs = np.array(weight).cumsum()
result = [[], []]
for sample in sample_reader():
for i, fea in enumerate(sample):
result[i].append(fea)
if len(result[0]) == batch_size:
tensor_result = []
for tensor in result:
t = fluid.Tensor()
dat = np.array(tensor, dtype='int64')
if len(dat.shape) > 2:
dat = dat.reshape((dat.shape[0], dat.shape[2]))
elif len(dat.shape) == 1:
dat = dat.reshape((-1, 1))
t.set(dat, fluid.CPUPlace())
tensor_result.append(t)
tt = fluid.Tensor()
neg_array = cs.searchsorted(np.random.sample(args.nce_num))
neg_array = np.tile(neg_array, batch_size)
tt.set(
neg_array.reshape((batch_size, args.nce_num)),
fluid.CPUPlace())
tensor_result.append(tt)
yield tensor_result
result = [[], []]
return __reader__
def train_loop(args, train_program, reader, py_reader, loss, trainer_id,
weight):
py_reader.decorate_tensor_provider(
convert_python_to_tensor(weight, args.batch_size, reader.train()))
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
exec_strategy = fluid.ExecutionStrategy()
exec_strategy.use_experimental_executor = True
print("CPU_NUM:" + str(os.getenv("CPU_NUM")))
exec_strategy.num_threads = int(os.getenv("CPU_NUM"))
build_strategy = fluid.BuildStrategy()
if int(os.getenv("CPU_NUM")) > 1:
build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce
train_exe = fluid.ParallelExecutor(
use_cuda=False,
loss_name=loss.name,
main_program=train_program,
build_strategy=build_strategy,
exec_strategy=exec_strategy)
for pass_id in range(args.num_passes):
py_reader.start()
time.sleep(10)
epoch_start = time.time()
batch_id = 0
start = time.time()
try:
while True:
loss_val = train_exe.run(fetch_list=[loss.name])
loss_val = np.mean(loss_val)
if batch_id % args.print_batch == 0:
logger.info(
"TRAIN --> pass: {} batch: {} loss: {} reader queue:{}".
format(pass_id, batch_id,
loss_val.mean(), py_reader.queue.size()))
if args.with_speed:
if batch_id % 500 == 0 and batch_id != 0:
elapsed = (time.time() - start)
start = time.time()
samples = 1001 * args.batch_size * int(
os.getenv("CPU_NUM"))
logger.info("Time used: {}, Samples/Sec: {}".format(
elapsed, samples / elapsed))
if batch_id % args.save_step == 0 and batch_id != 0:
model_dir = args.model_output_dir + '/pass-' + str(
pass_id) + ('/batch-' + str(batch_id))
if trainer_id == 0:
fluid.io.save_params(executor=exe, dirname=model_dir)
print("model saved in %s" % model_dir)
batch_id += 1
except fluid.core.EOFException:
py_reader.reset()
epoch_end = time.time()
logger.info("Epoch: {0}, Train total expend: {1} ".format(
pass_id, epoch_end - epoch_start))
model_dir = args.model_output_dir + '/pass-' + str(pass_id)
if trainer_id == 0:
fluid.io.save_params(executor=exe, dirname=model_dir)
print("model saved in %s" % model_dir)
def GetFileList(data_path):
return os.listdir(data_path)
def train(args):
if not os.path.isdir(args.model_output_dir):
os.mkdir(args.model_output_dir)
filelist = GetFileList(args.train_data_dir)
word2vec_reader = reader.Word2VecReader(
args.dict_path, args.train_data_dir, filelist, 0, 1)
logger.info("dict_size: {}".format(word2vec_reader.dict_size))
np_power = np.power(np.array(word2vec_reader.id_frequencys), 0.75)
id_frequencys_pow = np_power / np_power.sum()
loss, py_reader = skip_gram_word2vec(
word2vec_reader.dict_size,
args.embedding_size,
is_sparse=args.is_sparse,
neg_num=args.nce_num)
optimizer = fluid.optimizer.SGD(
learning_rate=fluid.layers.exponential_decay(
learning_rate=args.base_lr,
decay_steps=100000,
decay_rate=0.999,
staircase=True))
optimizer.minimize(loss)
# do local training
logger.info("run local training")
main_program = fluid.default_main_program()
train_loop(args, main_program, word2vec_reader, py_reader, loss, 0,
id_frequencys_pow)
if __name__ == '__main__':
args = parse_args()
train(args)
| 33.200873
| 82
| 0.574642
|
f16297d2b104e2e503e7ee5211f59295382f58f1
| 417
|
py
|
Python
|
venv/Scripts/pip3-script.py
|
shehzadulislam/Assignment4
|
a9cced70be6ae5d2685027d68032d5849f638301
|
[
"Apache-2.0"
] | null | null | null |
venv/Scripts/pip3-script.py
|
shehzadulislam/Assignment4
|
a9cced70be6ae5d2685027d68032d5849f638301
|
[
"Apache-2.0"
] | null | null | null |
venv/Scripts/pip3-script.py
|
shehzadulislam/Assignment4
|
a9cced70be6ae5d2685027d68032d5849f638301
|
[
"Apache-2.0"
] | null | null | null |
#!C:\Users\shehz\PycharmProjects\Assignment4\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
| 32.076923
| 69
| 0.673861
|
b9108ffa2e39756350c8070d0af9f4af7e62f33c
| 11,087
|
py
|
Python
|
teslamate_car_data.py
|
krezac/tesla-data-source
|
e78f7ecf37c368efff85b721aa1e4dc2940d1985
|
[
"MIT"
] | null | null | null |
teslamate_car_data.py
|
krezac/tesla-data-source
|
e78f7ecf37c368efff85b721aa1e4dc2940d1985
|
[
"MIT"
] | 1
|
2021-01-01T13:57:51.000Z
|
2021-01-01T13:57:51.000Z
|
teslamate_car_data.py
|
krezac/tesla-data-source
|
e78f7ecf37c368efff85b721aa1e4dc2940d1985
|
[
"MIT"
] | 1
|
2021-01-01T16:12:57.000Z
|
2021-01-01T16:12:57.000Z
|
from typing import Callable, List, Dict, Optional
import pendulum
import logging
from teslamate_data_source import TeslamateDataSource
from ds_types import Configuration, LapStatus, CarStatus, DriverChange, LapsList, JsonLapsResponse, JsonStatusResponse, ForecastResult
from labels import generate_labels
import lap_analyzer
from lap_forecast import do_forecast
from datetime import datetime, timezone
from gpxplotter.gpxread import vincenty
logger = logging.getLogger('app.car_data')
_get_configuration_func: Callable[[], Configuration] = None
_data_source = TeslamateDataSource()
_car_status: CarStatus = None
_car_status_formatted: JsonStatusResponse = None
_initial_status: CarStatus = None
_car_laps_list: List[LapStatus] = None
_car_laps_formatted: JsonLapsResponse = None
_car_charging_processes = None
_forecast_result: ForecastResult = None
def _add_calculated_fields(status: CarStatus, initial_status: CarStatus, configuration: Configuration):
start_time = pendulum.from_timestamp(configuration.startTime.timestamp(), tz='utc')
now = pendulum.now(tz='utc')
end_time = start_time.add(hours=configuration.hours)
status.start_time = start_time
status.end_time = end_time
status.start_odometer = initial_status.odometer
status.distance = status.odometer - initial_status.odometer if status.odometer and initial_status.odometer else 0
status.time_since_start = pendulum.period(start_time, now, True) if now >= start_time else pendulum.period(now, now, True)
status.time_to_end = pendulum.period(now, end_time, True) if now <= end_time else pendulum.period(now, now, True)
start = (configuration.startLatitude, configuration.startLongitude)
loc = (status.latitude, status.longitude)
status.direct_start_distance = vincenty(loc, start) / 1000.0 # from m to km
if _car_laps_list:
current_lap = _car_laps_list[-1]
status.lap = current_lap.id
status.lap_distance = current_lap.distance
status.lap_time = current_lap.duration
def _update_car_status():
global _initial_status
global _car_status
global _car_status_formatted
global _data_source
global _get_configuration_func
global _forecast_result
if not _initial_status:
logger.debug("updating initial car status")
_initial_status = _data_source.get_car_status(_get_configuration_func(), _get_configuration_func().startTime)
logger.debug("updating initial car status done")
logger.debug("updating car status")
_car_status = _data_source.get_car_status(_get_configuration_func(), pendulum.parse('2021-01-03 02:26:39', tz='utc'))
if _car_status and _initial_status:
logger.debug("updating calculated fields")
_add_calculated_fields(_car_status, _initial_status, _get_configuration_func())
_update_forecast_result()
# build the formatted form
_car_status_formatted = JsonStatusResponse(
lat=_car_status.latitude,
lon=_car_status.longitude,
mapLabels=generate_labels(_get_configuration_func().mapLabels, _car_status.dict()),
textLabels=generate_labels(_get_configuration_func().textLabels, _car_status.dict()),
forecastLabels=generate_labels(_get_configuration_func().forecastLabels, _forecast_result.dict() if _forecast_result else {})
)
logger.debug("updating car status done")
def _update_car_laps():
global _car_laps_list
global _car_laps_formatted
global _get_configuration_func
global _car_charging_processes
logger.debug("updating car laps")
configuration = _get_configuration_func()
positions = _data_source.get_car_positions(configuration)
_car_laps_list = lap_analyzer.find_laps(configuration, positions, configuration.startRadius, 0, 0)
_car_charging_processes = _data_source.get_charging_processes(configuration)
# load driver names
dates = [l.startTime for l in _car_laps_list]
if dates:
driver_map = _data_source.get_driver_changes(configuration, dates)
for l in _car_laps_list:
if l.startTime in driver_map:
l.driver_name = driver_map[l.startTime].name
# fill charging data
first = True
for l in _car_laps_list:
if first: # ignore any charging for first lap
first = False
continue
for charging in _car_charging_processes:
if not charging.end_date:
continue # incomplete records - they are visible in the db from time to time
charging.start_date = charging.start_date.replace(tzinfo=timezone.utc)
charging.end_date = charging.end_date.replace(tzinfo=timezone.utc)
if l.startTimePit <= charging.start_date and (not l.endTimePit or l.endTimePit > charging.start_date ):
# set the value
l.chargeStartTime = pendulum.from_timestamp(charging.start_date.timestamp())
l.chargeEndTime = pendulum.from_timestamp(charging.end_date.timestamp())
l.chargeEnergyAdded = charging.charge_energy_added
l.chargeStartSoc = charging.start_battery_level
l.chargeEndSoc = charging.end_battery_level
l.chargeStartRangeRated = charging.start_rated_range_km
l.chargeEndRangeRated = charging.end_rated_range_km
l.chargeRangeRatedAdded = charging.end_rated_range_km - charging.start_rated_range_km # the validator doesn't fill it for some reason
l.chargeSocAdded = charging.end_battery_level - charging.start_battery_level # the validator doesn't fill it for some reason
l.chargeDuration = pendulum.Period(charging.start_date, charging.end_date) if charging.start_date and charging.end_date else None
l.chargeMaxPower = charging.max_power
l.chargeEnergyPerHour = charging.charge_energy_added * 3600.0 / l.chargeDuration.in_seconds()
break # load only one charging
total_lap = _calculate_lap_total(_car_laps_list) if _car_laps_list else None
recent_lap = _car_laps_list[-1] if _car_laps_list else None
prev_lap_list = _car_laps_list[-configuration.previousLaps - 1:-1] if len(_car_laps_list) > 0 else []
prev_lap_list.reverse() # to have newest on top
total_formatted = generate_labels(_get_configuration_func().lapLabelsTotal, total_lap.dict() if total_lap else {})
previous_formatted = [generate_labels(_get_configuration_func().lapLabelsPrevious, lap.dict()) for lap in prev_lap_list]
recent_formatted = generate_labels(_get_configuration_func().lapLabelsRecent, recent_lap.dict() if recent_lap else {})
_car_laps_formatted = JsonLapsResponse(
total=total_formatted,
previous=previous_formatted,
recent=recent_formatted
)
logger.debug("updating car laps done")
def _calculate_lap_total(laps: List[LapStatus]) -> LapStatus:
start_lap = laps[0]
end_lap = laps[-1]
total_status = LapStatus(
id=f"{start_lap.id} - {end_lap.id}",
startTimePit=start_lap.startTimePit,
endTimePit=start_lap.endTimePit,
startTime=start_lap.startTime,
endTime=end_lap.endTime,
startOdo=start_lap.startOdo,
endOdo=end_lap.endOdo,
insideTemp=end_lap.insideTemp,
outsideTemp=end_lap.outsideTemp,
startSOC=start_lap.startSOC,
endSOC=end_lap.endSOC,
startRangeIdeal=start_lap.startRangeIdeal,
endRangeIdeal=end_lap.endRangeIdeal,
startRangeEst=start_lap.startRangeEst,
endRangeEst=end_lap.endRangeEst,
startRangeRated=start_lap.startRangeRated,
endRangeRated=end_lap.endRangeRated,
consumptionRated=start_lap.consumptionRated,
finished=False
)
# TODO calculate energy as sum
energy = 0
chargeSocAdded = 0
chargeEnergyAdded = 0
chargeRangeAdded = 0
now = pendulum.now(tz='utc')
duration = pendulum.Period(now, now)
pit_duration = pendulum.Period(now, now)
chargeDuration = pendulum.Period(now, now)
for lap in laps:
energy += lap.energy if lap.energy else 0
duration += lap.duration if lap.duration else pendulum.Period(now, now)
pit_duration += lap.pitDuration if pit_duration else pendulum.Period(now, now)
chargeSocAdded += lap.chargeSocAdded if lap.chargeSocAdded else 0
chargeEnergyAdded += lap.chargeEnergyAdded if lap.chargeEnergyAdded else 0
chargeRangeAdded += lap.chargeRangeRatedAdded if lap.chargeRangeRatedAdded else 0
chargeDuration += lap.chargeDuration if lap.chargeDuration else pendulum.Period(now, now)
total_status.energy = energy
total_status.duration = duration
total_status.pitDuration = pit_duration
total_status.chargeSocAdded = chargeSocAdded
total_status.chargeEnergyAdded = chargeEnergyAdded
    total_status.chargeRangeRatedAdded = chargeRangeAdded
total_status.chargeDuration = chargeDuration
return total_status
def _update_forecast_result():
global _car_laps_list
global _car_status
global _get_configuration_func
global _forecast_result
logger.info("updating forecast")
    if not (_car_status and _car_laps_list):
logger.info("no data, no forecast")
return
print(_get_configuration_func())
print(_car_laps_list)
print(_car_status)
_forecast_result = do_forecast(_get_configuration_func(), _car_laps_list, _car_status, pendulum.now(tz='utc'))
logger.info("updating done")
def get_car_status() -> CarStatus:
global _car_status
if not _car_status:
_update_car_status()
return _car_status
def get_car_status_formatted() -> CarStatus:
global _car_status_formatted
if not _car_status_formatted:
_update_car_status()
return _car_status_formatted
def get_car_laps_list() -> LapsList:
global _car_laps_list
if not _car_laps_list:
_update_car_laps()
return _car_laps_list
def get_car_laps_formatted() -> JsonLapsResponse:
global _car_laps_formatted
if not _car_laps_formatted:
_update_car_laps()
return _car_laps_formatted
def get_forecast_result() -> Optional[ForecastResult]:
global _forecast_result
if not _forecast_result:
_update_forecast_result()
return _forecast_result
def apply_driver_change(driver_change: DriverChange):
global _get_configuration_func
return _data_source.apply_driver_change(_get_configuration_func(), driver_change)
def get_driver_changes(dates: List[datetime]) -> Dict[datetime, DriverChange]:
global _get_configuration_func
return _data_source.get_driver_changes(_get_configuration_func(), dates)
def register_jobs(scheduler, get_configuration_func: Callable[[], Configuration]):
global _get_configuration_func
_get_configuration_func = get_configuration_func
scheduler.add_job(_update_car_status, 'interval', seconds=get_configuration_func().statusRefreshSeconds)
scheduler.add_job(_update_car_laps, 'interval', seconds=get_configuration_func().lapsRefreshSeconds)
| 40.761029
| 150
| 0.741319
|
d306847891fcffe133058b50a0f1b852209e0f44
| 182
|
py
|
Python
|
empoweru/wagtail_hooks.py
|
harsh1930/fitgirl-inc
|
a10ff5c49ba9ae9c0859ee721f8763fe1da592c2
|
[
"MIT"
] | 6
|
2018-09-11T15:30:10.000Z
|
2020-01-14T17:29:07.000Z
|
empoweru/wagtail_hooks.py
|
harsh1930/fitgirl-inc
|
a10ff5c49ba9ae9c0859ee721f8763fe1da592c2
|
[
"MIT"
] | 722
|
2018-08-29T17:27:38.000Z
|
2022-03-11T23:28:33.000Z
|
empoweru/wagtail_hooks.py
|
harsh1930/fitgirl-inc
|
a10ff5c49ba9ae9c0859ee721f8763fe1da592c2
|
[
"MIT"
] | 13
|
2018-08-29T07:42:01.000Z
|
2019-04-21T22:34:30.000Z
|
from wagtail.core import hooks
@hooks.register('construct_main_menu')
def hide_explorer_menu_item_from_frank(request, menu_items):
menu_items[:] = [item for item in menu_items ]
| 36.4
| 60
| 0.796703
|
5d64735092f050957f7570058609a67eb2b2236d
| 2,798
|
py
|
Python
|
examples/basic_operations/remove_ad.py
|
Insutanto/google-ads-python
|
f63e318ca39f2ecc6546fba69994456815727578
|
[
"Apache-2.0"
] | null | null | null |
examples/basic_operations/remove_ad.py
|
Insutanto/google-ads-python
|
f63e318ca39f2ecc6546fba69994456815727578
|
[
"Apache-2.0"
] | null | null | null |
examples/basic_operations/remove_ad.py
|
Insutanto/google-ads-python
|
f63e318ca39f2ecc6546fba69994456815727578
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example removes an existing ad."""
import argparse
import sys
from google.ads.google_ads.client import GoogleAdsClient
from google.ads.google_ads.util import ResourceName
import google.ads.google_ads.errors  # needed for the GoogleAdsException handler below
def main(client, customer_id, ad_group_id, ad_id):
ad_group_ad_service = client.get_service('AdGroupAdService', version='v4')
ad_group_ad_operation = client.get_type('AdGroupAdOperation', version='v4')
resource_name = ad_group_ad_service.ad_group_ad_path(
customer_id, ResourceName.format_composite(ad_group_id, ad_id))
ad_group_ad_operation.remove = resource_name
try:
ad_group_ad_response = ad_group_ad_service.mutate_ad_group_ads(
customer_id, [ad_group_ad_operation])
except google.ads.google_ads.errors.GoogleAdsException as ex:
print('Request with ID "%s" failed with status "%s" and includes the '
'following errors:' % (ex.request_id, ex.error.code().name))
for error in ex.failure.errors:
print('\tError with message "%s".' % error.message)
if error.location:
for field_path_element in error.location.field_path_elements:
print('\t\tOn field: %s' % field_path_element.field_name)
sys.exit(1)
print('Removed ad group ad %s.'
% ad_group_ad_response.results[0].resource_name)
if __name__ == '__main__':
# GoogleAdsClient will read the google-ads.yaml configuration file in the
# home directory if none is specified.
google_ads_client = GoogleAdsClient.load_from_storage()
parser = argparse.ArgumentParser(
description=('Removes an ad from the specified customer\'s ad group.'))
# The following argument(s) should be provided to run the example.
parser.add_argument('-c', '--customer_id', type=str,
required=True, help='The Google Ads customer ID.')
parser.add_argument('-a', '--ad_group_id', type=str,
required=True, help='The ad group ID.')
parser.add_argument('-i', '--ad_id', type=str, required=True,
help='The ad ID.')
args = parser.parse_args()
main(google_ads_client, args.customer_id, args.ad_group_id, args.ad_id)
| 43.046154
| 79
| 0.699071
|
65060f468bc44d25c1a5b2f958ec602d7da2f9d4
| 28,433
|
py
|
Python
|
paramak/reactor.py
|
PlasmaFAIR/paramak
|
8d40e8ddf398c55333eb2daa4a679ec634b5a00a
|
[
"MIT"
] | null | null | null |
paramak/reactor.py
|
PlasmaFAIR/paramak
|
8d40e8ddf398c55333eb2daa4a679ec634b5a00a
|
[
"MIT"
] | null | null | null |
paramak/reactor.py
|
PlasmaFAIR/paramak
|
8d40e8ddf398c55333eb2daa4a679ec634b5a00a
|
[
"MIT"
] | null | null | null |
import collections
import json
from collections import Counter
from collections.abc import Iterable
from pathlib import Path
from typing import List, Optional, Tuple, Union
import cadquery as cq
import matplotlib.pyplot as plt
from cadquery import Compound, exporters
import paramak
from paramak.utils import _replace, get_hash
class Reactor:
"""The Reactor object allows shapes and components to be added and then
collective operations to be performed on them. Combining all the shapes is
required for creating images of the whole reactor and creating a Graveyard
(bounding box) that is useful for neutronics simulations.
Args:
shapes_and_components: list of paramak.Shape objects
        graveyard_size: The dimension of the cube-shaped graveyard region used
by DAGMC. This attribute is used preferentially over
graveyard_offset.
graveyard_offset: The distance between the graveyard and the largest
            shape. If graveyard_size is set then this is ignored.
largest_shapes: Identifying the shape(s) with the largest size in each
dimension (x,y,z) can speed up the production of the graveyard.
Defaults to None which finds the largest shapes by looping through
all the shapes and creating bounding boxes. This can be slow and
that is why the user is able to provide a subsection of shapes to
use when calculating the graveyard dimensions.
"""
def __init__(
self,
shapes_and_components: List[paramak.Shape] = [],
graveyard_size: float = 20_000.0,
graveyard_offset: Optional[float] = None,
largest_shapes: Optional[List[paramak.Shape]] = None,
):
self.shapes_and_components = shapes_and_components
self.graveyard_offset = graveyard_offset
self.graveyard_size = graveyard_size
self.largest_shapes = largest_shapes
self.input_variable_names: List[str] = [
# 'shapes_and_components', commented out to avoid calculating solids
"graveyard_size",
"graveyard_offset",
"largest_shapes",
]
self.stp_filenames: List[str] = []
self.stl_filenames: List[str] = []
self.graveyard = None
self.solid = None
self.reactor_hash_value = None
@property
def input_variables(self):
all_input_variables = {}
for name in self.input_variable_names:
all_input_variables[name] = getattr(self, name)
return all_input_variables
@property
def graveyard_size(self):
return self._graveyard_size
@graveyard_size.setter
def graveyard_size(self, value):
if value is None:
self._graveyard_size = None
elif not isinstance(value, (float, int)):
raise TypeError("graveyard_size must be a number")
elif value < 0:
raise ValueError("graveyard_size must be positive")
self._graveyard_size = value
@property
def graveyard_offset(self):
return self._graveyard_offset
@graveyard_offset.setter
def graveyard_offset(self, value):
if value is None:
self._graveyard_offset = None
elif not isinstance(value, (float, int)):
raise TypeError("graveyard_offset must be a number")
elif value < 0:
raise ValueError("graveyard_offset must be positive")
self._graveyard_offset = value
@property
def largest_dimension(self):
"""Calculates a bounding box for the Reactor and returns the largest
absolute value of the largest dimension of the bounding box"""
largest_dimension = 0
if self.largest_shapes is None:
shapes_to_bound = self.shapes_and_components
else:
shapes_to_bound = self.largest_shapes
for component in shapes_to_bound:
largest_dimension = max(largest_dimension, component.largest_dimension)
# self._largest_dimension = largest_dimension
return largest_dimension
@largest_dimension.setter
def largest_dimension(self, value):
self._largest_dimension = value
@property
def largest_shapes(self):
return self._largest_shapes
@largest_shapes.setter
def largest_shapes(self, value):
if not isinstance(value, (list, tuple, type(None))):
raise ValueError(
"paramak.Reactor.largest_shapes should be a " "list of paramak.Shapes"
)
self._largest_shapes = value
@property
def shapes_and_components(self):
"""Adds a list of parametric shape(s) and or parametric component(s)
to the Reactor object. This allows collective operations to be
performed on all the shapes in the reactor."""
if hasattr(self, "create_solids"):
ignored_keys = ["reactor_hash_value"]
if get_hash(self, ignored_keys) != self.reactor_hash_value:
self.create_solids()
self.reactor_hash_value = get_hash(self, ignored_keys)
return self._shapes_and_components
@shapes_and_components.setter
def shapes_and_components(self, value):
if not isinstance(value, (Iterable, str)):
raise ValueError("shapes_and_components must be a list")
self._shapes_and_components = value
@property
def solid(self):
"""This combines all the parametric shapes and components in the
reactor object.
"""
list_of_cq_vals = []
for shape_or_compound in self.shapes_and_components:
if isinstance(
shape_or_compound.solid,
(cq.occ_impl.shapes.Shape, cq.occ_impl.shapes.Compound),
):
for solid in shape_or_compound.solid.Solids():
list_of_cq_vals.append(solid)
else:
list_of_cq_vals.append(shape_or_compound.solid.val())
compound = cq.Compound.makeCompound(list_of_cq_vals)
return compound
@solid.setter
def solid(self, value):
self._solid = value
@property
def name(self):
"""Returns a list of names of the individual Shapes that make up the
reactor"""
all_names = []
for shape in self.shapes_and_components:
all_names.append(shape.name)
return all_names
def show(self, default_edgecolor: Tuple[float, float, float] = (0, 0, 0)):
"""Shows / renders the CadQuery the 3d object in Jupyter Lab. Imports
show from jupyter_cadquery.cadquery and returns show(Reactor.solid)
Args:
default_edgecolor: the color to use for the edges, passed to
jupyter_cadquery.cadquery show. Tuple of three values expected
individual values in the tuple should be floats between 0. and
1.
Returns:
jupyter_cadquery.cadquery.show object
"""
try:
from jupyter_cadquery.cadquery import Part, PartGroup, show
except ImportError:
msg = (
"To use Reactor.show() you must install jupyter_cadquery. To"
'install jupyter_cadquery type "pip install jupyter_cadquery"'
" in the terminal"
)
raise ImportError(msg)
parts = []
for shape_or_compound in self.shapes_and_components:
if shape_or_compound.name is None:
name = "Shape.name not set"
else:
name = shape_or_compound.name
scaled_color = [int(i * 255) for i in shape_or_compound.color[0:3]]
scaled_edge_color = [int(i * 255) for i in default_edgecolor[0:3]]
if isinstance(
shape_or_compound.solid,
(cq.occ_impl.shapes.Shape, cq.occ_impl.shapes.Compound),
):
for i, solid in enumerate(shape_or_compound.solid.Solids()):
parts.append(Part(solid, name=f"{name}{i}", color=scaled_color))
else:
parts.append(
Part(
shape_or_compound.solid.val(),
name=f"{name}",
color=scaled_color,
)
)
return show(PartGroup(parts), default_edgecolor=scaled_edge_color)
def export_stp(
self,
filename: Union[List[str], str] = None,
mode: Optional[str] = "solid",
units: Optional[str] = "mm",
) -> Union[List[str], str]:
"""Exports the 3D reactor model as a stp file or files.
Args:
filename: Accepts a single filename as a string which exports the
                full reactor model to a single file. Alternatively filename can
                also accept a list of strings where each string is the filename
                of the individual shapes that make it up. This will result
in separate files for each shape in the reactor. Defaults to
None which uses the Reactor.name with '.stp' appended to the end
of each entry.
mode: the object to export can be either 'solid' which exports 3D
solid shapes or the 'wire' which exports the wire edges of the
shape.
units: the units of the stp file, options are 'cm' or 'mm'.
Default is mm.
Returns:
The stp filename(s) created
"""
if isinstance(filename, str):
# exports a single file for the whole model
assembly = cq.Assembly(name="reactor")
for entry in self.shapes_and_components:
if entry.color is None:
assembly.add(entry.solid)
else:
assembly.add(entry.solid, color=cq.Color(*entry.color))
assembly.save(filename, exportType="STEP")
if units == "cm":
_replace(
filename, "SI_UNIT(.MILLI.,.METRE.)", "SI_UNIT(.CENTI.,.METRE.)"
)
return [filename]
if filename is None:
if None in self.name:
msg = (
"Shape.name is None and therefore it can't be used "
"to name a stp file. Try setting Shape.name for all "
"shapes in the reactor"
)
raise ValueError(msg)
filename = [f"{name}.stp" for name in self.name]
# exports the reactor solid as a separate stp files
if len(filename) != len(self.shapes_and_components):
msg = (
f"The Reactor contains {len(self.shapes_and_components)} "
f"Shapes and {len(filename)} filenames have be provided. "
f"The names of the shapes are {self.name}"
)
raise ValueError(msg)
for stp_filename, entry in zip(filename, self.shapes_and_components):
entry.export_stp(
filename=stp_filename,
mode=mode,
units=units,
verbose=False,
)
if units == "cm":
_replace(
stp_filename, "SI_UNIT(.MILLI.,.METRE.)", "SI_UNIT(.CENTI.,.METRE.)"
)
return filename
def export_brep(self, filename: str, merge: bool = True):
"""Exports a brep file for the Reactor.solid.
Args:
            filename: the filename of the exported brep file.
            merge: if the surfaces should be merged (True) or not (False).
Returns:
filename of the brep created
"""
path_filename = Path(filename)
if path_filename.suffix != ".brep":
msg = "When exporting a brep file the filename must end with .brep"
raise ValueError(msg)
path_filename.parents[0].mkdir(parents=True, exist_ok=True)
if not merge:
self.solid.exportBrep(str(path_filename))
else:
import OCP
bldr = OCP.BOPAlgo.BOPAlgo_Splitter()
for shape in self.shapes_and_components:
bldr.AddArgument(shape.solid.val().wrapped)
bldr.SetNonDestructive(True)
bldr.Perform()
bldr.Images()
merged = cq.Compound(bldr.Shape())
merged.exportBrep(str(path_filename))
return str(path_filename)
def export_stl(
self,
filename: Union[List[str], str] = None,
tolerance: float = 0.001,
angular_tolerance: float = 0.1,
) -> Union[str, List[str]]:
"""Writes stl files (CAD geometry) for each Shape object in the reactor
Args:
filename: Accepts a single filename as a string which exports the
                full reactor model to a single file. Alternatively filename can
                also accept a list of strings where each string is the filename
                of the individual shapes that make it up. This will result
in separate files for each shape in the reactor. Defaults to
None which uses the Reactor.name with '.stl' appended to the end
of each entry.
tolerance (float): the precision of the faceting
            include_graveyard: specify if the graveyard will be included or
                not. If True then Reactor.make_graveyard will be called
                using the Reactor.graveyard_size and Reactor.graveyard_offset
                attribute values.
Returns:
list: a list of stl filenames created
"""
if isinstance(filename, str):
path_filename = Path(filename)
if path_filename.suffix != ".stl":
path_filename = path_filename.with_suffix(".stl")
path_filename.parents[0].mkdir(parents=True, exist_ok=True)
# add an include_graveyard that add graveyard if requested
exporters.export(
self.solid,
str(path_filename),
exportType="STL",
tolerance=tolerance,
angularTolerance=angular_tolerance,
)
return str(path_filename)
if filename is None:
if None in self.name:
msg = (
"Shape.name is None and therefore it can't be used "
"to name a stl file. Try setting Shape.name for all "
"shapes in the reactor"
)
                raise ValueError(msg)
filename = [f"{name}.stl" for name in self.name]
# exports the reactor solid as a separate stl files
if len(filename) != len(self.shapes_and_components):
msg = (
f"The Reactor contains {len(self.shapes_and_components)} "
f"Shapes and {len(filename)} filenames have be provided. "
f"The names of the shapes are {self.name}"
)
raise ValueError(msg)
for stl_filename, entry in zip(filename, self.shapes_and_components):
entry.export_stl(
filename=stl_filename,
tolerance=tolerance,
verbose=False,
)
return filename
def make_sector_wedge(
self,
height: Optional[float] = None,
radius: Optional[float] = None,
rotation_angle: Optional[float] = None,
) -> Union[paramak.Shape, None]:
"""Creates a rotated wedge shaped object that is useful for creating
        sector models in DAGMC where reflecting surfaces are needed.
Args:
height: The height of the rotated wedge. If None then the
largest_dimension of the model will be used.
radius: The radius of the rotated wedge. If None then the
largest_dimension of the model will be used
rotation_angle: The rotation angle of the wedge will be the
inverse of the sector
Returns:
the paramak.Shape object created
"""
if rotation_angle is None:
if hasattr(self, "rotation_angle"):
rotation_angle = self.rotation_angle
if rotation_angle is None:
Warning(
"No sector_wedge can be made as rotation_angle"
" or Reactor.rotation_angle have not been set"
)
return None
if rotation_angle > 360:
Warning("No wedge can be made for a rotation angle of 360 or above")
return None
if rotation_angle == 360:
print("No sector wedge made as rotation angle is 360")
return None
if height is None:
height = self.largest_dimension * 2
if radius is None:
radius = self.largest_dimension * 2
sector_cutting_wedge = paramak.CuttingWedge(
height=height,
radius=radius,
rotation_angle=360 - rotation_angle,
surface_reflectivity=True,
azimuth_placement_angle=rotation_angle,
)
self.sector_wedge = sector_cutting_wedge
return sector_cutting_wedge
def export_svg(
self,
filename: Optional[str] = "reactor.svg",
projectionDir: Tuple[float, float, float] = (-1.75, 1.1, 5),
width: Optional[float] = 1000,
height: Optional[float] = 800,
marginLeft: Optional[float] = 120,
marginTop: Optional[float] = 100,
strokeWidth: Optional[float] = None,
strokeColor: Optional[Tuple[int, int, int]] = (0, 0, 0),
hiddenColor: Optional[Tuple[int, int, int]] = (100, 100, 100),
showHidden: Optional[bool] = False,
showAxes: Optional[bool] = False,
) -> str:
"""Exports an svg file for the Reactor.solid. If the filename provided
doesn't end with .svg it will be added.
Args:
filename: the filename of the svg file to be exported. Defaults to
"reactor.svg".
projectionDir: The direction vector to view the geometry from
(x, y, z). Defaults to (-1.75, 1.1, 5)
width: the width of the svg image produced in pixels. Defaults to
1000
height: the height of the svg image produced in pixels. Defaults to
800
marginLeft: the number of pixels between the left edge of the image
and the start of the geometry.
marginTop: the number of pixels between the top edge of the image
and the start of the geometry.
strokeWidth: the width of the lines used to draw the geometry.
                Defaults to None which automatically selects a suitable width.
strokeColor: the color of the lines used to draw the geometry in
RGB format with each value between 0 and 255. Defaults to
(0, 0, 0) which is black.
hiddenColor: the color of the lines used to draw the geometry in
RGB format with each value between 0 and 255. Defaults to
(100, 100, 100) which is light grey.
showHidden: If the edges obscured by geometry should be included in
the diagram. Defaults to False.
showAxes: If the x, y, z axis should be included in the image.
Defaults to False.
Returns:
str: the svg filename created
"""
path_filename = Path(filename)
if path_filename.suffix != ".svg":
path_filename = path_filename.with_suffix(".svg")
path_filename.parents[0].mkdir(parents=True, exist_ok=True)
opt = {
"width": width,
"height": height,
"marginLeft": marginLeft,
"marginTop": marginTop,
"showAxes": showAxes,
"projectionDir": projectionDir,
"strokeColor": strokeColor,
"hiddenColor": hiddenColor,
"showHidden": showHidden,
}
if strokeWidth is not None:
opt["strokeWidth"] = strokeWidth
exporters.export(self.solid, str(path_filename), exportType="SVG", opt=opt)
print("Saved file as ", path_filename)
return str(path_filename)
def export_stp_graveyard(
self,
filename: Optional[str] = "graveyard.stp",
graveyard_size: Optional[float] = None,
graveyard_offset: Optional[float] = None,
) -> str:
"""Writes a stp file (CAD geometry) for the reactor graveyard. This
is needed for DAGMC simulations. This method also calls
Reactor.make_graveyard() with the graveyard_size and graveyard_size
values.
Args:
filename (str): the filename for saving the stp file. Appends
.stp to the filename if it is missing.
graveyard_size: directly sets the size of the graveyard. Defaults
to None which then uses the Reactor.graveyard_size attribute.
graveyard_offset: the offset between the largest edge of the
geometry and inner bounding shell created. Defaults to None
which then uses Reactor.graveyard_offset attribute.
Returns:
str: the stp filename created
"""
graveyard = self.make_graveyard(
graveyard_offset=graveyard_offset,
graveyard_size=graveyard_size,
)
path_filename = Path(filename)
if path_filename.suffix != ".stp":
path_filename = path_filename.with_suffix(".stp")
graveyard.export_stp(filename=str(path_filename))
return str(path_filename)
def make_graveyard(
self,
graveyard_size: Optional[float] = None,
graveyard_offset: Optional[float] = None,
) -> paramak.Shape:
"""Creates a graveyard volume (bounding box) that encapsulates all
volumes. This is required by DAGMC when performing neutronics
simulations. The graveyard size can be ascertained in two ways. Either
the size can be set directly using the graveyard_size which is the
        quickest method. Alternatively the graveyard can be automatically sized
to the geometry by setting a graveyard_offset value. If both options
are set then the method will default to using the graveyard_size
preferentially.
Args:
graveyard_size: directly sets the size of the graveyard. Defaults
to None which then uses the Reactor.graveyard_size attribute.
graveyard_offset: the offset between the largest edge of the
geometry and inner bounding shell created. Defaults to None
which then uses Reactor.graveyard_offset attribute.
Returns:
CadQuery solid: a shell volume that bounds the geometry, referred
to as a graveyard in DAGMC
"""
if graveyard_size is not None:
graveyard_size_to_use = graveyard_size
elif self.graveyard_size is not None:
graveyard_size_to_use = self.graveyard_size
elif graveyard_offset is not None:
self.solid
graveyard_size_to_use = self.largest_dimension * 2 + graveyard_offset * 2
elif self.graveyard_offset is not None:
self.solid
graveyard_size_to_use = (
self.largest_dimension * 2 + self.graveyard_offset * 2
)
else:
raise ValueError(
"the graveyard_size, Reactor.graveyard_size, \
graveyard_offset and Reactor.graveyard_offset are all None. \
Please specify at least one of these attributes or arguments"
)
graveyard_shape = paramak.HollowCube(
length=graveyard_size_to_use,
name="graveyard",
)
self.graveyard = graveyard_shape
return graveyard_shape
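    # A minimal sketch of the two sizing modes documented above, assuming an
    # already-constructed Reactor instance named `reactor`:
    #
    #   reactor.make_graveyard(graveyard_size=1000.0)   # set the cube size directly
    #   reactor.make_graveyard(graveyard_offset=100.0)  # size from largest_dimension
    #
    # If both are supplied, graveyard_size is used preferentially, matching the
    # branch order above.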
def export_2d_image(
self,
filename: Optional[str] = "2d_slice.png",
xmin: Optional[float] = 0.0,
xmax: Optional[float] = 900.0,
ymin: Optional[float] = -600.0,
ymax: Optional[float] = 600.0,
) -> str:
"""Creates a 2D slice image (png) of the reactor.
Args:
filename (str): output filename of the image created
Returns:
str: png filename created
"""
path_filename = Path(filename)
if path_filename.suffix != ".png":
path_filename = path_filename.with_suffix(".png")
path_filename.parents[0].mkdir(parents=True, exist_ok=True)
fig, ax = plt.subplots()
        # creates individual patches for each Shape which are combined together
for entry in self.shapes_and_components:
patch = entry._create_patch()
ax.add_collection(patch)
ax.axis("equal")
ax.set(xlim=(xmin, xmax), ylim=(ymin, ymax))
ax.set_aspect("equal", "box")
Path(filename).parent.mkdir(parents=True, exist_ok=True)
plt.savefig(filename, dpi=100)
plt.close()
print("\n saved 2d image to ", str(path_filename))
return str(path_filename)
def export_html_3d(
self,
filename: Optional[str] = "reactor_3d.html",
) -> Optional[str]:
"""Saves an interactive 3d html view of the Reactor to a html file.
Args:
filename: the filename used to save the html graph. Defaults to
reactor_3d.html
Returns:
str: filename of the created html file
"""
from ipywidgets.embed import embed_minimal_html
view = self.show()
if view is None:
return None
embed_minimal_html(filename, views=[view.cq_view.renderer], title="Renderer")
return filename
def export_html(
self,
filename: Optional[str] = "reactor.html",
facet_splines: Optional[bool] = True,
facet_circles: Optional[bool] = True,
tolerance: Optional[float] = 1.0,
view_plane: Optional[str] = "RZ",
):
"""Creates a html graph representation of the points for the Shape
objects that make up the reactor. Shapes are colored by their .color
property. Shapes are also labelled by their .name. If filename provided
doesn't end with .html then .html will be added.
Args:
filename: the filename used to save the html graph. Defaults to
reactor.html
facet_splines: If True then spline edges will be faceted. Defaults
to True.
facet_circles: If True then circle edges will be faceted. Defaults
to True.
            tolerance: faceting tolerance to use when faceting circles and
                splines. Defaults to 1.0.
view_plane: The plane to project. Options are 'XZ', 'XY', 'YZ',
                'YX', 'ZY', 'ZX', 'RZ' and 'XYZ'. Defaults to 'RZ'.
Returns:
plotly.Figure(): figure object
"""
fig = paramak.utils.export_wire_to_html(
wires=self.solid.Edges(),
filename=filename,
view_plane=view_plane,
facet_splines=facet_splines,
facet_circles=facet_circles,
tolerance=tolerance,
title=f"coordinates of the {self.__class__.__name__} reactor, viewed from the {view_plane} plane",
mode="lines",
)
return fig
def volume(self, split_compounds: bool = False) -> List[float]:
"""Get the volumes of the Shapes in the Reactor.
Args:
            split_compounds: If the Shape is a compound of Shapes, it contains
                multiple volumes. This option allows access to the separate
volumes of each component within a Shape (True) or the volumes of
compounds can be summed (False).
Returns:
            The volumes of the Shapes
"""
all_volumes = []
for shape in self.shapes_and_components:
all_volumes.append(shape.volume(split_compounds=split_compounds))
return all_volumes
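# A minimal usage sketch of the Reactor API defined above, assuming paramak and
# its cadquery dependency are importable; the shape choice, sizes and filenames
# are illustrative only.
if __name__ == "__main__":
    demo_shape = paramak.HollowCube(length=100.0, name="demo_cube")
    reactor = Reactor(
        shapes_and_components=[demo_shape],
        graveyard_size=500.0,  # used preferentially over graveyard_offset
    )
    reactor.export_stp(filename="demo_reactor.stp")  # single combined stp file
    reactor.make_graveyard()                         # bounding shell for DAGMC
    print(reactor.volume(split_compounds=False))     # per-shape volumes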
| 36.1743
| 110
| 0.597404
|
832d32efbd21d6a3d99a0662648bbfd6522235f8
| 2,999
|
py
|
Python
|
google/cloud/security/inventory/pipelines/load_projects_pipeline.py
|
pombredanne/forseti-security
|
68a9a88243460065e00b6c131b3d9abd0331fb37
|
[
"Apache-2.0"
] | 1
|
2018-03-26T08:15:21.000Z
|
2018-03-26T08:15:21.000Z
|
google/cloud/security/inventory/pipelines/load_projects_pipeline.py
|
pombredanne/forseti-security
|
68a9a88243460065e00b6c131b3d9abd0331fb37
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/security/inventory/pipelines/load_projects_pipeline.py
|
pombredanne/forseti-security
|
68a9a88243460065e00b6c131b3d9abd0331fb37
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pipeline to load projects data into Inventory."""
from google.cloud.security.common.gcp_type.resource import LifecycleState
from google.cloud.security.common.util import log_util
from google.cloud.security.common.util import parser
from google.cloud.security.inventory.pipelines import base_pipeline
LOGGER = log_util.get_logger(__name__)
class LoadProjectsPipeline(base_pipeline.BasePipeline):
"""Pipeline to load project data into Inventory."""
RESOURCE_NAME = 'projects'
def _transform(self, resource_from_api):
"""Yield an iterator of loadable iam policies.
Args:
resource_from_api (iterable): Resource manager project list
response.
https://cloud.google.com/resource-manager/reference/rest/v1/projects/list#response-body
Yields:
iterable: Loadable projects, as a per-project dictionary.
"""
for project in (project for d in resource_from_api\
for project in d.get('projects', [])):
yield {'project_number': project.get('projectNumber'),
'project_id': project.get('projectId'),
'project_name': project.get('name'),
'lifecycle_state': project.get('lifecycleState'),
'parent_type': project.get('parent', {}).get('type'),
'parent_id': project.get('parent', {}).get('id'),
'raw_project': parser.json_stringify(project),
'create_time': parser.format_timestamp(
project.get('createTime'),
self.MYSQL_DATETIME_FORMAT)}
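    # A sketch of the per-project dictionary yielded above, assuming a single
    # ACTIVE project in the API response; all values are illustrative:
    #
    #   {'project_number': '123456789012',
    #    'project_id': 'my-project',
    #    'project_name': 'My Project',
    #    'lifecycle_state': 'ACTIVE',
    #    'parent_type': 'organization',
    #    'parent_id': '987654321098',
    #    'raw_project': '{...}',
    #    'create_time': '2017-01-01 00:00:00'}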
def _retrieve(self):
"""Retrieve the project resources from GCP.
Returns:
iterable: resource manager project list response.
https://cloud.google.com/resource-manager/reference/rest/v1/projects/list#response-body
"""
return self.safe_api_call('get_projects',
self.RESOURCE_NAME,
lifecycleState=LifecycleState.ACTIVE)
def run(self):
"""Runs the data pipeline."""
projects_map = self._retrieve()
if projects_map:
loadable_projects = self._transform(projects_map)
self._load(self.RESOURCE_NAME, loadable_projects)
self._get_loaded_count()
| 41.082192
| 103
| 0.647549
|
612418bfb724ac9a4a11078923e2ddf2f58a883f
| 55,171
|
py
|
Python
|
chinook/ARPES_lib.py
|
jminar/chinook
|
3125bdfecc1ccef9aff2bb0ee0da5f50df4826ae
|
[
"MIT"
] | null | null | null |
chinook/ARPES_lib.py
|
jminar/chinook
|
3125bdfecc1ccef9aff2bb0ee0da5f50df4826ae
|
[
"MIT"
] | null | null | null |
chinook/ARPES_lib.py
|
jminar/chinook
|
3125bdfecc1ccef9aff2bb0ee0da5f50df4826ae
|
[
"MIT"
] | 1
|
2021-02-22T11:12:58.000Z
|
2021-02-22T11:12:58.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#Created on Sat Nov 18 21:15:20 2017
#@author: rday
#MIT License
#Copyright (c) 2018 Ryan Patrick Day
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import numpy as np
import sys
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from scipy.interpolate import interp1d
import scipy.ndimage as nd
from scipy.signal import hilbert
from multiprocessing import Pool
from multiprocessing.dummy import Pool as ThreadPool
import chinook.klib as K_lib
import chinook.orbital as olib
import chinook.radint_lib as radint_lib
import chinook.Tk_plot as Tk_plot
if Tk_plot.tk_query():
tk_found = True
else:
tk_found = False
import chinook.Ylm as Ylm
import chinook.rotation_lib as rotlib
import chinook.intensity_map as imap
import chinook.tilt as tilt
####PHYSICAL CONSTANTS RELEVANT TO CALCULATION#######
hb = 6.626*10**-34/(2*np.pi)
c = 3.0*10**8
q = 1.602*10**-19
A = 10.0**-10
me = 9.11*10**-31
mN = 1.67*10**-27
kb = 1.38*10**-23
###
class experiment:
'''
The experiment object is at the centre of the ARPES matrix element
    calculation. This object keeps track of the experimental geometry as
well as a local copy of the tight-binding model and its dependents.
Such a copy is used to avoid corruption of these objects in the global
space during a given run of the ARPES experiment.
*args*:
- **TB**: instance of a tight-binding model object
- **ARPES_dict**: dictionary of relevant experimental parameters including
- *'hv'*: float, photon energy (eV),
- *'mfp'*: float, mean-free path (Angstrom),
- *'resolution'*: dictionary for energy and momentum resolution:
- *'dE'*: float, energy resolution (FWHM eV),
- *'dk'*: float, momentum resolution (FWHM 1/Angstrom)
- *'T'*: float, Temperature of sample (Kelvin)
- *'cube'*: dictionary momentum and energy domain
(*'kz'* as float, all others ( *'X'* , *'Y'* , *'E'* ) are list
or tuple of floats Xo,Xf,dX)
*optional args*:
In addition to the keys above, *ARPES_dict* can also be fed the following:
- *'spin'*: spin-ARPES measurement, list [+/-1,np.array([a,b,c])]
with the numpy array indicating the spin-projection
        direction (with respect to the experimental frame).
- *'rad_type'*: string, radial wavefunctions, c.f. *chinook.rad_int.py* for details
- *'threads'*: int, number of threads on which to calculate the matrix elements.
Requires very large calculation to see improvement over single core.
- *'slab'*: boolean, will truncate the eigenfunctions beyond the penetration depth (specifically 4x penetration depth), default is False
- *'ang'*: float, rotation of sample about normal emission i.e. z-axis (radian), default is 0.0
- *'W'*: float, work function (eV), default is 4.0
***
'''
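    # A sketch of a minimal ARPES_dict, restricted to the keys that __init__
    # below reads unconditionally ('cube', 'hv', 'resolution', 'SE'); the
    # numerical values are illustrative and the 'SE' entry format depends on
    # what SE_gen accepts:
    #
    #   ARPES_dict = {'cube': {'X': [-0.5, 0.5, 100],
    #                          'Y': [-0.5, 0.5, 100],
    #                          'E': [-0.5, 0.1, 200],
    #                          'kz': 0.0},
    #                 'hv': 21.2,
    #                 'resolution': {'E': 0.01, 'k': 0.005},
    #                 'SE': ['constant', 0.02]}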
def __init__(self,TB,ARPES_dict):
self.TB = TB
if sum([o.spin for o in self.TB.basis])<len(self.TB.basis):
self.spin = True
else:
self.spin = False
try:
self.cube = (ARPES_dict['cube']['X'],ARPES_dict['cube']['Y'],ARPES_dict['cube']['E'])
self.coord_type = 'momentum'
except KeyError:
try:
self.cube = (ARPES_dict['cube']['Tx'],ARPES_dict['cube']['Ty'],ARPES_dict['cube']['E'])
self.coord_type = 'angle'
except KeyError:
print('Error: must pass either a momentum (X,Y,E) or angle (Tx,Ty,E) range of interest to "cube" key of input dictionary.')
return None
self.hv = ARPES_dict['hv']
self.dE = ARPES_dict['resolution']['E']/np.sqrt(8*np.log(2)) #energy resolution FWHM
self.dk = ARPES_dict['resolution']['k']/np.sqrt(8*np.log(2)) #momentum resolution FWHM
self.maps = []
self.SE_args = ARPES_dict['SE']
try:
self.mfp = ARPES_dict['mfp'] #photoelectron mean free path for escape
except KeyError:
self.mfp = 10.0
try:
self.kz = ARPES_dict['cube']['kz']
except KeyError:
self.kz = 0.0
try:
self.W = ARPES_dict['W']
except KeyError:
self.W = 4.0
try:
self.Vo = ARPES_dict['Vo']
except KeyError:
self.Vo = -1
try:
self.ang = ARPES_dict['angle']
except KeyError:
self.ang = 0.0
try:
self.pol = ARPES_dict['pol']
except KeyError:
self.pol = np.array([1,0,0])
try:
self.T = ARPES_dict['T']
except KeyError:
self.T = -1
try:
self.sarpes = ARPES_dict['spin']
except KeyError:
self.sarpes = None
try:
self.rad_type = ARPES_dict['rad_type']
except KeyError:
self.rad_type = 'slater'
try:
self.rad_args = ARPES_dict['rad_args']
except KeyError:
self.rad_args = None
try:
self.phase_shifts= ARPES_dict['phase_shifts']
except KeyError:
self.phase_shifts = None
try:
self.slit = ARPES_dict['slit']
except KeyError:
self.slit = 'H'
try:
self.truncate = ARPES_dict['slab']
except KeyError:
self.truncate = False
try:
self.threads = ARPES_dict['threads']
except KeyError:
self.threads = 0
def update_pars(self,ARPES_dict,datacube=False):
'''
Several experimental parameters can be updated without re-calculating
the ARPES intensity explicitly. Specifically here, we can update
resolution in both energy and momentum, as well as temperature,
spin-projection, self-energy function, and polarization.
*args*:
- **ARPES_dict**: dictionary, specifically containing
- *'resolution'*: dictionary with 'E':float and 'k':float
- *'T'*: float, temperature, a negative value will suppress the Fermi function
- *'spin'*: list of [int, numpy array of 3 float] indicating projection and spin vector
- *'SE'*: various types accepted, see *SE_gen* for details
- *'pol'*: numpy array of 3 complex float, polarization of light
*kwargs*:
- **datacube**: bool, if updating in *spectral*, only the above can be changed. If instead, updating
at the start of *datacube*, can also pass:
- **hv**: float, photon energy, eV
            - **ang**: float, sample orientation around normal, radians
- **rad_type**: string, radial integral type
- **rad_args**: various datatype, see *radint_lib* for details
- **kz**: float, out-of-plane momentum, inverse Angstrom
- **mfp**: float, mean-free path, Angstrom
'''
if 'resolution' in ARPES_dict.keys():
try:
self.dE = ARPES_dict['resolution']['E']/np.sqrt(8*np.log(2)) #energy resolution FWHM
self.dk = ARPES_dict['resolution']['k']/np.sqrt(8*np.log(2)) #momentum resolution FWHM
except KeyError:
print('Energy "E" and momentum "k" resolutions not passed in "resolution" dictionary. \n Retaining original values.')
if 'T' in ARPES_dict.keys():
self.T = ARPES_dict['T']
if 'spin' in ARPES_dict.keys():
self.sarpes = ARPES_dict['spin']
if 'SE' in ARPES_dict.keys():
self.SE_args = ARPES_dict['SE']
if 'pol' in ARPES_dict.keys():
self.pol = ARPES_dict['pol']
if 'slit' in ARPES_dict.keys():
self.slit = ARPES_dict['slit']
if datacube:
if 'hv' in ARPES_dict.keys():
self.hv = ARPES_dict['hv']
if 'rad_type' in ARPES_dict.keys():
self.rad_type = ARPES_dict['rad_type']
if 'rad_args' in ARPES_dict.keys():
self.rad_args = ARPES_dict['rad_args']
if 'Vo' in ARPES_dict.keys():
self.Vo = ARPES_dict['Vo']
if 'kz' in ARPES_dict.keys():
self.kz = ARPES_dict['kz']
if 'mfp' in ARPES_dict.keys():
self.mfp = ARPES_dict['mfp']
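    # A sketch of a typical in-place update, assuming an experiment instance
    # named `expt`; only keys handled above take effect:
    #
    #   expt.update_pars({'T': 20.0, 'pol': np.array([0, 1, 0]),
    #                     'resolution': {'E': 0.02, 'k': 0.01}})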
def diagonalize(self):
'''
Diagonalize the Hamiltonian over the desired range of momentum, reshaping the
        band-energies into a 1-dimensional array. If the user has not selected an energy
grain for calculation, automatically calculate this.
*return*:
None, however *experiment* attributes *X*, *Y*, *ph*, *TB.Kobj*, *Eb*, *Ev*, *cube*
are modified.
'''
if self.Vo>0:
kn = (self.hv-self.W)
Vo_args =[self.Vo,kn]
else:
Vo_args = None
if self.coord_type=='momentum':
x = np.linspace(*self.cube[0])
y = np.linspace(*self.cube[1])
X,Y = np.meshgrid(x,y)
self.X = X
self.Y = Y
k_arr,self.ph = K_lib.kmesh(self.ang,self.X,self.Y,self.kz,Vo_args)
elif self.coord_type=='angle':
k_arr = tilt.gen_kpoints(self.hv-self.W,(self.cube[0][2],self.cube[1][2]),self.cube[0][:2],self.cube[1][:2],self.kz)
self.X = np.reshape(k_arr[:,0],(self.cube[1][2],self.cube[0][2]))
self.Y = np.reshape(k_arr[:,1],(self.cube[1][2],self.cube[0][2]))
self.ph = np.arctan2(k_arr[:,1],k_arr[:,0])
self.TB.Kobj = K_lib.kpath(k_arr)
self.Eb,self.Ev = self.TB.solve_H()
if len(self.cube[2])==2:
#user only passed the energy limits, not the grain--automate generation of the grain size
band_dE_max = find_mean_dE(self.Eb)
NE_pts = int(10*(self.cube[2][1]-self.cube[2][0])/band_dE_max)
self.cube[2].append(NE_pts)
self.Eb = np.reshape(self.Eb,(np.shape(self.Eb)[-1]*np.shape(self.X)[0]*np.shape(self.X)[1]))
def truncate_model(self):
'''
For slab calculations, the number of basis states becomes a significant memory load,
as well as a time bottleneck. In reality, an ARPES calculation only needs the small
number of basis states near the surface. Then for slab-calculations, we can truncate
the basis and eigenvectors used in the calculation to dramatically improve our
capacity to perform such calculations. We keep all eigenvectors, but retain only the
projection of the basis states within 2*the mean free path of the surface. The
states associated with this projection are retained, while remainders are not.
*return*:
- **tmp_basis**: list, truncated subset of the basis' orbital objects
- **Evec**: numpy array of complex float corresponding to the truncated eigenvector
array containing only the surface-projected wavefunctions
'''
depths = np.array([abs(oi.depth) for oi in self.basis])
i_start = np.where(depths<4*self.mfp)[0][0]
tmp_basis = []
#CASE 1: BASIS INCLUDES BOTH SPIN DOF
if self.spin:
switch = (int(len(self.basis)/2))
tmp_basis = self.basis[i_start:switch] + self.basis[(switch+i_start):]
Evec = np.zeros((np.shape(self.Ev)[0],len(tmp_basis),np.shape(self.Ev)[-1]),dtype=complex)
Evec[:,:(switch-i_start),:] =self.Ev[:,i_start:switch,:]
Evec[:,(switch-i_start):,:] = self.Ev[:,(switch+i_start):,:]
#CASE 2: BASIS IS SPINLESS
else:
tmp_basis = self.basis[i_start:]
Evec=self.Ev[:,i_start:,:]
return tmp_basis,Evec
def rot_basis(self):
'''
Rotate the basis orbitals and their positions in the lab frame to be consistent with the
experimental geometry
*return*:
- list of orbital objects, representing a rotated version of the original basis if the
angle is finite. Otherwise, just return the original basis.
'''
tmp_base = []
if abs(self.ang)>0.0:
for o in range(len(self.TB.basis)):
oproj = np.copy(self.TB.basis[o].proj)
l = self.TB.basis[o].l
nproj,_ = olib.rot_projection(l,oproj,[np.array([0,0,1]),self.ang])
tmp = self.TB.basis[o].copy()
tmp.proj = nproj
tmp_base.append(tmp)
return tmp_base
else:
return self.TB.basis
###############################################################################
###############################################################################
################## MAIN MATRIX ELEMENT EVALUATION ###########################
###############################################################################
###############################################################################
def datacube(self,ARPES_dict=None):
'''
This function computes the photoemission matrix elements.
Given a kmesh to calculate the photoemission over, the mesh is reshaped to an nx3 array and the Hamiltonian
diagonalized over this set of k points. The matrix elements are then calculated for each
of these E-k points
*kwargs*:
- **ARPES_dict**: can optionally pass a dictionary of experimental parameters, to update those defined
in the initialization of the *experiment* object.
*return*:
- boolean, True if function finishes successfully.
'''
if ARPES_dict is not None:
self.update_pars(ARPES_dict,True)
self.basis = self.rot_basis()
print('Initiate diagonalization: ')
self.diagonalize()
print('Diagonalization Complete.')
nstates = len(self.basis)
if self.truncate:
self.basis,self.Ev = self.truncate_model()
dE = (self.cube[2][1]-self.cube[2][0])/self.cube[2][2]
dig_range = (self.cube[2][0]-5*dE,self.cube[2][1]+5*dE)
self.pks = np.array([[i,np.floor(np.floor(i/nstates)/np.shape(self.X)[1]),np.floor(i/nstates)%np.shape(self.X)[1],self.Eb[i]] for i in range(len(self.Eb)) if dig_range[0]<=self.Eb[i]<=dig_range[-1]])
if len(self.pks)==0:
raise ValueError('ARPES Calculation Error: no states found in energy window. Consider refining the region of interest')
self.Mk = np.zeros((len(self.pks),2,3),dtype=complex)
kn = (2.*me/hb**2*(self.hv+self.pks[:,3]-self.W)*q)**0.5*A
self.th = np.array([np.arccos((kn[i]**2-self.X[int(self.pks[i,1]),int(self.pks[i,2])]**2-self.Y[int(self.pks[i,1]),int(self.pks[i,2])]**2)**0.5/kn[i]) if (kn[i]**2-self.X[int(self.pks[i,1]),int(self.pks[i,2])]**2-self.Y[int(self.pks[i,1]),int(self.pks[i,2])]**2)>=0 else -1 for i in range(len(self.pks))])
self.prefactors = np.array([o.sigma*np.exp((-0.5/abs(self.mfp))*abs(o.depth)) for o in self.basis])
self.Largs,self.Margs,Gmats,self.orbital_pointers = all_Y(self.basis)
self.Gbasis = Gmats[self.orbital_pointers]
self.proj_arr = projection_map(self.basis)
rad_dict = {'hv':self.hv,'W':self.W,'rad_type':self.rad_type,'rad_args':self.rad_args,'phase_shifts':self.phase_shifts}
self.Bfuncs,self.radint_pointers = radint_lib.make_radint_pointer(rad_dict,self.basis,dig_range)
print('Begin computing matrix elements: ')
valid_indices = np.array([i for i in range(len(self.pks)) if (self.th[i]>=0)])# and self.cube[2][0]<=self.pks[i][3]<=self.cube[2][1])])
if self.threads>0:
self.thread_Mk(self.threads,valid_indices)
else:
self.serial_Mk(valid_indices)
print('\nDone matrix elements')
return True
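# Hedged usage sketch (the instance name and construction details are assumptions for
# illustration, not part of the original source):
#     expmt.datacube()            # diagonalize and fill expmt.Mk with the matrix elements
#     I, Ig = expmt.spectral()    # then build the raw and resolution-broadened intensity maps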
def M_compute(self,i):
'''
The core method called during matrix element computation.
*args*:
- **i**: integer, index of the state in *pks* (k-point and energy) for which the matrix element is evaluated
*return*:
- **Mtmp**: numpy array (2x3) of complex float corresponding to the matrix element
projection for dm = -1,0,1 (columns) and spin down or up (rows) for a given
state in k and energy.
'''
nstates = len(self.TB.basis)
phi = self.ph[int(self.pks[i,0]/nstates)]
th = self.th[i]
Ylm_calls = Yvect(self.Largs,self.Margs,th,phi)[self.orbital_pointers]
Mtmp = np.zeros((2,3),dtype=complex)
B_eval = np.array([[b[0](self.pks[i,3]),b[1](self.pks[i,3])] for b in self.Bfuncs])
pref = np.einsum('i,ij->ij',np.einsum('i,i->i',self.prefactors,self.Ev[int(self.pks[i,0]/nstates),:,int(self.pks[i,0]%nstates)]),B_eval[self.radint_pointers])
Gtmp = np.einsum('ij,ijkl->ikl',self.proj_arr,np.einsum('ijkl,ijkl->ijkl',Ylm_calls,self.Gbasis))
if self.spin:
Mtmp[0,:] = np.einsum('ij,ijk->k',pref[:int(len(self.basis)/2)],Gtmp[:int(len(self.basis)/2)])
Mtmp[1,:] = np.einsum('ij,ijk->k',pref[int(len(self.basis)/2):],Gtmp[int(len(self.basis)/2):])
else:
Mtmp[0,:] = np.einsum('ij,ijk->k',pref,Gtmp)
return Mtmp
def serial_Mk(self,indices):
'''
Run matrix element on a single thread, directly modifies the *Mk* attribute.
*args*:
- **indices**: list of all state indices for execution; restricting states
in *cube_indx* to those within the desired window
'''
for ii in indices:
sys.stdout.write('\r'+progress_bar(ii+1,len(self.pks)))
self.Mk[ii,:,:]+=self.M_compute(ii)
def thread_Mk(self,N,indices):
'''
Run matrix element on *N* threads using multiprocess functions, directly modifies the *Mk*
attribute.
NOTE 21/2/2019 -- this has not been optimized to show any measurable improvement over serial execution.
May require a more clever way to do this to get a proper speedup.
*args*:
- **N**: int, number of threads
- **indices**: list of int, all state indices for execution; restricting
states in cube_indx to those within the desired window.
'''
div = int(len(indices)/N)
pool = ThreadPool(N)
results = np.array(pool.map(self.Mk_wrapper,[indices[ii*div:(ii+1)*div] for ii in range(N)]))
pool.close()
pool.join()
results = results.reshape(len(indices),2,3)
self.Mk[indices] = results
def Mk_wrapper(self,ilist):
'''
Wrapper function for use in multiprocessing, to run each of the processes
as a serial matrix element calculation over a sublist of state indices.
*args*:
- **ilist**: list of int, all state indices for execution.
*return*:
- **Mk_out**: numpy array of complex float with shape (len(ilist), 2,3)
'''
Mk_out = np.zeros((len(ilist),2,3),dtype=complex)
for ii in list(enumerate(ilist)):
Mk_out[ii[0],:,:] += self.M_compute(ii[1])
return Mk_out
###############################################################################
###############################################################################
####################### DATA VIEWING #########################################
###############################################################################
###############################################################################
def SE_gen(self):
'''
Self-energy arguments are passed as a list, which supports mixed datatypes.
The first entry in the list is a string indicating the type of self-energy,
and the remaining entries are the self-energy parameters themselves.
*args*:
- **SE_args**: list, first entry can be 'func', 'poly', 'constant', or 'grid'
indicating an executable function, polynomial factors, constant, or a grid of values
*return*:
- SE, numpy array of complex float, with either shape of the datacube,
or as a one dimensional array over energy only.
'''
w = np.linspace(*self.cube[2])
if self.SE_args[0] == 'func':
kx = np.linspace(*self.cube[0])
ky = np.linspace(*self.cube[1])
X,Y,W = np.meshgrid(kx,ky,w)
try:
SE = self.SE_args[1](X,Y,W)
except TypeError:
print('Using local (k-independent) self-energy.')
SE = self.SE_args[1](w)
elif self.SE_args[0] == 'grid':
SE = np.interp(w,self.SE_args[1],self.SE_args[2])
elif self.SE_args[0] == 'poly':
SE = -1.0j*abs(poly(w,self.SE_args[1:]))
elif self.SE_args[0] == 'constant':
SE = -1.0j*abs(self.SE_args[1])
return SE
def smat_gen(self,svector=None):
'''
Define the spin-projection matrix related to a spin-resolved ARPES experiment.
*return*:
- **Smat**: numpy array of 2x2 complex float corresponding to Pauli operator along the desired direction
'''
try:
sv = svector/np.linalg.norm(svector)
except TypeError:
try:
sv = self.sarpes[1]/np.linalg.norm(self.sarpes[1])
except IndexError:
print('ERROR: Invalid spin-entry. See documentation for ARPES_lib.experiment')
return None
th = np.arccos(sv[2])
ph = np.arctan2(sv[1],sv[0])
if abs(self.ang)>0:
ph+=self.ang
Smat = np.array([[np.cos(th/2),np.exp(-1.0j*ph)*np.sin(th/2)],[np.sin(th/2),-np.exp(-1.0j*ph)*np.cos(th/2)]])
return Smat
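# Worked example (illustrative only): for a spin-projection direction sv = [0, 0, 1],
# th = arccos(1) = 0 and ph = 0, so Smat = [[1, 0], [0, -1]]. For sv = [1, 0, 0],
# th = pi/2 and ph = 0, giving Smat = (1/sqrt(2)) * [[1, 1], [1, -1]].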
def sarpes_projector(self):
'''
For use in spin-resolved ARPES experiments, project the computed
matrix element values onto the desired spin-projection direction.
In the event that the spin projection direction is not along the
standard out-of-plane quantization axis, we rotate the matrix elements
computed into the desired basis direction.
*return*:
- **spin_projected_Mk**: numpy array of complex float with same
shape as *Mk*
'''
if self.coord_type == 'momentum':
Smat = self.smat_gen()
spin_projected_Mk = np.einsum('ij,kjl->kil',Smat,self.Mk)
elif self.coord_type == 'angle':
if self.slit=='H':
th =0.5*(self.cube[0][0]+self.cube[0][1])
phvals = np.linspace(*self.cube[1])
pk_index = 1
Rmats = np.array([np.matmul(rotlib.Rodrigues_Rmat(np.array([1,0,0]),-ph),rotlib.Rodrigues_Rmat(np.array([0,1,0]),-th)) for ph in phvals])
elif self.slit=='V':
ph = 0.5*(self.cube[1][0]+self.cube[1][1])
thvals = np.linspace(*self.cube[0])
Rmats = np.array([np.matmul(rotlib.Rodrigues_Rmat(np.array([0,np.cos(-ph),np.sin(-ph)]),-th),rotlib.Rodrigues_Rmat(np.array([1,0,0]),-ph)) for th in thvals])
pk_index = 2
svectors = np.einsum('ijk,k->ij',Rmats,self.sarpes[1])
Smats = np.array([self.smat_gen(sv) for sv in svectors])
all_mats = Smats[np.array([int(self.pks[i,pk_index]) for i in range(len(self.pks))])]
spin_projected_Mk = np.einsum('ijk,ikl->ijl',all_mats,self.Mk)
return spin_projected_Mk
def gen_all_pol(self):
'''
Rotate the polarization vector, as it appears for each angle in the experiment.
Assume that it only rotates with THETA_y (vertical cryostat), and that the polarization
vector defined by the user relates to the centre of the THETA_x axis.
At present, only zero vertical rotation (tilt only) is handled.
*return*:
- numpy array of len(expmt.cube[1]) x 3 complex float, rotated polarization vectors
expressed in basis of spherical harmonics
'''
if self.slit=='H':
th =0.5*(self.cube[0][0]+self.cube[0][1])
phvals = np.linspace(*self.cube[1])
Rmats = np.array([np.matmul(rotlib.Rodrigues_Rmat(np.array([1,0,0]),-ph),rotlib.Rodrigues_Rmat(np.array([0,1,0]),-th)) for ph in phvals])
pk_index = 1
elif self.slit=='V':
ph = 0.5*(self.cube[1][0]+self.cube[1][1])
thvals = np.linspace(*self.cube[0])
Rmats = np.array([np.matmul(rotlib.Rodrigues_Rmat(np.array([0,np.cos(-ph),np.sin(-ph)]),-th),rotlib.Rodrigues_Rmat(np.array([1,0,0]),-ph)) for th in thvals])
pk_index = 2
rot_pols = np.einsum('ijk,k->ij',Rmats,self.pol)
rot_pols_sph = pol_2_sph(rot_pols)
peak_pols = np.array([rot_pols_sph[int(self.pks[i,pk_index])] for i in range(len(self.pks))])
return peak_pols
def T_distribution(self):
'''
Compute the Fermi-distribution for a fixed temperature, over the domain of energy of interest
*return*:
- **fermi**: numpy array of float, same length as energy domain array defined by *cube[2]* attribute.
'''
if np.sign(self.T)>-1:
fermi = vf(np.linspace(*self.cube[2])/(kb*self.T/q))
else:
fermi = np.ones(self.cube[2][2])
return fermi
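# Illustrative numbers (not from the original source): with self.T = 10 K, kb*T/q is roughly
# 8.6e-4 eV, so a state at w = -0.01 eV gives an argument of about -11.6 and a Fermi factor of
# essentially 1, while w = +0.01 eV gives about +11.6 and a factor of roughly 1e-5.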
def spectral(self,ARPES_dict=None,slice_select=None,add_map = False,plot_bands=False,ax=None):
'''
Take the matrix elements and build a simulated ARPES spectrum.
The user has several options here for the self-energy to be used, c.f. *SE_gen()* for details.
Gaussian resolution broadening is the last operation performed, to be consistent with the
practical experiment. *slice_select* instructs the method to also produce a plot of the designated
slice through momentum or energy. If this is done, the function also returns the associated matplotlib.Axes
object for further manipulation of the plot window.
*kwargs*:
- **ARPES_dict**: dictionary, experimental configuration. See *experiment.__init__* and *experiment.update_pars()*
- **slice_select**: tuple, of either (int,int) or (str,float) format. If (int,int), first is axis index (0,1,2 for x,y,E) and the second is the index of the array. More useful typically is (str,float) format, with str as 'x', 'kx', 'y', 'ky', 'E', 'w' and the float the value requested. It will find the index along this direction closest to the request. Note the strings are not case-sensitive.
- **add_map**: boolean, add the intensity map to the list of intensity maps. If True, a new intensity map object is appended to the list; otherwise, the list is replaced by the new map
- **plot_bands**: boolean, plot bandstructure from tight-binding over the intensity map
- **ax**: matplotlib Axes, only relevant if **slice_select**, option to pass existing Axes to plot onto
*return*:
- **I**: numpy array of float, raw intensity map.
- **Ig**: numpy array of float, resolution-broadened intensity map.
- **ax**: matplotlib Axes, for further modifications to plot only if **slice_select** True
'''
if not hasattr(self,'Mk'):
self.datacube()
if ARPES_dict is not None:
self.update_pars(ARPES_dict)
if self.sarpes is not None:
spin_Mk = self.sarpes_projector()
if self.coord_type == 'momentum':
pol = pol_2_sph(self.pol)
M_factor = np.power(abs(np.einsum('ij,j->i',spin_Mk[:,int((self.sarpes[0]+1)/2),:],pol)),2)
elif self.coord_type == 'angle':
all_pol = self.gen_all_pol()
M_factor = np.power(abs(np.einsum('ij,ij->i',spin_Mk[:,int((self.sarpes[0]+1)/2),:],all_pol)),2)
else:
if self.coord_type == 'momentum':
pol = pol_2_sph(self.pol)
M_factor = np.sum(np.power(abs(np.einsum('ijk,k->ij',self.Mk,pol)),2),axis=1)
elif self.coord_type == 'angle':
all_pol = self.gen_all_pol()
M_factor = np.sum(np.power(abs(np.einsum('ijk,ik->ij',self.Mk,all_pol)),2),axis=1)
SE = self.SE_gen()
fermi = self.T_distribution()
w = np.linspace(*self.cube[2])
I = np.zeros((self.cube[1][2],self.cube[0][2],self.cube[2][2]))
if np.shape(SE)==np.shape(I):
SE_k = True
else:
SE_k = False
for p in range(len(self.pks)):
if not SE_k:
I[int(np.real(self.pks[p,1])),int(np.real(self.pks[p,2])),:] += M_factor[p]*np.imag(-1./(np.pi*(w-self.pks[p,3]-(SE-0.0005j))))*fermi
else:
I[int(np.real(self.pks[p,1])),int(np.real(self.pks[p,2])),:]+= M_factor[p]*np.imag(-1./(np.pi*(w-self.pks[p,3]-(SE[int(np.real(self.pks[p,1])),int(np.real(self.pks[p,2])),:]-0.0005j))))*fermi
kxg = (self.cube[0][2]*self.dk/(self.cube[0][1]-self.cube[0][0]) if abs(self.cube[0][1]-self.cube[0][0])>0 else 0)
kyg = (self.cube[1][2]*self.dk/(self.cube[1][1]-self.cube[1][0]) if abs(self.cube[1][1]-self.cube[1][0])>0 else 0)
wg = (self.cube[2][2]*self.dE/(self.cube[2][1]-self.cube[2][0]) if abs(self.cube[2][1]-self.cube[2][0])>0 else 0)
Ig = nd.gaussian_filter(I,(kyg,kxg,wg))
if slice_select!=None:
ax_img = self.plot_intensity_map(Ig,slice_select,plot_bands,ax)
if add_map:
self.maps.append(imap.intensity_map(len(self.maps),Ig,self.cube,self.kz,self.T,self.hv,self.pol,self.dE,self.dk,self.SE_args,self.sarpes,self.ang))
else:
self.maps = [imap.intensity_map(len(self.maps),Ig,self.cube,self.kz,self.T,self.hv,self.pol,self.dE,self.dk,self.SE_args,self.sarpes,self.ang)]
if slice_select:
return I,Ig,ax_img
else:
return I,Ig
def gen_imap(self,I_arr):
new_map = imap.intensity_map(len(self.maps),I_arr,self.cube,self.kz,self.T,self.hv,self.pol,self.dE,self.dk,self.SE_args,self.sarpes,self.ang)
return new_map
def plot_intensity_map(self,plot_map,slice_select,plot_bands=False,ax_img=None):
'''
Plot a slice of the intensity map computed in *spectral*. The user selects either
an array index along one of the axes, or the fixed value of interest, allowing
either integer, or float selection.
*args*:
- **plot_map**: numpy array of shape (self.cube[0],self.cube[1],self.cube[2]) of float
- **slice_select**: list of either [int,int] or [str,float], corresponding to
dimension, index or label, value. The former option takes dimensions 0,1,2 while
the latter can handle 'x', 'kx', 'y', 'ky', 'energy', 'w', or 'e', and is not
case-sensitive.
- **plot_bands**: boolean, option to overlay a constant-momentum cut with
the dispersion calculated from tight-binding
- **ax_img**: matplotlib Axes, for option to plot onto existing Axes
*return*:
- **ax_img**: matplotlib axis object
'''
if ax_img is None:
fig,ax_img = plt.subplots()
fig.set_tight_layout(False)
if type(slice_select[0]) is str:
str_opts = [['x','kx'],['y','ky'],['energy','w','e']]
dim = 0
for i in range(3):
if slice_select[0].lower() in str_opts[i]:
dim = i
x = np.linspace(*self.cube[dim])
index = np.where(abs(x-slice_select[1])==abs(x-slice_select[1]).min())[0][0]
slice_select = [dim,int(index)]
#new option
index_dict = {2:(0,1),1:(2,0),0:(2,1)}
X,Y = np.meshgrid(np.linspace(*self.cube[index_dict[slice_select[0]][0]]),np.linspace(*self.cube[index_dict[slice_select[0]][1]]))
limits = np.zeros((3,2),dtype=int)
limits[:,1] = np.shape(plot_map)[1],np.shape(plot_map)[0],np.shape(plot_map)[2]
limits[slice_select[0]] = [slice_select[1],slice_select[1]+1]
ax_xlimit = (self.cube[index_dict[slice_select[0]][0]][0],self.cube[index_dict[slice_select[0]][0]][1])
ax_ylimit = (self.cube[index_dict[slice_select[0]][1]][0],self.cube[index_dict[slice_select[0]][1]][1])
plottable = np.squeeze(plot_map[limits[1,0]:limits[1,1],limits[0,0]:limits[0,1],limits[2,0]:limits[2,1]])
p = ax_img.pcolormesh(X,Y,plottable,cmap=cm.magma)
if plot_bands and slice_select[0]!=2:
k = np.linspace(*self.cube[index_dict[slice_select[0]][1]])
if slice_select[0]==1:
indices = np.array([len(k)*slice_select[1] + ii for ii in range(len(k))])
elif slice_select[0]==0:
indices = np.array([slice_select[1] + ii*self.cube[0][2] for ii in range(len(k))])
for ii in range(len(self.TB.basis)):
ax_img.plot(self.TB.Eband[indices,ii],k,alpha=0.4,c='w')
#
ax_img.set_xlim(*ax_xlimit)
ax_img.set_ylim(*ax_ylimit)
plt.colorbar(p,ax=ax_img)
plt.tight_layout()
return ax_img
def plot_gui(self):
'''
Generate the Tkinter gui for exploring the experimental parameter-space
associated with the present experiment.
*return*:
- **Tk_win**: Tkinter window.
'''
if tk_found:
TK_win = Tk_plot.plot_intensity_interface(self)
return TK_win
else:
print('This tool is not active without tkinter')
return None
###############################################################################
###############################################################################
################### WRITE ARPES MAP TO FILE ###################################
###############################################################################
###############################################################################
def write_map(self,_map,directory):
'''
Write the intensity maps to a series of text files in the indicated directory.
*args*:
- **_map**: numpy array of float to write to file
- **directory**: string, name of directory + the file-lead name
*return*:
- boolean, True
'''
for i in range(np.shape(_map)[2]):
filename = directory + '_{:d}.txt'.format(i)
self.write_Ik(filename,_map[:,:,i])
return True
def write_params(self,Adict,parfile):
'''
Generate metadata text file associated with the saved map.
*args*:
- **Adict**: dictionary, ARPES_dict same as in above functions, containing
relevant experimental parameters for use in saving the metadata associated
with the related calculation.
- **parfile**: string, destination for the metadata
'''
RE_pol = list(np.real(Adict['pol']))
IM_pol = list(np.imag(Adict['pol']))
with open(parfile,"w") as params:
params.write("Photon Energy: {:0.2f} eV \n".format(Adict['hv']))
params.write("Temperature: {:0.2f} K \n".format(Adict['T'][1]))
params.write("Polarization: {:0.3f}+{:0.3f}j {:0.3f}+{:0.3f}j {:0.3f}+{:0.3f}j\n".format(RE_pol[0],IM_pol[0],RE_pol[1],IM_pol[1],RE_pol[2],IM_pol[2]))
params.write("Energy Range: {:0.6f} {:0.6f} {:0.6f}\n".format(Adict['cube']['E'][0],Adict['cube']['E'][1],Adict['cube']['E'][2]))
params.write("Kx Range: {:0.6f} {:0.6f} {:0.6f}\n".format(Adict['cube']['X'][0],Adict['cube']['X'][1],Adict['cube']['X'][2]))
params.write("Ky Range: {:0.6f} {:0.6f} {:0.6f}\n".format(Adict['cube']['Y'][0],Adict['cube']['Y'][1],Adict['cube']['Y'][2]))
params.write("Kz Value: {:0.6f}\n".format(Adict['cube']['kz']))
try:
params.write("Azimuthal Rotation: {:0.6f}\n".format(Adict['angle']))
except ValueError:
pass
params.write("Energy Resolution: {:0.4f} eV\n".format(Adict['resolution']['E']))
params.write("Momentum Resolution: {:0.4f} eV\n".format(Adict['resolution']['k']))
params.write('Self Energy: '+'+'.join(['{:0.04f}w^{:d}'.format(Adict['SE'][i],i) for i in range(len(Adict['SE']))])+'\n')
try:
params.write("Spin Projection ({:s}): {:0.4f} {:0.4f} {:0.4f}\n".format(('up' if Adict['spin'][0]==1 else 'down'),Adict['spin'][1][0],Adict['spin'][1][1],Adict['spin'][1][2]))
except TypeError:
pass
params.close()
def write_Ik(self,filename,mat):
'''
Function for producing the textfiles associated with a 2 dimensional numpy array of float
*args*:
- **filename**: string indicating destination of file
- **mat**: numpy array of float, two dimensional
*return*:
- boolean, True
'''
with open(filename,"w") as destination:
for i in range(np.shape(mat)[0]):
tmpline = " ".join(map(str,mat[i,:]))
tmpline+="\n"
destination.write(tmpline)
destination.close()
return True
###############################################################################
###############################################################################
######################## SUPPORT FUNCTIONS#####################################
###############################################################################
###############################################################################
def find_mean_dE(Eb):
'''
Find the average spacing between adjacent points along the dispersion calculated.
*args*:
- **Eb**: numpy array of float, eigenvalues
*return*:
- **dE_mean**: float, average difference between consecutive eigenvalues.
'''
dE_mean = abs(np.subtract(Eb[1:,:],Eb[:-1,:])).mean()
return dE_mean
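# Doctest-style sketch (assumed values, for illustration):
#     >>> Eb = np.array([[0.0, 1.0], [0.2, 1.1]])   # two k-points, two bands
#     >>> find_mean_dE(Eb)                          # -> 0.15
# since the consecutive differences are |0.2-0.0| = 0.2 and |1.1-1.0| = 0.1.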
def con_ferm(ekbt):
'''
Typical values in the relevant domain of the Fermi distribution can overflow a
64-bit float. To circumvent this, the Fermi function is set
to zero whenever the argument of the exponential in the denominator is too large.
*args*:
- **ekbt**: float, (E-u)/kbT in terms of eV
*return*:
- **fermi**: float, evaluation of Fermi function.
'''
fermi = 0.0
if ekbt<709:
fermi = 1.0/(np.exp(ekbt)+1)
return fermi
vf = np.vectorize(con_ferm)
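# Quick sanity check (illustrative): con_ferm(0.0) returns 0.5, con_ferm(50.0) is ~2e-22, and
# con_ferm(800.0) returns exactly 0.0 because the exponent exceeds the 709 overflow guard.
# The vectorized form applies to whole arrays, e.g. vf(np.array([-50., 0., 800.]))
# -> approximately [1.0, 0.5, 0.0].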
def pol_2_sph(pol):
'''
Return the polarization vector in the spherical-harmonic basis, ordered as Y_11, Y_10, Y_1-1.
If an array of polarization vectors is passed, the einsum function is used to broadcast over
all vectors.
*args*:
- **pol**: numpy array of 3 complex float, polarization vector in Cartesian coordinates (x,y,z)
*return*:
- numpy array of 3 complex float, transformed polarization vector.
'''
M = np.sqrt(0.5)*np.array([[-1,1.0j,0],[0,0,np.sqrt(2)],[1.,1.0j,0]])
if len(np.shape(pol))>1:
return np.einsum('ij,kj->ik',M,pol).T
else:
return np.dot(M,pol)
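# Worked example (illustrative): for linear x-polarization pol = np.array([1, 0, 0]),
# pol_2_sph(pol) = sqrt(0.5)*np.array([-1, 0, 1]), i.e. equal weight in the Y_11 and Y_1-1
# channels and nothing along Y_10; for z-polarization [0, 0, 1] only the Y_10 channel survives.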
def poly(input_x,poly_args):
'''
Recursive polynomial function.
*args*:
- **input_x**: float, int or numpy array of numeric type, input value(s) at which to evaluate the polynomial
- **poly_args**: list of coefficients, in INCREASING polynomial order, i.e. [a_0,a_1,a_2] for y = a_0 + a_1*x + a_2*x**2
*return*:
- recursive call to *poly*; once *poly_args* is reduced to a single value, the explicit evaluation of the function is returned.
Same datatype as input, with int promoted to float if *poly_args* are float; the polynomial is evaluated over the domain of *input_x*
'''
if len(poly_args)==0:
return 0
else:
return input_x**(len(poly_args)-1)*poly_args[-1] + poly(input_x,poly_args[:-1])
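# Worked example (illustrative): poly(2.0, [1, 2, 3]) evaluates 1 + 2*2.0 + 3*2.0**2 = 17.0,
# confirming that the coefficients are read in increasing polynomial order.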
#
def progress_bar(N,Nmax):
'''
Utility function, generate string to print matrix element calculation progress.
*args*:
- **N**: int, number of iterations complete
- **Nmax**: int, total number of iterations to complete
*return*:
- **st**: string, progress status
'''
frac = N/Nmax
st = ''.join(['|' for i in range(int(frac*30))])
st = '{:30s}'.format(st)+'{:3d}%'.format(int(frac*100))
return st
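# Example output (illustrative): progress_bar(15, 30) returns a 30-character field containing
# 15 '|' characters, followed by ' 50%'.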
###############################################################################
###############################################################################
######################## ANGULAR INTEGRALS ####################################
###############################################################################
###############################################################################
def G_dic():
'''
Initialize the Gaunt coefficients associated with all possible relevant transitions.
*return*:
- **Gdict**: dictionary with keys as a string representing (l,l',m,dm), i.e. "ll'mdm", and values complex float.
All forbidden transitions are set to zero.
'''
llp = [[l,lp] for l in range(4) for lp in ([l-1,l+1] if (l-1)>=0 else [l+1])]
llpmu = [[l[0],l[1],m,u] for l in llp for m in np.arange(-l[0],l[0]+1,1) for u in [-1,0,1]]
keyvals = [[str(l[0])+str(l[1])+str(l[2])+str(l[3]), Ylm.gaunt(l[0],l[2],l[1]-l[0],l[3])] for l in llpmu]
G_dict = dict(keyvals)
for gi in G_dict:
if np.isnan(G_dict[gi]):
G_dict[gi]=0.0
return G_dict
def all_Y(basis):
'''
Build the L-M input-argument arrays for every combination of l,m in the basis. The idea is, for a given k-point, to have a single call
that evaluates all spherical harmonics at once. The pointer array orb_point is a list of lists where, for each projection in the basis, the integer
in the list indicates which row (first axis) of the Ylm array should be taken. This allows very quick access to the required l+/-1, m+/-1,0 Ylm
evaluations.
*args*:
- **basis**: list of orbital objects
*return*:
- **l_args**: numpy array of int, of shape len(*lm_inds*),3,2, with the latter two indicating the final state orbital angular momentum
- **m_args**: numpy array of int, of shape len(*lm_inds*),3,2, with the latter two indicating the final state azimuthal angular momentum
- **g_arr**: numpy array of float, shape len(*lm_inds*),3,2, providing the related Gaunt coefficients.
- **orb_point**: numpy array of int, matching the related sub-array of *l_args*, *m_args*, *g_arr* related to each orbital in basis
'''
maxproj = max([len(o.proj) for o in basis])
Gvals = G_dic()
lm_inds = []
l_args = []
m_args =[]
g_arr = []
orb_point = []
for o in basis:
point = np.zeros(maxproj)
for pi in range(len(o.proj)):
p = o.proj[pi]
lm = (p[2],p[3])
if lm not in lm_inds:
Yarr = ((np.ones((3,2))*np.array([lm[0]-1,lm[0]+1])).T,(np.ones((2,3))*np.array([lm[1]-1,lm[1]+0,lm[1]+1])))
l_args.append(Yarr[0])
m_args.append(Yarr[1])
g_arr.append(Gmat_make(lm,Gvals))
lm_inds.append(lm)
point[pi] = lm_inds.index(lm)
orb_point.append(point)
return np.array(l_args),np.array(m_args),np.array(g_arr),np.array(orb_point).astype(int)
def projection_map(basis):
'''
In order to improve efficiency, an array of orbital projections is generated, carrying every
orbital projection for the elements of the model basis. As these do not in general have the same length,
the second dimension of this array corresponds to the largest of the sets of projections associated with
a given orbital. In practice this remains a modest number of order 1: at worst we assume f-orbitals,
in which case a projection can be no longer than 7 entries, so the output is at worst a len(basis)x7 array of complex float.
*args*:
- **basis**: list of orbital objects
*return*:
- **projarr**: numpy array of complex float
'''
maxproj = max([len(o.proj) for o in basis])
projarr = np.zeros((len(basis),maxproj),dtype=complex)
for ii in range(len(basis)):
for pj in range(len(basis[ii].proj)):
proj = basis[ii].proj[pj]
projarr[ii,pj] = proj[0]+1.0j*proj[1]
return projarr
Yvect = np.vectorize(Ylm.Y,otypes=[complex])
def Gmat_make(lm,Gdictionary):
'''
Use the dictionary of relevant Gaunt coefficients to generate a small 2x3 array of
float which carries the relevant Gaunt coefficients for a given initial state.
*args*:
- **lm**: tuple of 2 int, initial state orbital angular momentum and azimuthal angular momentum
- **Gdictionary**: pre-calculated dictionary of Gaunt coefficients, with key-values associated with "ll'mdm"
*return*:
- **mats**: numpy array of float 2x3
'''
l = int(lm[0])
m = int(lm[1])
mats = np.zeros((2,3))
for lp in (-1,1):
for u in range(-1,2):
try:
mats[int((lp+1)/2),u+1] = Gdictionary['{:d}{:d}{:d}{:d}'.format(l,l+lp,m,u)]
except KeyError:
continue
return mats
def gen_SE_KK(w,SE_args):
'''
The total self-energy is computed using the Kramers-Kronig relations.
The user can pass the self-energy in the form of either a callable function, a list of polynomial coefficients, or a numpy array with shape Nx2 (with the first
column an array of frequency values, and the second the values of a function). For the latter option, the user is responsible for ensuring that the function goes
to zero at the tails of the domain. In the former two cases, the 'cut' parameter is used to impose an exponential cutoff near the edge of the domain to ensure this
is the case. In all cases the input imaginary self-energy must be single-signed to ensure it is a purely even function. It is forced to be negative in all cases to give
a positive spectral function.
With the input defined, along with the energy range of interest to the calculation, a much larger domain wf (100x the maximal extent of the energy region of interest) is defined.
This is the domain over which we evaluate the Hilbert transform, which itself is carried out using
the scipy.signal.hilbert() function. This function acts on an array f as H(f(x)) -> f(x) + i Hf(x). It relies on an FFT performed on the product of the sgn(w) and F(w) functions,
followed by an IFFT, so that we can extract the real part of the self-energy given only the imaginary input.
args:
w -- numpy array energy values for the spectral peaks used in the ARPES simulation
SE_args -- dictionary containing the 'imfunc' key value pair (values being either callable, list of polynomial prefactors (increasing order) or numpy array of energy and Im(SE) values)
-- for the first two options, a 'cut' key value pair is also required to force the function to vanish at the boundary of the Hilbert transform integration window.
return: self energy as a numpy array of complex float. The indexing matches that of w, the spectral features to be plotted in the matrix element simulation.
'''
if ('imfunc' not in SE_args):
print('Self-Energy Error: Incorrect Dictionary key inputs. User requires "imfunc" for functional form for imaginary part')
print('Returning a constant array of Im(SE) = -0.01, Re(SE) = 0.0')
return -0.01j*np.ones(len(w))
else:
if type(SE_args['imfunc'])==np.ndarray and np.shape(SE_args['imfunc'])[1]==2:
wf = SE_args['imfunc'][:,0]
imSE = SE_args['imfunc'][:,1]
else:
wlim = abs(w).max()
wf = np.arange(-100*wlim,100*wlim,(w[1]-w[0]))
if callable(SE_args['imfunc']):
imSE = SE_args['imfunc'](wf)
if np.real(imSE).max()==0.0:
print('Input WARNING: The imaginary part of the self-energy should be passed as real-valued function (i.e. suppress the 1.0j factor). Converting imaginary part to real float and proceeding.')
imSE = np.imag(imSE)
elif type(SE_args['imfunc'])==list or type(SE_args['imfunc'])==tuple or type(SE_args['imfunc'])==np.ndarray:
if np.real(SE_args['imfunc']).max()==0.0:
print('Input WARNING: Pass arguments for imaginary part of self-energy as real values. Passing imaginary part as the functional arguments')
SE_args['imfunc'] = np.imag(SE_args['imfunc'])
imSE = abs(poly(wf,SE_args['imfunc']))
else:
print('Invalid self-energy input format. Please see ARPES_lib.gen_SE for further details on input parameters')
print('Returning a constant array of Im(SE) = -0.01, Re(SE) = 0.0')
return -0.01j*np.ones(len(w))
### IMPOSE THE CUTOFF!
if abs(SE_args['cut'])>wf[-1]:
SE_args['cut'] = 0.9*wf[-1]
print('WARNING: INVALID CUTOFF (BEYOND HILBERT TRANSFORM DOMAIN). CUTTING TO: {:0.02f}'.format(SE_args['cut']))
wcut = np.where(abs(wf-abs(SE_args['cut']))==abs(wf-abs(SE_args['cut'])).min())[0][0],np.where(abs(wf+abs(SE_args['cut']))==abs(wf+abs(SE_args['cut'])).min())[0][0]
cut_width = wf[5]-wf[0]
imSE[:wcut[1]] = np.exp(-abs(wf[:wcut[1]]-wf[wcut[1]])/cut_width)*imSE[wcut[1]]
imSE[wcut[0]:] = np.exp(-abs(wf[wcut[0]:]-wf[wcut[0]])/cut_width)*imSE[wcut[0]]
##Check that IM(Self Energy) is positive/negative semi-definite. If positive, make negative
sign_imSE = np.sign(imSE)
sign_imSE = sign_imSE[abs(sign_imSE)>0]
if sum(sign_imSE)<len(sign_imSE):
print('WARNING: Invalid definition of imaginary part of self energy--values must all be single-signed. Returning constant -0.01j as Self-energy.')
return -0.01j*np.ones(len(w))
if sign_imSE[0]>0: #imaginary part of self energy should be <0
imSE*=-1
SEf = hilbert(imSE)
reSE = -SEf.imag
imSE = SEf.real
roi = np.where(wf<w.min())[0][-1]-10,np.where(wf>w.max())[0][0]+10
im_interp = interp1d(wf[roi[0]:roi[1]],imSE[roi[0]:roi[1]])
re_interp = interp1d(wf[roi[0]:roi[1]],reSE[roi[0]:roi[1]])
return re_interp(w) + im_interp(w)*1.0j
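# Hedged usage sketch (the lambda and cutoff values are assumptions for illustration):
#     w = np.linspace(-0.5, 0.1, 200)
#     SE_args = {'imfunc': lambda x: 0.005 + 0.1*x**2,   # Im(SE) passed as a real-valued function
#                'cut': 2.0}                             # exponential cutoff for the Hilbert window
#     SE = gen_SE_KK(w, SE_args)                         # complex array, same length as w
# A polynomial form would instead pass e.g. {'imfunc': [0.005, 0.0, 0.1], 'cut': 2.0}.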
###
| 43.169797
| 408
| 0.543438
|
65871d9167d953aad8ae2926c7bbe23b3a3c1871
| 3,942
|
py
|
Python
|
indico/modules/events/abstracts/clone.py
|
kadet1090/indico
|
36a1036017bdce5c910e889fea7017f18a1dd026
|
[
"MIT"
] | 1
|
2021-08-11T19:13:18.000Z
|
2021-08-11T19:13:18.000Z
|
indico/modules/events/abstracts/clone.py
|
kadet1090/indico
|
36a1036017bdce5c910e889fea7017f18a1dd026
|
[
"MIT"
] | null | null | null |
indico/modules/events/abstracts/clone.py
|
kadet1090/indico
|
36a1036017bdce5c910e889fea7017f18a1dd026
|
[
"MIT"
] | null | null | null |
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from indico.core.db import db
from indico.core.db.sqlalchemy.util.models import get_simple_column_attrs
from indico.core.db.sqlalchemy.util.session import no_autoflush
from indico.modules.events.abstracts.models.email_templates import AbstractEmailTemplate
from indico.modules.events.abstracts.models.review_questions import AbstractReviewQuestion
from indico.modules.events.abstracts.settings import abstracts_reviewing_settings, abstracts_settings, boa_settings
from indico.modules.events.cloning import EventCloner
from indico.modules.events.models.events import EventType
from indico.util.i18n import _
class AbstractSettingsCloner(EventCloner):
name = 'abstracts_settings'
friendly_name = _('Call for Abstracts (settings, email templates, review questions)')
requires = {'contribution_types', 'tracks'}
@property
def is_visible(self):
return self.old_event.type_ == EventType.conference
def has_conflicts(self, target_event):
return bool(target_event.abstract_review_questions) or bool(target_event.abstract_email_templates)
@no_autoflush
def run(self, new_event, cloners, shared_data, event_exists=False):
self._contrib_type_id_map = {old.id: new.id
for old, new in shared_data['contribution_types']['contrib_type_map'].iteritems()}
self._track_id_map = {old.id: new.id for old, new in shared_data['tracks']['track_map'].iteritems()}
self._clone_settings(new_event)
self._clone_email_templates(new_event)
self._clone_review_questions(new_event)
db.session.flush()
def _clone_settings(self, new_event):
old_settings = abstracts_settings.get_all(self.old_event, no_defaults=True)
offset = new_event.start_dt - self.old_event.start_dt
for key in ('start_dt', 'end_dt', 'modification_end_dt'):
if not old_settings.get(key):
continue
old_settings[key] += offset
abstracts_settings.set_multi(new_event, old_settings)
abstracts_reviewing_settings.set_multi(new_event, abstracts_reviewing_settings.get_all(self.old_event,
no_defaults=True))
boa_settings.set_multi(new_event, boa_settings.get_all(self.old_event, no_defaults=True))
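# Illustrative note (not part of the original source): the loop above shifts every configured
# deadline by the difference between the two event start dates. For example, if the old event
# started 2020-05-01 and the new one starts 2020-05-08, a stored 'end_dt' of 2020-04-15 becomes
# 2020-04-22 in the cloned settings.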
def _clone_email_templates(self, new_event):
attrs = get_simple_column_attrs(AbstractEmailTemplate) - {'rules'}
for old_tpl in self.old_event.abstract_email_templates:
tpl = AbstractEmailTemplate()
tpl.populate_from_attrs(old_tpl, attrs)
tpl.rules = filter(None, map(self._clone_email_template_rule, old_tpl.rules))
new_event.abstract_email_templates.append(tpl)
def _clone_email_template_rule(self, old_rule):
rule = {'state': old_rule['state']}
if 'track' in old_rule:
try:
rule['track'] = [self._track_id_map[t] for t in old_rule['track']]
except KeyError:
return None
if 'contribution_type' in old_rule:
try:
rule['contribution_type'] = [self._contrib_type_id_map[ct] for ct in old_rule['contribution_type']]
except KeyError:
return None
return rule
def _clone_review_questions(self, new_event):
attrs = get_simple_column_attrs(AbstractReviewQuestion)
for old_question in self.old_event.abstract_review_questions:
question = AbstractReviewQuestion()
question.populate_from_attrs(old_question, attrs)
new_event.abstract_review_questions.append(question)
| 47.493976
| 119
| 0.699645
|
f11c1e7b232788560290d087dcf9db32e2b5a70b
| 432
|
py
|
Python
|
WEB(BE)/vacation/urls.py
|
osamhack2021/WEB_SONAGI-ON_updraft
|
c63d62b8348ba991811814aeafa337a6b3785ca2
|
[
"MIT"
] | 1
|
2022-03-09T17:04:26.000Z
|
2022-03-09T17:04:26.000Z
|
WEB(BE)/vacation/urls.py
|
osamhack2021/WEB_SONAGI-ON_updraft
|
c63d62b8348ba991811814aeafa337a6b3785ca2
|
[
"MIT"
] | null | null | null |
WEB(BE)/vacation/urls.py
|
osamhack2021/WEB_SONAGI-ON_updraft
|
c63d62b8348ba991811814aeafa337a6b3785ca2
|
[
"MIT"
] | null | null | null |
from django.urls import path
from .views import VacationWriteView, VacationRewriteView, VacationDeleteView, VacationListView
urlpatterns = [
path("write", VacationWriteView.as_view(), name="write-vacation"),
path("rewrite", VacationRewriteView.as_view(), name="rewrite-vacation"),
path("delete", VacationDeleteView.as_view(), name="delete-vacation"),
path("list", VacationListView.as_view(), name="list-vacation")
]
| 48
| 95
| 0.75463
|
43ad9f5f7d9eac7b6b62d791d66abddd146c1227
| 2,419
|
py
|
Python
|
ch12/adaptive_lr.py
|
kbrezinski/stat-453-deep-learning
|
b10240b5c3a970231dcea9221d3d179d26fc197d
|
[
"BSD-3-Clause"
] | null | null | null |
ch12/adaptive_lr.py
|
kbrezinski/stat-453-deep-learning
|
b10240b5c3a970231dcea9221d3d179d26fc197d
|
[
"BSD-3-Clause"
] | null | null | null |
ch12/adaptive_lr.py
|
kbrezinski/stat-453-deep-learning
|
b10240b5c3a970231dcea9221d3d179d26fc197d
|
[
"BSD-3-Clause"
] | null | null | null |
import torch
import matplotlib.pyplot as plt
import numpy as np
plot = True
Tensor = torch.Tensor
class RMSProp:
def __init__(self, params: Tensor, lr: float, beta: int = 0.9, eps: float = 1e-8):
self.params = params
self.beta = beta
self.lr = lr
self.rms = 0
self.eps = eps
def step(self):
curr_rms = (1 - self.beta) * self.params.grad.pow(2)
self.rms = (self.beta * self.rms) + curr_rms
self.params -= self.lr * self.params.grad / (torch.sqrt(self.rms) + self.eps)
self.zero_grad()
def zero_grad(self):
self.params.grad = None
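# Worked single-step example (illustrative values): with lr=1e-3, beta=0.9 and a first gradient
# of 2.0, rms = 0.1 * 2.0**2 = 0.4, so the parameter moves by
# 1e-3 * 2.0 / (sqrt(0.4) + 1e-8) ~= 3.16e-3 before the gradient is cleared.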
class Adam:
def __init__(self, params: Tensor,
lr: float,
beta1: float = 0.9,
beta2: float = 0.999):
# user learning inputs
self.params = params
self.beta = (beta1, beta2)
self.lr = lr
# stateful Adam properties
self.rms = 0
self.mom = 0
self.t = 1
self.eps = 1e-8
def step(self):
curr_mom = (self.beta[0] * self.mom) + ((1 - self.beta[0]) * self.params.grad)
curr_rms = (self.beta[1] * self.rms) + ((1 - self.beta[1]) * self.params.grad.pow(2))
self.rms = curr_rms
self.mom = curr_mom
num = self.lr * (self.mom / (1 - self.beta[0] ** self.t))
denom = torch.sqrt(self.rms / (1 - self.beta[1] ** self.t)) + self.eps
self.params -= num / denom
self.zero_grad()
self.t += 1
def zero_grad(self):
self.params.grad = None
def main() -> None:
X = (torch.arange(0., 10.) + (.1 * torch.randn(1, 10))).unsqueeze(-1)
y = torch.arange(0., 10.)
w = torch.nn.Parameter(torch.ones(1), requires_grad=True)
# optimizer = RMSProp(params=w, lr=1e-3)
optimizer = Adam(params=w, lr=1e-3)
# begin training
for i in range(10):
# forward pass
loss = (torch.matmul(X, optimizer.params) - y).pow(2).sum()
loss.backward()
with torch.no_grad():
print(f"mean_square: {optimizer.rms} | mean_square: {optimizer.mom} | grad: {optimizer.params.grad.item():.4f}")
optimizer.step()
print(f"loss: {loss.item():.6f}")
if plot:
plt.scatter(X, y)
plt.plot(np.arange(0., 10.), np.arange(0., 10.) * w.detach().numpy(), '-r')
plt.show()
if __name__ == "__main__":
main()
| 24.938144
| 124
| 0.542373
|
5b45e3c6b4982892f7c9aad14676849f0553e104
| 13,322
|
py
|
Python
|
lib/galaxy/webapps/galaxy/api/visualizations.py
|
beatrizserrano/galaxy
|
e149d9d32e1bca6c07c38b1a9cdabfee60323610
|
[
"CC-BY-3.0"
] | null | null | null |
lib/galaxy/webapps/galaxy/api/visualizations.py
|
beatrizserrano/galaxy
|
e149d9d32e1bca6c07c38b1a9cdabfee60323610
|
[
"CC-BY-3.0"
] | 6
|
2021-11-11T20:57:49.000Z
|
2021-12-10T15:30:33.000Z
|
lib/galaxy/webapps/galaxy/api/visualizations.py
|
beatrizserrano/galaxy
|
e149d9d32e1bca6c07c38b1a9cdabfee60323610
|
[
"CC-BY-3.0"
] | null | null | null |
"""
Visualizations resource control over the API.
NOTE!: this is a work in progress and functionality and data structures
may change often.
"""
import json
import logging
from fastapi import (
Body,
Path,
Response,
status,
)
from galaxy import (
exceptions,
util,
web,
)
from galaxy.managers.context import ProvidesUserContext
from galaxy.model.item_attrs import UsesAnnotations
from galaxy.schema.fields import EncodedDatabaseIdField
from galaxy.schema.schema import (
SetSlugPayload,
ShareWithPayload,
ShareWithStatus,
SharingStatus,
)
from galaxy.web import expose_api
from galaxy.webapps.base.controller import UsesVisualizationMixin
from galaxy.webapps.base.webapp import GalaxyWebTransaction
from galaxy.webapps.galaxy.services.visualizations import VisualizationsService
from . import (
BaseGalaxyAPIController,
depends,
DependsOnTrans,
Router,
)
log = logging.getLogger(__name__)
router = Router(tags=["visualizations"])
VisualizationIdPathParam: EncodedDatabaseIdField = Path(
..., title="Visualization ID", description="The encoded database identifier of the Visualization."
)
@router.cbv
class FastAPIVisualizations:
service: VisualizationsService = depends(VisualizationsService)
@router.get(
"/api/visualizations/{id}/sharing",
summary="Get the current sharing status of the given Page.",
)
def sharing(
self,
trans: ProvidesUserContext = DependsOnTrans,
id: EncodedDatabaseIdField = VisualizationIdPathParam,
) -> SharingStatus:
"""Return the sharing status of the item."""
return self.service.shareable_service.sharing(trans, id)
@router.put(
"/api/visualizations/{id}/enable_link_access",
summary="Makes this item accessible by a URL link.",
)
def enable_link_access(
self,
trans: ProvidesUserContext = DependsOnTrans,
id: EncodedDatabaseIdField = VisualizationIdPathParam,
) -> SharingStatus:
"""Makes this item accessible by a URL link and return the current sharing status."""
return self.service.shareable_service.enable_link_access(trans, id)
@router.put(
"/api/visualizations/{id}/disable_link_access",
summary="Makes this item inaccessible by a URL link.",
)
def disable_link_access(
self,
trans: ProvidesUserContext = DependsOnTrans,
id: EncodedDatabaseIdField = VisualizationIdPathParam,
) -> SharingStatus:
"""Makes this item inaccessible by a URL link and return the current sharing status."""
return self.service.shareable_service.disable_link_access(trans, id)
@router.put(
"/api/visualizations/{id}/publish",
summary="Makes this item public and accessible by a URL link.",
)
def publish(
self,
trans: ProvidesUserContext = DependsOnTrans,
id: EncodedDatabaseIdField = VisualizationIdPathParam,
) -> SharingStatus:
"""Makes this item publicly available by a URL link and return the current sharing status."""
return self.service.shareable_service.publish(trans, id)
@router.put(
"/api/visualizations/{id}/unpublish",
summary="Removes this item from the published list.",
)
def unpublish(
self,
trans: ProvidesUserContext = DependsOnTrans,
id: EncodedDatabaseIdField = VisualizationIdPathParam,
) -> SharingStatus:
"""Removes this item from the published list and return the current sharing status."""
return self.service.shareable_service.unpublish(trans, id)
@router.put(
"/api/visualizations/{id}/share_with_users",
summary="Share this item with specific users.",
)
def share_with_users(
self,
trans: ProvidesUserContext = DependsOnTrans,
id: EncodedDatabaseIdField = VisualizationIdPathParam,
payload: ShareWithPayload = Body(...),
) -> ShareWithStatus:
"""Shares this item with specific users and return the current sharing status."""
return self.service.shareable_service.share_with_users(trans, id, payload)
@router.put(
"/api/visualizations/{id}/slug",
summary="Set a new slug for this shared item.",
status_code=status.HTTP_204_NO_CONTENT,
)
def set_slug(
self,
trans: ProvidesUserContext = DependsOnTrans,
id: EncodedDatabaseIdField = VisualizationIdPathParam,
payload: SetSlugPayload = Body(...),
):
"""Sets a new slug to access this item by URL. The new slug must be unique."""
self.service.shareable_service.set_slug(trans, id, payload)
return Response(status_code=status.HTTP_204_NO_CONTENT)
class VisualizationsController(BaseGalaxyAPIController, UsesVisualizationMixin, UsesAnnotations):
"""
RESTful controller for interactions with visualizations.
"""
service: VisualizationsService = depends(VisualizationsService)
@expose_api
def index(self, trans: GalaxyWebTransaction, **kwargs):
"""
GET /api/visualizations:
"""
rval = []
user = trans.user
# TODO: search for: title, made by user, creation time range, type (vis name), dbkey, etc.
# TODO: limit, offset, order_by
# TODO: deleted
# this is the default search - user's vis, vis shared with user, published vis
visualizations = self.get_visualizations_by_user(trans, user)
visualizations += self.get_visualizations_shared_with_user(trans, user)
visualizations += self.get_published_visualizations(trans, exclude_user=user)
# TODO: the admin case - everything
for visualization in visualizations:
item = self.get_visualization_summary_dict(visualization)
item = trans.security.encode_dict_ids(item)
item["url"] = web.url_for("visualization", id=item["id"])
rval.append(item)
return rval
@expose_api
def show(self, trans: GalaxyWebTransaction, id: str, **kwargs):
"""
GET /api/visualizations/{viz_id}
"""
# TODO: revisions should be a contents/nested controller like viz/xxx/r/xxx)?
# the important thing is the config
# TODO:?? /api/visualizations/registry -> json of registry.listings?
visualization = self.get_visualization(trans, id, check_ownership=False, check_accessible=True)
dictionary = trans.security.encode_dict_ids(self.get_visualization_dict(visualization))
dictionary["url"] = web.url_for(
controller="visualization",
action="display_by_username_and_slug",
username=visualization.user.username,
slug=visualization.slug,
)
dictionary["annotation"] = self.get_item_annotation_str(trans.sa_session, trans.user, visualization)
# need to encode ids in revisions as well
encoded_revisions = []
for revision in dictionary["revisions"]:
# NOTE: does not encode ids inside the configs
encoded_revisions.append(trans.security.encode_id(revision))
dictionary["revisions"] = encoded_revisions
dictionary["latest_revision"] = trans.security.encode_dict_ids(dictionary["latest_revision"])
if trans.app.visualizations_registry:
visualization = trans.app.visualizations_registry.get_plugin(dictionary["type"])
dictionary["plugin"] = visualization.to_dict()
return dictionary
@expose_api
def create(self, trans: GalaxyWebTransaction, payload: dict, **kwargs):
"""
POST /api/visualizations
creates a new visualization using the given payload
POST /api/visualizations?import_id={encoded_visualization_id}
imports a copy of an existing visualization into the user's workspace
"""
rval = None
if "import_id" in payload:
import_id = payload["import_id"]
visualization = self.import_visualization(trans, import_id, user=trans.user)
else:
payload = self._validate_and_parse_payload(payload)
# must have a type (I've taken this to be the visualization name)
if "type" not in payload:
raise exceptions.RequestParameterMissingException("key/value 'type' is required")
vis_type = payload.pop("type", False)
payload["save"] = True
try:
# generate defaults - this will err if given a weird key?
visualization = self.create_visualization(trans, vis_type, **payload)
except ValueError as val_err:
raise exceptions.RequestParameterMissingException(str(val_err))
rval = {"id": trans.security.encode_id(visualization.id)}
return rval
@expose_api
def update(self, trans: GalaxyWebTransaction, id: str, payload: dict, **kwargs):
"""
PUT /api/visualizations/{encoded_visualization_id}
"""
rval = None
payload = self._validate_and_parse_payload(payload)
# there's a differentiation here between updating the visualization and creating a new revision
# that needs to be handled clearly here
# or alternatively, using a different controller like PUT /api/visualizations/{id}/r/{id}
# TODO: consider allowing direct alteration of a revision's title (without a new revision)
# only create a new revision on a different config
# only update owned visualizations
visualization = self.get_visualization(trans, id, check_ownership=True)
title = payload.get("title", visualization.latest_revision.title)
dbkey = payload.get("dbkey", visualization.latest_revision.dbkey)
config = payload.get("config", visualization.latest_revision.config)
latest_config = visualization.latest_revision.config
if (
(title != visualization.latest_revision.title)
or (dbkey != visualization.latest_revision.dbkey)
or (json.dumps(config) != json.dumps(latest_config))
):
revision = self.add_visualization_revision(trans, visualization, config, title, dbkey)
rval = {"id": id, "revision": revision.id}
# allow updating vis title
visualization.title = title
trans.sa_session.flush()
return rval
def _validate_and_parse_payload(self, payload):
"""
Validate and parse the incoming data payload for a visualization.
"""
# This layer handles (most of the stricter idiot proofing):
# - unknown/unallowed keys
# - changing data keys from api key to attribute name
# - protection against bad data form/type
# - protection against malicious data content
# all other conversions and processing (such as permissions, etc.) should happen down the line
# keys listed here don't error when attempting to set, but fail silently
# this allows PUT'ing an entire model back to the server without attribute errors on uneditable attrs
valid_but_uneditable_keys = (
"id",
"model_class"
# TODO: fill out when we create to_dict, get_dict, whatevs
)
# TODO: deleted
# TODO: importable
ValidationError = exceptions.RequestParameterInvalidException
validated_payload = {}
for key, val in payload.items():
# TODO: validate types in VALID_TYPES/registry names at the mixin/model level?
if key == "type":
if not isinstance(val, str):
raise ValidationError(f"{key} must be a string or unicode: {str(type(val))}")
val = util.sanitize_html.sanitize_html(val)
elif key == "config":
if not isinstance(val, dict):
raise ValidationError(f"{key} must be a dictionary: {str(type(val))}")
elif key == "annotation":
if not isinstance(val, str):
raise ValidationError(f"{key} must be a string or unicode: {str(type(val))}")
val = util.sanitize_html.sanitize_html(val)
# these are keys that can actually only be *updated* at the revision level and not here
# (they are still valid for create, tho)
elif key == "title":
if not isinstance(val, str):
raise ValidationError(f"{key} must be a string or unicode: {str(type(val))}")
val = util.sanitize_html.sanitize_html(val)
elif key == "slug":
if not isinstance(val, str):
raise ValidationError(f"{key} must be a string: {str(type(val))}")
val = util.sanitize_html.sanitize_html(val)
elif key == "dbkey":
if not isinstance(val, str):
raise ValidationError(f"{key} must be a string or unicode: {str(type(val))}")
val = util.sanitize_html.sanitize_html(val)
elif key not in valid_but_uneditable_keys:
continue
# raise AttributeError( 'unknown key: %s' %( str( key ) ) )
validated_payload[key] = val
return validated_payload
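# Hedged example payload (field values are assumptions for illustration): the validator above
# would accept something like
#     {"type": "example_plugin", "title": "My chart", "dbkey": "hg19", "config": {"dataset_id": "abc"}}
# sanitizing the string fields and passing the dict-valued "config" through, while silently
# dropping any key that is neither validated here nor listed in valid_but_uneditable_keys.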
| 39.767164
| 111
| 0.654256
|
f017f04cf276557ad0b4aa91fd4351882bb16242
| 4,620
|
py
|
Python
|
route/user_setting.py
|
lsh23/openNAMU
|
18f780afe5e81ef1f347fe556b6fd98bb4914a53
|
[
"BSD-3-Clause"
] | null | null | null |
route/user_setting.py
|
lsh23/openNAMU
|
18f780afe5e81ef1f347fe556b6fd98bb4914a53
|
[
"BSD-3-Clause"
] | null | null | null |
route/user_setting.py
|
lsh23/openNAMU
|
18f780afe5e81ef1f347fe556b6fd98bb4914a53
|
[
"BSD-3-Clause"
] | null | null | null |
from .tool.func import *
def user_setting_2(conn, server_init):
curs = conn.cursor()
support_language = server_init.server_set_var['language']['list']
ip = ip_check()
if ban_check() == 1:
return re_error('/ban')
if ip_or_user(ip) == 0:
if flask.request.method == 'POST':
auto_list = ['email', 'skin', 'lang']
for auto_data in auto_list:
if flask.request.form.get(auto_data, '') != '':
curs.execute(db_change('select data from user_set where name = ? and id = ?'), [auto_data, ip])
if curs.fetchall():
curs.execute(db_change("update user_set set data = ? where name = ? and id = ?"), [flask.request.form.get(auto_data, ''), auto_data, ip])
else:
curs.execute(db_change("insert into user_set (name, id, data) values (?, ?, ?)"), [auto_data, ip, flask.request.form.get(auto_data, '')])
conn.commit()
return redirect('/change')
else:
curs.execute(db_change('select data from user_set where name = "email" and id = ?'), [ip])
data = curs.fetchall()
if data:
email = data[0][0]
else:
email = '-'
div2 = load_skin()
div3 = ''
curs.execute(db_change('select data from user_set where name = "lang" and id = ?'), [flask.session['id']])
data = curs.fetchall()
if not data:
curs.execute(db_change('select data from other where name = "language"'))
data = curs.fetchall()
if not data:
data = [['en-US']]
for lang_data in support_language:
if data and data[0][0] == lang_data:
div3 = '<option value="' + lang_data + '">' + lang_data + '</option>' + div3
else:
div3 += '<option value="' + lang_data + '">' + lang_data + '</option>'
oauth_provider = load_oauth('_README')['support']
oauth_content = '<ul>'
for i in range(len(oauth_provider)):
curs.execute(db_change('select name, picture from oauth_conn where wiki_id = ? and provider = ?'), [flask.session['id'], oauth_provider[i]])
oauth_data = curs.fetchall()
if len(oauth_data) == 1:
oauth_content += '<li>{}</li>'.format(oauth_provider[i].capitalize() + ' : <img src="{}" width="17px" height="17px"> {}'.format(oauth_data[0][1], oauth_data[0][0]))
else:
oauth_content += '<li>{}</li>'.format(oauth_provider[i].capitalize() + ' <a href="/oauth/{}/init">({})</a>'.format(oauth_provider[i], load_lang('connect')))
oauth_content += '</ul>'
http_warring = '<hr class=\"main_hr\"><span>' + load_lang('http_warring') + '</span>'
return easy_minify(flask.render_template(skin_check(),
imp = [load_lang('user_setting'), wiki_set(), custom(), other2([0, 0])],
data = '''
<form method="post">
<span>''' + load_lang('id') + ''' : ''' + ip + '''</span>
<hr class=\"main_hr\">
<a href="/pw_change">(''' + load_lang('password_change') + ''')</a>
<hr class=\"main_hr\">
<span>''' + load_lang('email') + ''' : ''' + email + '''</span> <a href="/email_change">(''' + load_lang('email_change') + ''')</a>
<hr class=\"main_hr\">
<span>''' + load_lang('skin') + '''</span>
<hr class=\"main_hr\">
<select name="skin">''' + div2 + '''</select>
<hr class=\"main_hr\">
<span>''' + load_lang('language') + '''</span>
<hr class=\"main_hr\">
<select name="lang">''' + div3 + '''</select>
<hr class=\"main_hr\">
<span>''' + load_lang('oauth_connection') + '''</span>
''' + oauth_content + '''
<hr class=\"main_hr\">
<button type="submit">''' + load_lang('save') + '''</button>
''' + http_warring + '''
</form>
''',
menu = [['user', load_lang('return')]]
))
else:
return redirect('/login')
| 49.148936
| 184
| 0.456926
|
21d79843c44930e6d6aa473a327bb0eb2c5a06bb
| 2,318
|
py
|
Python
|
examples/pixelcnn/main.py
|
joelgarde/flax
|
7d12d20f8272ce9c639711e92db89fdaf7f1a94a
|
[
"Apache-2.0"
] | 4
|
2020-05-28T11:25:47.000Z
|
2021-04-30T13:08:48.000Z
|
examples/pixelcnn/main.py
|
joelgarde/flax
|
7d12d20f8272ce9c639711e92db89fdaf7f1a94a
|
[
"Apache-2.0"
] | null | null | null |
examples/pixelcnn/main.py
|
joelgarde/flax
|
7d12d20f8272ce9c639711e92db89fdaf7f1a94a
|
[
"Apache-2.0"
] | 2
|
2021-04-30T13:08:58.000Z
|
2021-12-08T00:50:25.000Z
|
# Copyright 2021 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main file for running the PixelCNN example.
This file is intentionally kept short. The majority of the logic is in libraries
that can be easily tested and imported in Colab.
"""
from absl import app
from absl import flags
from absl import logging
from clu import platform
import sample
import train
import jax
from ml_collections import config_flags
import tensorflow as tf
FLAGS = flags.FLAGS
flags.DEFINE_string('workdir', None, 'Directory to store model data.')
config_flags.DEFINE_config_file(
'config',
None,
'File path to the training hyperparameter configuration.',
lock_config=True)
flags.DEFINE_bool('sample', False, 'Sample from a model in workdir.')
flags.mark_flags_as_required(['config', 'workdir'])
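# Illustrative invocation of the flags defined above (the config path and workdir
# here are hypothetical, not taken from this repository):
#   python main.py --config=configs/default.py --workdir=/tmp/pixelcnn
#   python main.py --config=configs/default.py --workdir=/tmp/pixelcnn --sample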
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
  # Hide any GPUs from TensorFlow. Otherwise TF might reserve memory and make
# it unavailable to JAX.
tf.config.experimental.set_visible_devices([], 'GPU')
logging.info('JAX host: %d / %d', jax.host_id(), jax.host_count())
logging.info('JAX local devices: %r', jax.local_devices())
# Add a note so that we can tell which task is which JAX host.
# (Depending on the platform task 0 is not guaranteed to be host 0)
platform.work_unit().set_task_status(
f'host_id: {jax.host_id()}, host_count: {jax.host_count()}')
platform.work_unit().create_artifact(platform.ArtifactType.DIRECTORY,
FLAGS.workdir, 'workdir')
if FLAGS.sample:
sample.save_images(
sample.generate_sample(FLAGS.config, FLAGS.workdir), 'sample.png')
else:
train.train_and_evaluate(FLAGS.config, FLAGS.workdir)
if __name__ == '__main__':
app.run(main)
| 32.647887
| 77
| 0.731665
|
7cbaeda80df6c36a7e60e860da660bbc8fefe504
| 6,272
|
py
|
Python
|
toontown/toon/DistributedNPCFisherman.py
|
DankMickey/Project-Altis-Educational-Source
|
0a74999fb52d4e690a41b984703119f63c372d20
|
[
"Apache-2.0"
] | 1
|
2021-06-25T02:56:32.000Z
|
2021-06-25T02:56:32.000Z
|
toontown/toon/DistributedNPCFisherman.py
|
AnythingTechPro/Project-Altis
|
7ead614abdb5072ca06323982de461f4e775d1b3
|
[
"Apache-2.0"
] | null | null | null |
toontown/toon/DistributedNPCFisherman.py
|
AnythingTechPro/Project-Altis
|
7ead614abdb5072ca06323982de461f4e775d1b3
|
[
"Apache-2.0"
] | 2
|
2021-02-25T06:02:05.000Z
|
2021-06-19T03:11:22.000Z
|
from direct.gui.DirectGui import *
from direct.interval.LerpInterval import LerpPosHprInterval
from direct.task.Task import Task
from pandac.PandaModules import *
import time
from toontown.toon.DistributedNPCToonBase import *
from toontown.toon import NPCToons
from toontown.chat.ChatGlobals import *
from toontown.fishing import FishSellGUI
from toontown.nametag.NametagGlobals import *
from toontown.toonbase import TTLocalizer
class DistributedNPCFisherman(DistributedNPCToonBase):
def __init__(self, cr):
DistributedNPCToonBase.__init__(self, cr)
self.isLocalToon = 0
self.av = None
self.button = None
self.popupInfo = None
self.fishGui = None
self.nextCollision = 0
self.npcType = 'Fisherman'
return
def disable(self):
self.ignoreAll()
taskMgr.remove(self.uniqueName('popupFishGUI'))
taskMgr.remove(self.uniqueName('lerpCamera'))
if self.popupInfo:
self.popupInfo.destroy()
self.popupInfo = None
if self.fishGui:
self.fishGui.destroy()
self.fishGui = None
self.av = None
if self.isLocalToon:
base.localAvatar.posCamera(0, 0)
DistributedNPCToonBase.disable(self)
return
def generate(self):
DistributedNPCToonBase.generate(self)
self.fishGuiDoneEvent = 'fishGuiDone'
def announceGenerate(self):
DistributedNPCToonBase.announceGenerate(self)
def initToonState(self):
self.setAnimState('neutral', 1.05, None, None)
npcOrigin = self.cr.playGame.hood.loader.geom.find('**/npc_fisherman_origin_%s;+s' % self.posIndex)
if not npcOrigin.isEmpty():
self.reparentTo(npcOrigin)
self.clearMat()
else:
self.notify.warning('announceGenerate: Could not find npc_fisherman_origin_' + str(self.posIndex))
return
def getCollSphereRadius(self):
return 1.0
def handleCollisionSphereEnter(self, collEntry):
self.currentTime = time.time()
if self.nextCollision > self.currentTime:
self.nextCollision = self.currentTime + 2
else:
base.cr.playGame.getPlace().fsm.request('purchase')
self.sendUpdate('avatarEnter', [])
self.nextCollision = self.currentTime + 2
def __handleUnexpectedExit(self):
self.notify.warning('unexpected exit')
self.av = None
return
def setupAvatars(self, av):
self.ignoreAvatars()
av.stopLookAround()
av.lerpLookAt(Point3(-0.5, 4, 0), time=0.5)
self.stopLookAround()
self.lerpLookAt(Point3(av.getPos(self)), time=0.5)
def resetFisherman(self):
self.ignoreAll()
taskMgr.remove(self.uniqueName('popupFishGUI'))
taskMgr.remove(self.uniqueName('lerpCamera'))
if self.fishGui:
self.fishGui.destroy()
self.fishGui = None
self.show()
self.startLookAround()
self.detectAvatars()
self.clearMat()
if self.isLocalToon:
self.freeAvatar()
return Task.done
def setMovie(self, mode, npcId, avId, extraArgs, timestamp):
timeStamp = ClockDelta.globalClockDelta.localElapsedTime(timestamp)
self.remain = NPCToons.CLERK_COUNTDOWN_TIME - timeStamp
self.npcId = npcId
self.isLocalToon = avId == base.localAvatar.doId
if mode == NPCToons.SELL_MOVIE_CLEAR:
return
if mode == NPCToons.SELL_MOVIE_TIMEOUT:
taskMgr.remove(self.uniqueName('lerpCamera'))
if self.isLocalToon:
self.ignore(self.fishGuiDoneEvent)
if self.popupInfo:
self.popupInfo.reparentTo(hidden)
if self.fishGui:
self.fishGui.destroy()
self.fishGui = None
self.setChatAbsolute(TTLocalizer.STOREOWNER_TOOKTOOLONG, CFSpeech | CFTimeout)
self.resetFisherman()
elif mode == NPCToons.SELL_MOVIE_START:
self.av = base.cr.doId2do.get(avId)
if self.av is None:
self.notify.warning('Avatar %d not found in doId' % avId)
return
else:
self.accept(self.av.uniqueName('disable'), self.__handleUnexpectedExit)
self.setupAvatars(self.av)
if self.isLocalToon:
camera.wrtReparentTo(render)
quat = Quat()
quat.setHpr((-150, -2, 0))
camera.posQuatInterval(1, Point3(-5, 9, base.localAvatar.getHeight() - 0.5), quat, other=self, blendType='easeOut').start()
if self.isLocalToon:
taskMgr.doMethodLater(1.0, self.popupFishGUI, self.uniqueName('popupFishGUI'))
elif mode == NPCToons.SELL_MOVIE_COMPLETE:
chatStr = TTLocalizer.STOREOWNER_THANKSFISH
self.setChatAbsolute(chatStr, CFSpeech | CFTimeout)
self.resetFisherman()
elif mode == NPCToons.SELL_MOVIE_TROPHY:
self.av = base.cr.doId2do.get(avId)
if self.av is None:
self.notify.warning('Avatar %d not found in doId' % avId)
return
else:
numFish, totalNumFish = extraArgs
self.setChatAbsolute(TTLocalizer.STOREOWNER_TROPHY % (numFish, totalNumFish), CFSpeech | CFTimeout)
self.resetFisherman()
elif mode == NPCToons.SELL_MOVIE_NOFISH:
chatStr = TTLocalizer.STOREOWNER_NOFISH
self.setChatAbsolute(chatStr, CFSpeech | CFTimeout)
self.resetFisherman()
elif mode == NPCToons.SELL_MOVIE_NO_MONEY:
self.notify.warning('SELL_MOVIE_NO_MONEY should not be called')
self.resetFisherman()
return
def __handleSaleDone(self, sell):
self.ignore(self.fishGuiDoneEvent)
self.sendUpdate('completeSale', [sell])
self.fishGui.destroy()
self.fishGui = None
return
def popupFishGUI(self, task):
self.setChatAbsolute('', CFSpeech)
self.acceptOnce(self.fishGuiDoneEvent, self.__handleSaleDone)
self.fishGui = FishSellGUI.FishSellGUI(self.fishGuiDoneEvent)
| 38.012121
| 139
| 0.626276
|
c2bc8f745d1537156d93a6d3fa986af332259e23
| 2,913
|
py
|
Python
|
samples/client/petstore/python/petstore_api/models/dog.py
|
GitHub30/openapi-generator
|
790f3d46aa5e6510d6f3701022d224f5f38fc73c
|
[
"Apache-2.0"
] | 5
|
2019-12-03T13:50:09.000Z
|
2021-11-14T12:59:48.000Z
|
samples/openapi3/client/petstore/python/petstore_api/models/dog.py
|
cedricziel/openapi-generator
|
a6a1264f252abd7c55e58a6653cdf308df88826e
|
[
"Apache-2.0"
] | 7
|
2021-03-01T21:26:03.000Z
|
2022-02-27T10:10:20.000Z
|
samples/openapi3/client/petstore/python/petstore_api/models/dog.py
|
cedricziel/openapi-generator
|
a6a1264f252abd7c55e58a6653cdf308df88826e
|
[
"Apache-2.0"
] | 4
|
2019-04-08T17:06:09.000Z
|
2020-06-09T18:16:08.000Z
|
# coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class Dog(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'breed': 'str'
}
attribute_map = {
'breed': 'breed'
}
def __init__(self, breed=None): # noqa: E501
"""Dog - a model defined in OpenAPI""" # noqa: E501
self._breed = None
self.discriminator = None
if breed is not None:
self.breed = breed
@property
def breed(self):
"""Gets the breed of this Dog. # noqa: E501
:return: The breed of this Dog. # noqa: E501
:rtype: str
"""
return self._breed
@breed.setter
def breed(self, breed):
"""Sets the breed of this Dog.
:param breed: The breed of this Dog. # noqa: E501
:type: str
"""
self._breed = breed
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Dog):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 25.778761
| 174
| 0.542739
|
252df59419c2216b1ff5ff2d4d2e1272e736d48b
| 5,021
|
py
|
Python
|
tests/unit/test_ansible_inventory.py
|
dav-pascual/mrack
|
f31b4ef1f1f847c3e95567ec012323be65a1e177
|
[
"Apache-2.0"
] | 1
|
2020-06-25T10:17:23.000Z
|
2020-06-25T10:17:23.000Z
|
tests/unit/test_ansible_inventory.py
|
dav-pascual/mrack
|
f31b4ef1f1f847c3e95567ec012323be65a1e177
|
[
"Apache-2.0"
] | 4
|
2020-07-15T15:57:51.000Z
|
2020-07-28T22:04:21.000Z
|
tests/unit/test_ansible_inventory.py
|
dav-pascual/mrack
|
f31b4ef1f1f847c3e95567ec012323be65a1e177
|
[
"Apache-2.0"
] | 3
|
2020-04-20T11:46:52.000Z
|
2020-07-01T11:55:51.000Z
|
import pytest
from mrack.errors import ConfigError
from mrack.outputs.ansible_inventory import AnsibleInventoryOutput, get_group
from .mock_data import (
common_inventory_layout,
create_metadata,
get_db_from_metadata,
provisioning_config,
)
def ensure_all_groups_present(metadata, inventory):
"""
Ensure that all groups defined in metadata hosts objects are present.
And contain the host
"""
for domain in metadata["domains"]:
for meta_host in domain["hosts"]:
for groupname in meta_host.get("groups"):
group = get_group(inventory, groupname)
assert group, "All defined groups in host must be in inventory"
assert "hosts" in group, "Group must contain hosts dict"
hosts = group["hosts"]
assert meta_host["name"] in hosts, "Group must contain the host"
def ensure_hosts_in_all_group(db, inventory):
"""Ensure that group 'all' contains all hosts from DB."""
all_group = inventory["all"]
hosts = all_group["hosts"]
db_hosts = db.hosts
assert len(db_hosts) > 0, "Make sure we do not work on empty data set"
required_attrs = [
"ansible_host",
"ansible_python_interpreter",
"ansible_user",
"meta_domain",
"meta_fqdn",
"meta_os",
"meta_ip",
"meta_provider_id",
"meta_role",
]
for name, dbhost in db_hosts.items():
assert dbhost.name in hosts, "All hosts must be present in inventory"
invhost = hosts[dbhost.name]
assert dbhost.ip_addr == invhost["meta_ip"], "IP from DB in inventory"
for attr in required_attrs:
            assert attr in invhost, "All required attrs are in host definition"
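# Minimal sketch of the inventory shape the helpers above assert on (hostnames and
# extra keys are hypothetical; only the asserted fields are shown):
#   {"all": {"hosts": {"host1.example.test": {"meta_ip": "192.0.2.10", ...}},
#            "children": {...}}}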
@pytest.fixture()
def metadata():
return create_metadata(ipaservers=1, ipaclients=1, ads=1)
@pytest.fixture()
def db(metadata):
return get_db_from_metadata(metadata)
@pytest.fixture()
def db_meta_extra(metadata):
return get_db_from_metadata(
metadata,
host_extra={ # Sample data
"meta_compose_id": "ID.0-20220317.0",
"meta_compose_url": "http://dummy.com/compose/compose_id",
},
)
def empty_layout():
return {
"all": {},
}
class TestAnsibleInventory:
@pytest.mark.parametrize(
"layout",
[
# It is more tolerant with falsy values
common_inventory_layout(),
None,
{},
empty_layout(),
[],
False,
],
)
def test_layouts(self, layout, db, metadata):
config = provisioning_config(layout)
ans_inv = AnsibleInventoryOutput(config, db, metadata)
inventory = ans_inv.create_inventory()
assert "all" in inventory, "Inventory must have group 'all'"
all_group = inventory["all"]
assert "hosts" in all_group, "All group must have 'hosts' dict"
assert "children" in all_group, "All group must have 'children' dict"
ensure_all_groups_present(metadata, inventory)
ensure_hosts_in_all_group(db, inventory)
@pytest.mark.parametrize(
"layout",
[
# Non-dict truthy values are not valid
["test"],
"test",
True,
(True, False),
],
)
def test_invalid_layouts(self, layout, db, metadata):
config = provisioning_config(layout)
ans_inv = AnsibleInventoryOutput(config, db, metadata)
with pytest.raises(ConfigError) as excinfo:
ans_inv.create_inventory()
assert "dictionary" in str(excinfo.value)
def test_meta_extra(self, db_meta_extra, metadata):
config = provisioning_config()
ans_inv = AnsibleInventoryOutput(config, db_meta_extra, metadata)
inventory = ans_inv.create_inventory()
first_hostname = metadata["domains"][0]["hosts"][0]["name"]
first_host = inventory["all"]["hosts"][first_hostname]
assert (
"meta_compose_url" in first_host
), "Host must have 'meta_compose_url' field"
assert "meta_compose_id" in first_host, "Host must have 'meta_compose_id' field"
def test_not_meta_extra(self, db, metadata):
"""
Because some images (such as Windows images) don't have extra meta data fields
like meta_compose_id and meta_compose_url, inventory shouldn't output them
if not passed.
"""
config = provisioning_config()
ans_inv = AnsibleInventoryOutput(config, db, metadata)
inventory = ans_inv.create_inventory()
first_hostname = metadata["domains"][0]["hosts"][0]["name"]
first_host = inventory["all"]["hosts"][first_hostname]
assert (
"meta_compose_url" not in first_host
), "Host must NOT have 'meta_compose_url' field"
assert (
"meta_compose_id" not in first_host
), "Host must NOT have 'meta_compose_id' field"
| 30.065868
| 88
| 0.625971
|
496090dee1b1a37f0258399f919c53044fd29a93
| 2,953
|
py
|
Python
|
src/z_ainc.py
|
takumihonda/BAIU2018_5.3.6
|
7d0768406aa44a0aeb03eacd5ab0a1141f5158e4
|
[
"MIT"
] | null | null | null |
src/z_ainc.py
|
takumihonda/BAIU2018_5.3.6
|
7d0768406aa44a0aeb03eacd5ab0a1141f5158e4
|
[
"MIT"
] | null | null | null |
src/z_ainc.py
|
takumihonda/BAIU2018_5.3.6
|
7d0768406aa44a0aeb03eacd5ab0a1141f5158e4
|
[
"MIT"
] | null | null | null |
from netCDF4 import Dataset
import numpy as np
import os
import sys
from datetime import datetime, timedelta
from tools_BAIU import prep_proj_multi
quick = True
quick = False
def main( time=datetime(2018, 7, 2, 0), hpa=500 ):
ctime = time.strftime('%Y%m%d%H%M%S')
anal = "/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/BAIU2018_5.3.6/{0:}/fcst_sno_np00001/mean/p_history.pe000000.nc".format( ctime )
gues = "/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/BAIU2018_5.3.6/{0:}/fcst_sno_np00001/mean_gues/p_history.pe000000.nc".format( ctime )
with Dataset( anal, "r", format="NETCDF4") as nc:
prs = nc.variables["pressure"][:] # hPa
idx_v = np.abs( ( prs - hpa ) ).argmin()
za = nc.variables["Gprs"][0,idx_v,:,:]
with Dataset( gues, "r", format="NETCDF4") as nc:
prs = nc.variables["pressure"][:] # hPa
idx_v = np.abs( ( prs - hpa ) ).argmin()
zb = nc.variables["Gprs"][0,idx_v,:,:]
lon = nc.variables["lon"][:]
lat = nc.variables["lat"][:]
import matplotlib.pyplot as plt
fig, ax1 = plt.subplots( 1, 1, figsize=( 8, 6.5 ) )
fig.subplots_adjust( left=0.05, bottom=0.05, right=0.95, top=0.95,
wspace=0.1, hspace=0.3)
lons = 105 + 6
lone = 165 - 6
late = 50
lats = 16
ax_l = [ ax1, ] # ax5, ax6 ]
m_l = prep_proj_multi('merc', ax_l, ll_lon=lons, ur_lon=lone,
ll_lat=lats, ur_lat=late, fs=6 )
x2d, y2d = m_l[0](lon, lat)
cmap = plt.cm.get_cmap("RdBu_r")
levs = np.arange( -20, 22, 2 )
var = za - zb
print( np.max( var ))
print( lon.shape )
SHADE = ax1.contourf( x2d, y2d, var, cmap=cmap, levels=levs,
extend='both' )
pos = ax1.get_position()
cb_width = 0.015
cb_height = pos.height*0.98
ax_cb = fig.add_axes( [pos.x1, pos.y0+0.01, cb_width, cb_height] )
cb = plt.colorbar( SHADE, cax=ax_cb, orientation = 'vertical', ticks=levs )
cb.ax.tick_params( labelsize=8 )
tit = "Z{0:0=3} analysis increment at {1:}".format( hpa, time.strftime('%HUTC %m/%d/%Y') )
ax1.text( 0.5, 1.01, tit,
fontsize=12, transform=ax1.transAxes,
ha='center',
va='bottom',
)
ofig = "1p_ainc_Z{0:0=3}_{1:}".format( hpa, time.strftime('%m%d%H') )
if not quick:
opath = "png/1p_z_ainc"
os.makedirs( opath, exist_ok=True )
ofig = os.path.join(opath, ofig + ".png")
plt.savefig(ofig,bbox_inches="tight", pad_inches = 0.1)
print(ofig)
plt.clf()
else:
print(ofig)
plt.show()
#######################
time = datetime( 2018, 7, 2, 12, 0 )
time = datetime( 2018, 7, 2, 6, 0 )
time = datetime( 2018, 7, 2, 18, 0 )
#time = datetime( 2018, 7, 3, 0, 0 )
stime = datetime( 2018, 7, 2, 0, 0 )
etime = datetime( 2018, 7, 3, 0, 0 )
#etime = stime
hpa = 850
hpa = 300
time = stime
while time <= etime:
main( time=time, hpa=hpa )
time += timedelta( hours=6 )
| 27.858491
| 143
| 0.585506
|
095ac6e5e6caaac1e0a5f43d3b45c7984e64a738
| 15,814
|
py
|
Python
|
tf2onnx/utils.py
|
brevettiai/tensorflow-onnx
|
d4be5af3c2ee6cd9a2053499715661bda7c392ae
|
[
"Apache-2.0"
] | null | null | null |
tf2onnx/utils.py
|
brevettiai/tensorflow-onnx
|
d4be5af3c2ee6cd9a2053499715661bda7c392ae
|
[
"Apache-2.0"
] | null | null | null |
tf2onnx/utils.py
|
brevettiai/tensorflow-onnx
|
d4be5af3c2ee6cd9a2053499715661bda7c392ae
|
[
"Apache-2.0"
] | null | null | null |
# SPDX-License-Identifier: Apache-2.0
"""
tf2onnx.utils - misc utilities for tf2onnx
"""
import os
import re
import shutil
import tempfile
import types
import zipfile
import logging
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
import numpy as np
from google.protobuf import text_format
from onnx import helper, onnx_pb, defs, numpy_helper, ModelProto, __version__
from . import constants
logger = logging.getLogger(__file__)
#
# mapping dtypes from onnx to numpy
#
ONNX_TO_NUMPY_DTYPE = {
onnx_pb.TensorProto.FLOAT: np.float32,
onnx_pb.TensorProto.FLOAT16: np.float16,
onnx_pb.TensorProto.DOUBLE: np.float64,
onnx_pb.TensorProto.INT32: np.int32,
onnx_pb.TensorProto.INT16: np.int16,
onnx_pb.TensorProto.INT8: np.int8,
onnx_pb.TensorProto.UINT8: np.uint8,
onnx_pb.TensorProto.UINT16: np.uint16,
onnx_pb.TensorProto.INT64: np.int64,
onnx_pb.TensorProto.UINT64: np.uint64,
onnx_pb.TensorProto.BOOL: np.bool,
onnx_pb.TensorProto.COMPLEX64: np.complex64,
onnx_pb.TensorProto.COMPLEX128: np.complex128,
onnx_pb.TensorProto.STRING: np.object,
}
#
# onnx dtype names
#
ONNX_DTYPE_NAMES = {
onnx_pb.TensorProto.FLOAT: "float",
onnx_pb.TensorProto.FLOAT16: "float16",
onnx_pb.TensorProto.DOUBLE: "double",
onnx_pb.TensorProto.INT32: "int32",
onnx_pb.TensorProto.INT16: "int16",
onnx_pb.TensorProto.INT8: "int8",
onnx_pb.TensorProto.UINT8: "uint8",
onnx_pb.TensorProto.UINT16: "uint16",
onnx_pb.TensorProto.INT64: "int64",
onnx_pb.TensorProto.STRING: "string",
onnx_pb.TensorProto.BOOL: "bool",
onnx_pb.TensorProto.COMPLEX64: "complex64",
onnx_pb.TensorProto.COMPLEX128: "complex128"
}
class TensorValueInfo(object):
def __init__(self, tensor_id, g):
self.id = tensor_id
if self.id:
self.dtype = g.get_dtype(tensor_id)
self.shape = g.get_shape(tensor_id)
ONNX_UNKNOWN_DIMENSION = -1
ONNX_EMPTY_INPUT = ""
# index for internally generated names
INTERNAL_NAME = 1
# Fake onnx op type which is used for Graph input.
GRAPH_INPUT_TYPE = "NON_EXISTENT_ONNX_TYPE"
def make_name(name):
"""Make op name for inserted ops."""
global INTERNAL_NAME
INTERNAL_NAME += 1
return "{}__{}".format(name, INTERNAL_NAME)
def split_nodename_and_shape(name):
"""input name with shape into name and shape."""
# pattern for a node name
inputs = []
shapes = {}
# input takes in most cases the format name:0, where 0 is the output number
    # in some cases placeholders don't have a rank which onnx can't handle, so we let users override the shape
    # by appending it to the name, ie: name[1,28,28,3]
name_pattern = r"(?:([\w\d/\-\._:]+)(\[[\-\d,]+\])?),?"
splits = re.split(name_pattern, name)
for i in range(1, len(splits), 3):
inputs.append(splits[i])
if splits[i + 1] is not None:
shape = [int(n) for n in splits[i + 1][1:-1].split(",")]
shape = [n if n >= 0 else None for n in shape]
shapes[splits[i]] = shape
if not shapes:
shapes = None
return inputs, shapes
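# Illustrative behaviour of split_nodename_and_shape (a sketch, assuming the
# name[shape] convention described in the comments above):
#   split_nodename_and_shape("input:0[1,28,28,3]")
#     -> (["input:0"], {"input:0": [1, 28, 28, 3]})
#   split_nodename_and_shape("input:0")
#     -> (["input:0"], None)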
def map_numpy_to_onnx_dtype(np_dtype):
for onnx_dtype, numpy_dtype in ONNX_TO_NUMPY_DTYPE.items():
if numpy_dtype == np_dtype:
return onnx_dtype
raise ValueError("unsupported numpy dtype '%s' for mapping to onnx" % np_dtype)
def map_onnx_to_numpy_type(onnx_type):
return ONNX_TO_NUMPY_DTYPE[onnx_type]
def node_name(name):
"""Get node name without io#."""
pos = name.find(":")
if pos >= 0:
return name[:pos]
return name
def make_onnx_shape(shape):
"""shape with -1 is not valid in onnx ... make it a name."""
if shape:
# don't do this if input is a scalar
return [make_name("unk") if i == -1 else i for i in shape]
return shape
def port_name(name, nr=0):
"""Map node output number to name."""
return name + ":" + str(nr)
class SeqType:
"""Wrap around TensorProto.* to signify a tensor sequence of a given type"""
def __init__(self, tensor_dtype):
self.dtype = tensor_dtype
def __eq__(self, other):
if isinstance(other, SeqType):
return self.dtype == other.dtype
return NotImplemented
def __repr__(self):
return "SeqType(%r)" % self.dtype
def make_onnx_inputs_outputs(name, elem_type, shape, **kwargs):
"""Wrapper for creating onnx graph inputs or outputs
name, # type: Text
elem_type, # type: TensorProto.DataType
shape, # type: Optional[Sequence[int]]
"""
if elem_type is None:
elem_type = onnx_pb.TensorProto.UNDEFINED
elif isinstance(elem_type, SeqType):
return helper.make_sequence_value_info(name, elem_type.dtype, make_onnx_shape(shape), **kwargs)
return helper.make_tensor_value_info(
name,
elem_type,
make_onnx_shape(shape),
**kwargs
)
def find_opset(opset):
"""Find opset."""
if opset is None or opset == 0:
opset = defs.onnx_opset_version()
if opset > constants.PREFERRED_OPSET:
# if we use a newer onnx opset than most runtimes support, default to the one most supported
opset = constants.PREFERRED_OPSET
return opset
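# Illustrative behaviour of find_opset (follows directly from the code above):
#   find_opset(None) -> min(defs.onnx_opset_version(), constants.PREFERRED_OPSET)
#   find_opset(9)    -> 9   (assuming 9 <= constants.PREFERRED_OPSET)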
def save_onnx_model(save_path_root, onnx_file_name, feed_dict, model_proto, include_test_data=False, as_text=False,
external_tensor_storage=None):
"""Save onnx model as file. Save a pbtxt file as well if as_text is True"""
save_path = save_path_root
if not os.path.exists(save_path):
os.makedirs(save_path)
if include_test_data:
data_path = os.path.join(save_path, "test_data_set_0")
if not os.path.exists(data_path):
os.makedirs(data_path)
i = 0
for data_key in feed_dict:
data = feed_dict[data_key]
t = numpy_helper.from_array(data)
t.name = data_key
data_full_path = os.path.join(data_path, "input_" + str(i) + ".pb")
save_protobuf(data_full_path, t)
i += 1
if external_tensor_storage is None:
target_path = os.path.join(save_path, onnx_file_name + ".onnx")
save_protobuf(target_path, model_proto)
else:
zip_path = os.path.join(save_path, onnx_file_name + ".zip")
save_onnx_zip(zip_path, model_proto, external_tensor_storage)
with zipfile.ZipFile(zip_path, 'r') as z:
z.extractall(save_path)
target_path = os.path.join(save_path, "__MODEL_PROTO.onnx")
if as_text:
save_protobuf(target_path + ".pbtxt", model_proto, as_text=True)
return target_path
def save_onnx_zip(target_path, model_proto, external_tensor_storage):
with zipfile.ZipFile(target_path, 'w') as z:
z.writestr("__MODEL_PROTO.onnx", model_proto.SerializeToString())
for k, v in external_tensor_storage.name_to_tensor_data.items():
z.writestr(k, v)
def make_sure(bool_val, error_msg, *args):
if not bool_val:
raise ValueError("make_sure failure: " + error_msg % args)
def is_cpp_protobuf():
return isinstance(ModelProto().ParseFromString, types.BuiltinFunctionType)
def construct_graph_from_nodes(parent_g, nodes, outputs, shapes, dtypes):
"""Construct Graph from nodes and outputs with specified shapes and dtypes."""
# pylint: disable=protected-access
g = parent_g.create_new_graph_with_same_config()
g.parent_graph = parent_g
nodes = set(nodes)
all_outputs = set()
for op in nodes:
all_outputs |= set(op.output)
branches = {}
body_graphs = op.graph.contained_graphs.pop(op.name, None)
if body_graphs:
for attr_name, body_graph in body_graphs.items():
body_graph.parent_graph = g
branches[attr_name] = body_graph
_ = g.make_node(op.type, op.input, outputs=op.output, attr=op.attr, name=op.name,
skip_conversion=op.skip_conversion, infer_shape_dtype=False, branches=branches)
for i in all_outputs:
if i not in g._output_shapes:
g._output_shapes[i] = parent_g._output_shapes[i]
if i not in g._dtypes:
g._dtypes[i] = parent_g._dtypes[i]
    # handle cell graph: insert identity node, since sometimes we need to output the
    # same output_id as state_output and scan_out, but ONNX doesn't allow the same
    # output_id to appear more than once as an output node.
new_output_names = []
for output, shape, dtype in zip(outputs, shapes, dtypes):
node = g.make_node("Identity", inputs=[output], op_name_scope="sub_graph_ending_node",
shapes=[shape], dtypes=[dtype], infer_shape_dtype=False)
new_output_names.append(node.output[0])
g.outputs = new_output_names
return g
def tf_name_scope(name):
return '/'.join(name.split('/')[:-1])
def get_temp_directory():
return os.environ.get("TF2ONNX_TEMP_DIRECTORY", tempfile.mkdtemp())
def delete_directory(path):
if os.path.exists(path):
shutil.rmtree(path)
def save_protobuf(path, message, as_text=False):
dir_name = os.path.dirname(path)
if dir_name:
os.makedirs(dir_name, exist_ok=True)
if as_text:
with open(path, "w") as f:
f.write(text_format.MessageToString(message))
else:
with open(path, "wb") as f:
f.write(message.SerializeToString())
def model_proto_from_file(model_path):
model_proto = ModelProto()
with open(model_path, "rb") as f:
model_proto.ParseFromString(f.read())
return model_proto
def model_proto_from_zip(zip_path, external_tensor_storage):
model_proto = ModelProto()
with zipfile.ZipFile(zip_path, 'r') as z:
for n in z.namelist():
f = z.open(n)
if n.endswith(".onnx"):
model_proto.ParseFromString(f.read())
else:
external_tensor_storage.name_to_tensor_data[n] = f.read()
return model_proto
def is_list_or_tuple(obj):
return isinstance(obj, (list, tuple))
def is_unknown_dimension(dim):
""" Return true if dim is not a positive integer value. """
if dim is None or not isinstance(dim, int):
return True
return dim <= 0
def merge_shapes(shape1, shape2):
"""
Merge 2 shapes, return merged shape, choose more specific dimension value from either side.
Raise exception for mismatch.
"""
if shape1 is None:
return shape2
if shape2 is None:
return shape1
make_sure(is_list_or_tuple(shape1), "invalid type for shape1")
make_sure(is_list_or_tuple(shape2), "invalid type for shape2")
make_sure(len(shape1) == len(shape2), "shapes rank mismatch: shape1=%s, shape2=%s", shape1, shape2)
merged = []
for d1, d2 in zip(shape1, shape2):
d = d1
if is_unknown_dimension(d1):
d = d2
elif not is_unknown_dimension(d2):
make_sure(d1 == d2, "shapes dimension mismatch: shape1=%s, shape2=%s", shape1, shape2)
merged.append(d)
return merged
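# Illustrative examples of the merge rule above (unknown dims give way to known ones):
#   merge_shapes([None, 28, 28, 3], [1, 28, 28, None]) -> [1, 28, 28, 3]
#   merge_shapes([1, 28], [1, 32]) raises ValueError (dimension mismatch)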
def are_shapes_compatible(src, dest):
"""
Returns True iff src is compatible with dest.
None is compatible with all shapes, different ranks are not considered as compatible
"""
try:
merge_shapes(src, dest)
return True
except: # pylint: disable=bare-except
return False
def are_shapes_equal(src, dest):
""" Check whether 2 shapes are equal. """
if src is None:
return dest is None
if dest is None:
return src is None
make_sure(is_list_or_tuple(src), "invalid type for src")
make_sure(is_list_or_tuple(dest), "invalid type for dest")
if len(src) != len(dest):
return False
return all(i == j for i, j in zip(src, dest))
def create_vague_shape_like(shape):
make_sure(len(shape) >= 0, "rank should be >= 0")
return [-1 for i in enumerate(shape)]
def get_onnx_version():
return __version__
def make_opsetid(domain, version):
make_sure(isinstance(version, int), "version must be an integer")
return helper.make_opsetid(domain, version)
def is_onnx_domain(domain):
if domain is None or domain == "":
return True
return False
def parse_bool(val):
if val is None:
return False
return val.lower() in ("yes", "true", "t", "y", "1")
_is_debug_mode = parse_bool(os.environ.get(constants.ENV_TF2ONNX_DEBUG_MODE))
def is_debug_mode():
return _is_debug_mode
def set_debug_mode(enabled):
global _is_debug_mode
_is_debug_mode = enabled
def get_max_value(np_dtype):
return np.iinfo(np_dtype).max
def get_min_value(np_dtype):
return np.iinfo(np_dtype).min
def get_url(url, path, max_retries=5):
""" Download url and save to path. """
retries = Retry(total=max_retries, backoff_factor=0.1, status_forcelist=[500, 502, 503, 504])
adapter = HTTPAdapter(max_retries=retries)
session = requests.Session()
session.mount("http://", adapter)
session.mount("https://", adapter)
response = session.get(url, allow_redirects=True)
if response.status_code not in [200]:
response.raise_for_status()
dir_name = os.path.dirname(path)
if dir_name:
os.makedirs(dir_name, exist_ok=True)
with open(path, "wb") as f:
f.write(response.content)
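# Illustrative usage of get_url (URL and destination path are hypothetical):
#   get_url("https://example.com/model.onnx", "/tmp/model.onnx")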
def have_same_inference_value(g, output_1, output_2):
"""
If two outputs have the same value in inference.
Check whether they come from the same subgraph and the same subgraphs
contain nodes with the same attributes and share the same ancestors.
"""
def is_same(node_1, node_2):
        # if the two nodes are the same instance they trivially match
if node_1 == node_2:
return True
# check body graph
if node_1.get_body_graphs() or node_2.get_body_graphs():
logger.warning("Comparing two nodes containing body graph isn't supported.")
return False
# check domain
if node_1.domain != node_2.domain:
return False
# check type
if node_1.type != node_2.type:
return False
# check onnx attributes
if node_1.get_onnx_attrs().keys() != node_2.get_onnx_attrs().keys():
return False
for name in node_1.get_onnx_attrs().keys(): # pylint: disable=consider-iterating-dictionary
if node_1.get_attr_value(name) != node_2.get_attr_value(name):
return False
return True
if output_1 == output_2:
return True
node_1 = g.get_node_by_output(output_1)
node_2 = g.get_node_by_output(output_2)
# compare their domain, attr, etc. see __eq__ in Node class
if not is_same(node_1, node_2):
return False
for inp_1, inp_2 in zip(node_1.input, node_2.input):
if not have_same_inference_value(g, inp_1, inp_2):
return False
return True
def is_tf_reverse_op(op):
return op.type in ("ReverseV2", "ReverseSequence")
def is_tf_concat_op(op):
return op.type in ("Concat", "ConcatV2", "ConcatV3")
def is_tf_tensor_array_gather_op(op):
return op.type in ("TensorArrayGatherV2", "TensorArrayGatherV3")
def is_tf_tensor_array_write_op(op):
return op.type in ("TensorArrayWriteV2", "TensorArrayWriteV3")
def is_tf_tensor_array_read_op(op):
return op.type in ("TensorArrayReadV2", "TensorArrayReadV3")
def is_tf_tensor_array_op(op):
return op.type in ("TensorArrayV2", "TensorArrayV3")
def is_tf_loopcond_op(op):
return op.type == "LoopCond"
def is_tf_select_op(op):
return op.type in ("Select", "SelectV2")
def is_tf_slice_op(op):
return op.type == "Slice"
def is_tf_const_op(op):
return op.type in ["Const", "ConstV2"]
| 30.064639
| 115
| 0.665423
|
966e66fcb677b846e5397370ec8ba028e58fdde8
| 4,081
|
py
|
Python
|
strategies/custsignalmod.py
|
jthhk/Binance-volatility-trading-bot
|
d815716d2161c5d07cea0506049d73450bd5ef5b
|
[
"MIT"
] | 4
|
2021-11-10T11:47:39.000Z
|
2022-02-03T07:07:37.000Z
|
strategies/custsignalmod.py
|
jthhk/Binance-volatility-trading-bot
|
d815716d2161c5d07cea0506049d73450bd5ef5b
|
[
"MIT"
] | null | null | null |
strategies/custsignalmod.py
|
jthhk/Binance-volatility-trading-bot
|
d815716d2161c5d07cea0506049d73450bd5ef5b
|
[
"MIT"
] | 1
|
2022-01-14T13:03:56.000Z
|
2022-01-14T13:03:56.000Z
|
# Available indicators here: https://python-tradingview-ta.readthedocs.io/en/latest/usage.html#retrieving-the-analysis
from tradingview_ta import TA_Handler, Interval, Exchange
# use for environment variables
import os
# use if needed to pass args to external modules
import sys
# used for directory handling
import glob
# used for dates
from datetime import date, datetime, timedelta
import time
import threading
# my helper utils
from helpers.os_utils import(rchop)
from helpers.parameters import parse_args, load_config
args = parse_args()
DEFAULT_CONFIG_FILE = 'config.yml'
config_file = args.config if args.config else DEFAULT_CONFIG_FILE
parsed_config = load_config(config_file)
USE_MOST_VOLUME_COINS = parsed_config['trading_options']['USE_MOST_VOLUME_COINS']
PAIR_WITH = parsed_config['trading_options']['PAIR_WITH']
OSC_INDICATORS = ['MACD', 'Stoch.RSI', 'RSI'] # Indicators to use in Oscillator analysis
OSC_THRESHOLD = 3 # Must be less or equal to number of items in OSC_INDICATORS
MA_INDICATORS = ['EMA20', 'EMA100', 'EMA200'] # Indicators to use in Moving averages analysis
MA_THRESHOLD = 3 # Must be less or equal to number of items in MA_INDICATORS
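# Worked example of the threshold rule applied in analyze() below: with the settings
# above, a coin signals only if all 3 of MACD, Stoch.RSI and RSI and all 3 of EMA20,
# EMA100 and EMA200 report BUY; lowering OSC_THRESHOLD to 2 would accept any 2 of the
# 3 oscillators instead.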
INTERVAL = Interval.INTERVAL_1_MINUTE #Timeframe for analysis
EXCHANGE = 'BINANCE'
SCREENER = 'CRYPTO'
if USE_MOST_VOLUME_COINS == True:
#if ABOVE_COINS_VOLUME == True:
TICKERS = "volatile_volume_" + str(date.today()) + ".txt"
else:
TICKERS = 'tickers.txt' #'signalsample.txt'
#TICKERS = 'signalsample.txt'
TIME_TO_WAIT = 1 # Minutes to wait between analysis
FULL_LOG = False # List analysis result to console
def analyze(pairs):
signal_coins = {}
analysis = {}
handler = {}
if os.path.exists('signals/custsignalmod.exs'):
os.remove('signals/custsignalmod.exs')
for pair in pairs:
handler[pair] = TA_Handler(
symbol=pair,
exchange=EXCHANGE,
screener=SCREENER,
interval=INTERVAL,
timeout= 10)
for pair in pairs:
try:
analysis = handler[pair].get_analysis()
except Exception as e:
print("Signalsample:")
print("Exception:")
print(e)
print (f'Coin: {pair}')
print (f'handler: {handler[pair]}')
oscCheck=0
maCheck=0
for indicator in OSC_INDICATORS:
if analysis.oscillators ['COMPUTE'][indicator] == 'BUY': oscCheck +=1
for indicator in MA_INDICATORS:
if analysis.moving_averages ['COMPUTE'][indicator] == 'BUY': maCheck +=1
if FULL_LOG:
print(f'Custsignalmod:{pair} Oscillators:{oscCheck}/{len(OSC_INDICATORS)} Moving averages:{maCheck}/{len(MA_INDICATORS)}')
if oscCheck >= OSC_THRESHOLD and maCheck >= MA_THRESHOLD:
signal_coins[pair] = pair
print(f'Custsignalmod: Signal detected on {pair} at {oscCheck}/{len(OSC_INDICATORS)} oscillators and {maCheck}/{len(MA_INDICATORS)} moving averages.')
with open('signals/custsignalmod.exs','a+') as f:
f.write(pair + '\n')
return signal_coins
def do_work():
try:
signal_coins = {}
pairs = {}
        with open(TICKERS) as f:
            pairs = [line.strip() + PAIR_WITH for line in f]
while True:
if not threading.main_thread().is_alive(): exit()
print(f'Custsignalmod: Analyzing {len(pairs)} coins')
signal_coins = analyze(pairs)
print(f'Custsignalmod: {len(signal_coins)} coins above {OSC_THRESHOLD}/{len(OSC_INDICATORS)} oscillators and {MA_THRESHOLD}/{len(MA_INDICATORS)} moving averages Waiting {TIME_TO_WAIT} minutes for next analysis.')
time.sleep((TIME_TO_WAIT*60))
except Exception as e:
        print(f'Custsignalmod: Exception do_work() 1: {e}')
print("Error on line {}".format(sys.exc_info()[-1].tb_lineno))
pass
except KeyboardInterrupt as ki:
pass
| 36.115044
| 224
| 0.657927
|
4df3bd6ac92ce6f5531252987c9efa62d1d20269
| 779
|
py
|
Python
|
checkov/common/bridgecrew/integration_features/integration_feature_registry.py
|
jamesholland-uk/checkov
|
d73fd4bd7096d48ab3434a92a177bcc55605460a
|
[
"Apache-2.0"
] | 4,013
|
2019-12-09T13:16:54.000Z
|
2022-03-31T14:31:01.000Z
|
checkov/common/bridgecrew/integration_features/integration_feature_registry.py
|
jamesholland-uk/checkov
|
d73fd4bd7096d48ab3434a92a177bcc55605460a
|
[
"Apache-2.0"
] | 1,258
|
2019-12-17T09:55:51.000Z
|
2022-03-31T19:17:17.000Z
|
checkov/common/bridgecrew/integration_features/integration_feature_registry.py
|
jamesholland-uk/checkov
|
d73fd4bd7096d48ab3434a92a177bcc55605460a
|
[
"Apache-2.0"
] | 638
|
2019-12-19T08:57:38.000Z
|
2022-03-30T21:38:37.000Z
|
class IntegrationFeatureRegistry:
def __init__(self):
self.features = []
def register(self, integration_feature):
self.features.append(integration_feature)
self.features.sort(key=lambda f: f.order)
def run_pre_scan(self):
for integration in self.features:
if integration.is_valid():
integration.pre_scan()
def run_pre_runner(self):
for integration in self.features:
if integration.is_valid():
integration.pre_runner()
def run_post_runner(self, scan_reports):
for integration in self.features:
if integration.is_valid():
integration.post_runner(scan_reports)
integration_feature_registry = IntegrationFeatureRegistry()
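# Illustrative registration (the feature object is hypothetical; it only needs the
# attributes used above: an `order` value, `is_valid()`, and the pre/post hooks):
#   integration_feature_registry.register(my_integration_feature)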
| 26.862069
| 59
| 0.654685
|
1e9a0c30ea225459810cadbdc2ebb0220071d3b7
| 7,293
|
py
|
Python
|
loc/helper/deco.py
|
guluc3m/loc-server
|
b25b6b2deec5d27c840d60f33e5aa33bd56ba08a
|
[
"MIT"
] | null | null | null |
loc/helper/deco.py
|
guluc3m/loc-server
|
b25b6b2deec5d27c840d60f33e5aa33bd56ba08a
|
[
"MIT"
] | 7
|
2017-12-10T17:12:04.000Z
|
2017-12-29T12:23:18.000Z
|
loc/helper/deco.py
|
guluc3m/loc-server
|
b25b6b2deec5d27c840d60f33e5aa33bd56ba08a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# League of Code server implementation
# https://github.com/guluc3m/loc-server
#
# The MIT License (MIT)
#
# Copyright (c) 2017 Grupo de Usuarios de Linux UC3M <http://gul.es>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Decorator functions."""
from flask import current_app, jsonify, request
from functools import wraps
from werkzeug.exceptions import BadRequest
from loc.helper import util
from loc.helper import messages as m
from loc.models import Role, User, UserRole
import jwt
def login_required(f):
"""Require a JWT token to access the decorated view.
In case the token received is not valid, the request is aborted with a
401 HTTP status code.
"""
@wraps(f)
def decorated_function(*args, **kwargs):
try:
jwt_token = request.get_json().get('token')
except BadRequest as e:
# TODO log except
return util.api_error(m.JWT_MISSING), 500
if not jwt_token:
return util.api_fail(token=m.JWT_MISSING), 401
# Decode
try:
decoded = jwt.decode(jwt_token, current_app.config['SECRET_KEY'])
except jwt.exceptions.DecodeError:
# TODO log
return util.api_error(m.JWT_ERROR), 500
except jwt.ExpiredSignatureError:
return util.api_error(m.JWT_EXPIRED), 401
# Get user
user = User.query.filter_by(
id=decoded.get('sub', -1),
is_deleted=False
).first()
if not user:
return util.api_error(m.USER_NOT_FOUND), 401
# Token was invalidated?
if decoded.get('counter', -1) != user._jwt_counter:
return util.api_error(m.JWT_EXPIRED), 401
return f(*args, **kwargs)
return decorated_function
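# Illustrative use of login_required on a Flask view (route and view name are
# hypothetical):
#   @app.route('/profile', methods=['POST'])
#   @login_required
#   def profile():
#       ...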
def role_required(role):
"""Require a JWT token and a specific user role to access the decorated view.
    In case the token received is not valid, or the user does not have the
required role, the request is aborted with a 401 HTTP status code.
Args:
role (str): Name of the required role
"""
def decorator(f):
@wraps(f)
def decorated_function(*args, **kwargs):
try:
jwt_token = request.get_json().get('token')
except BadRequest as e:
# TODO log except
return util.api_error(m.JWT_MISSING), 500
if not jwt_token:
return util.api_fail(token=m.JWT_MISSING), 401
# Decode
try:
decoded = jwt.decode(jwt_token, current_app.config['SECRET_KEY'])
except jwt.exceptions.DecodeError:
# TODO log
return util.api_error(m.JWT_ERROR), 500
except jwt.ExpiredSignatureError:
return util.api_error(m.JWT_EXPIRED), 401
# Get user
user = User.query.filter_by(
id=decoded.get('sub', -1),
is_deleted=False
).first()
if not user:
return util.api_error(m.USER_NOT_FOUND), 401
# Token was invalidated?
if decoded.get('counter', -1) != user._jwt_counter:
return util.api_error(m.JWT_EXPIRED), 401
# Check role
user_role = (
UserRole
.query
.join(Role, UserRole.role_id==Role.id)
.filter(UserRole.user_id==user.id)
.filter(Role.name==role)
).first()
if not user_role:
return util.api_error(m.ROLE_MISSING), 401
return f(*args, **kwargs)
return decorated_function
return decorator
def check_required(params):
"""Check that the specified parameters are provided and valid.
Args:
params (list[tuple]): List of tuples containing the parameter name
and the data type that the parameter should be.
"""
def decorator(f):
@wraps(f)
def decorated_function(*args, **kwargs):
json = request.get_json()
# Check if JSON was provided
if not json:
response = {}
for p in params:
response[p[0]] = m.FIELD_MISSING
return util.api_fail(**response), 400
# Check for missing fields and wrong data types
errors = {}
for p in params:
name = p[0]
p_type = p[1]
# Missing field
if name not in json.keys():
errors[name] = m.FIELD_MISSING
continue
# Wrong data type
if not isinstance(json[name], p_type):
errors[name] = m.INVALID_TYPE
# Return errors if any
if errors:
return util.api_fail(**errors), 400
return f(*args, **kwargs)
return decorated_function
return decorator
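# Illustrative use of check_required (field names are hypothetical); a missing field
# or a wrong data type is rejected with a 400 response before the view body runs:
#   @check_required([('username', str), ('age', int)])
#   def create_user():
#       ...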
def check_optional(params):
"""Check that the specified parameters are valid.
This is for optional parameters that may not appear in the request, so
they will only be checked if present.
Args:
params (list[tuple]): List of tuples containing the parameter name
and the data type that the parameter should be.
"""
def decorator(f):
@wraps(f)
def decorated_function(*args, **kwargs):
json = request.get_json()
# Check if JSON was provided
if not json:
# Nothing to do
return f(*args, **kwargs)
# Check for missing fields and wrong data types
errors = {}
for p in params:
name = p[0]
p_type = p[1]
# Missing field, skip it
if name not in json.keys():
continue
# Wrong data type
if not isinstance(json[name], p_type):
errors[name] = m.INVALID_TYPE
# Return errors if any
if errors:
return util.api_fail(**errors), 400
return f(*args, **kwargs)
return decorated_function
return decorator
| 30.3875
| 81
| 0.586179
|
b7c3273036ce2ce55748461c81618e2faca4799f
| 905
|
py
|
Python
|
examples/dump_ast.py
|
eliben/pycparser
|
615317a473f09b9b9d444313ac15e52e7763c7c1
|
[
"BSD-3-Clause"
] | 2,407
|
2015-01-03T03:05:38.000Z
|
2022-03-31T13:31:25.000Z
|
examples/dump_ast.py
|
eliben/pycparser
|
615317a473f09b9b9d444313ac15e52e7763c7c1
|
[
"BSD-3-Clause"
] | 355
|
2015-01-11T20:35:32.000Z
|
2022-03-31T03:07:53.000Z
|
examples/dump_ast.py
|
eliben/pycparser
|
615317a473f09b9b9d444313ac15e52e7763c7c1
|
[
"BSD-3-Clause"
] | 618
|
2015-01-09T14:10:57.000Z
|
2022-03-22T05:16:44.000Z
|
#-----------------------------------------------------------------
# pycparser: dump_ast.py
#
# Basic example of parsing a file and dumping its parsed AST.
#
# Eli Bendersky [https://eli.thegreenplace.net/]
# License: BSD
#-----------------------------------------------------------------
from __future__ import print_function
import argparse
import sys
# This is not required if you've installed pycparser into
# your site-packages/ with setup.py
sys.path.extend(['.', '..'])
from pycparser import parse_file
if __name__ == "__main__":
argparser = argparse.ArgumentParser('Dump AST')
argparser.add_argument('filename', help='name of file to parse')
argparser.add_argument('--coord', help='show coordinates in the dump',
action='store_true')
args = argparser.parse_args()
ast = parse_file(args.filename, use_cpp=False)
ast.show(showcoord=args.coord)
| 32.321429
| 74
| 0.603315
|
9a101dd99242a871a2b0b965bd6888bb306a6e86
| 29,267
|
py
|
Python
|
quantumflow/stdgates.py
|
stjordanis/quantumflow
|
bf965f0ca70cd69b387f9ca8407ab38da955e925
|
[
"Apache-2.0"
] | 99
|
2018-12-03T20:41:39.000Z
|
2022-02-21T13:56:08.000Z
|
quantumflow/stdgates.py
|
stjordanis/quantumflow
|
bf965f0ca70cd69b387f9ca8407ab38da955e925
|
[
"Apache-2.0"
] | null | null | null |
quantumflow/stdgates.py
|
stjordanis/quantumflow
|
bf965f0ca70cd69b387f9ca8407ab38da955e925
|
[
"Apache-2.0"
] | 24
|
2018-12-03T20:41:41.000Z
|
2022-01-03T01:11:45.000Z
|
# Copyright 2016-2018, Rigetti Computing
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
""" QuantumFlow Standard Gates """
# Kudos: Many gate definitions adapted from Nick Rubin's reference-qvm
from math import sqrt, pi
import copy
import numpy as np
from . import backend as bk
from .qubits import Qubit
from .ops import Gate
from .gates import I
__all__ = ['I', 'X', 'Y', 'Z', 'H', 'S', 'T', 'PHASE', 'RX', 'RY', 'RZ', 'CZ',
'CNOT', 'SWAP', 'ISWAP', 'CPHASE00', 'CPHASE01', 'CPHASE10',
'CPHASE', 'PSWAP', 'CCNOT', 'CSWAP',
'RN', 'TX', 'TY', 'TZ', 'TH', 'ZYZ',
'CAN', 'XX', 'YY', 'ZZ', 'PISWAP', 'EXCH',
'CANONICAL',
'S_H', 'T_H', 'STDGATES']
# Standard 1 qubit gates
class X(Gate):
r"""
A 1-qubit Pauli-X gate.
.. math::
X() &\equiv \begin{pmatrix} 0 & 1 \\ 1 & 0 \end{pmatrix}
"""
def __init__(self, q0: Qubit = 0) -> None:
qubits = [q0]
super().__init__([[0, 1], [1, 0]], qubits)
@property
def H(self) -> Gate:
return copy.copy(self) # Hermitian
def __pow__(self, t: float) -> Gate:
return TX(t, *self.qubits)
class Y(Gate):
r"""
A 1-qubit Pauli-Y gate.
.. math::
Y() &\equiv \begin{pmatrix} 0 & -i \\ i & 0 \end{pmatrix}
mnemonic: "Minus eye high".
"""
def __init__(self, q0: Qubit = 0) -> None:
qubits = [q0]
super().__init__(np.asarray([[0, -1.0j], [1.0j, 0]]), qubits)
@property
def H(self) -> Gate:
return copy.copy(self) # Hermitian
def __pow__(self, t: float) -> Gate:
return TY(t, *self.qubits)
class Z(Gate):
r"""
A 1-qubit Pauli-Z gate.
.. math::
Z() &\equiv \begin{pmatrix} 1 & 0 \\ 0 & -1 \end{pmatrix}
"""
def __init__(self, q0: Qubit = 0) -> None:
qubits = [q0]
super().__init__([[1, 0], [0, -1.0]], qubits)
@property
def H(self) -> Gate:
return copy.copy(self) # Hermitian
def __pow__(self, t: float) -> Gate:
return TZ(t, *self.qubits)
class H(Gate):
r"""
A 1-qubit Hadamard gate.
.. math::
H() \equiv \frac{1}{\sqrt{2}}
\begin{pmatrix} 1 & 1 \\ 1 & -1 \end{pmatrix}
"""
def __init__(self, q0: Qubit = 0) -> None:
unitary = np.asarray([[1, 1], [1, -1]]) / sqrt(2)
qubits = [q0]
super().__init__(unitary, qubits)
@property
def H(self) -> Gate:
return copy.copy(self) # Hermitian
def __pow__(self, t: float) -> Gate:
return TH(t, *self.qubits)
class S(Gate):
r"""
A 1-qubit phase S gate, equivalent to ``PHASE(pi/2)``. The square root
of the Z gate (up to global phase). Also commonly denoted as the P gate.
.. math::
S() \equiv \begin{pmatrix} 1 & 0 \\ 0 & i \end{pmatrix}
"""
def __init__(self, q0: Qubit = 0) -> None:
qubits = [q0]
super().__init__(np.asarray([[1.0, 0.0], [0.0, 1.0j]]), qubits)
@property
def H(self) -> Gate:
return S_H(*self.qubits)
def __pow__(self, t: float) -> Gate:
return PHASE(pi / 2 * t, *self.qubits)
class T(Gate):
r"""
    A 1-qubit T (pi/8) gate, equivalent to ``PHASE(pi/4)``. The fourth root
of the Z gate (up to global phase).
.. math::
\begin{pmatrix} 1 & 0 \\ 0 & e^{i \pi / 4} \end{pmatrix}
"""
def __init__(self, q0: Qubit = 0) -> None:
unitary = [[1.0, 0.0], [0.0, bk.ccast(bk.cis(pi / 4.0))]]
qubits = [q0]
super().__init__(unitary, qubits)
@property
def H(self) -> Gate:
return T_H(*self.qubits)
def __pow__(self, t: float) -> Gate:
return PHASE(pi / 4 * t, *self.qubits)
class PHASE(Gate):
r"""
A 1-qubit parametric phase shift gate
.. math::
\text{PHASE}(\theta) \equiv \begin{pmatrix}
1 & 0 \\ 0 & e^{i \theta} \end{pmatrix}
"""
def __init__(self, theta: float, q0: Qubit = 0) -> None:
ctheta = bk.ccast(theta)
unitary = [[1.0, 0.0], [0.0, bk.cis(ctheta)]]
qubits = [q0]
super().__init__(unitary, qubits, dict(theta=theta))
@property
def H(self) -> Gate:
theta = self.params['theta']
theta = 2. * pi - theta % (2. * pi)
return PHASE(theta, *self.qubits)
def __pow__(self, t: float) -> Gate:
theta = self.params['theta'] * t
return PHASE(theta, *self.qubits)
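# Illustrative sketch of the gate API defined above (assumed usage, not from this
# module): PHASE(pi / 2, 0) acts on qubit 0 and matches the S gate, and
# PHASE(theta, 0).H returns PHASE(2*pi - theta % (2*pi), 0).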
class RX(Gate):
r"""A 1-qubit Pauli-X parametric rotation gate.
.. math::
R_X(\theta) = \begin{pmatrix}
\cos(\frac{\theta}{2}) & -i \sin(\theta/2) \\
-i \sin(\theta/2) & \cos(\theta/2)
\end{pmatrix}
Args:
theta: Angle of rotation in Bloch sphere
"""
def __init__(self, theta: float, q0: Qubit = 0) -> None:
ctheta = bk.ccast(theta)
unitary = [[bk.cos(ctheta / 2), -1.0j * bk.sin(ctheta / 2)],
[-1.0j * bk.sin(ctheta / 2), bk.cos(ctheta / 2)]]
qubits = [q0]
super().__init__(unitary, qubits, dict(theta=theta))
@property
def H(self) -> Gate:
theta = self.params['theta']
return RX(-theta, *self.qubits)
def __pow__(self, t: float) -> Gate:
theta = self.params['theta']
return RX(theta * t, *self.qubits)
class RY(Gate):
r"""A 1-qubit Pauli-Y parametric rotation gate
.. math::
R_Y(\theta) \equiv \begin{pmatrix}
\cos(\theta / 2) & -\sin(\theta / 2)
\\ \sin(\theta/2) & \cos(\theta/2) \end{pmatrix}
Args:
theta: Angle of rotation in Bloch sphere
"""
def __init__(self, theta: float, q0: Qubit = 0) -> None:
ctheta = bk.ccast(theta)
unitary = [[bk.cos(ctheta / 2.0), -bk.sin(ctheta / 2.0)],
[bk.sin(ctheta / 2.0), bk.cos(ctheta / 2.0)]]
qubits = [q0]
super().__init__(unitary, qubits, dict(theta=theta))
@property
def H(self) -> Gate:
theta = self.params['theta']
return RY(-theta, *self.qubits)
def __pow__(self, t: float) -> Gate:
theta = self.params['theta']
return RY(theta * t, *self.qubits)
class RZ(Gate):
r"""A 1-qubit Pauli-X parametric rotation gate
.. math::
R_Z(\theta)\equiv \begin{pmatrix}
\cos(\theta/2) - i \sin(\theta/2) & 0 \\
0 & \cos(\theta/2) + i \sin(\theta/2)
\end{pmatrix}
Args:
theta: Angle of rotation in Bloch sphere
"""
def __init__(self, theta: float, q0: Qubit = 0) -> None:
ctheta = bk.ccast(theta)
unitary = [[bk.exp(-ctheta * 0.5j), 0],
[0, bk.exp(ctheta * 0.5j)]]
qubits = [q0]
super().__init__(unitary, qubits, dict(theta=theta))
@property
def H(self) -> Gate:
theta = self.params['theta']
return RZ(-theta, *self.qubits)
def __pow__(self, t: float) -> Gate:
theta = self.params['theta']
return RZ(theta * t, *self.qubits)
# Standard 2 qubit gates
class CZ(Gate):
r"""A controlled-Z gate
Equivalent to ``controlled_gate(Z())`` and locally equivalent to
``CAN(1/2,0,0)``
.. math::
\text{CZ}() = \begin{pmatrix} 1&0&0&0 \\ 0&1&0&0 \\
0&0&1&0 \\ 0&0&0&-1 \end{pmatrix}
"""
def __init__(self, q0: Qubit = 0, q1: Qubit = 1) -> None:
unitary = [[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, -1]]
params = None
qubits = [q0, q1]
super().__init__(unitary, qubits, params)
@property
def H(self) -> Gate:
return copy.copy(self) # Hermitian
class CNOT(Gate):
r"""A controlled-not gate
Equivalent to ``controlled_gate(X())``, and
locally equivalent to ``CAN(1/2, 0, 0)``
.. math::
\text{CNOT}() \equiv \begin{pmatrix} 1&0&0&0 \\ 0&1&0&0 \\
0&0&0&1 \\ 0&0&1&0 \end{pmatrix}
"""
def __init__(self, q0: Qubit = 0, q1: Qubit = 1) -> None:
unitary = [[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 0]]
params = None
qubits = [q0, q1]
super().__init__(unitary, qubits, params)
@property
def H(self) -> Gate:
return copy.copy(self) # Hermitian
class SWAP(Gate):
r"""A 2-qubit swap gate
Locally equivalent to ``CAN(1/2,1/2,1/2)``.
.. math::
\text{SWAP}() \equiv
\begin{pmatrix}
1&0&0&0 \\ 0&0&1&0 \\ 0&1&0&0 \\ 0&0&0&1
\end{pmatrix}
"""
def __init__(self, q0: Qubit = 0, q1: Qubit = 1) -> None:
unitary = [[1, 0, 0, 0],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 0, 1]]
params = None
qubits = [q0, q1]
super().__init__(unitary, qubits, params)
@property
def H(self) -> Gate:
return copy.copy(self) # Hermitian
class ISWAP(Gate):
r"""A 2-qubit iswap gate
Locally equivalent to ``CAN(1/2,1/2,0)``.
.. math::
\text{ISWAP}() \equiv
\begin{pmatrix} 1&0&0&0 \\ 0&0&i&0 \\ 0&i&0&0 \\ 0&0&0&1 \end{pmatrix}
"""
def __init__(self, q0: Qubit = 0, q1: Qubit = 1) -> None:
# Note: array wrapper is to work around an eager mode
        # (but not regular tensorflow) issue.
# "Can't convert Python sequence with mixed types to Tensor."
unitary = np.array([[1, 0, 0, 0],
[0, 0, 1j, 0],
[0, 1j, 0, 0],
[0, 0, 0, 1]])
params = None
qubits = [q0, q1]
super().__init__(unitary, qubits, params)
class CPHASE00(Gate):
r"""A 2-qubit 00 phase-shift gate
.. math::
\text{CPHASE00}(\theta) \equiv \text{diag}(e^{i \theta}, 1, 1, 1)
"""
def __init__(self, theta: float,
q0: Qubit = 0, q1: Qubit = 1) -> None:
ctheta = bk.ccast(theta)
unitary = [[bk.exp(1j * ctheta), 0, 0, 0],
[0, 1.0, 0, 0],
[0, 0, 1.0, 0],
[0, 0, 0, 1.0]]
qubits = [q0, q1]
super().__init__(unitary, qubits, dict(theta=theta))
@property
def H(self) -> Gate:
theta = - self.params['theta']
return CPHASE00(theta, *self.qubits)
class CPHASE01(Gate):
r"""A 2-qubit 01 phase-shift gate
.. math::
\text{CPHASE01}(\theta) \equiv \text{diag}(1, e^{i \theta}, 1, 1)
"""
def __init__(self, theta: float,
q0: Qubit = 0, q1: Qubit = 1) -> None:
ctheta = bk.ccast(theta)
unitary = [[1.0, 0, 0, 0],
[0, bk.exp(1j * ctheta), 0, 0],
[0, 0, 1.0, 0],
[0, 0, 0, 1.0]]
qubits = [q0, q1]
super().__init__(unitary, qubits, dict(theta=theta))
@property
def H(self) -> Gate:
theta = - self.params['theta']
return CPHASE01(theta, *self.qubits)
class CPHASE10(Gate):
r"""A 2-qubit 10 phase-shift gate
.. math::
\text{CPHASE10}(\theta) \equiv \text{diag}(1, 1, e^{i \theta}, 1)
"""
def __init__(self, theta: float,
q0: Qubit = 0, q1: Qubit = 1) -> None:
ctheta = bk.ccast(theta)
unitary = [[1.0, 0, 0, 0],
[0, 1.0, 0, 0],
[0, 0, bk.exp(1j * ctheta), 0],
[0, 0, 0, 1.0]]
qubits = [q0, q1]
super().__init__(unitary, qubits, dict(theta=theta))
@property
def H(self) -> Gate:
theta = - self.params['theta']
return CPHASE10(theta, *self.qubits)
class CPHASE(Gate):
r"""A 2-qubit 11 phase-shift gate
.. math::
\text{CPHASE}(\theta) \equiv \text{diag}(1, 1, 1, e^{i \theta})
"""
def __init__(self, theta: float,
q0: Qubit = 0, q1: Qubit = 1) -> None:
ctheta = bk.ccast(theta)
unitary = [[1.0, 0, 0, 0],
[0, 1.0, 0, 0],
[0, 0, 1.0, 0],
[0, 0, 0, bk.exp(1j * ctheta)]]
qubits = [q0, q1]
super().__init__(unitary, qubits, dict(theta=theta))
@property
def H(self) -> Gate:
theta = - self.params['theta']
return CPHASE(theta, *self.qubits)
def __pow__(self, t: float) -> Gate:
theta = self.params['theta'] * t
return CPHASE(theta, *self.qubits)
class PSWAP(Gate):
r"""A 2-qubit parametric-swap gate, as defined by Quil.
Interpolates between SWAP (theta=0) and iSWAP (theta=pi/2).
Locally equivalent to ``CAN(1/2, 1/2, 1/2 - theta/pi)``
.. math::
\text{PSWAP}(\theta) \equiv \begin{pmatrix} 1&0&0&0 \\
0&0&e^{i\theta}&0 \\ 0&e^{i\theta}&0&0 \\ 0&0&0&1 \end{pmatrix}
"""
def __init__(self, theta: float,
q0: Qubit = 0, q1: Qubit = 1) -> None:
ctheta = bk.ccast(theta)
unitary = [[[[1, 0], [0, 0]], [[0, 0], [bk.exp(ctheta * 1.0j), 0]]],
[[[0, bk.exp(ctheta * 1.0j)], [0, 0]], [[0, 0], [0, 1]]]]
qubits = [q0, q1]
super().__init__(unitary, qubits, dict(theta=theta))
@property
def H(self) -> Gate:
theta = self.params['theta']
theta = 2. * pi - theta % (2. * pi)
return PSWAP(theta, *self.qubits)
class PISWAP(Gate):
r"""A parametric iswap gate, generated from XY interaction.
Locally equivalent to CAN(t,t,0), where t = theta / (2 * pi)
.. math::
\text{PISWAP}(\theta) \equiv
\begin{pmatrix}
1 & 0 & 0 & 0 \\
0 & \cos(2\theta) & i \sin(2\theta) & 0 \\
0 & i \sin(2\theta) & \cos(2\theta) & 0 \\
0 & 0 & 0 & 1
\end{pmatrix}
"""
def __init__(self, theta: float, q0: Qubit = 0, q1: Qubit = 1) -> None:
ctheta = bk.ccast(theta)
unitary = [[[[1, 0], [0, 0]],
[[0, bk.cos(2*ctheta)], [bk.sin(2*ctheta) * 1j, 0]]],
[[[0, bk.sin(2*ctheta) * 1j], [bk.cos(2*ctheta), 0]],
[[0, 0], [0, 1]]]]
params = dict(theta=theta)
super().__init__(unitary, [q0, q1], params)
@property
def H(self) -> Gate:
theta = - self.params['theta']
return PISWAP(theta, *self.qubits)
def __pow__(self, t: float) -> Gate:
theta = self.params['theta'] * t
return PISWAP(theta, *self.qubits)
# Standard 3 qubit gates
class CCNOT(Gate):
r"""
A 3-qubit Toffoli gate. A controlled, controlled-not.
Equivalent to ``controlled_gate(cnot())``
.. math::
\text{CCNOT}() \equiv \begin{pmatrix}
1& 0& 0& 0& 0& 0& 0& 0 \\
0& 1& 0& 0& 0& 0& 0& 0 \\
0& 0& 1& 0& 0& 0& 0& 0 \\
0& 0& 0& 1& 0& 0& 0& 0 \\
0& 0& 0& 0& 1& 0& 0& 0 \\
0& 0& 0& 0& 0& 1& 0& 0 \\
0& 0& 0& 0& 0& 0& 0& 1 \\
0& 0& 0& 0& 0& 0& 1& 0
\end{pmatrix}
"""
def __init__(self,
q0: Qubit = 0,
q1: Qubit = 1,
q2: Qubit = 2) -> None:
unitary = [[1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 0]]
params = None
qubits = [q0, q1, q2]
super().__init__(unitary, qubits, params)
@property
def H(self) -> Gate:
return copy.copy(self) # Hermitian
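# Sanity-check sketch (not part of this module): the docstring's claim that CCNOT
# is ``controlled_gate(cnot())`` can be verified with plain numpy projectors.
#
#     import numpy as np
#     I2, X = np.eye(2), np.array([[0, 1], [1, 0]])
#     P0, P1 = np.diag([1.0, 0.0]), np.diag([0.0, 1.0])
#     CNOT = np.kron(P0, I2) + np.kron(P1, X)               # 4x4 controlled-NOT
#     CCNOT = np.kron(P0, np.eye(4)) + np.kron(P1, CNOT)    # control the CNOT again
#     expected = np.eye(8)
#     expected[6:8, 6:8] = X                                # matrix from the docstring
#     assert np.allclose(CCNOT, expected)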
class CSWAP(Gate):
r"""
A 3-qubit Fredkin gate. A controlled swap.
Equivalent to ``controlled_gate(swap())``
.. math::
\text{CSWAP}() \equiv \begin{pmatrix}
1& 0& 0& 0& 0& 0& 0& 0 \\
0& 1& 0& 0& 0& 0& 0& 0 \\
0& 0& 1& 0& 0& 0& 0& 0 \\
0& 0& 0& 1& 0& 0& 0& 0 \\
0& 0& 0& 0& 1& 0& 0& 0 \\
0& 0& 0& 0& 0& 0& 1& 0 \\
0& 0& 0& 0& 0& 1& 0& 0 \\
0& 0& 0& 0& 0& 0& 0& 1
\end{pmatrix}
"""
def __init__(self, q0: Qubit = 0,
q1: Qubit = 1, q2: Qubit = 2) -> None:
unitary = [[1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1]]
params = None
qubits = [q0, q1, q2]
super().__init__(unitary, qubits, params)
@property
def H(self) -> Gate:
return copy.copy(self) # Hermitian
# Other 1-qubit gates
class S_H(Gate):
r"""
The inverse of the 1-qubit phase S gate, equivalent to ``PHASE(-pi/2)``.
.. math::
\begin{pmatrix} 1 & 0 \\ 0 & -i \end{pmatrix}
"""
def __init__(self, q0: Qubit = 0) -> None:
super().__init__(np.asarray([[1.0, 0.0], [0.0, -1.0j]]), [q0])
@property
def H(self) -> Gate:
return S(*self.qubits)
def __pow__(self, t: float) -> Gate:
return PHASE(-pi / 2 * t, *self.qubits)
class T_H(Gate):
r"""
The inverse (complex conjugate) of the 1-qubit T (pi/8) gate, equivalent
to ``PHASE(-pi/4)``.
.. math::
\begin{pmatrix} 1 & 0 \\ 0 & e^{-i \pi / 4} \end{pmatrix}
"""
def __init__(self, q0: Qubit = 0) -> None:
unitary = [[1.0, 0.0], [0.0, bk.ccast(bk.cis(-pi / 4.0))]]
super().__init__(unitary, [q0])
@property
def H(self) -> Gate:
return T(*self.qubits)
def __pow__(self, t: float) -> Gate:
return PHASE(-pi / 4 * t, *self.qubits)
class RN(Gate):
r"""A 1-qubit rotation of angle theta about axis (nx, ny, nz)
.. math::
R_n(\theta) = \cos\frac{\theta}{2}\, I - i \sin\frac{\theta}{2}
(n_x X + n_y Y + n_z Z)
Args:
theta: Angle of rotation on the Bloch sphere
(nx, ny, nz): A three-dimensional real unit vector
"""
def __init__(self,
theta: float,
nx: float,
ny: float,
nz: float,
q0: Qubit = 0) -> None:
ctheta = bk.ccast(theta)
cost = bk.cos(ctheta / 2)
sint = bk.sin(ctheta / 2)
unitary = [[cost - 1j * sint * nz, -1j * sint * nx - sint * ny],
[-1j * sint * nx + sint * ny, cost + 1j * sint * nz]]
params = dict(theta=theta, nx=nx, ny=ny, nz=nz)
super().__init__(unitary, [q0], params)
@property
def H(self) -> Gate:
theta, nx, ny, nz = self.params.values()
return RN(-theta, nx, ny, nz, *self.qubits)
def __pow__(self, t: float) -> Gate:
theta, nx, ny, nz = self.params.values()
return RN(t * theta, nx, ny, nz, *self.qubits)
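# Sanity-check sketch (not part of this module; assumes numpy and scipy): the RN
# docstring formula agrees with the matrix exponential of the generator.
#
#     import numpy as np
#     from scipy.linalg import expm
#     X = np.array([[0, 1], [1, 0]])
#     Y = np.array([[0, -1j], [1j, 0]])
#     Z = np.array([[1, 0], [0, -1]])
#     theta, (nx, ny, nz) = 0.7, (0.0, 0.6, 0.8)            # arbitrary angle, unit axis
#     n_sigma = nx * X + ny * Y + nz * Z
#     direct = np.cos(theta / 2) * np.eye(2) - 1j * np.sin(theta / 2) * n_sigma
#     assert np.allclose(direct, expm(-0.5j * theta * n_sigma))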
class TX(Gate):
r"""Powers of the 1-qubit Pauli-X gate.
.. math::
TX(t) = X^t = e^{i \pi t/2} R_X(\pi t)
Args:
t: Number of half turns (quarter cycles) on the Bloch sphere
"""
def __init__(self, t: float, q0: Qubit = 0) -> None:
t = t % 2
ctheta = bk.ccast(pi * t)
phase = bk.exp(0.5j * ctheta)
unitary = [[phase * bk.cos(ctheta / 2),
phase * -1.0j * bk.sin(ctheta / 2)],
[phase * -1.0j * bk.sin(ctheta / 2),
phase * bk.cos(ctheta / 2)]]
super().__init__(unitary, [q0], dict(t=t))
@property
def H(self) -> Gate:
t = - self.params['t']
return TX(t, *self.qubits)
def __pow__(self, t: float) -> Gate:
t = self.params['t'] * t
return TX(t, *self.qubits)
class TY(Gate):
r"""Powers of the 1-qubit Pauli-Y gate.
The pseudo-Hadamard gate is TY(3/2), and its inverse is TY(1/2).
.. math::
TY(t) = Y^t = e^{i \pi t/2} R_Y(\pi t)
Args:
t: Number of half turns (quarter cycles) on the Bloch sphere
"""
def __init__(self, t: float, q0: Qubit = 0) -> None:
t = t % 2
ctheta = bk.ccast(pi * t)
phase = bk.exp(0.5j * ctheta)
unitary = [[phase * bk.cos(ctheta / 2.0),
phase * -bk.sin(ctheta / 2.0)],
[phase * bk.sin(ctheta / 2.0),
phase * bk.cos(ctheta / 2.0)]]
# unitary = RY(pi*t).tensor * bk.exp(- 0.5j * t)
qubits = [q0]
super().__init__(unitary, qubits, dict(t=t))
@property
def H(self) -> Gate:
t = - self.params['t']
return TY(t, *self.qubits)
def __pow__(self, t: float) -> Gate:
t = self.params['t'] * t
return TY(t, *self.qubits)
class TZ(Gate):
r"""Powers of the 1-qubit Pauli-Z gate.
.. math::
TZ(t) = Z^t = e^{i \pi t/2} R_Z(\pi t)
Args:
t: Number of half turns (quarter cycles) on the Bloch sphere
"""
def __init__(self, t: float, q0: Qubit = 0) -> None:
t = t % 2
ctheta = bk.ccast(pi * t)
phase = bk.exp(0.5j * ctheta)
unitary = [[phase * bk.exp(-ctheta * 0.5j), 0],
[0, phase * bk.exp(ctheta * 0.5j)]]
super().__init__(unitary, [q0], dict(t=t))
@property
def H(self) -> Gate:
t = - self.params['t']
return TZ(t, *self.qubits)
def __pow__(self, t: float) -> Gate:
t = self.params['t'] * t
return TZ(t, *self.qubits)
class TH(Gate):
r"""
Powers of the 1-qubit Hadamard gate.
.. math::
TH(t) = H^t = e^{i \pi t/2}
\begin{pmatrix}
\cos(\tfrac{\pi t}{2}) - \tfrac{i}{\sqrt{2}}\sin(\tfrac{\pi t}{2}) &
-\tfrac{i}{\sqrt{2}} \sin(\tfrac{\pi t}{2}) \\
-\tfrac{i}{\sqrt{2}} \sin(\tfrac{\pi t}{2}) &
\cos(\tfrac{\pi t}{2}) + \tfrac{i}{\sqrt{2}} \sin(\tfrac{\pi t}{2})
\end{pmatrix}
"""
def __init__(self, t: float, q0: Qubit = 0) -> None:
theta = bk.ccast(pi * t)
phase = bk.exp(0.5j * theta)
unitary = [[phase * bk.cos(theta / 2)
- (phase * 1.0j * bk.sin(theta / 2)) / sqrt(2),
-(phase * 1.0j * bk.sin(theta / 2)) / sqrt(2)],
[-(phase * 1.0j * bk.sin(theta / 2)) / sqrt(2),
phase * bk.cos(theta / 2)
+ (phase * 1.0j * bk.sin(theta / 2)) / sqrt(2)]]
super().__init__(unitary, [q0], dict(t=t))
@property
def H(self) -> Gate:
t = - self.params['t']
return TH(t, *self.qubits)
def __pow__(self, t: float) -> Gate:
t = self.params['t'] * t
return TH(t, *self.qubits)
class ZYZ(Gate):
r"""A Z-Y-Z decomposition of one-qubit rotations in the Bloch sphere
The ZYZ decomposition of one-qubit rotations is
.. math::
\text{ZYZ}(t_0, t_1, t_2)
= Z^{t_2} Y^{t_1} Z^{t_0}
This parameterizes SU(2), the special unitary group on a 2-dimensional complex vector space.
Ref: See Barenco et al (1995) section 4 (Warning: gates are defined as
conjugate of what we now use?), or Eq 4.11 of Nielsen and Chuang.
Args:
t0: Parameter of first parametric Z gate.
Number of half turns on the Bloch sphere.
t1: Parameter of parametric Y gate.
t2: Parameter of second parametric Z gate.
"""
def __init__(self, t0: float, t1: float,
t2: float, q0: Qubit = 0) -> None:
ct0 = bk.ccast(pi * t0)
ct1 = bk.ccast(pi * t1)
ct2 = bk.ccast(pi * t2)
ct3 = 0
unitary = [[bk.cis(ct3 - 0.5 * ct2 - 0.5 * ct0) * bk.cos(0.5 * ct1),
-bk.cis(ct3 - 0.5 * ct2 + 0.5 * ct0) * bk.sin(0.5 * ct1)],
[bk.cis(ct3 + 0.5 * ct2 - 0.5 * ct0) * bk.sin(0.5 * ct1),
bk.cis(ct3 + 0.5 * ct2 + 0.5 * ct0) * bk.cos(0.5 * ct1)]]
super().__init__(unitary, [q0], dict(t0=t0, t1=t1, t2=t2))
@property
def H(self) -> Gate:
t0, t1, t2 = self.params.values()
return ZYZ(-t2, -t1, -t0, *self.qubits)
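# Sanity-check sketch (not part of this module): the matrix built above equals the
# product Rz(pi*t2) . Ry(pi*t1) . Rz(pi*t0), i.e. the docstring's Z-Y-Z decomposition
# up to global phase. Plain numpy, illustrative names only.
#
#     import numpy as np
#     rz = lambda a: np.diag([np.exp(-0.5j * a), np.exp(0.5j * a)])
#     ry = lambda a: np.array([[np.cos(a / 2), -np.sin(a / 2)],
#                              [np.sin(a / 2),  np.cos(a / 2)]])
#     t0, t1, t2 = 0.1, 0.2, 0.3
#     a0, a1, a2 = np.pi * t0, np.pi * t1, np.pi * t2
#     zyz = np.array(
#         [[np.exp(-0.5j * (a2 + a0)) * np.cos(0.5 * a1),
#           -np.exp(1j * (-0.5 * a2 + 0.5 * a0)) * np.sin(0.5 * a1)],
#          [np.exp(1j * (0.5 * a2 - 0.5 * a0)) * np.sin(0.5 * a1),
#           np.exp(0.5j * (a2 + a0)) * np.cos(0.5 * a1)]])
#     assert np.allclose(zyz, rz(a2) @ ry(a1) @ rz(a0))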
# Other 2-qubit gates
# TODO: Add references and explanation
# DOCME: Comment on sign conventions.
class CAN(Gate):
r"""A canonical 2-qubit gate
The canonical decomposition of 2-qubit gates removes local 1-qubit
rotations, leaving only the non-local interactions.
.. math::
\text{CAN}(t_x, t_y, t_z) \equiv
\exp\Big\{-i\frac{\pi}{2}(t_x X\otimes X
+ t_y Y\otimes Y + t_z Z\otimes Z)\Big\}
"""
def __init__(self,
tx: float, ty: float, tz: float,
q0: Qubit = 0, q1: Qubit = 1) -> None:
xx = XX(tx)
yy = YY(ty)
zz = ZZ(tz)
gate = yy @ xx
gate = zz @ gate
unitary = gate.tensor
super().__init__(unitary, [q0, q1], dict(tx=tx, ty=ty, tz=tz))
@property
def H(self) -> Gate:
tx, ty, tz = self.params.values()
return CAN(-tx, -ty, -tz, *self.qubits)
def __pow__(self, t: float) -> Gate:
tx, ty, tz = self.params.values()
return CAN(tx * t, ty * t, tz * t, *self.qubits)
# Backwards compatibility
# TODO: Add deprecation warning
class CANONICAL(CAN):
"""Deprecated. Use class CAN instead"""
pass
class XX(Gate):
r"""A parametric 2-qubit gate generated from an XX interaction,
Equivalent to ``CAN(t,0,0)``.
XX(1/2) is the Mølmer-Sørensen gate.
Ref: Sørensen, A. & Mølmer, K. Quantum computation with ions in thermal
motion. Phys. Rev. Lett. 82, 1971–1974 (1999)
Args:
t:
"""
def __init__(self, t: float, q0: Qubit = 0, q1: Qubit = 1) -> None:
theta = bk.ccast(pi * t)
unitary = [[bk.cos(theta / 2), 0, 0, -1.0j * bk.sin(theta / 2)],
[0, bk.cos(theta / 2), -1.0j * bk.sin(theta / 2), 0],
[0, -1.0j * bk.sin(theta / 2), bk.cos(theta / 2), 0],
[-1.0j * bk.sin(theta / 2), 0, 0, bk.cos(theta / 2)]]
super().__init__(unitary, [q0, q1], dict(t=t))
@property
def H(self) -> Gate:
t = - self.params['t']
return XX(t, *self.qubits)
def __pow__(self, t: float) -> Gate:
t = self.params['t'] * t
return XX(t, *self.qubits)
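# Sanity-check sketch (not part of this module; assumes numpy and scipy): the XX
# matrix above is exp(-i pi t/2 * X (x) X), matching the CAN(t, 0, 0) generator.
#
#     import numpy as np
#     from scipy.linalg import expm
#     X = np.array([[0, 1], [1, 0]])
#     t = 0.3
#     c, s = np.cos(np.pi * t / 2), np.sin(np.pi * t / 2)
#     xx = np.array([[c, 0, 0, -1j * s],
#                    [0, c, -1j * s, 0],
#                    [0, -1j * s, c, 0],
#                    [-1j * s, 0, 0, c]])
#     assert np.allclose(xx, expm(-0.5j * np.pi * t * np.kron(X, X)))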
class YY(Gate):
r"""A parametric 2-qubit gate generated from a YY interaction.
Equivalent to ``CAN(0,t,0)``, and locally equivalent to
``CAN(t,0,0)``
Args:
t:
"""
def __init__(self, t: float, q0: Qubit = 0, q1: Qubit = 1) -> None:
theta = bk.ccast(pi * t)
unitary = [[bk.cos(theta / 2), 0, 0, 1.0j * bk.sin(theta / 2)],
[0, bk.cos(theta / 2), -1.0j * bk.sin(theta / 2), 0],
[0, -1.0j * bk.sin(theta / 2), bk.cos(theta / 2), 0],
[1.0j * bk.sin(theta / 2), 0, 0, bk.cos(theta / 2)]]
super().__init__(unitary, [q0, q1], dict(t=t))
@property
def H(self) -> Gate:
t = - self.params['t']
return YY(t, *self.qubits)
def __pow__(self, t: float) -> Gate:
t = self.params['t'] * t
return YY(t, *self.qubits)
class ZZ(Gate):
r"""A parametric 2-qubit gate generated from a ZZ interaction.
Equivalent to ``CAN(0,0,t)``, and locally equivalent to
``CAN(t,0,0)``
Args:
t:
"""
def __init__(self, t: float, q0: Qubit = 0, q1: Qubit = 1) -> None:
theta = bk.ccast(pi * t)
unitary = [[[[bk.cis(-theta / 2), 0], [0, 0]],
[[0, bk.cis(theta / 2)], [0, 0]]],
[[[0, 0], [bk.cis(theta / 2), 0]],
[[0, 0], [0, bk.cis(-theta / 2)]]]]
super().__init__(unitary, [q0, q1], dict(t=t))
@property
def H(self) -> Gate:
t = - self.params['t']
return ZZ(t, *self.qubits)
def __pow__(self, t: float) -> Gate:
t = self.params['t'] * t
return ZZ(t, *self.qubits)
class EXCH(Gate):
r"""A 2-qubit parametric gate generated from an exchange interaction.
Equivalent to CAN(t,t,t)
"""
def __init__(self, t: float, q0: Qubit = 0, q1: Qubit = 1) -> None:
unitary = CAN(t, t, t).tensor
super().__init__(unitary, [q0, q1], dict(t=t))
@property
def H(self) -> Gate:
t = - self.params['t']
return EXCH(t, *self.qubits)
def __pow__(self, t: float) -> Gate:
t = self.params['t'] * t
return EXCH(t, *self.qubits)
# TODO: LATEX_OPERATIONS, QUIL_GATESET
GATESET = frozenset([I, X, Y, Z, H, S, T, PHASE, RX, RY, RZ, CZ,
CNOT, SWAP, ISWAP, CPHASE00, CPHASE01, CPHASE10,
CPHASE, PSWAP, CCNOT, CSWAP, PISWAP,
# Extras
RN, TX, TY, TZ, TH, ZYZ,
CAN, XX, YY, ZZ, EXCH,
S_H, T_H])
# TODO: Rename STDGATES to NAME_GATE?
STDGATES = {gate_class.__name__: gate_class for gate_class in GATESET}
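# Minimal usage sketch for the lookup table above (illustrative only; assumes the
# module is importable and the constructor arguments follow the (q0, q1) pattern
# used throughout this file):
#
#     gate_cls = STDGATES["CNOT"]      # look up a gate class by name
#     gate = gate_cls(0, 1)            # CNOT acting on qubits 0 and 1
#     inverse = gate.H                 # Hermitian conjugate (CNOT is its own inverse)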
avg_line_length: 29.208583 | max_line_length: 78 | alphanum_fraction: 0.483856

hexsha: 839bc0112654b0a19d47837c9fe608b478bf1786 | size: 216 | ext: py | lang: Python
max_stars:  pandastools/__init__.py | phil65/pandastools | c00230df7e848d737e5a1e0d0db4d6dcf245ff04 | ["MIT"] | count: 1 | 2021-05-06T06:51:27.000Z to 2021-05-06T06:51:27.000Z
max_issues: pandastools/__init__.py | phil65/pandastools | c00230df7e848d737e5a1e0d0db4d6dcf245ff04 | ["MIT"] | count: 109 | 2020-07-24T04:10:14.000Z to 2021-10-11T23:49:32.000Z
max_forks:  pandastools/__init__.py | phil65/pandastools | c00230df7e848d737e5a1e0d0db4d6dcf245ff04 | ["MIT"] | count: null | null to null

# -*- coding: utf-8 -*-
"""Top-level package for PandasTools."""
from . import accessors
__author__ = """Philipp Temminghoff"""
__email__ = "phil65@kodi.tv"
__version__ = "0.5.1"
__all__ = ["accessors", "utils"]
avg_line_length: 18 | max_line_length: 40 | alphanum_fraction: 0.657407

hexsha: 86d00a437384b586dc8e77b13c2300cd50e3183f | size: 3,290 | ext: py | lang: Python
max_stars:  salt/auth/mysql.py | byteskeptical/salt | 637fe0b04f38b2274191b005d73b3c6707d7f400 | ["Apache-2.0"] | count: 12 | 2015-01-21T00:18:25.000Z to 2021-07-11T07:35:26.000Z
max_issues: salt/auth/mysql.py | byteskeptical/salt | 637fe0b04f38b2274191b005d73b3c6707d7f400 | ["Apache-2.0"] | count: 86 | 2017-01-27T11:54:46.000Z to 2020-05-20T06:25:26.000Z
max_forks:  salt/auth/mysql.py | byteskeptical/salt | 637fe0b04f38b2274191b005d73b3c6707d7f400 | ["Apache-2.0"] | count: 12 | 2015-01-05T09:50:42.000Z to 2019-08-19T01:43:40.000Z

# -*- coding: utf-8 -*-
'''
Provide authentication using MySQL.
When using MySQL as an authentication backend, you will need to create or
use an existing table that has a username and a password column.
To get started, create a simple table that holds just a username and
a password. The password field will hold a SHA256 checksum.
.. code-block:: sql
CREATE TABLE `users` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`username` varchar(25) DEFAULT NULL,
`password` varchar(70) DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=latin1;
To create a user within MySQL, execute the following statement.
.. code-block:: sql
INSERT INTO users VALUES (NULL, 'diana', SHA2('secret', 256))
.. code-block:: yaml
mysql_auth:
hostname: localhost
database: SaltStack
username: root
password: letmein
auth_sql: 'SELECT username FROM users WHERE username = "{0}" AND password = SHA2("{1}", 256)'
The `auth_sql` option contains the SQL used to validate a user's credentials.
This is where you can plug in a different query to match your own schema or
authentication logic.
Enable MySQL authentication.
.. code-block:: yaml
external_auth:
mysql:
damian:
- test.*
:depends: - MySQL-python Python module
'''
from __future__ import absolute_import, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
try:
# Trying to import MySQLdb
import MySQLdb
import MySQLdb.cursors
import MySQLdb.converters
from MySQLdb.connections import OperationalError
except ImportError:
try:
# MySQLdb import failed, try to import PyMySQL
import pymysql
pymysql.install_as_MySQLdb()
import MySQLdb
import MySQLdb.cursors
import MySQLdb.converters
from MySQLdb.err import OperationalError
except ImportError:
MySQLdb = None
def __virtual__():
'''
Confirm that a python mysql client is installed.
'''
return bool(MySQLdb), 'No python mysql client installed.' if MySQLdb is None else ''
def __get_connection_info():
'''
Grab MySQL Connection Details
'''
conn_info = {}
try:
conn_info['hostname'] = __opts__['mysql_auth']['hostname']
conn_info['username'] = __opts__['mysql_auth']['username']
conn_info['password'] = __opts__['mysql_auth']['password']
conn_info['database'] = __opts__['mysql_auth']['database']
conn_info['auth_sql'] = __opts__['mysql_auth']['auth_sql']
except KeyError as e:
log.error('%s does not exist', e)
return None
return conn_info
def auth(username, password):
'''
Authenticate using a MySQL user table
'''
_info = __get_connection_info()
if _info is None:
return False
try:
conn = MySQLdb.connect(_info['hostname'],
_info['username'],
_info['password'],
_info['database'])
except OperationalError as e:
log.error(e)
return False
cur = conn.cursor()
cur.execute(_info['auth_sql'].format(username, password))
if cur.rowcount == 1:
return True
return False
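# Illustrative sketch (not part of this module) of how ``auth()`` above expands the
# configured ``auth_sql`` template with plain str.format; the credentials shown are
# hypothetical and taken from the example configuration in the module docstring:
#
#     auth_sql = ('SELECT username FROM users WHERE username = "{0}" '
#                 'AND password = SHA2("{1}", 256)')
#     query = auth_sql.format('diana', 'secret')
#     # -> SELECT username FROM users WHERE username = "diana"
#     #    AND password = SHA2("secret", 256)
#     # auth() succeeds only when this query returns exactly one row.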
avg_line_length: 25.703125 | max_line_length: 99 | alphanum_fraction: 0.649848

hexsha: d7019cc9a0ccd2e7192ab816a62f2fa0ee1e5a7b | size: 2,765 | ext: py | lang: Python
max_stars:  Python/Clone_Graph.py | YohanGupta/Hacktoberfest2020_ | b395193a1f63345fa57ab32fa8da8dc51afb05be | ["MIT"] | count: 77 | 2020-10-01T01:37:10.000Z to 2021-09-30T08:44:44.000Z
max_issues: Python/Clone_Graph.py | YohanGupta/Hacktoberfest2020_ | b395193a1f63345fa57ab32fa8da8dc51afb05be | ["MIT"] | count: 399 | 2020-09-30T18:48:08.000Z to 2021-04-17T00:16:21.000Z
max_forks:  Python/Clone_Graph.py | YohanGupta/Hacktoberfest2020_ | b395193a1f63345fa57ab32fa8da8dc51afb05be | ["MIT"] | count: 470 | 2020-09-30T18:53:00.000Z to 2021-09-09T10:15:55.000Z

"""
Given a graph, clone the graph and output the cloned version of the graph.
The key thing is to map each node to its cloned version.
Steps
1. Map the starting or given node to its cloned version in a hash table.
2. Traverse the graph using a BFS.
3. For each current node get and clone its edges if each edge has not been cloned or seen before.
4. Add each edge(uncloned version) to the queue.
5. Set the neighbours of the cloned version of the current node to the cloned version of its edges.
6. Once the queue is empty, iteration ends and we return the cloned version of the starting node.
Time complexity is O(n) since we visit every node at least once.
Space complexity is O(n) since we are recreating another graph of size n.
Testcase Walkthrough
1.)
1-2, starting node = 1
- map node 1(starting node) to its clone: {1: 1'}
- add the node to queue: [1] and pop first item to get 1: queue = []
- get edges of current node and clone them: {1: 1', 2:2'}
- link cloned version of current node(1') to cloned edge: 1' -> 2'
- add edge(2) to queue: [2] and pop first item: queue = []
- 2 has no edge therefore queue becomes empty and iteration ends
- return 1'
i.e 1'-2'
2.)
1 - 2
| |
4 - 3 , starting node = 1
- map node 1(starting node) to its clone: {1: 1'}
- add the node to queue: [1] and pop first item to get 1: queue = []
- get edges and clone them: {1: 1', 2:2', 4:4'}
- link cloned version of current node(1') to cloned edge: 1' -> [2', 4']
- add edges to queue: [2, 4] and pop first item to get 2 -> queue = [4]
- 2 has edges [1,3] , repeat step 5: 2' -> [1',3']
- 1 has been cloned and seen so we only add 3 to queue: [4,3]
- pop first item to get 4: queue = [3]
- repeat step 5: 4' -> [1', 3']
- pop first item to get 3: queue = []
- 3 has no edges therefore queue becomes empty and iteration ends
- return 1'
i.e
1' - 2'
| |
4' - 3'
"""
# Definition for a Node.
class Node:
def __init__(self, val = 0, neighbors = None):
self.val = val
self.neighbors = neighbors if neighbors is not None else []
# Solution
def cloneGraph(node):
if not node:
return None
import collections
queue = collections.deque([node]) #Initialize the Queue
cloned = {node: Node(node.val)}
while queue:
curr_node = queue.popleft() # Get current node
edges = curr_node.neighbors
for edge in edges:
if edge not in cloned:
cloned[edge] = Node(edge.val) # Clone the current node's edges
queue.append(edge)
cloned[curr_node].neighbors.append(cloned[edge])
return cloned[node]
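# Example usage (a minimal sketch of the two-node graph from the docstring, here
# made undirected so the cloned edge can be checked in both directions):
#
#     n1, n2 = Node(1), Node(2)
#     n1.neighbors.append(n2)
#     n2.neighbors.append(n1)                           # undirected edge 1 - 2
#     clone = cloneGraph(n1)
#     assert clone is not n1                            # a new node was created
#     assert clone.val == 1 and clone.neighbors[0].val == 2
#     assert clone.neighbors[0].neighbors[0] is clone   # the edge is mirrored in the clone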
avg_line_length: 28.802083 | max_line_length: 99 | alphanum_fraction: 0.6217

hexsha: c6268b88622be83b05dba7b5226695e15ce2e711 | size: 8,119 | ext: py | lang: Python
max_stars:  azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/operations/backup_resource_vault_configs_operations.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | ["MIT"] | count: 4 | 2016-06-17T23:25:29.000Z to 2022-03-30T22:37:45.000Z
max_issues: azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/operations/backup_resource_vault_configs_operations.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | ["MIT"] | count: 54 | 2016-03-25T17:25:01.000Z to 2018-10-22T17:27:54.000Z
max_forks:  azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/operations/backup_resource_vault_configs_operations.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | ["MIT"] | count: 3 | 2016-05-03T20:49:46.000Z to 2017-10-05T21:05:27.000Z

# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class BackupResourceVaultConfigsOperations(object):
"""BackupResourceVaultConfigsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client Api Version. Constant value: "2016-12-01".
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2016-12-01"
self.config = config
def get(
self, vault_name, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Fetches resource vault config.
:param vault_name: The name of the recovery services vault.
:type vault_name: str
:param resource_group_name: The name of the resource group where the
recovery services vault is present.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: :class:`BackupResourceVaultConfigResource
<azure.mgmt.recoveryservicesbackup.models.BackupResourceVaultConfigResource>`
or :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
raw=true
:rtype: :class:`BackupResourceVaultConfigResource
<azure.mgmt.recoveryservicesbackup.models.BackupResourceVaultConfigResource>`
or :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/Subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupconfig/vaultconfig'
path_format_arguments = {
'vaultName': self._serialize.url("vault_name", vault_name, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('BackupResourceVaultConfigResource', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def update(
self, vault_name, resource_group_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Updates vault security config.
:param vault_name: The name of the recovery services vault.
:type vault_name: str
:param resource_group_name: The name of the resource group where the
recovery services vault is present.
:type resource_group_name: str
:param parameters: resource config request
:type parameters: :class:`BackupResourceVaultConfigResource
<azure.mgmt.recoveryservicesbackup.models.BackupResourceVaultConfigResource>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: :class:`BackupResourceVaultConfigResource
<azure.mgmt.recoveryservicesbackup.models.BackupResourceVaultConfigResource>`
or :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
raw=true
:rtype: :class:`BackupResourceVaultConfigResource
<azure.mgmt.recoveryservicesbackup.models.BackupResourceVaultConfigResource>`
or :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/Subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupconfig/vaultconfig'
path_format_arguments = {
'vaultName': self._serialize.url("vault_name", vault_name, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'BackupResourceVaultConfigResource')
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('BackupResourceVaultConfigResource', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
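# Illustrative usage sketch (hypothetical names; in practice these operations are
# instantiated by the generated service client rather than constructed directly):
#
#     ops = BackupResourceVaultConfigsOperations(client, config, serializer, deserializer)
#     vault_config = ops.get('myVault', 'myResourceGroup')
#     vault_config = ops.update('myVault', 'myResourceGroup', parameters=vault_config)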
avg_line_length: 46.130682 | max_line_length: 163 | alphanum_fraction: 0.684321

hexsha: a6f979b24cedecb92cc56f0b094b33ef48fb3825 | size: 142 | ext: py | lang: Python
max_stars:  kwat/path/clean.py | KwatME/ccal | d96dfa811482eee067f346386a2181ec514625f4 | ["MIT"] | count: 5 | 2017-05-05T17:50:28.000Z to 2019-01-30T19:23:02.000Z
max_issues: kwat/path/clean.py | KwatME/ccal | d96dfa811482eee067f346386a2181ec514625f4 | ["MIT"] | count: 5 | 2017-05-05T01:52:31.000Z to 2019-04-20T21:06:05.000Z
max_forks:  kwat/path/clean.py | KwatME/ccal | d96dfa811482eee067f346386a2181ec514625f4 | ["MIT"] | count: 5 | 2017-07-17T18:55:54.000Z to 2019-02-02T04:46:19.000Z

from re import sub
def clean(na):
nac = sub(r"[^\w.]", "_", na.strip().lower())
print("{} => {}".format(na, nac))
return nac
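# Example (hypothetical input; shows the strip/lower/substitution steps):
#
#     clean("My File (v2).txt")
#     # prints "My File (v2).txt => my_file__v2_.txt"
#     # returns "my_file__v2_.txt"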
avg_line_length: 12.909091 | max_line_length: 49 | alphanum_fraction: 0.5

hexsha: ba867831b830e9bab556f3faeb37447c307097c1 | size: 454 | ext: py | lang: Python
max_stars:  imperative/python/megengine/data/transform/__init__.py | Olalaye/MegEngine | 695d24f24517536e6544b07936d189dbc031bbce | ["Apache-2.0"] | count: 5,168 | 2020-03-19T06:10:04.000Z to 2022-03-31T11:11:54.000Z
max_issues: imperative/python/megengine/data/transform/__init__.py | Olalaye/MegEngine | 695d24f24517536e6544b07936d189dbc031bbce | ["Apache-2.0"] | count: 286 | 2020-03-25T01:36:23.000Z to 2022-03-31T10:26:33.000Z
max_forks:  imperative/python/megengine/data/transform/__init__.py | Olalaye/MegEngine | 695d24f24517536e6544b07936d189dbc031bbce | ["Apache-2.0"] | count: 515 | 2020-03-19T06:10:05.000Z to 2022-03-30T09:15:59.000Z

# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from .meta_transform import PseudoTransform, Transform
from .vision import *
avg_line_length: 41.272727 | max_line_length: 88 | alphanum_fraction: 0.757709

hexsha: db9f8c38aada5720b66f02636ada1f841c10776a | size: 2,158 | ext: py | lang: Python
max_stars:  Structural/proxy.py | lpdswing/DesignPatternsPython | 6f4b8416b0624ecda0cca3dadd306f938da9a118 | ["Apache-2.0"] | count: 3 | 2020-07-21T11:43:22.000Z to 2020-07-21T11:56:50.000Z
max_issues: Structural/proxy.py | lpdswing/DesignPatternsPython | 6f4b8416b0624ecda0cca3dadd306f938da9a118 | ["Apache-2.0"] | count: null | null to null
max_forks:  Structural/proxy.py | lpdswing/DesignPatternsPython | 6f4b8416b0624ecda0cca3dadd306f938da9a118 | ["Apache-2.0"] | count: null | null to null

# Proxy pattern
# Virtual proxy
class LazyProperty:
def __init__(self, method):
self.method = method
self.method_name = method.__name__
def __get__(self, instance, owner):
if not instance:
return None
value = self.method(instance)
setattr(instance, self.method_name, value)
return value
class Test:
def __init__(self):
self.x = 'foo'
self.y = 'bar'
self._resource = None
# the resource method is decorated with LazyProperty
@LazyProperty
def resource(self):
print(f'init resource: resource is {self._resource}')
self._resource = tuple(range(5))
return self._resource
# Protection proxy
class NotFindError(Exception):
def __init__(self, msg):
self.msg = msg
class RealSubject:
def __init__(self):
self.score = {
"张三": 90,
"李四": 59,
"王二": 61
}
def num_students(self):
num = len(self.score.keys())
print("The number of students is {num}".format(num=num))
def get_score(self, user_name):
_score = self.score.get(user_name)
print("The score of {user} is {score}".format(user=user_name,
score=_score))
class Proxy(object):
def __init__(self):
self.default_passwd = "9l0skjlsa"
self.real_subject = RealSubject()
def num_students(self):
self.real_subject.num_students()
def get_score(self, user_name):
print("You are visiting {} score ...".format(user_name))
passwd = input("Please input password : ")
if passwd == self.default_passwd:
if user_name in self.real_subject.score.keys():
return self.real_subject.get_score(user_name)
else:
raise NotFindError("The student you are visiting not found.")
else:
raise ValueError("The password you provided is wrong!")
def client():
proxy = Proxy()
proxy.get_score("张三")
if __name__ == '__main__':
t = Test()
print(t.x)
print(t.y)
print(t.resource)
print(t.resource)
# the initialization message is printed only once
client()
avg_line_length: 23.714286 | max_line_length: 77 | alphanum_fraction: 0.57924

hexsha: 9f2203e3f3da901d33d182d7321053c704a5752b | size: 3,684 | ext: py | lang: Python
max_stars:  kobo-book-downloader/__main__.py | jmvermeulen/kobo-book-downloader | 18d039182b1eff648077f6319b943ba1a3890d35 | ["Unlicense"] | count: 146 | 2017-11-06T08:22:33.000Z to 2022-03-28T20:51:34.000Z
max_issues: kobo-book-downloader/__main__.py | jmvermeulen/kobo-book-downloader | 18d039182b1eff648077f6319b943ba1a3890d35 | ["Unlicense"] | count: 17 | 2018-07-25T13:49:51.000Z to 2021-12-08T15:16:53.000Z
max_forks:  kobo-book-downloader/__main__.py | jmvermeulen/kobo-book-downloader | 18d039182b1eff648077f6319b943ba1a3890d35 | ["Unlicense"] | count: 24 | 2019-05-04T12:51:01.000Z to 2022-03-28T20:51:39.000Z

from Commands import Commands
from Globals import Globals
from Kobo import Kobo, KoboException
from Settings import Settings
import argparse
def Initialize() -> None:
Globals.Kobo = Kobo()
Globals.Settings = Settings()
if not Globals.Settings.AreAuthenticationSettingsSet():
Globals.Kobo.AuthenticateDevice()
Globals.Kobo.LoadInitializationSettings()
if not Globals.Settings.IsLoggedIn():
email = input( "Waiting for your input. You can use Shift+Insert to paste from the clipboard. Ctrl+C aborts the program.\n\nKobo e-mail: " )
password = input( "Kobo password: " )
print( """
Open https://authorize.kobo.com/signin in a private/incognito window in your browser, wait till the page
loads (do not login!) then open the developer tools (use F12 in Firefox/Chrome), select the console tab,
and paste the following code there and then press Enter there in the browser.
var newCaptchaDiv = document.createElement( "div" );
newCaptchaDiv.id = "new-grecaptcha-container";
document.getElementById( "grecaptcha-container" ).insertAdjacentElement( "afterend", newCaptchaDiv );
grecaptcha.render( newCaptchaDiv.id, {
sitekey: "6LeEbUwUAAAAADJxtlhMsvgnR7SsFpMm4sirr1CJ",
callback: function( response ) { console.log( "Captcha response:" ); console.log( response ); }
} );
A captcha should show up below the Sign-in form. Once you solve the captcha its response will be written
below the pasted code in the browser's console. Copy the response (the line below "Captcha response:")
and paste it here.
""" )
captcha = input( "Captcha response: " ).strip()
print( "" )
Globals.Kobo.Login( email, password, captcha )
def Main() -> None:
argumentParser = argparse.ArgumentParser( add_help = False )
argumentParser.add_argument( "--help", "-h", default = False, action = "store_true" )
subparsers = argumentParser.add_subparsers( dest = "Command", title = "commands", metavar = "command" )
getParser = subparsers.add_parser( "get", help = "Download book" )
getParser.add_argument( "OutputPath", metavar = "output-path", help = "If the output path is a directory then the file will be named automatically." )
getParser.add_argument( "RevisionId", metavar = "book-id", nargs = "?", help = "The identifier of the book" )
getParser.add_argument( "--all", default = False, action = "store_true", help = "Download all my books" )
infoParser = subparsers.add_parser( "info", help = "Show the location of the program's configuration file" )
listParser = subparsers.add_parser( "list", help = "List unread books" )
listParser.add_argument( "--all", default = False, action = "store_true", help = "List read books too" )
pickParser = subparsers.add_parser( "pick", help = "Download books using interactive selection" )
pickParser.add_argument( "OutputPath", metavar = "output-path", help = "Output path must be an existing directory" )
pickParser.add_argument( "--all", default = False, action = "store_true", help = "List read books too" )
wishListParser = subparsers.add_parser( "wishlist", help = "List your wish listed books" )
arguments = argumentParser.parse_args()
if arguments.Command is None:
Commands.ShowUsage()
return
Initialize()
if arguments.Command == "get":
Commands.GetBookOrBooks( arguments.RevisionId, arguments.OutputPath, arguments.all )
elif arguments.Command == "info":
Commands.Info()
elif arguments.Command == "list":
Commands.ListBooks( arguments.all )
elif arguments.Command == "pick":
Commands.PickBooks( arguments.OutputPath, arguments.all )
elif arguments.Command == "wishlist":
Commands.ListWishListedBooks()
if __name__ == '__main__':
try:
Main()
except KoboException as e:
print( "ERROR: %s" % e )
avg_line_length: 43.341176 | max_line_length: 151 | alphanum_fraction: 0.737242

hexsha: ad683c3a9e5df934cd229ab81c2770f5f442de1e | size: 5,084 | ext: py | lang: Python
max_stars:  evaluate/classifier.py | DylanTao94/ContraD | 115c8e96e1ec2bcae1f37c2428236a59db555799 | ["MIT"] | count: null | null to null
max_issues: evaluate/classifier.py | DylanTao94/ContraD | 115c8e96e1ec2bcae1f37c2428236a59db555799 | ["MIT"] | count: null | null to null
max_forks:  evaluate/classifier.py | DylanTao94/ContraD | 115c8e96e1ec2bcae1f37c2428236a59db555799 | ["MIT"] | count: null | null to null

import torch
from torch.nn import CrossEntropyLoss
from evaluate import BaseEvaluator
from evaluate import AverageMeter
from training.criterion import nt_xent
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def error_k(output, target, ks=(1,)):
"""Computes the precision@k for the specified values of k"""
max_k = max(ks)
batch_size = target.size(0)
_, pred = output.topk(max_k, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
results = []
for k in ks:
correct_k = correct[:k].view(-1).float().sum(0)
results.append(100.0 - correct_k.mul_(100.0 / batch_size))
return results
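# Minimal usage sketch for the two helpers above (toy logits; torch is already
# imported at the top of this module):
#
#     logits = torch.tensor([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
#     target = torch.tensor([1, 0, 0])
#     top1_acc, = accuracy(logits, target, topk=(1,))   # ~66.67 (2 of 3 correct)
#     top1_err, = error_k(logits, target, ks=(1,))      # ~33.33 (100 - accuracy)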
class XEntLoss(BaseEvaluator):
def __init__(self, model):
self._acc = AverageMeter()
self._model = model
self._criterion = CrossEntropyLoss().to(device)
def update(self, inputs, labels):
is_training = self._model.training
self._model.eval()
batch_size = inputs.size(0)
with torch.no_grad():
outputs = self._model(inputs)
loss = self._criterion(outputs.data, labels)
self._acc.update(loss, batch_size)
self._model.train(is_training)
return self._acc.value
@property
def value(self):
return self._acc.value
def summary(self):
return self._acc.average
def reset(self):
self._acc.reset()
class TopkErrorRate(BaseEvaluator):
def __init__(self, model, k=1):
self._acc = AverageMeter()
self._model = model
self.k = k
def update(self, inputs, labels):
is_training = self._model.training
self._model.eval()
batch_size = inputs.size(0)
with torch.no_grad():
outputs = self._model(inputs)
topk, = error_k(outputs.data, labels, ks=(self.k,))
self._acc.update(topk.item(), batch_size)
self._model.train(is_training)
return self._acc.value
@property
def value(self):
return self._acc.value
def summary(self):
return self._acc.average
def reset(self):
self._acc.reset()
class NoisyTopkErrorRate(TopkErrorRate):
def __init__(self, model, noise=None, k=1):
super().__init__(model, k)
if not noise:
noise = lambda x: x
self.noise = noise
def update(self, inputs, labels):
noisy = self.noise(inputs)
return super().update(noisy, labels)
class AdversarialTopkErrorRate(TopkErrorRate):
def __init__(self, model, adversary=None, k=1):
super().__init__(model, k)
if not adversary:
adversary = lambda x, y: x
self.adversary = adversary
def update(self, inputs, labels):
noisy = self.adversary(inputs, labels)
return super().update(noisy, labels)
class NT_XEntLoss(BaseEvaluator):
def __init__(self, model, augment_fn):
self._acc = AverageMeter()
self._model = model
if not augment_fn:
augment_fn = lambda x: x
self.augment_fn = augment_fn
def update(self, inputs, labels):
is_training = self._model.training
self._model.eval()
batch_size = inputs.size(0)
with torch.no_grad():
out1, aux1 = self._model(self.augment_fn(inputs), projection=True)
out2, aux2 = self._model(self.augment_fn(inputs), projection=True)
view1 = aux1['projection']
view2 = aux2['projection']
loss = nt_xent(view1, view2, temperature=0.1, normalize=True)
self._acc.update(loss, 2*batch_size)
self._model.train(is_training)
return self._acc.value
@property
def value(self):
return self._acc.value
def summary(self):
return self._acc.average
def reset(self):
self._acc.reset()
def test_classifier(cls, data_loader, metrics, augment_fn=None, adversary=None):
is_training = cls.training
cls.eval()
evaluators = {
'loss': XEntLoss(cls),
'error@1': TopkErrorRate(cls),
'adv@1': AdversarialTopkErrorRate(cls, adversary),
'noisy@1': NoisyTopkErrorRate(cls, augment_fn),
'nt_xent0.1': NT_XEntLoss(cls, augment_fn)
}
for n, (images, labels) in enumerate(data_loader):
images, labels = images.to(device), labels.to(device)
for key in metrics:
evaluators[key].update(images, labels)
cls.train(is_training)
return {k: evaluators[k].summary() for k in metrics}
avg_line_length: 27.781421 | max_line_length: 88 | alphanum_fraction: 0.620771

hexsha: defe87e6e8cf9ed132ab7613c1f175bd1f3d803f | size: 1,753 | ext: py | lang: Python
max_stars:  bin/prepare_bfs_data.py | digital-land/dataset | 31b20eef25305f2ad3273238d38e0c7da9689cad | ["MIT"] | count: 1 | 2020-03-17T19:10:14.000Z to 2020-03-17T19:10:14.000Z
max_issues: bin/prepare_bfs_data.py | digital-land/dataset | 31b20eef25305f2ad3273238d38e0c7da9689cad | ["MIT"] | count: 10 | 2020-03-17T08:41:31.000Z to 2021-10-05T09:48:52.000Z
max_forks:  bin/prepare_bfs_data.py | digital-land/dataset | 31b20eef25305f2ad3273238d38e0c7da9689cad | ["MIT"] | count: 1 | 2020-02-24T13:55:33.000Z to 2020-02-24T13:55:33.000Z

#!/usr/bin/env python3
import os
import sys
import json
# add parent directory
sys.path.append(".")
from analyse_dataset import BrownfieldDatasetAnalyser
from organisation import fetch_organisations
from points_to_features import convert_json_to_geojson
from brownfield import brownfield_dataset_path
def process_org(org):
sites = da.get_data_for_organisation(org.get("organisation"))
return {
"id": org.get("organisation"),
"statistical_geography": org.get("statistical-geography"),
"name": org.get("name"),
"count": len(sites),
}
def brownfield_map(orgs):
orgs_data = []
for o_id in orgs:
if organisations.get(o_id) is not None:
orgs_data.append(process_org(organisations.get(o_id)))
else:
print("no match for", o_id)
return orgs_data
def create_site_geojson(organisation):
curie_url = "/".join(organisation["path-segments"])
sites = da.get_data_for_organisation(organisation["organisation"])
gjson = convert_json_to_geojson(sites)
with open(
f"docs/brownfield-land/organisation/{curie_url}/sites.geojson", "w"
) as file:
file.write(json.dumps(gjson))
print("preparing brownfield data")
da = BrownfieldDatasetAnalyser(brownfield_dataset_path)
organisations = fetch_organisations()
orgs_with_bfs = da.organisations()
# need to remove any pesky None organisation values
orgs_with_bfs = [o for o in orgs_with_bfs if o is not None]
d = brownfield_map(orgs_with_bfs)
# save summary data needed by map
with open("data/organisation_boundary_data.json", "w") as file:
file.write(json.dumps(d))
# create geojson of sites for each organisation
for o in orgs_with_bfs:
create_site_geojson(organisations[o])
avg_line_length: 27.825397 | max_line_length: 75 | alphanum_fraction: 0.725613

hexsha: aef1b3280b63c03141cc937985e968f56ff27c88 | size: 11,295 | ext: py | lang: Python
max_stars:  volatility/volatility/win32/hashdump.py | williamclot/MemoryVisualizer | 2ff9f30f07519d6578bc36c12f8d08acc9cb4383 | ["MIT"] | count: 2 | 2018-07-16T13:30:40.000Z to 2018-07-17T12:02:05.000Z
max_issues: volatility/volatility/win32/hashdump.py | williamclot/MemoryVisualizer | 2ff9f30f07519d6578bc36c12f8d08acc9cb4383 | ["MIT"] | count: null | null to null
max_forks:  volatility/volatility/win32/hashdump.py | williamclot/MemoryVisualizer | 2ff9f30f07519d6578bc36c12f8d08acc9cb4383 | ["MIT"] | count: null | null to null

# Volatility
# Copyright (c) 2008-2013 Volatility Foundation
# Copyright (c) 2008 Brendan Dolan-Gavitt <bdolangavitt@wesleyan.edu>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
#pylint: disable-msg=C0111
"""
@author: Brendan Dolan-Gavitt
@license: GNU General Public License 2.0
@contact: bdolangavitt@wesleyan.edu
"""
import volatility.obj as obj
import volatility.win32.rawreg as rawreg
import volatility.win32.hive as hive
from Crypto.Hash import MD5, MD4
from Crypto.Cipher import ARC4, DES
from struct import unpack, pack
odd_parity = [
1, 1, 2, 2, 4, 4, 7, 7, 8, 8, 11, 11, 13, 13, 14, 14,
16, 16, 19, 19, 21, 21, 22, 22, 25, 25, 26, 26, 28, 28, 31, 31,
32, 32, 35, 35, 37, 37, 38, 38, 41, 41, 42, 42, 44, 44, 47, 47,
49, 49, 50, 50, 52, 52, 55, 55, 56, 56, 59, 59, 61, 61, 62, 62,
64, 64, 67, 67, 69, 69, 70, 70, 73, 73, 74, 74, 76, 76, 79, 79,
81, 81, 82, 82, 84, 84, 87, 87, 88, 88, 91, 91, 93, 93, 94, 94,
97, 97, 98, 98, 100, 100, 103, 103, 104, 104, 107, 107, 109, 109, 110, 110,
112, 112, 115, 115, 117, 117, 118, 118, 121, 121, 122, 122, 124, 124, 127, 127,
128, 128, 131, 131, 133, 133, 134, 134, 137, 137, 138, 138, 140, 140, 143, 143,
145, 145, 146, 146, 148, 148, 151, 151, 152, 152, 155, 155, 157, 157, 158, 158,
161, 161, 162, 162, 164, 164, 167, 167, 168, 168, 171, 171, 173, 173, 174, 174,
176, 176, 179, 179, 181, 181, 182, 182, 185, 185, 186, 186, 188, 188, 191, 191,
193, 193, 194, 194, 196, 196, 199, 199, 200, 200, 203, 203, 205, 205, 206, 206,
208, 208, 211, 211, 213, 213, 214, 214, 217, 217, 218, 218, 220, 220, 223, 223,
224, 224, 227, 227, 229, 229, 230, 230, 233, 233, 234, 234, 236, 236, 239, 239,
241, 241, 242, 242, 244, 244, 247, 247, 248, 248, 251, 251, 253, 253, 254, 254
]
# Permutation matrix for boot key
p = [ 0x8, 0x5, 0x4, 0x2, 0xb, 0x9, 0xd, 0x3,
0x0, 0x6, 0x1, 0xc, 0xe, 0xa, 0xf, 0x7 ]
# Constants for SAM decrypt algorithm
aqwerty = "!@#$%^&*()qwertyUIOPAzxcvbnmQQQQQQQQQQQQ)(*@&%\0"
anum = "0123456789012345678901234567890123456789\0"
antpassword = "NTPASSWORD\0"
almpassword = "LMPASSWORD\0"
lmkey = "KGS!@#$%"
empty_lm = "aad3b435b51404eeaad3b435b51404ee".decode('hex')
empty_nt = "31d6cfe0d16ae931b73c59d7e0c089c0".decode('hex')
def str_to_key(s):
key = []
key.append(ord(s[0]) >> 1)
key.append(((ord(s[0]) & 0x01) << 6) | (ord(s[1]) >> 2))
key.append(((ord(s[1]) & 0x03) << 5) | (ord(s[2]) >> 3))
key.append(((ord(s[2]) & 0x07) << 4) | (ord(s[3]) >> 4))
key.append(((ord(s[3]) & 0x0F) << 3) | (ord(s[4]) >> 5))
key.append(((ord(s[4]) & 0x1F) << 2) | (ord(s[5]) >> 6))
key.append(((ord(s[5]) & 0x3F) << 1) | (ord(s[6]) >> 7))
key.append(ord(s[6]) & 0x7F)
for i in range(8):
key[i] = (key[i] << 1)
key[i] = odd_parity[key[i]]
return "".join(chr(k) for k in key)
def sid_to_key(sid):
s1 = ""
s1 += chr(sid & 0xFF)
s1 += chr((sid >> 8) & 0xFF)
s1 += chr((sid >> 16) & 0xFF)
s1 += chr((sid >> 24) & 0xFF)
s1 += s1[0]
s1 += s1[1]
s1 += s1[2]
s2 = s1[3] + s1[0] + s1[1] + s1[2]
s2 += s2[0] + s2[1] + s2[2]
return str_to_key(s1), str_to_key(s2)
def hash_lm(pw):
pw = pw[:14].upper()
pw = pw + ('\0' * (14 - len(pw)))
d1 = DES.new(str_to_key(pw[:7]), DES.MODE_ECB)
d2 = DES.new(str_to_key(pw[7:]), DES.MODE_ECB)
return d1.encrypt(lmkey) + d2.encrypt(lmkey)
def hash_nt(pw):
return MD4.new(pw.encode('utf-16-le')).digest()
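# Quick sanity check against widely published test vectors (values taken from public
# references, not from this module; Python 2 string/hex idioms as used in this file):
#
#     assert hash_nt('password').encode('hex') == '8846f7eaee8fb117ad06bdd830b7586c'
#     assert hash_lm('password').encode('hex') == 'e52cac67419a9a224a3b108f3fa6cb6d'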
def find_control_set(sysaddr):
root = rawreg.get_root(sysaddr)
if not root:
return 1
csselect = rawreg.open_key(root, ["Select"])
if not csselect:
return 1
for v in rawreg.values(csselect):
if v.Name == "Current":
return v.Data
return 1
def get_bootkey(sysaddr):
cs = find_control_set(sysaddr)
lsa_base = ["ControlSet{0:03}".format(cs), "Control", "Lsa"]
lsa_keys = ["JD", "Skew1", "GBG", "Data"]
root = rawreg.get_root(sysaddr)
if not root:
return None
lsa = rawreg.open_key(root, lsa_base)
if not lsa:
return None
bootkey = ""
for lk in lsa_keys:
key = rawreg.open_key(lsa, [lk])
class_data = sysaddr.read(key.Class, key.ClassLength)
if class_data == None:
return ""
bootkey += class_data.decode('utf-16-le').decode('hex')
bootkey_scrambled = ""
for i in range(len(bootkey)):
bootkey_scrambled += bootkey[p[i]]
return bootkey_scrambled
def get_hbootkey(samaddr, bootkey):
sam_account_path = ["SAM", "Domains", "Account"]
if not bootkey:
return None
root = rawreg.get_root(samaddr)
if not root:
return None
sam_account_key = rawreg.open_key(root, sam_account_path)
if not sam_account_key:
return None
F = None
for v in rawreg.values(sam_account_key):
if v.Name == 'F':
F = samaddr.read(v.Data, v.DataLength)
if not F:
return None
md5 = MD5.new()
md5.update(F[0x70:0x80] + aqwerty + bootkey + anum)
rc4_key = md5.digest()
rc4 = ARC4.new(rc4_key)
hbootkey = rc4.encrypt(F[0x80:0xA0])
return hbootkey
def get_user_keys(samaddr):
user_key_path = ["SAM", "Domains", "Account", "Users"]
root = rawreg.get_root(samaddr)
if not root:
return []
user_key = rawreg.open_key(root, user_key_path)
if not user_key:
return []
return [k for k in rawreg.subkeys(user_key) if k.Name != "Names"]
def decrypt_single_hash(rid, hbootkey, enc_hash, lmntstr):
(des_k1, des_k2) = sid_to_key(rid)
d1 = DES.new(des_k1, DES.MODE_ECB)
d2 = DES.new(des_k2, DES.MODE_ECB)
md5 = MD5.new()
md5.update(hbootkey[:0x10] + pack("<L", rid) + lmntstr)
rc4_key = md5.digest()
rc4 = ARC4.new(rc4_key)
obfkey = rc4.encrypt(enc_hash)
hash = d1.decrypt(obfkey[:8]) + d2.decrypt(obfkey[8:])
return hash
def decrypt_hashes(rid, enc_lm_hash, enc_nt_hash, hbootkey):
# LM Hash
if enc_lm_hash:
lmhash = decrypt_single_hash(rid, hbootkey, enc_lm_hash, almpassword)
else:
lmhash = ""
# NT Hash
if enc_nt_hash:
nthash = decrypt_single_hash(rid, hbootkey, enc_nt_hash, antpassword)
else:
nthash = ""
return lmhash, nthash
def encrypt_single_hash(rid, hbootkey, hash, lmntstr):
(des_k1, des_k2) = sid_to_key(rid)
d1 = DES.new(des_k1, DES.MODE_ECB)
d2 = DES.new(des_k2, DES.MODE_ECB)
enc_hash = d1.encrypt(hash[:8]) + d2.encrypt(hash[8:])
md5 = MD5.new()
md5.update(hbootkey[:0x10] + pack("<L", rid) + lmntstr)
rc4_key = md5.digest()
rc4 = ARC4.new(rc4_key)
obfkey = rc4.encrypt(enc_hash)
return obfkey
def encrypt_hashes(rid, lm_hash, nt_hash, hbootkey):
# LM Hash
if lm_hash:
enc_lmhash = encrypt_single_hash(rid, hbootkey, lm_hash, almpassword)
else:
enc_lmhash = ""
# NT Hash
if nt_hash:
enc_nthash = encrypt_single_hash(rid, hbootkey, nt_hash, antpassword)
else:
enc_nthash = ""
return enc_lmhash, enc_nthash
def get_user_hashes(user_key, hbootkey):
samaddr = user_key.obj_vm
rid = int(str(user_key.Name), 16)
V = None
for v in rawreg.values(user_key):
if v.Name == 'V':
V = samaddr.read(v.Data, v.DataLength)
if not V:
return None
lm_offset = unpack("<L", V[0x9c:0xa0])[0] + 0xCC + 4
lm_len = unpack("<L", V[0xa0:0xa4])[0] - 4
nt_offset = unpack("<L", V[0xa8:0xac])[0] + 0xCC + 4
nt_len = unpack("<L", V[0xac:0xb0])[0] - 4
if lm_len:
enc_lm_hash = V[lm_offset:lm_offset + 0x10]
else:
enc_lm_hash = ""
if nt_len:
enc_nt_hash = V[nt_offset:nt_offset + 0x10]
else:
enc_nt_hash = ""
return decrypt_hashes(rid, enc_lm_hash, enc_nt_hash, hbootkey)
def get_user_name(user_key):
samaddr = user_key.obj_vm
V = None
for v in rawreg.values(user_key):
if v.Name == 'V':
V = samaddr.read(v.Data, v.DataLength)
if not V:
return None
name_offset = unpack("<L", V[0x0c:0x10])[0] + 0xCC
name_length = unpack("<L", V[0x10:0x14])[0]
if name_length > len(V):
return None
username = V[name_offset:name_offset + name_length].decode('utf-16-le')
return username
def get_user_desc(user_key):
samaddr = user_key.obj_vm
V = None
for v in rawreg.values(user_key):
if v.Name == 'V':
V = samaddr.read(v.Data, v.DataLength)
if not V:
return None
desc_offset = unpack("<L", V[0x24:0x28])[0] + 0xCC
desc_length = unpack("<L", V[0x28:0x2c])[0]
desc = V[desc_offset:desc_offset + desc_length].decode('utf-16-le')
return desc
def dump_hashes(sysaddr, samaddr):
if sysaddr == None:
yield obj.NoneObject("SYSTEM address is None: Did you use the correct profile?")
if samaddr == None:
yield obj.NoneObject("SAM address is None: Did you use the correct profile?")
bootkey = get_bootkey(sysaddr)
hbootkey = get_hbootkey(samaddr, bootkey)
if hbootkey:
for user in get_user_keys(samaddr):
ret = get_user_hashes(user, hbootkey)
if not ret:
yield obj.NoneObject("Cannot get user hashes for {0}".format(user))
else:
lmhash, nthash = ret
if not lmhash:
lmhash = empty_lm
if not nthash:
nthash = empty_nt
## temporary fix to prevent UnicodeDecodeError backtraces
## however this can cause truncated user names as a result
name = get_user_name(user)
if name is not None:
name = name.encode('ascii', 'ignore')
else:
name = "(unavailable)"
yield "{0}:{1}:{2}:{3}:::".format(name, int(str(user.Name), 16),
lmhash.encode('hex'), nthash.encode('hex'))
else:
yield obj.NoneObject("Hbootkey is not valid")
def dump_memory_hashes(addr_space, config, syshive, samhive):
if syshive != None and samhive != None:
sysaddr = hive.HiveAddressSpace(addr_space, config, syshive)
samaddr = hive.HiveAddressSpace(addr_space, config, samhive)
return dump_hashes(sysaddr, samaddr)
return obj.NoneObject("SYSTEM or SAM address is None: Did you use the correct profile?")
def dump_file_hashes(syshive_fname, samhive_fname):
sysaddr = hive.HiveFileAddressSpace(syshive_fname)
samaddr = hive.HiveFileAddressSpace(samhive_fname)
return dump_hashes(sysaddr, samaddr)
avg_line_length: 31.816901 | max_line_length: 93 | alphanum_fraction: 0.607083

hexsha: 151e3b1d4f8ecc9ac04e34d12cfc782f0050d00f | size: 1,227 | ext: py | lang: Python
max_stars:  opacus/tests/grad_samples/instance_norm1d_test.py | iamgroot42/opacus | 51708309e71c030aa2bf15d6dccc7bcbbe9ed570 | ["Apache-2.0"] | count: 195 | 2019-12-11T23:55:47.000Z to 2020-08-27T04:17:29.000Z
max_issues: opacus/tests/grad_samples/instance_norm1d_test.py | iamgroot42/opacus | 51708309e71c030aa2bf15d6dccc7bcbbe9ed570 | ["Apache-2.0"] | count: 35 | 2020-01-21T11:04:29.000Z to 2020-08-27T05:30:57.000Z
max_forks:  opacus/tests/grad_samples/instance_norm1d_test.py | iamgroot42/opacus | 51708309e71c030aa2bf15d6dccc7bcbbe9ed570 | ["Apache-2.0"] | count: 39 | 2020-01-04T20:05:20.000Z to 2020-08-25T23:09:38.000Z

#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hypothesis.strategies as st
import torch
import torch.nn as nn
from hypothesis import given, settings
from .common import GradSampleHooks_test
class InstanceNorm1d_test(GradSampleHooks_test):
@given(
N=st.integers(1, 4),
C=st.integers(1, 3),
W=st.integers(5, 10),
)
@settings(deadline=10000)
def test_3d_input(
self,
N: int,
C: int,
W: int,
):
x = torch.randn([N, C, W])
norm = nn.InstanceNorm1d(num_features=C, affine=True, track_running_stats=False)
self.run_test(x, norm, batch_first=True)
avg_line_length: 29.926829 | max_line_length: 88 | alphanum_fraction: 0.695192

hexsha: e58ceabd1879c49a97b65c149982935f4b4fde66 | size: 80,233 | ext: py | lang: Python
max_stars:  tests/python/relay/test_op_level3.py | XiaoSong9905/tvm | 48940f697e15d5b50fa1f032003e6c700ae1e423 | ["Apache-2.0"] | count: 90 | 2021-11-30T11:58:10.000Z to 2022-03-31T02:24:04.000Z
max_issues: tests/python/relay/test_op_level3.py | XiaoSong9905/tvm | 48940f697e15d5b50fa1f032003e6c700ae1e423 | ["Apache-2.0"] | count: 64 | 2021-11-22T23:58:23.000Z to 2022-03-31T03:19:22.000Z
max_forks:  tests/python/relay/test_op_level3.py | XiaoSong9905/tvm | 48940f697e15d5b50fa1f032003e6c700ae1e423 | ["Apache-2.0"] | count: 27 | 2021-12-09T22:39:27.000Z to 2022-03-24T23:21:48.000Z

# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Support level3 operator test cases.
"""
import sys
from typing import Callable, Optional
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import relay, te
from tvm.error import TVMError
from tvm.relay import create_executor, transform
from tvm.relay.testing import check_grad, run_infer_type
from utils import ref_funcs
executor_kind = tvm.testing.parameter("graph", "debug")
class TestZerosOnes:
config = {"zeros": (relay.zeros, np.zeros), "ones": (relay.ones, np.ones)}
op, ref = tvm.testing.parameters(*config.values(), ids=config.keys())
def test_zeros_ones(self, op, ref):
y = op(shape=(124, 50), dtype="float64")
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((124, 50), "float64")
intrp_res = create_executor().evaluate(y).numpy()
np.testing.assert_allclose(intrp_res, ref((124, 50), "float64"))
class TestUnaryIdentity:
config = {
"zeros_like": (relay.zeros_like, np.zeros_like),
"ones_like": (relay.ones_like, np.ones_like),
"ceil": (relay.ceil, np.ceil),
"floor": (relay.floor, np.floor),
"trunc": (relay.trunc, np.trunc),
"round": (relay.round, np.round),
"abs": (relay.abs, np.abs),
"copy": (relay.copy, None), # np.copy
"negative": (relay.negative, np.negative),
"sign": (relay.sign, np.sign),
}
op, ref = tvm.testing.parameters(*config.values(), ids=config.keys())
def test_unary_identity(self, op, ref):
shape = (8, 9, 4)
x = relay.var("x", relay.TensorType(shape, "float32"))
y = op(x)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(shape, "float32")
if ref is not None:
data = np.random.rand(*shape).astype("float32")
op_res = create_executor().evaluate(y, {x: relay.const(data)})
ref_res = ref(data)
np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=0.01)
def test_cast():
x = relay.var("x", relay.TensorType((8, 9, 4), "float32"))
y = x.astype("int32")
yy = run_infer_type(y)
assert "dtype=" in yy.astext()
assert yy.checked_type == relay.TensorType((8, 9, 4), "int32")
x = relay.var("x", relay.TensorType((8, 9, 4), "float32"))
y = relay.cast(x, "int32")
yy = run_infer_type(y)
assert "dtype=" in yy.astext()
assert yy.checked_type == relay.TensorType((8, 9, 4), "int32")
def test_sliding_window():
# Slide a window of shape (3, 4, 5) over the x tensor, beginning with
# dimension 1, which slides the window over the two subtensors of shape (3,
# 32, 32).
x = relay.var("x", relay.TensorType((2, 3, 32, 32), "float32"))
y = relay.sliding_window(x, 1, [3, 4, 5], [1, 2, 3])
# The resulting shape still has batch size 2. Each dimension in (1, 15, 10)
# represents the locations where we were able to form a window; that is, we
# were able to place the window in one place along the dimension of length
# 3, 15 places along the dimension of length 32 (when striding by 2), and 10
# places along the second dimension of length 32 (when striding by 3). The
# remaining dimensions (3, 4, 5) represent the formed windows.
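    # Illustrative shape arithmetic for the window locations above (added for
    # reference, not part of the original test):
    #   dim 1: (3  - 3) // 1 + 1 = 1
    #   dim 2: (32 - 4) // 2 + 1 = 15
    #   dim 3: (32 - 5) // 3 + 1 = 10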
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((2, 1, 15, 10, 3, 4, 5), "float32")
data = np.random.rand(2, 3, 32, 32).astype("float32")
intrp = create_executor()
result = intrp.evaluate(y, {x: relay.const(data)})
result_np = result.numpy()
assert result_np.shape == (2, 1, 15, 10, 3, 4, 5)
assert np.array_equal(result_np[0, 0, 0, 0, :, :, :], data[0, :, 0:4, 0:5])
assert np.array_equal(result_np[1, 0, 7, 3, :, :, :], data[1, :, 14:18, 9:14])
assert np.array_equal(result_np[1, 0, 14, 9, :, :, :], data[1, :, 28:32, 27:32])
def test_clip():
a = relay.var("a", relay.TensorType((10, 4), "float32"))
y = relay.clip(a, 1.0, 4.0)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((10, 4), "float32")
data = np.random.rand(10, 4).astype("float32")
op_res = create_executor().evaluate(y, {a: relay.const(data)})
ref_res = np.clip(data, 1.0, 4.0)
np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=0.01)
def test_fixed_point_multiply():
# Test 23 * 1/16
# [m,s] = [0.5, -3] = frexp(1/16)
# M = 0.5*2^31 = 1073741824
# so M = 1073741824 and s = -3
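    # For reference, these constants can be reproduced with NumPy (illustrative
    # sketch only, not used by the test):
    #   m, s = np.frexp(1 / 16)        # m = 0.5, s = -3
    #   M = int(round(m * 2 ** 31))    # 1073741824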
a = relay.var("a", relay.TensorType((10, 4), "int32"))
y = relay.fixed_point_multiply(a, 1073741824, -3)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((10, 4), "int32")
data = 23 * np.ones((10, 4)).astype("int32")
op_res = create_executor().evaluate(y, {a: relay.const(data)})
ref_res = np.ones((10, 4)).astype("int32")
np.testing.assert_allclose(op_res.numpy(), ref_res, atol=1)
def test_reinterpret():
a = relay.var("a", relay.TensorType((1000, 4), "float32"))
y = relay.reinterpret(a, "int32")
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((1000, 4), "int32")
data = np.random.randn(1000, 4).astype("float32") * 1000
op_res = create_executor().evaluate(y, {a: relay.const(data)})
ref_res = data.view("int32")
np.testing.assert_equal(op_res.numpy(), ref_res)
def test_approximate_transcendental():
def C(x):
return relay.expr.const(x, "float32")
def approx_exp(x):
# An approximation derived from Opus,
# https://github.com/xiph/opus/blob/c1c247/celt/mathops.h#L147-L165
x = relay.minimum(relay.maximum(x, C(-88.0)), C(88.0))
x = C(127.0) + x * C(1.44269504)
xf = relay.floor(x)
i = relay.cast(xf, "int32")
x = x - xf
Y = C(0.99992522) + x * (C(0.69583354) + x * (C(0.22606716) + x * C(0.078024523)))
exponent = relay.left_shift(i, relay.expr.const(23, "int32"))
exponent = relay.reinterpret(exponent, "float32")
return exponent * Y
def approximate_sigmoid(x):
y = approx_exp(x)
return y / (y + C(1.0))
def approximate_tanh(x):
x = x * C(2.0)
y = approx_exp(x)
return (y - C(1.0)) / (y + C(1.0))
a = relay.var("a", relay.TensorType((1000,), "float32"))
y = approximate_sigmoid(a)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((1000,), "float32")
data = np.linspace(-5, 5, 1000).astype("float32")
op_res = create_executor().evaluate(y, {a: relay.const(data)})
def reference_sigmoid(x):
return np.exp(-np.logaddexp(0, -x))
np.testing.assert_allclose(op_res.numpy(), reference_sigmoid(data), atol=2e-5, rtol=1e-9)
y = approximate_tanh(a)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((1000,), "float32")
data = np.linspace(-5, 5, 1000).astype("float32")
op_res = create_executor().evaluate(y, {a: relay.const(data)})
def reference_tanh(x):
return np.tanh(x)
np.testing.assert_allclose(op_res.numpy(), reference_tanh(data), atol=4e-5, rtol=1e-9)
class TestSqueeze:
shape, dtype, axis = tvm.testing.parameters(
((1, 3, 2, 5), "float32", None),
((1, 3, 1), "float32", [0]),
((1, 2, 1, 2, 1), "float32", [0, 2]),
)
def test_squeeze(self, shape, dtype, axis):
x = relay.var("x", relay.TensorType(shape, dtype))
squeeze = relay.squeeze(x, axis=axis)
np_axis = tuple(axis) if axis is not None else None
data = np.random.random_sample(shape).astype(dtype)
op_res = create_executor().evaluate(squeeze, {x: relay.const(data)})
ref_res = np.squeeze(data, axis=np_axis)
np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=0.01)
def test_transpose_infer_type():
n, t, d = te.size_var("n"), te.size_var("t"), 100
x = relay.var("x", relay.TensorType((n, t, d), "float32"))
y = relay.transpose(x, axes=(1, 0, 2))
assert "axes=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((t, n, 100), "float32")
y = relay.transpose(x)
assert "axes=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((100, t, n), "float32")
def test_transpose(target, dev, executor_kind):
dshape = (2, 3, 4)
axes = (0, 2, 1)
x = relay.var("x", relay.TensorType(dshape, "float32"))
z = relay.transpose(x, axes=axes)
func = relay.Function([x], z)
x_data = np.random.uniform(low=-1, high=1, size=dshape).astype("float32")
ref_res = np.transpose(x_data, axes=axes)
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
def test_squeeze_infer_type():
n, t, d = 1, 4, 1
x = relay.var("x", relay.TensorType((n, t, d), "float32"))
y = relay.squeeze(x, axis=(2,))
assert "axis=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((1, 4), "float32")
n, t, d = 1, 4, 1
x = relay.var("x", relay.TensorType((n, t, d), "float32"))
y = relay.squeeze(x)
assert "axis=" not in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((4,), "float32")
@pytest.mark.xfail(raises=tvm._ffi.base.TVMError)
def test_squeeze_bad_axes_infer_type():
n, t, d = 1, 4, 1
x = relay.var("x", relay.TensorType((n, t, d), "float32"))
y = relay.squeeze(x, axis=(1,))
yy = run_infer_type(y)
def test_reshape_infer_type():
n, t, d1, d2 = 10, 20, 100, 20
x = relay.var("x", relay.TensorType((n, t, d1, d2), "float32"))
y = relay.reshape(x, newshape=(n, t, 2000))
assert "newshape=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, t, 2000), "float32")
class TestReshape:
shape, newshape, oshape = tvm.testing.parameters(
((2, 3, 4), (8, 3), (8, 3)),
((4, 7), (2, 7, 2), (2, 7, 2)),
((2, 3, 4), (4, 0, 2), (4, 3, 2)),
((2, 3, 4), (2, 0, 0), (2, 3, 4)),
((2, 3, 4), (0, -1), (2, 12)),
((2, 3, 4), (-1, 0), (8, 3)),
((2, 3, 4), (2, -2), (2, 3, 4)),
((2, 3, 4), (-2, 1, 1), (2, 3, 4, 1, 1)),
((2, 3, 4), (-3, 4), (6, 4)),
((2, 3, 4, 5), (-3, -3), (6, 20)),
((2, 3, 4), (0, -3), (2, 12)),
((2, 3, 4), (-3, -2), (6, 4)),
((2, 3, 4), (-4, 1, 2, -2), (1, 2, 3, 4)),
((2, 3, 4), (2, -4, -1, 3, -2), (2, 1, 3, 4)),
((1,), (), ()),
)
def test_reshape(self, target, dev, executor_kind, shape, newshape, oshape):
x = relay.var("x", relay.TensorType(shape, "float32"))
z = relay.reshape(x, newshape=newshape)
zz = run_infer_type(z)
assert "newshape=" in z.astext()
assert zz.checked_type == relay.ty.TensorType(oshape, "float32")
func = relay.Function([x], z)
check_grad(func)
x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
ref_res = np.reshape(x_data, oshape)
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
def test_reshape_fail():
with pytest.raises(TVMError) as reshape_err:
x = relay.var("x", relay.TensorType([2, 3], "float32"))
z = relay.reshape(x, [7])
zz = run_infer_type(z)
def test_reshape_like_infer_type():
# concrete shape
x = relay.var("x", relay.TensorType((1, 2, 3), "float32"))
y = relay.var("y", relay.TensorType((1, 6), "float32"))
z = relay.reshape_like(x, y)
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((1, 6), "float32")
# symbolic shape
n, c, h, w = te.size_var("n"), 2, 3, te.size_var("w")
x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
y = relay.var("y", relay.TensorType((1, 8, 8), "float32"))
z = relay.reshape_like(x, y)
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((1, 8, 8), "float32")
# partial reshaping
x = relay.var("x", relay.TensorType((1, 2, 3, 4), "float32"))
y = relay.var("y", relay.TensorType((1, 6, 5), "float32"))
z = relay.reshape_like(x, y, lhs_begin=1, lhs_end=3, rhs_begin=1, rhs_end=2)
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((1, 6, 4), "float32")
x = relay.var("x", relay.TensorType((1, 2, 3, 4), "float32"))
y = relay.var("y", relay.TensorType((2, 3, 4, 1, 6), "float32"))
z = relay.reshape_like(x, y, rhs_end=3)
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((2, 3, 4), "float32")
z = relay.reshape_like(x, y, rhs_begin=2)
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((4, 1, 6), "float32")
# symbolic partial reshaping
n, c, h, w = te.size_var("n"), 2, 3, te.size_var("w")
x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
y = relay.var("y", relay.TensorType((5, 6), "float32"))
z = relay.var("z", relay.TensorType((4,), "float32"))
w = relay.reshape_like(x, y, lhs_end=3)
w = relay.reshape_like(w, z, lhs_begin=2)
w = run_infer_type(w)
assert w.checked_type == relay.TensorType((5, 6, 4), "float32")
class TestReshapeLike:
shape, oshape, shape_like, reshape_like_kwargs = tvm.testing.parameters(
((2, 3, 4), (1, 8, 3), None, {}),
((4, 7), (2, 7, 2), None, {}),
((1, 2, 3, 4), (1, 6, 4), (1, 6, 5), dict(lhs_begin=1, lhs_end=3, rhs_begin=1, rhs_end=2)),
)
def test_reshape_like(
self, target, dev, executor_kind, shape, oshape, shape_like=None, reshape_like_kwargs={}
):
if shape_like is None:
shape_like = oshape
x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
y_data = np.random.uniform(low=-1, high=1, size=shape_like).astype("float32")
ref_res = np.reshape(x_data, oshape)
x = relay.var("x", relay.TensorType(shape, "float32"))
y = relay.var("x", relay.TensorType(shape_like, "float32"))
z = relay.reshape_like(x, y, **reshape_like_kwargs)
zz = run_infer_type(z)
assert zz.checked_type == relay.ty.TensorType(ref_res.shape, "float32")
func = relay.Function([x, y], z)
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_data, y_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
class TestTakeInferType:
d1, d2, d3 = te.var("d1"), te.var("d2"), te.var("d3")
d4, d5, d6 = te.var("d4"), te.var("d5"), te.var("d6")
dshape, indices_shape, oshape, axis = tvm.testing.parameters(
((d1,), (1,), (1,), 0),
((4,), (d1, d2), (d1, d2), None),
((3, 3, 3), (1, d2), (1, d2), None),
((d1, d2), (d3, d4, d5), (d3, d4, d5, d2), 0),
((d1, d2), (d3, d4, d5), (d1, d3, d4, d5), 1),
((d1, d2, d3, d4), (d5, d6), (d1, d2, d5, d6, d4), -2),
)
def test_take(self, dshape, indices_shape, oshape, axis):
x = relay.var("x", relay.TensorType(dshape, "float32"))
indices = relay.var("indices", relay.TensorType(indices_shape, "int32"))
y = relay.take(x, indices, axis=axis)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(oshape, "float32")
class TestTake:
src_shape, indices_src, axis, mode, indices_dtype = tvm.testing.parameters(
((4,), [1], None, "clip", "int32"),
((4,), [[0, 1, 2, 3]], None, "clip", "int32"),
((3, 3, 3), [[11, 25]], None, "clip", "int32"),
((4,), [[0, 1], [2, 3]], None, "clip", "int32"),
((4,), [1], 0, "clip", "int32"),
((2, 2), [[[1, 0], [0, 1]]], 0, "clip", "int32"),
((2, 2), [[[1, 0], [0, 1]]], 1, "clip", "int32"),
((4, 3, 5, 6), [[2, 1, 0, 0]], -2, "clip", "int32"),
((3, 4), [-5, 20], None, "clip", "int32"),
((3, 4), [-5, 20], None, "wrap", "int32"),
((3, 4), [-1, 2], 0, "clip", "int32"),
((3, 4), [-1, 2], 0, "wrap", "int32"),
((3, 4), [-1, 2], 1, "clip", "int32"),
((3, 4), [-1, 2], 1, "wrap", "int32"),
((3, 3, 3), [[11, 25]], None, "fast", "int32"),
((3, 4), [0, 2], 0, "fast", "int32"),
((3, 4), [0, 2], 1, "fast", "int32"),
((3, 4), [1, 2], 1, "clip", "uint32"),
((3, 4), [1, 2], 1, "wrap", "uint16"),
((3, 3, 3), [1, 2], None, "fast", "uint16"),
((3, 4), [0, 2], 0, "fast", "uint8"),
)
# Incorrect numeric output in some cases on vulkan
@tvm.testing.known_failing_targets("vulkan")
def test_take(
self, target, dev, executor_kind, src_shape, indices_src, axis, mode, indices_dtype
):
src_dtype = "float32"
indices_src = np.array(indices_src, dtype=indices_dtype)
x = relay.var("x", relay.TensorType(src_shape, src_dtype))
indices = relay.var("indices", relay.TensorType(indices_src.shape, indices_dtype))
z = relay.take(x, indices, axis=axis, mode=mode)
func = relay.Function([x, indices], z)
x_data = np.random.uniform(low=-1, high=1, size=src_shape).astype(src_dtype)
np_mode = "raise" if mode == "fast" else mode
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_data, indices_src
)
        # Older versions of numpy internally cast the indices inside take, which may
        # violate safety rules. We have such a version in the i386 CI image.
indices_src = indices_src.astype("int32")
ref_res = np.take(x_data, indices=indices_src, axis=axis, mode=np_mode)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
class TestSplitInferType:
idxd = tvm.tir.indexdiv
d1, d2, d3, d4 = te.var("d1"), te.var("d2"), te.var("d3"), te.var("d4")
axis = te.var("axis")
dshape, indices_or_sections, ret_type, axis = tvm.testing.parameters(
(
(5, 5, 2, 2),
5,
relay.ty.TupleType(
tvm.runtime.convert(
[
relay.ty.TensorType((5, 1, 2, 2), "float32"),
relay.ty.TensorType((5, 1, 2, 2), "float32"),
relay.ty.TensorType((5, 1, 2, 2), "float32"),
relay.ty.TensorType((5, 1, 2, 2), "float32"),
relay.ty.TensorType((5, 1, 2, 2), "float32"),
]
)
),
1,
),
(
(5, 5, 2, 2),
5,
relay.ty.TupleType(
tvm.runtime.convert(
[
relay.ty.TensorType((1, 5, 2, 2), "float32"),
relay.ty.TensorType((1, 5, 2, 2), "float32"),
relay.ty.TensorType((1, 5, 2, 2), "float32"),
relay.ty.TensorType((1, 5, 2, 2), "float32"),
relay.ty.TensorType((1, 5, 2, 2), "float32"),
]
)
),
0,
),
(
(d1, d2, d3, d4),
4,
relay.ty.TupleType(
tvm.runtime.convert(
[
relay.ty.TensorType((d1, d2, idxd(d3, 4), d4), "float32"),
relay.ty.TensorType((d1, d2, idxd(d3, 4), d4), "float32"),
relay.ty.TensorType((d1, d2, idxd(d3, 4), d4), "float32"),
relay.ty.TensorType((d1, d2, idxd(d3, 4), d4), "float32"),
]
)
),
2,
),
(
(d1, d2, d3, d4),
2,
relay.ty.TupleType(
tvm.runtime.convert(
[
relay.ty.TensorType((idxd(d1, 2), d2, d3, d4), "float32"),
relay.ty.TensorType((idxd(d1, 2), d2, d3, d4), "float32"),
]
)
),
0,
),
(
(d1, d2, d3, d4),
(2, 4, 7),
relay.ty.TupleType(
tvm.runtime.convert(
[
relay.ty.TensorType((d1, 2, d3, d4), "float32"),
relay.ty.TensorType((d1, 2, d3, d4), "float32"),
relay.ty.TensorType((d1, 3, d3, d4), "float32"),
relay.ty.TensorType((d1, (d2 - 7), d3, d4), "float32"),
]
)
),
1,
),
(
(d1, d2, d3, d4),
tuple(np.array([2, 4, 7]).astype(np.int64)),
relay.ty.TupleType(
tvm.runtime.convert(
[
relay.ty.TensorType((d1, 2, d3, d4), "float32"),
relay.ty.TensorType((d1, 2, d3, d4), "float32"),
relay.ty.TensorType((d1, 3, d3, d4), "float32"),
relay.ty.TensorType((d1, (d2 - 7), d3, d4), "float32"),
]
)
),
1,
),
)
def test_split(self, dshape, indices_or_sections, ret_type, axis):
x = relay.var("x", relay.ty.TensorType(dshape, "float32"))
y = relay.split(x, indices_or_sections, axis=axis)
yy = run_infer_type(y.astuple())
assert yy.checked_type == ret_type
def test_full_infer_type():
# default settings: match input dtype
x = relay.var("x", relay.TensorType((), "int8"))
y = relay.full(x, ())
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((), "int8")
# change the shape and dtype
x = relay.var("x", relay.TensorType((), "float32"))
y = relay.full(x, (1, 2), "int8")
"shape=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((1, 2), "int8")
class TestFull:
fill_value, arr_shape, dtype = tvm.testing.parameters(
(4, (1, 3, 4, 4), "int32"),
(4, (1, 3, 4, 4), "int64"),
(4.0, (1, 4), "float32"),
)
def test_full(self, target, dev, executor_kind, fill_value, arr_shape, dtype):
x = relay.var("x", relay.scalar_type(dtype))
z = relay.full(x, arr_shape, dtype)
func = relay.Function([x], z)
ref_res = np.full(arr_shape, fill_value, dtype=dtype)
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
np.array(fill_value, dtype)
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
def test_full_like(self, target, dev, executor_kind, arr_shape, fill_value, dtype):
x_data = np.random.uniform(low=-1, high=1, size=arr_shape).astype(dtype)
x = relay.var("x", relay.TensorType(arr_shape, dtype))
y = relay.var("y", relay.scalar_type(dtype))
z = relay.full_like(x, y)
func = relay.Function([x, y], z)
ref_res = np.full_like(x_data, fill_value)
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_data, np.array(fill_value, dtype)
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
def test_full_like_infer_type():
# concrete shape
base = relay.var("base", relay.TensorType((1, 2, 3), "float32"))
fill = relay.var("fill", relay.TensorType((), "float32"))
y = relay.full_like(base, fill)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((1, 2, 3), "float32")
# symbolic shape
n, c, h, w = te.size_var("n"), 2, 3, te.size_var("w")
base = relay.var("base", relay.TensorType((n, c, h, w), "float32"))
fill = relay.var("fill", relay.TensorType((), "float32"))
y = relay.full_like(base, fill)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, h, w), "float32")
def test_infer_type_leaky_relu(target, dev):
n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), te.size_var("w")
x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
y = relay.nn.leaky_relu(x, alpha=0.1)
"alpha=0.1" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, h, w), "float32")
shape = (1, 5, 10, 10)
dtype = "float32"
x = relay.var("x", relay.TensorType(shape, dtype))
z = relay.nn.leaky_relu(x, alpha=0.1)
assert "alpha=0.1" in z.astext()
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType(shape, dtype)
func = relay.Function([x], z)
x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
ref_res = np.where(x_data > 0, x_data, x_data * 0.1)
op_res1 = relay.create_executor("graph", device=dev, target=target).evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5)
op_res2 = relay.create_executor("debug", device=dev, target=target).evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res2.numpy(), ref_res, rtol=1e-5)
class TestInferTypePrelu:
dtype = tvm.testing.parameter("float32")
n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), te.size_var("w")
data, alpha, axis, output = tvm.testing.parameters(
((n, c, h, w), (c,), 1, (n, c, h, w)),
((n, h, w, c), (c,), 3, (n, h, w, c)),
((n, c, h, w), None, 1, (n, c, h, w)),
((n, h, w, c), None, 3, (n, h, w, c)),
((1, 3, 2, 2), (3,), 1, (1, 3, 2, 2)),
((1, 2, 2, 3), (3,), 3, (1, 2, 2, 3)),
((1, 3, 2, 2), None, 1, (1, 3, 2, 2)),
((1, 2, 2, 3), None, 3, (1, 2, 2, 3)),
)
def test_infer_type_prelu(self, target, dev, data, alpha, axis, output, dtype):
x = relay.var("data", relay.TensorType(data, dtype))
if alpha:
y = relay.var("alpha", relay.TensorType(alpha, dtype))
else:
y = relay.var("alpha", relay.IncompleteType())
z = relay.nn.prelu(x, y, axis=axis)
zz = run_infer_type(z)
if axis != 1:
assert "axis" in z.astext()
assert zz.checked_type == relay.ty.TensorType(output, dtype)
if not alpha:
axis = axis if axis else 1
alpha_shape = (data[axis],)
assert zz.args[1].checked_type == relay.TensorType(alpha_shape, "float32")
        if all(isinstance(v, tvm.tir.Var) for v in data) or not alpha:
return
func = relay.Function([x, y], z)
x_data = np.random.uniform(low=-1, high=1, size=data).astype(dtype)
a_data = np.random.uniform(low=-1, high=1, size=alpha).astype(dtype)
if axis == 1:
ref_res = (x_data < 0) * (x_data * a_data.reshape(3, 1, 1)) + (x_data >= 0) * x_data
else:
ref_res = (x_data < 0) * (x_data * a_data.reshape(1, 1, 3)) + (x_data >= 0) * x_data
op_res1 = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
x_data, a_data
)
tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5)
op_res2 = relay.create_executor("debug", device=dev, target=target).evaluate(func)(
x_data, a_data
)
tvm.testing.assert_allclose(op_res2.numpy(), ref_res, rtol=1e-5)
class TestArange:
dtype = tvm.testing.parameter("float32")
start, stop, step = tvm.testing.parameters(
(None, 20, None),
(None, 20, 2),
(1, 20, None),
(1, 20, 2),
        # arange doesn't support floating point right now, see type relation
# (1, 20, 1.5),
(1, 20.5, None),
(1, 20, 3),
(20, 1, -1),
        # arange doesn't support floating point right now, see type relation
# (20, 1, -1.5),
)
def test_arange(self, target, dev, executor_kind, start, stop, step, dtype):
if start is None and step is None:
x = relay.arange(relay.const(stop, dtype=dtype))
ref_res = np.arange(stop).astype(dtype)
elif start is None:
x = relay.arange(relay.const(stop, dtype=dtype), step=relay.const(step, dtype=dtype))
ref_res = np.arange(stop, step=step).astype(dtype)
elif step is None:
x = relay.arange(relay.const(start, dtype=dtype), relay.const(stop, dtype=dtype))
ref_res = np.arange(start, stop).astype(dtype)
else:
x = relay.arange(
relay.const(start, dtype=dtype),
relay.const(stop, dtype=dtype),
relay.const(step, dtype=dtype),
)
ref_res = np.arange(start, stop, step).astype(dtype)
func = relay.Function([], x)
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)()
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
class TestMeshgrid:
lengths, indexing = tvm.testing.parameters(
([3, 5], "ij"),
([4, 2], "xy"),
([3, 5, 2], "ij"),
([3, 1, 5], "xy"),
# Length 0 signifies scalar.
([3, 5, 0], "ij"),
)
def test_meshgrid(self, target, dev, executor_kind, lengths, indexing="ij"):
input_vars = []
input_data = []
for i, length in enumerate(lengths):
input_name = "x_{}".format(i)
if length == 0:
# Scalar
input_vars.append(relay.var(input_name, relay.scalar_type("float32")))
input_data.append(np.array(1, "float32"))
else:
input_vars.append(relay.var(input_name, relay.TensorType((length,), "float32")))
input_data.append(np.arange(length).astype("float32"))
z = relay.meshgrid(input_vars, indexing=indexing).astuple()
func = relay.Function(input_vars, z)
# Get ref
ref_res = np.meshgrid(*input_data, indexing=indexing)
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
*input_data
)
assert len(op_res) == len(ref_res)
for i in range(len(op_res)):
tvm.testing.assert_allclose(op_res[i].numpy(), ref_res[i], rtol=1e-5)
class TestTile:
dshape, reps = tvm.testing.parameters(
((2, 3, 4), (3, 2, 1)),
((2, 3, 4), (1, 2)),
((2, 3), (3, 2, 1)),
)
def test_tile(self, target, dev, executor_kind, dshape, reps):
x = relay.var("x", relay.TensorType(dshape, "float32"))
z = relay.tile(x, reps=reps)
func = relay.Function([x], z)
x_data = np.random.uniform(low=-1, high=1, size=dshape).astype("float32")
ref_res = np.tile(x_data, reps=reps)
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
class TestRepeat:
dshape, repeats, axis = tvm.testing.parameters(
((3,), 2, 0),
((3, 10), 2, -1),
((3, 2, 4), 3, 1),
)
def test_repeat(self, target, dev, executor_kind, dshape, repeats, axis):
x = relay.Var("x", relay.TensorType(dshape, "float32"))
func = relay.Function([x], relay.repeat(x, repeats, axis))
data = np.random.uniform(size=dshape).astype("float32")
ref_res = np.repeat(data, repeats, axis)
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
class TestStack:
dshapes, axis = tvm.testing.parameters(
([(2,), (2,), (2,)], -1),
([(2,), (2,), (2,)], 0),
([(2, 2, 4), (2, 2, 4), (2, 2, 4)], 1),
([(2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4)], -1),
([(2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4)], 4),
)
expr_type = tvm.testing.parameter("tuple", "list", "tuple_expr")
@tvm.testing.fixture
def ref_data(self, dshapes, axis):
np_in = [np.random.normal(size=shape).astype("float32") for shape in dshapes]
np_out = np.stack(np_in, axis=axis)
return np_in, np_out
@tvm.testing.fixture
def input_expr(self, dshapes, axis, expr_type, ref_data):
input_vars = [relay.var("input", relay.TensorType(shape, "float32")) for shape in dshapes]
if expr_type == "tuple":
input_expr = relay.Tuple(input_vars)
elif expr_type == "list":
input_expr = input_vars
elif expr_type == "tuple_expr":
# expression that evaluates to a tuple
# but is not a tuple literal
np_in, np_out = ref_data
x = relay.Var("x")
input_expr = relay.Let(x, relay.Tuple([relay.const(inp) for inp in np_in]), x)
else:
raise ValueError(f"Unknown expr_type '{expr_type}'")
return input_expr
def test_stack(self, target, dev, executor_kind, input_expr, ref_data, axis):
z = relay.stack(input_expr, axis=axis)
inp_vars = relay.analysis.free_vars(z)
func = relay.Function(inp_vars, z)
np_in, np_out = ref_data
relay_args = np_in if inp_vars else []
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
*relay_args
)
tvm.testing.assert_allclose(op_res.numpy(), np_out, rtol=1e-5)
class TestReverse:
dshape, axis = tvm.testing.parameters(
((2, 3, 4), 1),
((4, 7), 0),
((2, 3, 4), -1),
)
def test_reverse(self, target, dev, executor_kind, dshape, axis):
x = relay.var("x", relay.TensorType(dshape, "float32"))
z = relay.reverse(x, axis=axis)
zz = run_infer_type(z)
func = relay.Function([x], z)
x_data = np.random.uniform(low=-1, high=1, size=dshape).astype("float32")
ref_res = np.flip(x_data, axis)
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
def test_reverse_sequence(target, dev, executor_kind):
def verify_reverse_sequence(x_data, seq_lengths, batch_axis, seq_axis, ref_res):
seq_lengths_data = np.array(seq_lengths).astype("int32")
x = relay.var("x", relay.TensorType(x_data.shape, str(x_data.dtype)))
z = relay.reverse_sequence(x, relay.const(seq_lengths_data), seq_axis, batch_axis)
zz = run_infer_type(z)
assert zz.checked_type == x.type_annotation
func = relay.Function([x], z)
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype("int32")
result = [[0, 5, 10, 15], [4, 1, 6, 11], [8, 9, 2, 7], [12, 13, 14, 3]]
verify_reverse_sequence(indata, [1, 2, 3, 4], 1, 0, np.array(result))
verify_reverse_sequence(indata, [1, 2, 3, 4], -1, 0, np.array(result))
verify_reverse_sequence(
indata.astype("float32"), [1, 2, 3, 4], 1, 0, np.array(result).astype("float32")
)
indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype("int32")
result = [[0, 1, 2, 3], [5, 4, 6, 7], [10, 9, 8, 11], [15, 14, 13, 12]]
verify_reverse_sequence(indata, [1, 2, 3, 4], 0, 1, np.array(result))
verify_reverse_sequence(indata, [1, 2, 3, 4], 0, -1, np.array(result))
verify_reverse_sequence(
indata.astype("float32"), [1, 2, 3, 4], 0, 1, np.array(result).astype("float32")
)
indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype("int32")
result = [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [15, 14, 13, 12]]
verify_reverse_sequence(indata, [-1, 0, 1, 5], 0, 1, np.array(result))
indata = np.array(np.arange(0, 54)).reshape([2, 3, 3, 3]).astype("int32")
result = [
[
[[18, 19, 20], [21, 22, 23], [24, 25, 26]],
[[9, 10, 11], [12, 13, 14], [15, 16, 17]],
[[0, 1, 2], [3, 4, 5], [6, 7, 8]],
],
[
[[45, 46, 47], [48, 49, 50], [51, 52, 53]],
[[36, 37, 38], [39, 40, 41], [42, 43, 44]],
[[27, 28, 29], [30, 31, 32], [33, 34, 35]],
],
]
verify_reverse_sequence(indata, [3, 3], 0, 1, np.array(result))
indata = np.array(np.arange(0, 54)).reshape([2, 3, 3, 3]).astype("int32")
result = [
[
[[9, 10, 11], [21, 22, 23], [15, 16, 17]],
[[0, 1, 2], [12, 13, 14], [6, 7, 8]],
[[18, 19, 20], [3, 4, 5], [24, 25, 26]],
],
[
[[36, 37, 38], [48, 49, 50], [42, 43, 44]],
[[27, 28, 29], [39, 40, 41], [33, 34, 35]],
[[45, 46, 47], [30, 31, 32], [51, 52, 53]],
],
]
verify_reverse_sequence(indata, [2, 3, 2], 2, 1, np.array(result))
indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype("int32")
result = []
with pytest.raises(Exception) as execinfo:
verify_reverse_sequence(indata, [2, 3, 2, 4, 5], 1, 0, np.array(result))
assert (
"For reverse_sequnece seq_lengths size should match with dimension of batch axis,"
" but got dimension of batch_axis = 4, and seq_length size = 5" in execinfo.value.args[0]
)
def ref_scatter(data, indices, updates, axis=0):
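    """NumPy reference for relay.op.scatter: return a copy of `data` in which each
    element of `updates` is written at the position obtained by replacing the `axis`
    coordinate with the corresponding entry of `indices`."""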
idx = np.indices(indices.shape).reshape(indices.ndim, -1)
updated_idx = np.copy(idx)
indices = indices.reshape(-1)
for i in range(len(indices)):
updated_idx[axis, i] = indices[i]
scattered = np.copy(data)
scattered[tuple(updated_idx)] = updates[tuple(idx)]
return scattered
def test_scatter(target, dev, executor_kind):
def verify_scatter(dshape, ishape, axis=0, indices_dtype="int64"):
d = relay.var("d", relay.TensorType(dshape, "float32"))
i = relay.var("i", relay.TensorType(ishape, indices_dtype))
u = relay.var("u", relay.TensorType(ishape, "float32"))
z = relay.op.scatter(d, i, u, axis)
func = relay.Function([d, i, u], z)
data_np = np.random.uniform(size=dshape).astype("float32")
updates_np = np.random.uniform(size=ishape).astype("float32")
indices_np = np.random.randint(0, dshape[axis] - 1, ishape).astype(indices_dtype)
ref_res = ref_scatter(data_np, indices_np, updates_np, axis)
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
data_np, indices_np, updates_np
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
verify_scatter((10,), (10,), 0)
verify_scatter((10, 5), (10, 5), -2)
verify_scatter((10, 5), (10, 5), -1)
verify_scatter((10, 5), (3, 5), 0)
verify_scatter((12, 4), (7, 2), 1)
verify_scatter((2, 3, 4), (1, 3, 4), 0)
verify_scatter((2, 3, 4), (2, 1, 4), 1)
verify_scatter((2, 3, 4), (2, 3, 1), 2)
verify_scatter((4, 2, 1), (1, 1, 1), 0)
verify_scatter((2, 3, 4, 5), (1, 3, 4, 5), 0)
verify_scatter((6, 3, 4, 5), (2, 3, 4, 5), 1)
verify_scatter((2, 3, 8, 5), (2, 3, 1, 1), 2)
verify_scatter((16, 16, 4, 5), (16, 16, 4, 5), 3)
verify_scatter((16, 16, 4, 5), (16, 16, 4, 5), 3, indices_dtype="uint32")
class TestDynamicScatter:
dshape, ishape, axis = tvm.testing.parameters(
((10,), (10,), 0),
((10, 5), (10, 5), -2),
((10, 5), (10, 5), -1),
((10, 5), (3, 5), 0),
((12, 4), (7, 2), 1),
((2, 3, 4), (1, 3, 4), 0),
((2, 3, 4), (2, 1, 4), 1),
((2, 3, 4), (2, 3, 1), 2),
((4, 2, 1), (1, 1, 1), 0),
((2, 3, 4, 5), (1, 3, 4, 5), 0),
((6, 3, 4, 5), (2, 3, 4, 5), 1),
((2, 3, 8, 5), (2, 3, 1, 1), 2),
((16, 16, 4, 5), (16, 16, 4, 5), 3),
)
@pytest.mark.parametrize("executor_kind", ["vm", "debug"])
def test_dynamic_scatter(self, target, dev, executor_kind, dshape, ishape, axis):
d = relay.var("d", relay.TensorType([relay.Any() for i in range(len(dshape))], "float32"))
i = relay.var("i", relay.TensorType([relay.Any() for i in range(len(ishape))], "int64"))
u = relay.var("u", relay.TensorType([relay.Any() for i in range(len(ishape))], "float32"))
z = relay.op.scatter(d, i, u, axis)
func = relay.Function([d, i, u], z)
data_np = np.random.uniform(size=dshape).astype("float32")
updates_np = np.random.uniform(size=ishape).astype("float32")
indices_np = np.random.randint(-dshape[axis], dshape[axis] - 1, ishape).astype("int64")
ref_res = ref_scatter(data_np, indices_np, updates_np, axis)
mod = tvm.ir.IRModule.from_expr(func)
op_res = relay.create_executor(
executor_kind, mod=mod, device=dev, target=target
).evaluate()(data_np, indices_np, updates_np)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
class TestScatterAdd:
dshape, ishape, axis, dtype, indice_dtype = tvm.testing.parameters(
((10,), (10,), 0, "int32", "int64"),
((1000,), (1000,), 0, "int32", "int64"),
((10, 5), (10, 5), -2, "float32", "int64"),
((10, 5), (10, 5), -1, "float32", "int64"),
((10, 5), (3, 5), 0, "float32", "int64"),
((12, 4), (7, 2), 1, "float32", "int64"),
((2, 3, 4), (1, 3, 4), 0, "float32", "int64"),
((2, 3, 4), (2, 1, 4), 1, "float32", "int64"),
((2, 3, 4), (2, 3, 1), 2, "float32", "int64"),
((2, 3, 4, 5), (1, 3, 4, 5), 0, "float32", "int64"),
((6, 3, 4, 5), (2, 3, 4, 5), 1, "float32", "int64"),
((2, 3, 8, 5), (2, 3, 1, 1), 2, "float32", "int64"),
((16, 16, 4, 5), (16, 16, 4, 5), 3, "float32", "int64"),
((16, 16, 4, 5), (16, 16, 4, 5), 3, "float32", "uint32"),
)
@tvm.testing.fixture(cache_return_value=True)
def ref_data(self, dshape, ishape, axis, dtype, indice_dtype):
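        """Generate random data/updates/indices and accumulate the updates into a copy
        of the data at the indexed positions along `axis`, giving the expected
        scatter_add output."""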
data_np = np.random.uniform(size=dshape).astype(dtype)
updates_np = np.random.uniform(size=ishape).astype(dtype)
indices_np = np.random.randint(0, dshape[axis] - 1, ishape).astype(indice_dtype)
out_np = np.copy(data_np)
for index in np.ndindex(*indices_np.shape):
new_index = list(index)
new_index[axis] = indices_np[index]
out_np[tuple(new_index)] += updates_np[index]
return data_np, updates_np, indices_np, out_np
# Optimization can produce tir.atomic_add, not currently supported
# on vulkan runtime.
@tvm.testing.known_failing_targets("vulkan")
def test_scatter_add(self, target, dev, ref_data, dshape, ishape, axis, dtype, indice_dtype):
d = relay.var("d", relay.TensorType(shape=[relay.Any() for _ in dshape], dtype=dtype))
i = relay.var(
"i", relay.TensorType(shape=[relay.Any() for _ in ishape], dtype=indice_dtype)
)
u = relay.var("u", relay.TensorType(shape=[relay.Any() for _ in ishape], dtype=dtype))
z = relay.op.scatter_add(d, i, u, axis)
func = relay.Function([d, i, u], z)
data_np, updates_np, indices_np, out_np = ref_data
verify_func(target, dev, func, [data_np, indices_np, updates_np], out_np)
@pytest.mark.parametrize(
"data, axis, indices, ref_res",
[
([[1, 2], [3, 4]], 1, [[0, 0], [1, 0]], [[1, 1], [4, 3]]),
([[1, 2], [3, 4]], -1, [[0, 0], [1, 0]], [[1, 1], [4, 3]]),
(
[[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]]],
0,
[[[1, 0, 1], [1, 1, 0]]],
[[[6, 1, 8], [9, 10, 5]]],
),
(
[[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]]],
-3,
[[[1, 0, 1], [1, 1, 0]]],
[[[6, 1, 8], [9, 10, 5]]],
),
(
[
[
[-0.2321, -0.2024, -1.7624],
[-0.3829, -0.4246, 0.2448],
[0.1822, 0.2360, -0.8965],
[0.4497, -0.2224, 0.6103],
],
[
[0.0408, -0.7667, -0.4303],
[-0.3216, 0.7489, -0.1502],
[0.0144, -0.4699, -0.0064],
[-0.0768, -1.6064, 1.3390],
],
],
1,
[[[2, 2, 0], [1, 0, 3]], [[3, 2, 0], [1, 0, 0]]],
[
[[0.1822, 0.2360, -1.7624], [-0.3829, -0.2024, 0.6103]],
[[-0.0768, -0.4699, -0.4303], [-0.3216, -0.7667, -0.4303]],
],
),
(
[
[
[-0.2321, -0.2024, -1.7624],
[-0.3829, -0.4246, 0.2448],
[0.1822, 0.2360, -0.8965],
[0.4497, -0.2224, 0.6103],
],
[
[0.0408, -0.7667, -0.4303],
[-0.3216, 0.7489, -0.1502],
[0.0144, -0.4699, -0.0064],
[-0.0768, -1.6064, 1.3390],
],
],
-2,
[[[2, 2, 0], [1, 0, 3]], [[3, 2, 0], [1, 0, 0]]],
[
[[0.1822, 0.2360, -1.7624], [-0.3829, -0.2024, 0.6103]],
[[-0.0768, -0.4699, -0.4303], [-0.3216, -0.7667, -0.4303]],
],
),
(
[
[
[-0.2321, -0.2024, -1.7624],
[-0.3829, -0.4246, 0.2448],
[0.1822, 0.2360, -0.8965],
[0.4497, -0.2224, 0.6103],
],
[
[0.0408, -0.7667, -0.4303],
[-0.3216, 0.7489, -0.1502],
[0.0144, -0.4699, -0.0064],
[-0.0768, -1.6064, 1.3390],
],
],
-2,
[[[2, 2, 0], [1, 0, 3]], [[3, 2, 0], [1, 0, 0]]],
[
[[0.1822, 0.2360, -1.7624], [-0.3829, -0.2024, 0.6103]],
[[-0.0768, -0.4699, -0.4303], [-0.3216, -0.7667, -0.4303]],
],
),
(
[
[
[0.3050, 1.6986, 1.1034],
[0.7020, -0.6960, -2.1818],
[0.3116, -0.5773, -0.9912],
[0.0835, -1.3915, -1.0720],
],
[
[0.1694, -0.6091, -0.6539],
[-0.5234, -0.1218, 0.5084],
[0.2374, -1.9537, -2.0078],
[-0.5700, -1.0302, 0.1558],
],
],
2,
[
[[1, 1, 0, 1], [0, 0, 2, 2], [1, 2, 1, 2], [2, 2, 1, 0]],
[[0, 0, 1, 2], [2, 2, 1, 0], [1, 2, 0, 0], [0, 2, 0, 2]],
],
[
[
[1.6986, 1.6986, 0.3050, 1.6986],
[0.7020, 0.7020, -2.1818, -2.1818],
[-0.5773, -0.9912, -0.5773, -0.9912],
[-1.0720, -1.0720, -1.3915, 0.0835],
],
[
[0.1694, 0.1694, -0.6091, -0.6539],
[0.5084, 0.5084, -0.1218, -0.5234],
[-1.9537, -2.0078, 0.2374, 0.2374],
[-0.5700, 0.1558, -0.5700, 0.1558],
],
],
),
(
[
[
[0.3050, 1.6986, 1.1034],
[0.7020, -0.6960, -2.1818],
[0.3116, -0.5773, -0.9912],
[0.0835, -1.3915, -1.0720],
],
[
[0.1694, -0.6091, -0.6539],
[-0.5234, -0.1218, 0.5084],
[0.2374, -1.9537, -2.0078],
[-0.5700, -1.0302, 0.1558],
],
],
-1,
[
[[1, 1, 0, 1], [0, 0, 2, 2], [1, 2, 1, 2], [2, 2, 1, 0]],
[[0, 0, 1, 2], [2, 2, 1, 0], [1, 2, 0, 0], [0, 2, 0, 2]],
],
[
[
[1.6986, 1.6986, 0.3050, 1.6986],
[0.7020, 0.7020, -2.1818, -2.1818],
[-0.5773, -0.9912, -0.5773, -0.9912],
[-1.0720, -1.0720, -1.3915, 0.0835],
],
[
[0.1694, 0.1694, -0.6091, -0.6539],
[0.5084, 0.5084, -0.1218, -0.5234],
[-1.9537, -2.0078, 0.2374, 0.2374],
[-0.5700, 0.1558, -0.5700, 0.1558],
],
],
),
],
)
def test_gather(target, dev, executor_kind, data, axis, indices, ref_res):
def verify_gather(data, axis, indices, ref_res):
data = np.asarray(data, dtype="float32")
indices = np.asarray(indices, dtype="int32")
ref_res = np.asarray(ref_res)
d = relay.var("x", relay.TensorType(data.shape, "float32"))
i = relay.var("y", relay.TensorType(indices.shape, "int32"))
z = relay.gather(d, axis, i)
func = relay.Function([d, i], z)
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
data, indices
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
verify_gather(data, axis, indices, ref_res)
def test_gather_nd(target, dev, executor_kind):
def verify_gather_nd(xshape, yshape, y_data, batch_dims=0, indices_dtype="int32"):
x = relay.var("x", relay.TensorType(xshape, "float32"))
y = relay.var("y", relay.TensorType(yshape, indices_dtype))
z = relay.gather_nd(x, y, batch_dims)
func = relay.Function([x, y], z)
x_data = np.random.uniform(size=xshape).astype("float32")
if y_data:
y_data = np.array(y_data, dtype=indices_dtype)
else:
y_data = np.random.randint(low=0, high=2, size=yshape, dtype=indices_dtype)
ref_res = ref_funcs.gather_nd(x_data, y_data, batch_dims)
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_data, y_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
verify_gather_nd((2, 2), (2, 3), [[1, 1, 0], [0, 1, 0]])
verify_gather_nd((2, 2, 2), (2, 2), [[0, 1], [1, 0]])
verify_gather_nd((3, 2, 2), (2, 2), [[0, 1], [1, 0]])
verify_gather_nd((3, 2), (2, 2, 3), [[[0, 1, 2], [2, 0, 1]], [[0, 0, 0], [1, 1, 1]]])
# Examples from tensorflow gather_nd doc
# https://www.tensorflow.org/api_docs/python/tf/gather_nd
verify_gather_nd((2, 2, 2), (1, 2), [[1, 0]], 1)
verify_gather_nd((2, 2, 2), (1, 2, 1), [[[1], [0]]], 1)
verify_gather_nd((2, 2, 2), (2, 2, 1), [[[1], [0]], [[0], [1]]], 1)
# Test cases from tensorflow gather_nd tests kernel_tests/array_ops_test.py
verify_gather_nd((2, 2, 2), (1, 2), None, 1)
verify_gather_nd((2, 2, 2), (2, 2), None, 1)
verify_gather_nd((2, 2, 3, 2), (3, 2), None, 1)
verify_gather_nd((2, 2, 3, 2), (2, 2), None, 1)
verify_gather_nd((2, 2, 3, 2), (1, 2), None, 1)
verify_gather_nd((2, 2, 3, 2), (3, 2, 1), None, 1)
verify_gather_nd((2, 2, 3, 2), (2, 2, 2), None, 1)
verify_gather_nd((2, 2, 3, 2), (1, 2, 3), None, 1)
verify_gather_nd((3, 2, 2, 3, 4), (3, 3, 2), None, 2)
verify_gather_nd((3, 2, 2, 3, 4), (2, 3, 2), None, 2)
verify_gather_nd((3, 2, 2, 3, 4), (1, 3, 2), None, 2)
verify_gather_nd((3, 2, 2, 3, 4), (3, 3, 2, 1), None, 2)
verify_gather_nd((3, 2, 2, 3, 4), (2, 3, 2, 2), None, 2)
verify_gather_nd((3, 2, 2, 3, 4), (1, 3, 2, 3), None, 2)
verify_gather_nd((3, 2, 2, 3, 4), (1, 3, 2, 3), None, 2, indices_dtype="uint8")
verify_gather_nd((2, 2, 2), (2, 2, 1), [[[1], [0]], [[0], [1]]], 1, indices_dtype="uint32")
def _verify_infiniteness_ops(relay_op, ref_op):
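    """Check a relay infiniteness op against its NumPy counterpart for several dtypes,
    injecting inf/nan values into floating-point inputs before comparing results."""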
for dtype in ["float32", "float16", "float16", "int32", "int16"]:
shape = (2, 8, 8)
x = relay.var("x", relay.TensorType(shape, dtype))
y = relay_op(x)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(shape, "bool")
data = np.random.uniform(size=shape).astype(dtype)
if dtype.startswith("float"):
data.ravel()[
np.random.choice(data.size, int(data.size * 0.5), replace=False)
            ] = np.inf
data.ravel()[np.random.choice(data.size, int(data.size * 0.5), replace=False)] = np.nan
op_res = create_executor().evaluate(y, {x: data})
ref_res = ref_op(data)
np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=0.01)
def test_isfinite():
_verify_infiniteness_ops(relay.isfinite, np.isfinite)
def test_isinf():
_verify_infiniteness_ops(relay.isinf, np.isinf)
def test_unravel_index(target, dev, executor_kind):
def verify_unravel_index(indices, shape, dtype):
x_data = np.array(indices).astype(dtype)
y_data = np.array(shape).astype(dtype)
x = relay.var("x", relay.TensorType(x_data.shape, dtype))
y = relay.var("y", relay.TensorType(y_data.shape, dtype))
z = relay.unravel_index(x, y)
zz = run_infer_type(z)
if len(x_data.shape) == 1:
out_shape = [y_data.shape[0], x_data.shape[0]]
else:
out_shape = [y_data.shape[0]]
assert zz.checked_type == relay.ty.TensorType(out_shape, dtype)
func = relay.Function([x, y], z)
ref_res = np.unravel_index(x_data, y_data)
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
x_data, y_data
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
for dtype in ["int64", "int32"]:
verify_unravel_index([0, 1, 2, 3], [2, 2], dtype)
verify_unravel_index([144], [5, 5, 5, 2], dtype)
verify_unravel_index(144, [5, 5, 5, 2], dtype)
verify_unravel_index([100, 13, 5], [5, 5, 5, 2], dtype)
        # In the example below, 5 is out of bounds for an array of size 4.
        # The numpy implementation throws an error for it, while the TVM
        # implementation does not; instead it produces output in line with
        # TensorFlow.
# verify_unravel_index([0, 1, 2, 5], [2, 2], dtype)
def test_sparse_to_dense(target, dev, executor_kind):
    def verify_sparse_to_dense(sparse_indices, sparse_values, default_value, output_shape, expected):
sparse_indices_data = np.array(sparse_indices)
sparse_values_data = np.array(sparse_values)
default_value_data = np.array(default_value)
a = relay.var(
"a", relay.TensorType(sparse_indices_data.shape, str(sparse_indices_data.dtype))
)
b = relay.var(
"b", relay.TensorType(sparse_values_data.shape, str(sparse_values_data.dtype))
)
if default_value is None:
args = [a, b]
d = relay.sparse_to_dense(a, output_shape, b)
else:
c = relay.var(
"c", relay.TensorType(default_value_data.shape, str(default_value_data.dtype))
)
args = [a, b, c]
d = relay.sparse_to_dense(a, output_shape, b, c)
zz = run_infer_type(d)
assert zz.checked_type == relay.ty.TensorType(output_shape, str(sparse_values_data.dtype))
func = relay.Function(args, d)
f = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)
if default_value is None:
op_res = f(sparse_indices_data, sparse_values_data)
else:
op_res = f(sparse_indices_data, sparse_values_data, default_value_data)
        tvm.testing.assert_allclose(op_res.numpy(), expected, rtol=1e-5)
verify_sparse_to_dense(1, 3, 0, [5], [0, 3, 0, 0, 0]) # scalar
verify_sparse_to_dense([0, 1, 4], [3, 3, 3], 0, [5], [3, 3, 0, 0, 3]) # vector
verify_sparse_to_dense(
[[0, 0], [1, 2]], [1, 2], 0, [3, 4], [[1, 0, 0, 0], [0, 0, 2, 0], [0, 0, 0, 0]]
) # nXd
verify_sparse_to_dense(
[[0, 0, 0], [1, 2, 3]],
[1, 2],
4,
[2, 3, 4],
[[[1, 4, 4, 4], [4, 4, 4, 4], [4, 4, 4, 4]], [[4, 4, 4, 4], [4, 4, 4, 4], [4, 4, 4, 2]]],
) # nXd
verify_sparse_to_dense(
[0, 1, 4], [3.1, 3.1, 3.1], 3.5, [5], [3.1, 3.1, 3.5, 3.5, 3.1]
) # floats
# default value not specified
verify_sparse_to_dense(1, 3, None, [5], [0, 3, 0, 0, 0])
# negative test cases
# sparse indices should be ints
# verify_sparse_to_dense([[0.1, 1.1, 4.1], [0,2,4]], [3.1, 3.1, 3.1], 3.5, [5], [3.1, 3.1, 3.5, 3.5, 3.1])
# sparse_values should be 0d or 1d only
# verify_sparse_to_dense([[0, 1, 4], [0, 2, 4]], [[[3.1, 3.1, 3.1]]], 3.5, [5], [3.1, 3.1, 3.5, 3.5, 3.1])
# sparse_indices should not be > 2d tensor
# verify_sparse_to_dense([[[[0, 1, 4], [0, 2, 4]]]], [[[[3.1, 3.1, 3.1]]]], 3.5, [5], [3.1, 3.1, 3.5, 3.5, 3.1])
class TestSparseReshape:
sparse_indices_np, sparse_values_np, prev_shape_np, new_shape_np = tvm.testing.parameters(
(
np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0], [1, 2, 3]], dtype=np.int32),
np.array([7, 5, 6, 3, 9], dtype=np.int32),
np.array([2, 3, 6], dtype=np.int32),
np.array([9, -1], dtype=np.int32),
),
(
np.array(
[[0, 0, 0, 0], [0, 0, 1, 2], [0, 1, 0, 3], [1, 0, 0, 4], [1, 2, 3, 6]],
dtype=np.int64,
),
np.array([7, 5, 6, 3, 9], dtype=np.int64),
np.array([2, 3, 6, 7], dtype=np.int64),
np.array([9, -1, 7], dtype=np.int64),
),
(
np.array(
[
[0, 0, 0, 0, 0],
[0, 0, 1, 2, 3],
[0, 1, 0, 3, 5],
[1, 0, 0, 4, 6],
[1, 2, 3, 6, 8],
],
dtype=np.int64,
),
np.array([7, 5, 6, 3, 9], dtype=np.int64),
np.array([2, 3, 6, 7, 9], dtype=np.int64),
np.array([9, -1, 7], dtype=np.int64),
),
(
np.array([[0, 0], [0, 1], [3, 4], [4, 3], [7, 3]], dtype=np.int32),
np.array([7, 5, 6, 3, 9], dtype=np.int32),
np.array([9, 4], dtype=np.int32),
np.array([2, -1, 6], dtype=np.int32),
),
(
np.array([[0, 0], [0, 1], [3, 4], [4, 3], [7, 3]], dtype=np.int64),
np.array([7, 5, 6, 3, 9], dtype=np.int64),
np.array([9, 4], dtype=np.int64),
np.array([-1], dtype=np.int64),
),
(
np.array([[0], [5], [10], [20], [24]], dtype=np.int32),
np.array([7, 5, 6, 3, 9], dtype=np.int32),
np.array([25], dtype=np.int32),
np.array([5, 5], dtype=np.int32),
),
(
np.array([[0, 100], [200, 100], [300, 400], [50, 20], [400, 50]], dtype=np.int64),
np.array([7, 5, 6, 3, 9], dtype=np.int64),
np.array([500, 20], dtype=np.int64),
np.array([500, 20], dtype=np.int64),
),
(
np.array([[0, 100], [200, 100], [300, 400], [50, 20], [400, 50]], dtype=np.int32),
np.array([7, 5, 6, 3, 9], dtype=np.int32),
np.array([500, 20], dtype=np.int32),
np.array([500, -1], dtype=np.int32),
),
(
np.array([[0, 100], [200, 100], [300, 400], [50, 20], [400, 50]], dtype=np.int64),
np.array([7, 5, 6, 3, 9], dtype=np.int64),
np.array([500, 20], dtype=np.int64),
np.array([250, 40], dtype=np.int64),
),
(
np.ones((0, 1), dtype=np.int32),
np.array([], dtype=np.int32),
np.array([4], dtype=np.int32),
np.array([2, -1], dtype=np.int32),
),
(
np.ones((0, 1), dtype=np.int64),
np.array([], dtype=np.int64),
np.array([4], dtype=np.int64),
np.array([2, 2], dtype=np.int64),
),
(
np.ones((0, 2), dtype=np.int32),
np.array([], dtype=np.int32),
np.array([3, 6], dtype=np.int32),
np.array([-1, 2], dtype=np.int32),
),
)
use_dyn = tvm.testing.parameter(True, False, ids=["dyn", "static"])
@tvm.testing.fixture(cache_return_value=True)
def ref_res(
self,
sparse_indices_np: np.ndarray,
prev_shape_np: np.ndarray,
new_shape_np: np.ndarray,
):
"""
        This function calculates the expected output of the sparse_reshape operator given the inputs.
"""
new_sparse_indices = np.ones(
(sparse_indices_np.shape[0], new_shape_np.shape[0]), dtype=sparse_indices_np.dtype
)
multipliers = np.ones(prev_shape_np.shape[0])
dividers = np.ones(new_shape_np.shape[0])
total_ele = np.prod(prev_shape_np)
division_total_ele = 1
for i in range(new_shape_np.shape[0]):
if new_shape_np[i] == -1:
continue
division_total_ele *= new_shape_np[i]
for i in range(prev_shape_np.shape[0] - 2, -1, -1):
multipliers[i] = prev_shape_np[i + 1] * multipliers[i + 1]
for i in range(len(new_shape_np)):
if new_shape_np[i] == -1:
new_shape_np[i] = total_ele // division_total_ele
if np.array_equal(prev_shape_np, new_shape_np):
return sparse_indices_np, prev_shape_np
for i in range(new_shape_np.shape[0] - 2, -1, -1):
dividers[i] = new_shape_np[i + 1] * dividers[i + 1]
for row_num, sparse_row in enumerate(sparse_indices_np):
flat_idx = 0
if len(sparse_indices_np.shape) != 1:
for i, ele in enumerate(sparse_row):
flat_idx += sparse_row[i] * multipliers[i]
else:
flat_idx += sparse_row
if len(new_sparse_indices.shape) != 1:
for i in range(new_sparse_indices.shape[1]):
new_sparse_indices[row_num][i] = flat_idx // dividers[i]
flat_idx = flat_idx % dividers[i]
else:
new_sparse_indices[row_num] = flat_idx
return new_sparse_indices, new_shape_np
@tvm.testing.known_failing_targets("vulkan")
def test_sparse_reshape(
self,
target,
dev,
ref_res,
sparse_indices_np,
sparse_values_np,
prev_shape_np,
new_shape_np,
use_dyn,
):
if use_dyn:
sparse_indices = relay.var(
"sparse_indices",
shape=[relay.Any(), relay.Any()],
dtype=str(sparse_indices_np.dtype),
)
prev_shape = relay.var(
"prev_shape",
shape=[relay.Any()],
dtype=str(prev_shape_np.dtype),
)
new_shape = relay.var(
"new_shape",
shape=[relay.Any()],
dtype=str(new_shape_np.dtype),
)
else:
sparse_indices = relay.var(
"sparse_indices",
relay.TensorType(sparse_indices_np.shape, str(sparse_indices_np.dtype)),
)
prev_shape = relay.var(
"prev_shape", relay.TensorType(prev_shape_np.shape, str(prev_shape_np.dtype))
)
new_shape = relay.var(
"new_shape", relay.TensorType(new_shape_np.shape, str(new_shape_np.dtype))
)
z = relay.op.sparse_reshape(sparse_indices, prev_shape, new_shape).astuple()
func = relay.Function([sparse_indices, prev_shape, new_shape], z)
outputs = run_infer_type(z)
new_sparse_indices_infer_type, new_shape_infer_type = (
outputs.checked_type.fields[0].dtype,
outputs.checked_type.fields[1].dtype,
)
assert new_sparse_indices_infer_type == sparse_indices_np.dtype
assert new_shape_infer_type == new_shape_np.dtype
verify_func(
target,
dev,
func,
[sparse_indices_np, prev_shape_np, new_shape_np],
ref_res,
)
class TestSegmentSum:
data_np, segment_ids_np, num_segments = tvm.testing.parameters(
(
np.array([5, 1, 7, 2, 3, 4], dtype=np.float32),
np.array([0, 0, 1, 1, 0, 1], dtype=np.int32),
None,
),
(
np.array([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]], dtype=np.float64),
np.array([0, 0, 1], dtype=np.int32),
None,
),
(
np.random.random((6, 4, 5)),
np.array([2, 0, 1, 0, 3, 2], dtype=np.int64),
None,
),
(
np.array([[[1, 7]], [[3, 8]], [[2, 9]]], dtype=np.float32),
np.array([0, 0, 1], dtype=np.int32),
None,
),
(
np.random.random((9, 4, 5, 7)),
np.array([5, 0, 1, 0, 3, 6, 8, 7, 7], dtype=np.int64),
9,
),
(
np.array([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]], dtype=np.float64),
np.array([0, 2], dtype=np.int32),
4,
),
(
np.random.random((6, 4, 5)),
np.array([0, 0, 1, 5, 5], dtype=np.int32),
100,
),
)
use_dyn = tvm.testing.parameter(True, False, ids=["dyn", "static"])
@tvm.testing.fixture(cache_return_value=True)
def ref_res(
self,
data_np: np.ndarray,
segment_ids_np: np.ndarray,
num_segments: Optional[int],
):
"""
This function calculates the expected output of segment_sum operator given the inputs.
"""
if not num_segments:
num_segments = np.unique(segment_ids_np).shape[0]
result = np.zeros((num_segments,) + data_np.shape[1:], data_np.dtype)
for i, index in enumerate(segment_ids_np):
result[index] += data_np[i]
return result
# Optimization can produce tir.atomic_add, not currently supported
# on vulkan runtime.
@tvm.testing.known_failing_targets("vulkan")
def test_segment_sum(
self,
target,
dev,
ref_res: np.ndarray,
data_np: np.ndarray,
segment_ids_np: np.ndarray,
num_segments: Optional[int],
use_dyn: bool,
):
"""
This function verifies the relay output of segment_sum with its expected output.
"""
if use_dyn:
data = relay.var(
"data",
shape=[relay.Any() for _ in data_np.shape],
dtype=str(data_np.dtype),
)
segment_ids = relay.var(
"segment_ids",
shape=[relay.Any()],
dtype=str(segment_ids_np.dtype),
)
else:
data = relay.var(
"data",
relay.TensorType(data_np.shape, str(data_np.dtype)),
)
segment_ids = relay.var(
"segment_ids", relay.TensorType(segment_ids_np.shape, str(segment_ids_np.dtype))
)
z = relay.op.segment_sum(data, segment_ids, num_segments)
func = relay.Function([data, segment_ids], z)
segment_sum_result = run_infer_type(z)
assert segment_sum_result.checked_type.dtype == data_np.dtype
verify_func(
target,
dev,
func,
[data_np, segment_ids_np],
ref_res,
)
def verify_func(target, dev, func, data, ref_res):
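    """Evaluate `func` on the VM executor with the given inputs and compare the result
    (tensor or tuple) against `ref_res`, clearing the TE compiler cache afterwards."""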
assert isinstance(data, list)
for kind in ["vm"]:
mod = tvm.ir.IRModule.from_expr(func)
op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(*data)
if isinstance(op_res, tvm.runtime.container.ADT):
assert len(op_res) == len(
ref_res
), "Outputs from TVM and Python implementation must be equal "
for op_result, ref_result in zip(op_res, ref_res):
tvm.testing.assert_allclose(op_result.numpy(), ref_result, rtol=1e-5)
else:
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
relay.backend.te_compiler.get().clear()
def test_adv_index(target, dev, executor_kind):
def verify_adv_index(data_shape, index_shapes):
dtype = "float32"
inputs = [relay.var("data", relay.TensorType(data_shape, dtype))]
np_data = np.random.uniform(size=data_shape).astype(dtype)
np_indices = []
for i, index_shape in enumerate(index_shapes):
limit = data_shape[i]
np_indices.append(np.random.uniform(0, limit - 1, size=index_shape).astype("int64"))
inputs.append(relay.var("index_{}".format(i), relay.TensorType(index_shape, "int64")))
np_out = np_data[tuple(np_indices)]
np_args = [np_data] + np_indices
out = relay.op.adv_index(inputs)
func = relay.Function(inputs, out)
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
*np_args
)
tvm.testing.assert_allclose(op_res.numpy(), np_out, rtol=1e-5)
verify_adv_index((10, 5), [(3, 4), (3, 1)])
verify_adv_index((10, 5), [(1, 4), (3, 1)])
verify_adv_index(
(10, 5),
[
(2,),
],
)
verify_adv_index((10, 5, 15), [(1, 2, 1), (1, 2, 7)])
# Helper for testing binop functions
scanops_supported = {"cumsum": relay.op.cumsum, "cumprod": relay.op.cumprod}
def run_binop_tests(
target,
dev,
executor_kind,
binop_type: str,
gt_func: Callable[..., np.array],
identity_value: int,
):
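    """Exercise a scan operator (see `scanops_supported`) over 1D/2D/3D inputs, several
    axes, dtypes, and the exclusive mode, comparing against the NumPy ground truth
    `gt_func`; `identity_value` fills the excluded first position."""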
def assert_relay_scanop(
data_np: np.array,
np_out: np.array,
axis: int = None,
out_dtype: str = None,
rtol: float = 1e-5,
atol: float = 1e-5,
exclusive: bool = False,
):
inp = relay.var("data", relay.TensorType(data_np.shape, str(data_np.dtype)))
if binop_type not in scanops_supported.keys():
raise ValueError(f"Unknown function {binop_type}. Options: {scanops_supported.keys()}")
out = scanops_supported[binop_type](inp, axis, out_dtype, exclusive=exclusive)
func = relay.Function([inp], out)
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
data_np
)
tvm.testing.assert_allclose(op_res.numpy(), np_out, rtol=rtol, atol=atol)
data = np.array([2, 3, 0])
assert_relay_scanop(data, gt_func(data))
assert_relay_scanop(data, gt_func(data), out_dtype="int64")
data = np.random.randn(10, 10)
assert_relay_scanop(data, gt_func(data))
assert_relay_scanop(data, gt_func(data, axis=0), axis=0)
assert_relay_scanop(data, gt_func(data, axis=1), axis=1)
data = np.random.randn(10, 5, 10).astype("float32")
assert_relay_scanop(data, gt_func(data), rtol=1e-4, atol=1e-4)
assert_relay_scanop(data, gt_func(data, axis=0), axis=0, rtol=1e-4, atol=1e-4)
assert_relay_scanop(data, gt_func(data, axis=1), axis=1, rtol=1e-4, atol=1e-4)
assert_relay_scanop(data, gt_func(data, axis=-1), axis=-1, rtol=1e-4, atol=1e-4)
data = np.random.rand(10) > 0.5
data = data.astype(np.int32)
assert_relay_scanop(data, gt_func(data, dtype=np.int32))
assert_relay_scanop(data, gt_func(data, dtype="int64"), out_dtype="int64")
# Test exclusivity operations
data = np.random.randint(-100, 100, size=(10, 10)).astype("int64")
expected_result = np.roll(gt_func(data), 1)
expected_result[0] = identity_value
assert_relay_scanop(data, expected_result, exclusive=True)
expected_result = np.roll(gt_func(data, axis=0), 1, axis=0)
expected_result[0, :] = identity_value
assert_relay_scanop(data, expected_result, exclusive=True, axis=0)
expected_result = np.roll(gt_func(data, axis=1), 1, axis=1)
expected_result[:, 0] = identity_value
assert_relay_scanop(data, expected_result, exclusive=True, axis=1)
@tvm.testing.parametrize_targets
def test_cumsum(target, dev, executor_kind):
run_binop_tests(
target, dev, executor_kind, binop_type="cumsum", gt_func=np.cumsum, identity_value=0
)
@tvm.testing.parametrize_targets
def test_cumprod(target, dev, executor_kind):
run_binop_tests(
target, dev, executor_kind, binop_type="cumprod", gt_func=np.cumprod, identity_value=1
)
@tvm.testing.parametrize_targets
def test_scatter_nd(target, dev, executor_kind):
def verify_scatter_nd(
data_np, indices_np, updates_np, ref_res, mode="add", rtol=1e-5, atol=1e-5
):
data = relay.var("data", shape=data_np.shape, dtype=str(data_np.dtype))
indices = relay.var("indices", shape=indices_np.shape, dtype=str(indices_np.dtype))
updates = relay.var("updates", shape=updates_np.shape, dtype=str(updates_np.dtype))
out = relay.op.scatter_nd(data, indices, updates, mode)
func = relay.Function([data, indices, updates], out)
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
data_np, indices_np, updates_np
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=rtol, atol=atol)
def verify_scatter_nd_with_stack(
data_np, indices_np, updates_np, ref_res, mode="add", rtol=1e-5, atol=1e-5
):
data = relay.var("data", shape=data_np.shape, dtype=str(data_np.dtype))
indices_vars = [
relay.var("ind%d" % i, shape=v.shape, dtype=str(v.dtype))
for i, v in enumerate(indices_np)
]
updates = relay.var("updates", shape=updates_np.shape, dtype=str(updates_np.dtype))
        # Test that scatter_nd also works when the indices are produced by another Relay operator (stack, below)
indices = relay.op.stack(indices_vars, axis=0)
out = relay.op.scatter_nd(data, indices, updates, mode)
func = relay.Function(
[data, updates] + indices_vars,
out,
)
fargs = [data_np, updates_np]
for a in indices_np:
fargs.append(a)
op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
*fargs
)
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=rtol, atol=atol)
for indice_dtype in ["uint8", "uint16", "uint32"]:
data = np.zeros((2, 2)).astype("int64")
indices = np.array([[1, 1, 0], [0, 1, 0]]).astype(indice_dtype)
updates = np.array([2, 3, 0])
out = np.array([[0, 0], [2, 3]])
verify_scatter_nd(data, indices, updates, out)
verify_scatter_nd_with_stack(data, indices, updates, out)
data = np.zeros((2, 2, 2, 2)).astype("int64")
indices = np.array([[0, 1], [1, 1]]).astype(indice_dtype)
updates = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
out = np.array([[[[0, 0], [0, 0]], [[1, 2], [3, 4]]], [[[0, 0], [0, 0]], [[5, 6], [7, 8]]]])
verify_scatter_nd(data, indices, updates, out)
verify_scatter_nd_with_stack(data, indices, updates, out)
indices = np.array([[1, 0, 0]]).astype(indice_dtype)
updates = np.reshape(np.arange(1560 * 3), (3, 1560)).astype("float32")
shape = (2, 1560)
data = np.zeros(shape).astype("float32")
out = data.copy()
out[1, :] += updates[0, :]
out[0, :] += updates[1, :]
out[0, :] += updates[2, :]
verify_scatter_nd(data, indices, updates, out, mode="add")
verify_scatter_nd_with_stack(data, indices, updates, out)
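        # Note (illustrative): indices [[1, 0, 0]] name row 0 twice, so in "add" mode
        # the expected output above accumulates updates[1] and updates[2] into row 0
        # while row 1 receives only updates[0]; duplicate indices are summed rather
        # than overwritten.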
for mode in ["add", "update"]:
indices = np.stack((np.random.randint(2, size=5), np.random.randint(7, size=5))).astype(
indice_dtype
)
updates = np.ones((5, 3)).astype("float64")
shape = (2, 7, 3)
data = np.random.random(shape).astype("float64")
out = data.copy()
for i in range(indices.shape[1]):
for j in range(updates.shape[1]):
if mode == "add":
out[indices[0, i], indices[1, i], j] += updates[i, j]
elif mode == "update":
out[indices[0, i], indices[1, i], j] = updates[i, j]
verify_scatter_nd(data, indices, updates, out, mode)
verify_scatter_nd_with_stack(data, indices, updates, out, mode)
def test_unique(target, dev):
def calc_numpy_unique(data, is_sorted=False):
uniq, index, inverse, counts = np.unique(
data, return_index=True, return_inverse=True, return_counts=True
)
num_uniq = np.array([len(uniq)]).astype("int32")
if not is_sorted:
order = np.argsort(index)
reverse_order = np.argsort(order)
uniq = uniq[order].astype(data.dtype)
inverse = np.array([reverse_order[i] for i in inverse]).astype("int32")
counts = counts[order].astype("int32")
            # In the unsorted case, the indices of first occurrence need to be sorted
index = np.sort(index)
return [
uniq.astype(data.dtype),
index.astype("int32"),
inverse.astype("int32"),
num_uniq,
counts,
]
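    # Worked example (illustrative): for data = [3, 1, 3, 2], np.unique returns
    #   uniq=[1, 2, 3], index=[1, 3, 0], inverse=[2, 0, 2, 1], counts=[1, 1, 2];
    # in the unsorted branch, order = argsort(index) = [2, 0, 1] restores
    # first-occurrence order, giving uniq=[3, 1, 2], inverse=[0, 1, 0, 2],
    # counts=[2, 1, 1] and index=[0, 1, 3].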
def verify_unique(n, dtype, is_dyn=False, is_sorted=False, return_counts=False):
if is_dyn:
x = relay.var("x", relay.TensorType([relay.Any()], dtype))
else:
x = relay.var("x", relay.TensorType([n], dtype))
outs = relay.unique(x, is_sorted, return_counts)
outs = outs.astuple()
func = relay.Function([x], outs)
x_data = np.random.randint(50, size=n).astype(dtype)
if is_dyn:
backends = ["vm", "debug"]
else:
backends = ["graph", "debug"]
for kind in backends:
mod = tvm.ir.IRModule.from_expr(func)
tvm_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(
x_data
) # unique, indices, inverse_indices, num_unique, (counts)
np_res = calc_numpy_unique(
x_data, is_sorted
) # unique, indices, inverse_indices, num_unique, counts
num_unique = np_res[3][0]
# num_unique
assert num_unique == tvm_res[3].numpy()[0]
# unique
tvm.testing.assert_allclose(tvm_res[0].numpy()[:num_unique], np_res[0], rtol=1e-5)
# indices
tvm.testing.assert_allclose(tvm_res[1].numpy()[:num_unique], np_res[1], rtol=1e-5)
# inverse_indices
tvm.testing.assert_allclose(tvm_res[2].numpy(), np_res[2], rtol=1e-5)
# counts
if return_counts:
tvm.testing.assert_allclose(tvm_res[4].numpy()[:num_unique], np_res[4], rtol=1e-5)
for dtype in ["int32", "int64"]:
for i in range(8):
is_dyn, is_sorted, return_counts = bool(i & 1), bool(i & 2), bool(i & 4)
verify_unique(10, dtype, is_dyn, is_sorted, return_counts)
if __name__ == "__main__":
sys.exit(pytest.main(sys.argv))
| 38.741188
| 116
| 0.538145
|
d9e5b6c42b14d6f5fe11db606dd90e87d2b6a645
| 663
|
py
|
Python
|
config.py
|
blackshirt/dompetku
|
51b907dc25c927ef42b4522499c1884affd42980
|
[
"BSD-2-Clause"
] | 1
|
2016-05-15T16:58:54.000Z
|
2016-05-15T16:58:54.000Z
|
config.py
|
blackshirt/dompetku
|
51b907dc25c927ef42b4522499c1884affd42980
|
[
"BSD-2-Clause"
] | null | null | null |
config.py
|
blackshirt/dompetku
|
51b907dc25c927ef42b4522499c1884affd42980
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright @2014 blackshirtmuslim@yahoo.com
# Licensed: see Python license
"""Config file on top of root dir"""
import os
_data_path = os.path.join(os.path.dirname(__file__), 'data')  # relative to the main script
_db_file = 'dompetku.sqlite'
_db = os.path.join(_data_path, _db_file)
dbconfig = {
'sqlite': {
'db_path': _data_path,
'db_name': _db_file,
'db': _db,
},
'mysql': {
'host': '127.0.0.1',
'user': '',
'password': '',
'db_name': '',
},
'postgre': {
'host': '127.0.0.1',
'user': '',
'password': '',
'db_name': '',
}
}
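# Illustrative usage (not part of the original config): the active sqlite settings can
# be read straight from the mapping, e.g. the full database path:
_example_sqlite_db = dbconfig['sqlite']['db']  # .../data/dompetku.sqlite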
| 19.5
| 86
| 0.52187
|
cb69b3d3f0e33f5240fa393199c33b0da6a621b7
| 1,909
|
py
|
Python
|
research/cv/ntsnet/src/config_gpu.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77
|
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/cv/ntsnet/src/config_gpu.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3
|
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/cv/ntsnet/src/config_gpu.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24
|
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Network config settings, used in train_gpu.py and eval_gpu.py.
"""
from easydict import EasyDict as ed
config_ascend = ed({
"save_checkpoint": True,
"save_checkpoint_epochs": 2,
"keep_checkpoint_max": 10,
"learning_rate": 0.001,
"m_for_scrutinizer": 4,
"topK": 6,
"input_size": (448, 448),
"crop_pct_size": (600, 600),
"weight_decay": 1e-4,
"momentum": 0.9,
"num_epochs": 200,
"num_classes": 200,
"num_train_images": 5994,
"num_test_images": 5794,
"batch_size": 8,
"prefix": "ntsnet",
"lossLogName": "loss.log",
"lr_scheduler": "cosine",
"lr_step": [200, 200],
"optimizer": "momentum"
})
config_gpu = ed({
"save_checkpoint": True,
"save_checkpoint_epochs": 2,
"keep_checkpoint_max": 10,
"learning_rate": 0.001,
"m_for_scrutinizer": 4,
"topK": 6,
"input_size": (448, 448),
"crop_pct_size": (600, 600),
"weight_decay": 1e-4,
"momentum": 0.9,
"num_epochs": 200,
"num_classes": 200,
"num_train_images": 5994,
"num_test_images": 5794,
"batch_size": 16,
"prefix": "ntsnet",
"lossLogName": "loss.log",
"lr_scheduler": "cosine",
"lr_step": [60, 100],
"optimizer": "momentum"
})
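# Illustrative usage (not part of the original config): EasyDict exposes keys as
# attributes, so either access style reads the same value, e.g.:
_example_gpu_batch_size = config_gpu.batch_size  # == config_gpu["batch_size"] == 16
_example_ascend_batch_size = config_ascend.batch_size  # == 8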
| 29.369231
| 78
| 0.629125
|
d3dd616cbd48264f6df0f88fefe656b4aff0660c
| 376
|
py
|
Python
|
biblioteca/models.py
|
Camilotk/projeto_pratico_django
|
b76510d79c7a92774dc33de7271e75adae5f3875
|
[
"Apache-2.0"
] | null | null | null |
biblioteca/models.py
|
Camilotk/projeto_pratico_django
|
b76510d79c7a92774dc33de7271e75adae5f3875
|
[
"Apache-2.0"
] | null | null | null |
biblioteca/models.py
|
Camilotk/projeto_pratico_django
|
b76510d79c7a92774dc33de7271e75adae5f3875
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
# Create your models here.
class Livro(models.Model):
autor = models.CharField(max_length=50)
editora = models.CharField(max_length=50)
isbn = models.IntegerField()
numeroPaginas = models.IntegerField()
titulo = models.CharField(max_length=50)
anoPublicacao = models.IntegerField()
emailEditora = models.EmailField()
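# Illustrative usage sketch (comments only; the field values below are made up and a
# configured Django project with a migrated database would be required to run this):
#   livro = Livro.objects.create(
#       autor="Fulano", editora="Editora X", isbn=1234567, numeroPaginas=256,
#       titulo="Exemplo", anoPublicacao=1999, emailEditora="contato@exemplo.com")
#   Livro.objects.filter(anoPublicacao__gte=1990)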
| 28.923077
| 45
| 0.734043
|
e24e3c5dfab691e4eac98f23eebebdd12939aa40
| 5,857
|
py
|
Python
|
saleor/graphql/core/enums.py
|
greentornado/saleor
|
7f58917957a23c4dd90b47214a4500c91c735dee
|
[
"CC-BY-4.0"
] | 3
|
2021-06-22T12:38:18.000Z
|
2021-07-11T15:01:57.000Z
|
saleor/graphql/core/enums.py
|
greentornado/saleor
|
7f58917957a23c4dd90b47214a4500c91c735dee
|
[
"CC-BY-4.0"
] | 111
|
2021-06-30T08:51:06.000Z
|
2022-03-28T04:48:49.000Z
|
saleor/graphql/core/enums.py
|
IslamDEVO/es-saleor-nginx
|
a56a4aaf23fc308aad7b7489bc090fd4fcdb6315
|
[
"CC-BY-4.0"
] | 6
|
2021-11-08T16:43:05.000Z
|
2022-03-22T17:31:16.000Z
|
import graphene
from django.conf import settings
from ...account import error_codes as account_error_codes
from ...app import error_codes as app_error_codes
from ...attribute import error_codes as attribute_error_codes
from ...channel import error_codes as channel_error_codes
from ...checkout import error_codes as checkout_error_codes
from ...core import JobStatus
from ...core import error_codes as core_error_codes
from ...core.permissions import get_permissions_enum_list
from ...core.units import (
AreaUnits,
DistanceUnits,
MeasurementUnits,
VolumeUnits,
WeightUnits,
)
from ...csv import error_codes as csv_error_codes
from ...discount import error_codes as discount_error_codes
from ...giftcard import error_codes as giftcard_error_codes
from ...invoice import error_codes as invoice_error_codes
from ...menu import error_codes as menu_error_codes
from ...order import error_codes as order_error_codes
from ...page import error_codes as page_error_codes
from ...payment import error_codes as payment_error_codes
from ...plugins import error_codes as plugin_error_codes
from ...product import error_codes as product_error_codes
from ...shipping import error_codes as shipping_error_codes
from ...warehouse import error_codes as warehouse_error_codes
from ...webhook import error_codes as webhook_error_codes
from ...wishlist import error_codes as wishlist_error_codes
from ..shop import error_codes as shop_error_codes
from .utils import str_to_enum
class OrderDirection(graphene.Enum):
ASC = ""
DESC = "-"
@property
def description(self):
# Disable all the no-member violations in this function
# pylint: disable=no-member
if self == OrderDirection.ASC:
return "Specifies an ascending sort order."
if self == OrderDirection.DESC:
return "Specifies a descending sort order."
raise ValueError("Unsupported enum value: %s" % self.value)
class ReportingPeriod(graphene.Enum):
TODAY = "TODAY"
THIS_MONTH = "THIS_MONTH"
def to_enum(enum_cls, *, type_name=None, **options) -> graphene.Enum:
"""Create a Graphene enum from a class containing a set of options.
:param enum_cls:
The class to build the enum from.
:param type_name:
The name of the type. Default is the class name + 'Enum'.
:param options:
- description:
Contains the type description (default is the class's docstring)
- deprecation_reason:
Contains the deprecation reason.
The default is enum_cls.__deprecation_reason__ or None.
    :return: A new graphene.Enum type built from enum_cls.CHOICES.
"""
# note this won't work until
# https://github.com/graphql-python/graphene/issues/956 is fixed
deprecation_reason = getattr(enum_cls, "__deprecation_reason__", None)
if deprecation_reason:
options.setdefault("deprecation_reason", deprecation_reason)
type_name = type_name or (enum_cls.__name__ + "Enum")
enum_data = [(str_to_enum(code.upper()), code) for code, name in enum_cls.CHOICES]
return graphene.Enum(type_name, enum_data, **options)
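# Illustrative sketch (hypothetical class, not part of this module): any class exposing
# CHOICES as (code, name) pairs can be converted, e.g.
#   class InvoiceStatus:
#       CHOICES = [("draft", "Draft"), ("sent", "Sent")]
#   InvoiceStatusEnum = to_enum(InvoiceStatus)
# which presumably yields members DRAFT and SENT (names normalized by str_to_enum)
# whose values are the original codes "draft" and "sent".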
LanguageCodeEnum = graphene.Enum(
"LanguageCodeEnum",
[(lang[0].replace("-", "_").upper(), lang[0]) for lang in settings.LANGUAGES],
)
JobStatusEnum = to_enum(JobStatus)
PermissionEnum = graphene.Enum("PermissionEnum", get_permissions_enum_list())
# unit enums
MeasurementUnitsEnum = to_enum(MeasurementUnits)
DistanceUnitsEnum = to_enum(DistanceUnits)
AreaUnitsEnum = to_enum(AreaUnits)
VolumeUnitsEnum = to_enum(VolumeUnits)
WeightUnitsEnum = to_enum(WeightUnits)
unit_enums = [DistanceUnitsEnum, AreaUnitsEnum, VolumeUnitsEnum, WeightUnitsEnum]
AccountErrorCode = graphene.Enum.from_enum(account_error_codes.AccountErrorCode)
AppErrorCode = graphene.Enum.from_enum(app_error_codes.AppErrorCode)
AttributeErrorCode = graphene.Enum.from_enum(attribute_error_codes.AttributeErrorCode)
ChannelErrorCode = graphene.Enum.from_enum(channel_error_codes.ChannelErrorCode)
CheckoutErrorCode = graphene.Enum.from_enum(checkout_error_codes.CheckoutErrorCode)
ExportErrorCode = graphene.Enum.from_enum(csv_error_codes.ExportErrorCode)
DiscountErrorCode = graphene.Enum.from_enum(discount_error_codes.DiscountErrorCode)
PluginErrorCode = graphene.Enum.from_enum(plugin_error_codes.PluginErrorCode)
GiftCardErrorCode = graphene.Enum.from_enum(giftcard_error_codes.GiftCardErrorCode)
MenuErrorCode = graphene.Enum.from_enum(menu_error_codes.MenuErrorCode)
OrderSettingsErrorCode = graphene.Enum.from_enum(
shop_error_codes.OrderSettingsErrorCode
)
MetadataErrorCode = graphene.Enum.from_enum(core_error_codes.MetadataErrorCode)
OrderErrorCode = graphene.Enum.from_enum(order_error_codes.OrderErrorCode)
InvoiceErrorCode = graphene.Enum.from_enum(invoice_error_codes.InvoiceErrorCode)
PageErrorCode = graphene.Enum.from_enum(page_error_codes.PageErrorCode)
PaymentErrorCode = graphene.Enum.from_enum(payment_error_codes.PaymentErrorCode)
PermissionGroupErrorCode = graphene.Enum.from_enum(
account_error_codes.PermissionGroupErrorCode
)
ProductErrorCode = graphene.Enum.from_enum(product_error_codes.ProductErrorCode)
CollectionErrorCode = graphene.Enum.from_enum(product_error_codes.CollectionErrorCode)
ShopErrorCode = graphene.Enum.from_enum(core_error_codes.ShopErrorCode)
ShippingErrorCode = graphene.Enum.from_enum(shipping_error_codes.ShippingErrorCode)
StockErrorCode = graphene.Enum.from_enum(warehouse_error_codes.StockErrorCode)
UploadErrorCode = graphene.Enum.from_enum(core_error_codes.UploadErrorCode)
WarehouseErrorCode = graphene.Enum.from_enum(warehouse_error_codes.WarehouseErrorCode)
WebhookErrorCode = graphene.Enum.from_enum(webhook_error_codes.WebhookErrorCode)
WishlistErrorCode = graphene.Enum.from_enum(wishlist_error_codes.WishlistErrorCode)
TranslationErrorCode = graphene.Enum.from_enum(core_error_codes.TranslationErrorCode)
| 44.371212
| 86
| 0.799385
|
d50e820c6c6bc89a9346382c79f057e179f1da12
| 3,301
|
py
|
Python
|
python/paddle/fluid/tests/unittests/test_ones_op.py
|
licanisme/Paddle
|
d11c140e280880b9d031fa38361f3230aef6cf9c
|
[
"Apache-2.0"
] | 3
|
2021-06-11T06:48:10.000Z
|
2021-09-02T10:18:06.000Z
|
python/paddle/fluid/tests/unittests/test_ones_op.py
|
92lqllearning/Paddle
|
d11c140e280880b9d031fa38361f3230aef6cf9c
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/tests/unittests/test_ones_op.py
|
92lqllearning/Paddle
|
d11c140e280880b9d031fa38361f3230aef6cf9c
|
[
"Apache-2.0"
] | 1
|
2020-11-05T08:41:11.000Z
|
2020-11-05T08:41:11.000Z
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
import paddle
import paddle.fluid.core as core
from paddle.fluid.op import Operator
import paddle.fluid as fluid
import numpy as np
class ApiOnesTest(unittest.TestCase):
def test_paddle_ones(self):
with paddle.program_guard(paddle.Program()):
ones = paddle.ones(shape=[10])
place = paddle.CPUPlace()
exe = paddle.Executor(place)
result, = exe.run(fetch_list=[ones])
expected_result = np.ones(10, dtype="float32")
self.assertEqual((result == expected_result).all(), True)
with paddle.program_guard(paddle.Program()):
ones = paddle.ones(shape=[10], dtype="float64")
place = paddle.CPUPlace()
exe = paddle.Executor(place)
result, = exe.run(fetch_list=[ones])
expected_result = np.ones(10, dtype="float64")
self.assertEqual((result == expected_result).all(), True)
with paddle.program_guard(paddle.Program()):
ones = paddle.ones(shape=[10], dtype="int64")
place = paddle.CPUPlace()
exe = paddle.Executor(place)
result, = exe.run(fetch_list=[ones])
expected_result = np.ones(10, dtype="int64")
self.assertEqual((result == expected_result).all(), True)
def test_fluid_ones(self):
with paddle.program_guard(paddle.Program()):
ones = fluid.layers.ones(shape=[10], dtype="int64")
place = paddle.CPUPlace()
exe = paddle.Executor(place)
result, = exe.run(fetch_list=[ones])
expected_result = np.ones(10, dtype="int64")
self.assertEqual((result == expected_result).all(), True)
class ApiOnesZerosError(unittest.TestCase):
def test_errors(self):
def test_error1():
with paddle.program_guard(paddle.Program()):
ones = paddle.ones(shape=10, dtype="int64")
self.assertRaises(TypeError, test_error1)
def test_error2():
with paddle.program_guard(paddle.Program()):
ones = paddle.ones(shape=10)
self.assertRaises(TypeError, test_error2)
def test_error3():
with paddle.program_guard(paddle.Program()):
ones = fluid.layers.ones(shape=10, dtype="int64")
self.assertRaises(TypeError, test_error3)
def test_error4():
with paddle.program_guard(paddle.Program()):
ones = fluid.layers.ones(shape=[10], dtype="int8")
self.assertRaises(TypeError, test_error4)
if __name__ == "__main__":
unittest.main()
| 35.494624
| 74
| 0.645865
|
e99145bd815e63f8da5059fa118d10dfeafec823
| 996
|
py
|
Python
|
taps/ctypes/frechet.py
|
schinavro/taps
|
c03b4e23ed299824c1b062225b837a0b7cfff216
|
[
"MIT"
] | null | null | null |
taps/ctypes/frechet.py
|
schinavro/taps
|
c03b4e23ed299824c1b062225b837a0b7cfff216
|
[
"MIT"
] | null | null | null |
taps/ctypes/frechet.py
|
schinavro/taps
|
c03b4e23ed299824c1b062225b837a0b7cfff216
|
[
"MIT"
] | null | null | null |
# ctypes_test.py
import ctypes
import pathlib
if __name__ == "__main__":
# Load the shared library into ctypes
libname = pathlib.Path().absolute() / "libcfrechet.so"
c_lib = ctypes.CDLL(libname)
dist = c_lib.frechet_distance
dist.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_int,
ctypes.POINTER(ctypes.c_double),
ctypes.POINTER(ctypes.c_double)]
dist.restype = ctypes.c_double
import numpy as np
D = 1
M = 2
P = 300
r = 1
R = 3
_a = np.zeros((D, M, P))
_b = np.zeros((D, M, P))
for i in range(P):
theta = np.pi / 180 * i / 360 * P
_a[..., i] = r * np.array([np.cos(theta), np.sin(theta)])
_b[..., i] = -R * np.array([np.cos(theta), np.sin(theta)])
a = _a.flatten().ctypes.data_as(ctypes.POINTER(ctypes.c_double))
b = _b.flatten().ctypes.data_as(ctypes.POINTER(ctypes.c_double))
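    # Note (illustrative): data_as() hands the C routine raw pointers into the
    # flattened float64 buffers, so frechet_distance reads D * M * P doubles from each
    # of a and b; the D, M, P integers passed below tell it how to index that memory.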
answer = dist(D, M, P, a, b)
print(answer, type(answer), _a.flatten()[30])
# print(f" In Python: int: {x} float {y:.1f} return val {answer:.1f}")
| 26.918919
| 73
| 0.630522
|
fa60f0334b791638eb7c32cbfa2c32f7416a05f7
| 105,175
|
py
|
Python
|
sdk/python/pulumi_azure_native/devices/v20210331/outputs.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/devices/v20210331/outputs.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/devices/v20210331/outputs.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
__all__ = [
'ArmIdentityResponse',
'ArmUserIdentityResponse',
'CertificatePropertiesResponse',
'CloudToDevicePropertiesResponse',
'EnrichmentPropertiesResponse',
'EventHubPropertiesResponse',
'FallbackRoutePropertiesResponse',
'FeedbackPropertiesResponse',
'IotHubLocationDescriptionResponse',
'IotHubPropertiesResponse',
'IotHubSkuInfoResponse',
'IpFilterRuleResponse',
'ManagedIdentityResponse',
'MessagingEndpointPropertiesResponse',
'NetworkRuleSetIpRuleResponse',
'NetworkRuleSetPropertiesResponse',
'PrivateEndpointConnectionPropertiesResponse',
'PrivateEndpointConnectionResponse',
'PrivateEndpointResponse',
'PrivateLinkServiceConnectionStateResponse',
'RoutePropertiesResponse',
'RoutingEndpointsResponse',
'RoutingEventHubPropertiesResponse',
'RoutingPropertiesResponse',
'RoutingServiceBusQueueEndpointPropertiesResponse',
'RoutingServiceBusTopicEndpointPropertiesResponse',
'RoutingStorageContainerPropertiesResponse',
'SharedAccessSignatureAuthorizationRuleResponse',
'StorageEndpointPropertiesResponse',
]
@pulumi.output_type
class ArmIdentityResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "principalId":
suggest = "principal_id"
elif key == "tenantId":
suggest = "tenant_id"
elif key == "userAssignedIdentities":
suggest = "user_assigned_identities"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ArmIdentityResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ArmIdentityResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ArmIdentityResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
principal_id: str,
tenant_id: str,
type: Optional[str] = None,
user_assigned_identities: Optional[Mapping[str, 'outputs.ArmUserIdentityResponse']] = None):
"""
:param str principal_id: Principal Id
:param str tenant_id: Tenant Id
:param str type: The type of identity used for the resource. The type 'SystemAssigned, UserAssigned' includes both an implicitly created identity and a set of user assigned identities. The type 'None' will remove any identities from the service.
"""
pulumi.set(__self__, "principal_id", principal_id)
pulumi.set(__self__, "tenant_id", tenant_id)
if type is not None:
pulumi.set(__self__, "type", type)
if user_assigned_identities is not None:
pulumi.set(__self__, "user_assigned_identities", user_assigned_identities)
@property
@pulumi.getter(name="principalId")
def principal_id(self) -> str:
"""
Principal Id
"""
return pulumi.get(self, "principal_id")
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> str:
"""
Tenant Id
"""
return pulumi.get(self, "tenant_id")
@property
@pulumi.getter
def type(self) -> Optional[str]:
"""
The type of identity used for the resource. The type 'SystemAssigned, UserAssigned' includes both an implicitly created identity and a set of user assigned identities. The type 'None' will remove any identities from the service.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="userAssignedIdentities")
def user_assigned_identities(self) -> Optional[Mapping[str, 'outputs.ArmUserIdentityResponse']]:
return pulumi.get(self, "user_assigned_identities")
@pulumi.output_type
class ArmUserIdentityResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "clientId":
suggest = "client_id"
elif key == "principalId":
suggest = "principal_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ArmUserIdentityResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ArmUserIdentityResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ArmUserIdentityResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
client_id: str,
principal_id: str):
pulumi.set(__self__, "client_id", client_id)
pulumi.set(__self__, "principal_id", principal_id)
@property
@pulumi.getter(name="clientId")
def client_id(self) -> str:
return pulumi.get(self, "client_id")
@property
@pulumi.getter(name="principalId")
def principal_id(self) -> str:
return pulumi.get(self, "principal_id")
@pulumi.output_type
class CertificatePropertiesResponse(dict):
"""
The description of an X509 CA Certificate.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "isVerified":
suggest = "is_verified"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in CertificatePropertiesResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
CertificatePropertiesResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
CertificatePropertiesResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
created: str,
expiry: str,
is_verified: bool,
subject: str,
thumbprint: str,
updated: str,
certificate: Optional[str] = None):
"""
The description of an X509 CA Certificate.
:param str created: The certificate's create date and time.
:param str expiry: The certificate's expiration date and time.
:param bool is_verified: Determines whether certificate has been verified.
:param str subject: The certificate's subject name.
:param str thumbprint: The certificate's thumbprint.
:param str updated: The certificate's last update date and time.
:param str certificate: The certificate content
"""
pulumi.set(__self__, "created", created)
pulumi.set(__self__, "expiry", expiry)
pulumi.set(__self__, "is_verified", is_verified)
pulumi.set(__self__, "subject", subject)
pulumi.set(__self__, "thumbprint", thumbprint)
pulumi.set(__self__, "updated", updated)
if certificate is not None:
pulumi.set(__self__, "certificate", certificate)
@property
@pulumi.getter
def created(self) -> str:
"""
The certificate's create date and time.
"""
return pulumi.get(self, "created")
@property
@pulumi.getter
def expiry(self) -> str:
"""
The certificate's expiration date and time.
"""
return pulumi.get(self, "expiry")
@property
@pulumi.getter(name="isVerified")
def is_verified(self) -> bool:
"""
Determines whether certificate has been verified.
"""
return pulumi.get(self, "is_verified")
@property
@pulumi.getter
def subject(self) -> str:
"""
The certificate's subject name.
"""
return pulumi.get(self, "subject")
@property
@pulumi.getter
def thumbprint(self) -> str:
"""
The certificate's thumbprint.
"""
return pulumi.get(self, "thumbprint")
@property
@pulumi.getter
def updated(self) -> str:
"""
The certificate's last update date and time.
"""
return pulumi.get(self, "updated")
@property
@pulumi.getter
def certificate(self) -> Optional[str]:
"""
The certificate content
"""
return pulumi.get(self, "certificate")
@pulumi.output_type
class CloudToDevicePropertiesResponse(dict):
"""
The IoT hub cloud-to-device messaging properties.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "defaultTtlAsIso8601":
suggest = "default_ttl_as_iso8601"
elif key == "maxDeliveryCount":
suggest = "max_delivery_count"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in CloudToDevicePropertiesResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
CloudToDevicePropertiesResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
CloudToDevicePropertiesResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
default_ttl_as_iso8601: Optional[str] = None,
feedback: Optional['outputs.FeedbackPropertiesResponse'] = None,
max_delivery_count: Optional[int] = None):
"""
The IoT hub cloud-to-device messaging properties.
:param str default_ttl_as_iso8601: The default time to live for cloud-to-device messages in the device queue. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:param 'FeedbackPropertiesResponse' feedback: The properties of the feedback queue for cloud-to-device messages.
:param int max_delivery_count: The max delivery count for cloud-to-device messages in the device queue. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
"""
if default_ttl_as_iso8601 is not None:
pulumi.set(__self__, "default_ttl_as_iso8601", default_ttl_as_iso8601)
if feedback is not None:
pulumi.set(__self__, "feedback", feedback)
if max_delivery_count is not None:
pulumi.set(__self__, "max_delivery_count", max_delivery_count)
@property
@pulumi.getter(name="defaultTtlAsIso8601")
def default_ttl_as_iso8601(self) -> Optional[str]:
"""
The default time to live for cloud-to-device messages in the device queue. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
"""
return pulumi.get(self, "default_ttl_as_iso8601")
@property
@pulumi.getter
def feedback(self) -> Optional['outputs.FeedbackPropertiesResponse']:
"""
The properties of the feedback queue for cloud-to-device messages.
"""
return pulumi.get(self, "feedback")
@property
@pulumi.getter(name="maxDeliveryCount")
def max_delivery_count(self) -> Optional[int]:
"""
The max delivery count for cloud-to-device messages in the device queue. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
"""
return pulumi.get(self, "max_delivery_count")
@pulumi.output_type
class EnrichmentPropertiesResponse(dict):
"""
The properties of an enrichment that your IoT hub applies to messages delivered to endpoints.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "endpointNames":
suggest = "endpoint_names"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EnrichmentPropertiesResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EnrichmentPropertiesResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EnrichmentPropertiesResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
endpoint_names: Sequence[str],
key: str,
value: str):
"""
The properties of an enrichment that your IoT hub applies to messages delivered to endpoints.
:param Sequence[str] endpoint_names: The list of endpoints for which the enrichment is applied to the message.
:param str key: The key or name for the enrichment property.
:param str value: The value for the enrichment property.
"""
pulumi.set(__self__, "endpoint_names", endpoint_names)
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter(name="endpointNames")
def endpoint_names(self) -> Sequence[str]:
"""
The list of endpoints for which the enrichment is applied to the message.
"""
return pulumi.get(self, "endpoint_names")
@property
@pulumi.getter
def key(self) -> str:
"""
The key or name for the enrichment property.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter
def value(self) -> str:
"""
The value for the enrichment property.
"""
return pulumi.get(self, "value")
@pulumi.output_type
class EventHubPropertiesResponse(dict):
"""
The properties of the provisioned Event Hub-compatible endpoint used by the IoT hub.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "partitionIds":
suggest = "partition_ids"
elif key == "partitionCount":
suggest = "partition_count"
elif key == "retentionTimeInDays":
suggest = "retention_time_in_days"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EventHubPropertiesResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EventHubPropertiesResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EventHubPropertiesResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
endpoint: str,
partition_ids: Sequence[str],
path: str,
partition_count: Optional[int] = None,
retention_time_in_days: Optional[float] = None):
"""
The properties of the provisioned Event Hub-compatible endpoint used by the IoT hub.
:param str endpoint: The Event Hub-compatible endpoint.
:param Sequence[str] partition_ids: The partition ids in the Event Hub-compatible endpoint.
:param str path: The Event Hub-compatible name.
:param int partition_count: The number of partitions for receiving device-to-cloud messages in the Event Hub-compatible endpoint. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages.
:param float retention_time_in_days: The retention time for device-to-cloud messages in days. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages
"""
pulumi.set(__self__, "endpoint", endpoint)
pulumi.set(__self__, "partition_ids", partition_ids)
pulumi.set(__self__, "path", path)
if partition_count is not None:
pulumi.set(__self__, "partition_count", partition_count)
if retention_time_in_days is not None:
pulumi.set(__self__, "retention_time_in_days", retention_time_in_days)
@property
@pulumi.getter
def endpoint(self) -> str:
"""
The Event Hub-compatible endpoint.
"""
return pulumi.get(self, "endpoint")
@property
@pulumi.getter(name="partitionIds")
def partition_ids(self) -> Sequence[str]:
"""
The partition ids in the Event Hub-compatible endpoint.
"""
return pulumi.get(self, "partition_ids")
@property
@pulumi.getter
def path(self) -> str:
"""
The Event Hub-compatible name.
"""
return pulumi.get(self, "path")
@property
@pulumi.getter(name="partitionCount")
def partition_count(self) -> Optional[int]:
"""
The number of partitions for receiving device-to-cloud messages in the Event Hub-compatible endpoint. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages.
"""
return pulumi.get(self, "partition_count")
@property
@pulumi.getter(name="retentionTimeInDays")
def retention_time_in_days(self) -> Optional[float]:
"""
The retention time for device-to-cloud messages in days. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages
"""
return pulumi.get(self, "retention_time_in_days")
@pulumi.output_type
class FallbackRoutePropertiesResponse(dict):
"""
The properties of the fallback route. IoT Hub uses these properties when it routes messages to the fallback endpoint.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "endpointNames":
suggest = "endpoint_names"
elif key == "isEnabled":
suggest = "is_enabled"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in FallbackRoutePropertiesResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
FallbackRoutePropertiesResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
FallbackRoutePropertiesResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
endpoint_names: Sequence[str],
is_enabled: bool,
source: str,
condition: Optional[str] = None,
name: Optional[str] = None):
"""
The properties of the fallback route. IoT Hub uses these properties when it routes messages to the fallback endpoint.
:param Sequence[str] endpoint_names: The list of endpoints to which the messages that satisfy the condition are routed to. Currently only 1 endpoint is allowed.
:param bool is_enabled: Used to specify whether the fallback route is enabled.
:param str source: The source to which the routing rule is to be applied to. For example, DeviceMessages
:param str condition: The condition which is evaluated in order to apply the fallback route. If the condition is not provided it will evaluate to true by default. For grammar, See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language
:param str name: The name of the route. The name can only include alphanumeric characters, periods, underscores, hyphens, has a maximum length of 64 characters, and must be unique.
"""
pulumi.set(__self__, "endpoint_names", endpoint_names)
pulumi.set(__self__, "is_enabled", is_enabled)
pulumi.set(__self__, "source", source)
if condition is not None:
pulumi.set(__self__, "condition", condition)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="endpointNames")
def endpoint_names(self) -> Sequence[str]:
"""
The list of endpoints to which the messages that satisfy the condition are routed to. Currently only 1 endpoint is allowed.
"""
return pulumi.get(self, "endpoint_names")
@property
@pulumi.getter(name="isEnabled")
def is_enabled(self) -> bool:
"""
Used to specify whether the fallback route is enabled.
"""
return pulumi.get(self, "is_enabled")
@property
@pulumi.getter
def source(self) -> str:
"""
The source to which the routing rule is to be applied to. For example, DeviceMessages
"""
return pulumi.get(self, "source")
@property
@pulumi.getter
def condition(self) -> Optional[str]:
"""
The condition which is evaluated in order to apply the fallback route. If the condition is not provided it will evaluate to true by default. For grammar, See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language
"""
return pulumi.get(self, "condition")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
The name of the route. The name can only include alphanumeric characters, periods, underscores, hyphens, has a maximum length of 64 characters, and must be unique.
"""
return pulumi.get(self, "name")
@pulumi.output_type
class FeedbackPropertiesResponse(dict):
"""
The properties of the feedback queue for cloud-to-device messages.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "lockDurationAsIso8601":
suggest = "lock_duration_as_iso8601"
elif key == "maxDeliveryCount":
suggest = "max_delivery_count"
elif key == "ttlAsIso8601":
suggest = "ttl_as_iso8601"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in FeedbackPropertiesResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
FeedbackPropertiesResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
FeedbackPropertiesResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
lock_duration_as_iso8601: Optional[str] = None,
max_delivery_count: Optional[int] = None,
ttl_as_iso8601: Optional[str] = None):
"""
The properties of the feedback queue for cloud-to-device messages.
:param str lock_duration_as_iso8601: The lock duration for the feedback queue. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:param int max_delivery_count: The number of times the IoT hub attempts to deliver a message on the feedback queue. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:param str ttl_as_iso8601: The period of time for which a message is available to consume before it is expired by the IoT hub. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
"""
if lock_duration_as_iso8601 is not None:
pulumi.set(__self__, "lock_duration_as_iso8601", lock_duration_as_iso8601)
if max_delivery_count is not None:
pulumi.set(__self__, "max_delivery_count", max_delivery_count)
if ttl_as_iso8601 is not None:
pulumi.set(__self__, "ttl_as_iso8601", ttl_as_iso8601)
@property
@pulumi.getter(name="lockDurationAsIso8601")
def lock_duration_as_iso8601(self) -> Optional[str]:
"""
The lock duration for the feedback queue. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
"""
return pulumi.get(self, "lock_duration_as_iso8601")
@property
@pulumi.getter(name="maxDeliveryCount")
def max_delivery_count(self) -> Optional[int]:
"""
The number of times the IoT hub attempts to deliver a message on the feedback queue. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
"""
return pulumi.get(self, "max_delivery_count")
@property
@pulumi.getter(name="ttlAsIso8601")
def ttl_as_iso8601(self) -> Optional[str]:
"""
The period of time for which a message is available to consume before it is expired by the IoT hub. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
"""
return pulumi.get(self, "ttl_as_iso8601")
@pulumi.output_type
class IotHubLocationDescriptionResponse(dict):
"""
Public representation of one of the locations where a resource is provisioned.
"""
def __init__(__self__, *,
location: Optional[str] = None,
role: Optional[str] = None):
"""
Public representation of one of the locations where a resource is provisioned.
:param str location: The name of the Azure region
:param str role: The role of the region, can be either primary or secondary. The primary region is where the IoT hub is currently provisioned. The secondary region is the Azure disaster recovery (DR) paired region and also the region where the IoT hub can failover to.
"""
if location is not None:
pulumi.set(__self__, "location", location)
if role is not None:
pulumi.set(__self__, "role", role)
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
The name of the Azure region
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def role(self) -> Optional[str]:
"""
The role of the region, can be either primary or secondary. The primary region is where the IoT hub is currently provisioned. The secondary region is the Azure disaster recovery (DR) paired region and also the region where the IoT hub can failover to.
"""
return pulumi.get(self, "role")
@pulumi.output_type
class IotHubPropertiesResponse(dict):
"""
The properties of an IoT hub.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "hostName":
suggest = "host_name"
elif key == "provisioningState":
suggest = "provisioning_state"
elif key == "authorizationPolicies":
suggest = "authorization_policies"
elif key == "cloudToDevice":
suggest = "cloud_to_device"
elif key == "enableFileUploadNotifications":
suggest = "enable_file_upload_notifications"
elif key == "eventHubEndpoints":
suggest = "event_hub_endpoints"
elif key == "ipFilterRules":
suggest = "ip_filter_rules"
elif key == "messagingEndpoints":
suggest = "messaging_endpoints"
elif key == "minTlsVersion":
suggest = "min_tls_version"
elif key == "networkRuleSets":
suggest = "network_rule_sets"
elif key == "privateEndpointConnections":
suggest = "private_endpoint_connections"
elif key == "publicNetworkAccess":
suggest = "public_network_access"
elif key == "storageEndpoints":
suggest = "storage_endpoints"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in IotHubPropertiesResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
IotHubPropertiesResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
IotHubPropertiesResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
host_name: str,
locations: Sequence['outputs.IotHubLocationDescriptionResponse'],
provisioning_state: str,
state: str,
authorization_policies: Optional[Sequence['outputs.SharedAccessSignatureAuthorizationRuleResponse']] = None,
cloud_to_device: Optional['outputs.CloudToDevicePropertiesResponse'] = None,
comments: Optional[str] = None,
enable_file_upload_notifications: Optional[bool] = None,
event_hub_endpoints: Optional[Mapping[str, 'outputs.EventHubPropertiesResponse']] = None,
features: Optional[str] = None,
ip_filter_rules: Optional[Sequence['outputs.IpFilterRuleResponse']] = None,
messaging_endpoints: Optional[Mapping[str, 'outputs.MessagingEndpointPropertiesResponse']] = None,
min_tls_version: Optional[str] = None,
network_rule_sets: Optional['outputs.NetworkRuleSetPropertiesResponse'] = None,
private_endpoint_connections: Optional[Sequence['outputs.PrivateEndpointConnectionResponse']] = None,
public_network_access: Optional[str] = None,
routing: Optional['outputs.RoutingPropertiesResponse'] = None,
storage_endpoints: Optional[Mapping[str, 'outputs.StorageEndpointPropertiesResponse']] = None):
"""
The properties of an IoT hub.
:param str host_name: The name of the host.
:param Sequence['IotHubLocationDescriptionResponse'] locations: Primary and secondary location for iot hub
:param str provisioning_state: The provisioning state.
:param str state: The hub state.
:param Sequence['SharedAccessSignatureAuthorizationRuleResponse'] authorization_policies: The shared access policies you can use to secure a connection to the IoT hub.
:param 'CloudToDevicePropertiesResponse' cloud_to_device: The IoT hub cloud-to-device messaging properties.
:param str comments: IoT hub comments.
:param bool enable_file_upload_notifications: If True, file upload notifications are enabled.
:param Mapping[str, 'EventHubPropertiesResponse'] event_hub_endpoints: The Event Hub-compatible endpoint properties. The only possible keys to this dictionary is events. This key has to be present in the dictionary while making create or update calls for the IoT hub.
:param str features: The capabilities and features enabled for the IoT hub.
:param Sequence['IpFilterRuleResponse'] ip_filter_rules: The IP filter rules.
:param Mapping[str, 'MessagingEndpointPropertiesResponse'] messaging_endpoints: The messaging endpoint properties for the file upload notification queue.
:param str min_tls_version: Specifies the minimum TLS version to support for this hub. Can be set to "1.2" to have clients that use a TLS version below 1.2 to be rejected.
:param 'NetworkRuleSetPropertiesResponse' network_rule_sets: Network Rule Set Properties of IotHub
:param Sequence['PrivateEndpointConnectionResponse'] private_endpoint_connections: Private endpoint connections created on this IotHub
:param str public_network_access: Whether requests from Public Network are allowed
:param 'RoutingPropertiesResponse' routing: The routing related properties of the IoT hub. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging
:param Mapping[str, 'StorageEndpointPropertiesResponse'] storage_endpoints: The list of Azure Storage endpoints where you can upload files. Currently you can configure only one Azure Storage account and that MUST have its key as $default. Specifying more than one storage account causes an error to be thrown. Not specifying a value for this property when the enableFileUploadNotifications property is set to True, causes an error to be thrown.
"""
pulumi.set(__self__, "host_name", host_name)
pulumi.set(__self__, "locations", locations)
pulumi.set(__self__, "provisioning_state", provisioning_state)
pulumi.set(__self__, "state", state)
if authorization_policies is not None:
pulumi.set(__self__, "authorization_policies", authorization_policies)
if cloud_to_device is not None:
pulumi.set(__self__, "cloud_to_device", cloud_to_device)
if comments is not None:
pulumi.set(__self__, "comments", comments)
if enable_file_upload_notifications is not None:
pulumi.set(__self__, "enable_file_upload_notifications", enable_file_upload_notifications)
if event_hub_endpoints is not None:
pulumi.set(__self__, "event_hub_endpoints", event_hub_endpoints)
if features is not None:
pulumi.set(__self__, "features", features)
if ip_filter_rules is not None:
pulumi.set(__self__, "ip_filter_rules", ip_filter_rules)
if messaging_endpoints is not None:
pulumi.set(__self__, "messaging_endpoints", messaging_endpoints)
if min_tls_version is not None:
pulumi.set(__self__, "min_tls_version", min_tls_version)
if network_rule_sets is not None:
pulumi.set(__self__, "network_rule_sets", network_rule_sets)
if private_endpoint_connections is not None:
pulumi.set(__self__, "private_endpoint_connections", private_endpoint_connections)
if public_network_access is not None:
pulumi.set(__self__, "public_network_access", public_network_access)
if routing is not None:
pulumi.set(__self__, "routing", routing)
if storage_endpoints is not None:
pulumi.set(__self__, "storage_endpoints", storage_endpoints)
@property
@pulumi.getter(name="hostName")
def host_name(self) -> str:
"""
The name of the host.
"""
return pulumi.get(self, "host_name")
@property
@pulumi.getter
def locations(self) -> Sequence['outputs.IotHubLocationDescriptionResponse']:
"""
Primary and secondary location for iot hub
"""
return pulumi.get(self, "locations")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def state(self) -> str:
"""
The hub state.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="authorizationPolicies")
def authorization_policies(self) -> Optional[Sequence['outputs.SharedAccessSignatureAuthorizationRuleResponse']]:
"""
The shared access policies you can use to secure a connection to the IoT hub.
"""
return pulumi.get(self, "authorization_policies")
@property
@pulumi.getter(name="cloudToDevice")
def cloud_to_device(self) -> Optional['outputs.CloudToDevicePropertiesResponse']:
"""
The IoT hub cloud-to-device messaging properties.
"""
return pulumi.get(self, "cloud_to_device")
@property
@pulumi.getter
def comments(self) -> Optional[str]:
"""
IoT hub comments.
"""
return pulumi.get(self, "comments")
@property
@pulumi.getter(name="enableFileUploadNotifications")
def enable_file_upload_notifications(self) -> Optional[bool]:
"""
If True, file upload notifications are enabled.
"""
return pulumi.get(self, "enable_file_upload_notifications")
@property
@pulumi.getter(name="eventHubEndpoints")
def event_hub_endpoints(self) -> Optional[Mapping[str, 'outputs.EventHubPropertiesResponse']]:
"""
The Event Hub-compatible endpoint properties. The only possible keys to this dictionary is events. This key has to be present in the dictionary while making create or update calls for the IoT hub.
"""
return pulumi.get(self, "event_hub_endpoints")
@property
@pulumi.getter
def features(self) -> Optional[str]:
"""
The capabilities and features enabled for the IoT hub.
"""
return pulumi.get(self, "features")
@property
@pulumi.getter(name="ipFilterRules")
def ip_filter_rules(self) -> Optional[Sequence['outputs.IpFilterRuleResponse']]:
"""
The IP filter rules.
"""
return pulumi.get(self, "ip_filter_rules")
@property
@pulumi.getter(name="messagingEndpoints")
def messaging_endpoints(self) -> Optional[Mapping[str, 'outputs.MessagingEndpointPropertiesResponse']]:
"""
The messaging endpoint properties for the file upload notification queue.
"""
return pulumi.get(self, "messaging_endpoints")
@property
@pulumi.getter(name="minTlsVersion")
def min_tls_version(self) -> Optional[str]:
"""
Specifies the minimum TLS version to support for this hub. Can be set to "1.2" to have clients that use a TLS version below 1.2 to be rejected.
"""
return pulumi.get(self, "min_tls_version")
@property
@pulumi.getter(name="networkRuleSets")
def network_rule_sets(self) -> Optional['outputs.NetworkRuleSetPropertiesResponse']:
"""
Network Rule Set Properties of IotHub
"""
return pulumi.get(self, "network_rule_sets")
@property
@pulumi.getter(name="privateEndpointConnections")
def private_endpoint_connections(self) -> Optional[Sequence['outputs.PrivateEndpointConnectionResponse']]:
"""
Private endpoint connections created on this IotHub
"""
return pulumi.get(self, "private_endpoint_connections")
@property
@pulumi.getter(name="publicNetworkAccess")
def public_network_access(self) -> Optional[str]:
"""
Whether requests from Public Network are allowed
"""
return pulumi.get(self, "public_network_access")
@property
@pulumi.getter
def routing(self) -> Optional['outputs.RoutingPropertiesResponse']:
"""
The routing related properties of the IoT hub. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging
"""
return pulumi.get(self, "routing")
@property
@pulumi.getter(name="storageEndpoints")
def storage_endpoints(self) -> Optional[Mapping[str, 'outputs.StorageEndpointPropertiesResponse']]:
"""
The list of Azure Storage endpoints where you can upload files. Currently you can configure only one Azure Storage account and that MUST have its key as $default. Specifying more than one storage account causes an error to be thrown. Not specifying a value for this property when the enableFileUploadNotifications property is set to True, causes an error to be thrown.
"""
return pulumi.get(self, "storage_endpoints")
@pulumi.output_type
class IotHubSkuInfoResponse(dict):
"""
Information about the SKU of the IoT hub.
"""
def __init__(__self__, *,
name: str,
tier: str,
capacity: Optional[float] = None):
"""
Information about the SKU of the IoT hub.
:param str name: The name of the SKU.
:param str tier: The billing tier for the IoT hub.
:param float capacity: The number of provisioned IoT Hub units. See: https://docs.microsoft.com/azure/azure-subscription-service-limits#iot-hub-limits.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "tier", tier)
if capacity is not None:
pulumi.set(__self__, "capacity", capacity)
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the SKU.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def tier(self) -> str:
"""
The billing tier for the IoT hub.
"""
return pulumi.get(self, "tier")
@property
@pulumi.getter
def capacity(self) -> Optional[float]:
"""
The number of provisioned IoT Hub units. See: https://docs.microsoft.com/azure/azure-subscription-service-limits#iot-hub-limits.
"""
return pulumi.get(self, "capacity")
@pulumi.output_type
class IpFilterRuleResponse(dict):
"""
The IP filter rules for the IoT hub.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "filterName":
suggest = "filter_name"
elif key == "ipMask":
suggest = "ip_mask"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in IpFilterRuleResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
IpFilterRuleResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
IpFilterRuleResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
action: str,
filter_name: str,
ip_mask: str):
"""
The IP filter rules for the IoT hub.
:param str action: The desired action for requests captured by this rule.
:param str filter_name: The name of the IP filter rule.
:param str ip_mask: A string that contains the IP address range in CIDR notation for the rule.
"""
pulumi.set(__self__, "action", action)
pulumi.set(__self__, "filter_name", filter_name)
pulumi.set(__self__, "ip_mask", ip_mask)
@property
@pulumi.getter
def action(self) -> str:
"""
The desired action for requests captured by this rule.
"""
return pulumi.get(self, "action")
@property
@pulumi.getter(name="filterName")
def filter_name(self) -> str:
"""
The name of the IP filter rule.
"""
return pulumi.get(self, "filter_name")
@property
@pulumi.getter(name="ipMask")
def ip_mask(self) -> str:
"""
A string that contains the IP address range in CIDR notation for the rule.
"""
return pulumi.get(self, "ip_mask")
@pulumi.output_type
class ManagedIdentityResponse(dict):
"""
The properties of the Managed identity.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "userAssignedIdentity":
suggest = "user_assigned_identity"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ManagedIdentityResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ManagedIdentityResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ManagedIdentityResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
user_assigned_identity: Optional[str] = None):
"""
The properties of the Managed identity.
:param str user_assigned_identity: The user assigned identity.
"""
if user_assigned_identity is not None:
pulumi.set(__self__, "user_assigned_identity", user_assigned_identity)
@property
@pulumi.getter(name="userAssignedIdentity")
def user_assigned_identity(self) -> Optional[str]:
"""
The user assigned identity.
"""
return pulumi.get(self, "user_assigned_identity")
@pulumi.output_type
class MessagingEndpointPropertiesResponse(dict):
"""
The properties of the messaging endpoints used by this IoT hub.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "lockDurationAsIso8601":
suggest = "lock_duration_as_iso8601"
elif key == "maxDeliveryCount":
suggest = "max_delivery_count"
elif key == "ttlAsIso8601":
suggest = "ttl_as_iso8601"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in MessagingEndpointPropertiesResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
MessagingEndpointPropertiesResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
MessagingEndpointPropertiesResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
lock_duration_as_iso8601: Optional[str] = None,
max_delivery_count: Optional[int] = None,
ttl_as_iso8601: Optional[str] = None):
"""
The properties of the messaging endpoints used by this IoT hub.
:param str lock_duration_as_iso8601: The lock duration. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
:param int max_delivery_count: The number of times the IoT hub attempts to deliver a message. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
:param str ttl_as_iso8601: The period of time for which a message is available to consume before it is expired by the IoT hub. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
"""
if lock_duration_as_iso8601 is not None:
pulumi.set(__self__, "lock_duration_as_iso8601", lock_duration_as_iso8601)
if max_delivery_count is not None:
pulumi.set(__self__, "max_delivery_count", max_delivery_count)
if ttl_as_iso8601 is not None:
pulumi.set(__self__, "ttl_as_iso8601", ttl_as_iso8601)
@property
@pulumi.getter(name="lockDurationAsIso8601")
def lock_duration_as_iso8601(self) -> Optional[str]:
"""
The lock duration. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
"""
return pulumi.get(self, "lock_duration_as_iso8601")
@property
@pulumi.getter(name="maxDeliveryCount")
def max_delivery_count(self) -> Optional[int]:
"""
The number of times the IoT hub attempts to deliver a message. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
"""
return pulumi.get(self, "max_delivery_count")
@property
@pulumi.getter(name="ttlAsIso8601")
def ttl_as_iso8601(self) -> Optional[str]:
"""
The period of time for which a message is available to consume before it is expired by the IoT hub. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
"""
return pulumi.get(self, "ttl_as_iso8601")
@pulumi.output_type
class NetworkRuleSetIpRuleResponse(dict):
"""
IP Rule to be applied as part of Network Rule Set
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "filterName":
suggest = "filter_name"
elif key == "ipMask":
suggest = "ip_mask"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in NetworkRuleSetIpRuleResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
NetworkRuleSetIpRuleResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
NetworkRuleSetIpRuleResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
filter_name: str,
ip_mask: str,
action: Optional[str] = None):
"""
IP Rule to be applied as part of Network Rule Set
:param str filter_name: Name of the IP filter rule.
:param str ip_mask: A string that contains the IP address range in CIDR notation for the rule.
:param str action: IP Filter Action
"""
pulumi.set(__self__, "filter_name", filter_name)
pulumi.set(__self__, "ip_mask", ip_mask)
if action is None:
action = 'Allow'
if action is not None:
pulumi.set(__self__, "action", action)
@property
@pulumi.getter(name="filterName")
def filter_name(self) -> str:
"""
Name of the IP filter rule.
"""
return pulumi.get(self, "filter_name")
@property
@pulumi.getter(name="ipMask")
def ip_mask(self) -> str:
"""
A string that contains the IP address range in CIDR notation for the rule.
"""
return pulumi.get(self, "ip_mask")
@property
@pulumi.getter
def action(self) -> Optional[str]:
"""
IP Filter Action
"""
return pulumi.get(self, "action")
@pulumi.output_type
class NetworkRuleSetPropertiesResponse(dict):
"""
Network Rule Set Properties of IotHub
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "applyToBuiltInEventHubEndpoint":
suggest = "apply_to_built_in_event_hub_endpoint"
elif key == "ipRules":
suggest = "ip_rules"
elif key == "defaultAction":
suggest = "default_action"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in NetworkRuleSetPropertiesResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
NetworkRuleSetPropertiesResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
NetworkRuleSetPropertiesResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
apply_to_built_in_event_hub_endpoint: bool,
ip_rules: Sequence['outputs.NetworkRuleSetIpRuleResponse'],
default_action: Optional[str] = None):
"""
Network Rule Set Properties of IotHub
:param bool apply_to_built_in_event_hub_endpoint: If True, then Network Rule Set is also applied to BuiltIn EventHub EndPoint of IotHub
:param Sequence['NetworkRuleSetIpRuleResponse'] ip_rules: List of IP Rules
:param str default_action: Default Action for Network Rule Set
"""
pulumi.set(__self__, "apply_to_built_in_event_hub_endpoint", apply_to_built_in_event_hub_endpoint)
pulumi.set(__self__, "ip_rules", ip_rules)
if default_action is None:
default_action = 'Deny'
if default_action is not None:
pulumi.set(__self__, "default_action", default_action)
@property
@pulumi.getter(name="applyToBuiltInEventHubEndpoint")
def apply_to_built_in_event_hub_endpoint(self) -> bool:
"""
If True, then Network Rule Set is also applied to BuiltIn EventHub EndPoint of IotHub
"""
return pulumi.get(self, "apply_to_built_in_event_hub_endpoint")
@property
@pulumi.getter(name="ipRules")
def ip_rules(self) -> Sequence['outputs.NetworkRuleSetIpRuleResponse']:
"""
List of IP Rules
"""
return pulumi.get(self, "ip_rules")
@property
@pulumi.getter(name="defaultAction")
def default_action(self) -> Optional[str]:
"""
Default Action for Network Rule Set
"""
return pulumi.get(self, "default_action")
@pulumi.output_type
class PrivateEndpointConnectionPropertiesResponse(dict):
"""
The properties of a private endpoint connection
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "privateLinkServiceConnectionState":
suggest = "private_link_service_connection_state"
elif key == "privateEndpoint":
suggest = "private_endpoint"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in PrivateEndpointConnectionPropertiesResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
PrivateEndpointConnectionPropertiesResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
PrivateEndpointConnectionPropertiesResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
private_link_service_connection_state: 'outputs.PrivateLinkServiceConnectionStateResponse',
private_endpoint: Optional['outputs.PrivateEndpointResponse'] = None):
"""
The properties of a private endpoint connection
:param 'PrivateLinkServiceConnectionStateResponse' private_link_service_connection_state: The current state of a private endpoint connection
:param 'PrivateEndpointResponse' private_endpoint: The private endpoint property of a private endpoint connection
"""
pulumi.set(__self__, "private_link_service_connection_state", private_link_service_connection_state)
if private_endpoint is not None:
pulumi.set(__self__, "private_endpoint", private_endpoint)
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def private_link_service_connection_state(self) -> 'outputs.PrivateLinkServiceConnectionStateResponse':
"""
The current state of a private endpoint connection
"""
return pulumi.get(self, "private_link_service_connection_state")
@property
@pulumi.getter(name="privateEndpoint")
def private_endpoint(self) -> Optional['outputs.PrivateEndpointResponse']:
"""
The private endpoint property of a private endpoint connection
"""
return pulumi.get(self, "private_endpoint")
@pulumi.output_type
class PrivateEndpointConnectionResponse(dict):
"""
The private endpoint connection of an IotHub
"""
def __init__(__self__, *,
id: str,
name: str,
properties: 'outputs.PrivateEndpointConnectionPropertiesResponse',
type: str):
"""
The private endpoint connection of an IotHub
:param str id: The resource identifier.
:param str name: The resource name.
:param 'PrivateEndpointConnectionPropertiesResponse' properties: The properties of a private endpoint connection
:param str type: The resource type.
"""
pulumi.set(__self__, "id", id)
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "properties", properties)
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
The resource identifier.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.PrivateEndpointConnectionPropertiesResponse':
"""
The properties of a private endpoint connection
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def type(self) -> str:
"""
The resource type.
"""
return pulumi.get(self, "type")
@pulumi.output_type
class PrivateEndpointResponse(dict):
"""
The private endpoint property of a private endpoint connection
"""
def __init__(__self__, *,
id: str):
"""
The private endpoint property of a private endpoint connection
:param str id: The resource identifier.
"""
pulumi.set(__self__, "id", id)
@property
@pulumi.getter
def id(self) -> str:
"""
The resource identifier.
"""
return pulumi.get(self, "id")
@pulumi.output_type
class PrivateLinkServiceConnectionStateResponse(dict):
"""
The current state of a private endpoint connection
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "actionsRequired":
suggest = "actions_required"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in PrivateLinkServiceConnectionStateResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
PrivateLinkServiceConnectionStateResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
PrivateLinkServiceConnectionStateResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
description: str,
status: str,
actions_required: Optional[str] = None):
"""
The current state of a private endpoint connection
:param str description: The description for the current state of a private endpoint connection
:param str status: The status of a private endpoint connection
:param str actions_required: Actions required for a private endpoint connection
"""
pulumi.set(__self__, "description", description)
pulumi.set(__self__, "status", status)
if actions_required is not None:
pulumi.set(__self__, "actions_required", actions_required)
@property
@pulumi.getter
def description(self) -> str:
"""
The description for the current state of a private endpoint connection
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def status(self) -> str:
"""
The status of a private endpoint connection
"""
return pulumi.get(self, "status")
@property
@pulumi.getter(name="actionsRequired")
def actions_required(self) -> Optional[str]:
"""
Actions required for a private endpoint connection
"""
return pulumi.get(self, "actions_required")
@pulumi.output_type
class RoutePropertiesResponse(dict):
"""
The properties of a routing rule that your IoT hub uses to route messages to endpoints.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "endpointNames":
suggest = "endpoint_names"
elif key == "isEnabled":
suggest = "is_enabled"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in RoutePropertiesResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
RoutePropertiesResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
RoutePropertiesResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
endpoint_names: Sequence[str],
is_enabled: bool,
name: str,
source: str,
condition: Optional[str] = None):
"""
The properties of a routing rule that your IoT hub uses to route messages to endpoints.
:param Sequence[str] endpoint_names: The list of endpoints to which messages that satisfy the condition are routed. Currently only one endpoint is allowed.
:param bool is_enabled: Used to specify whether a route is enabled.
:param str name: The name of the route. The name can only include alphanumeric characters, periods, underscores, hyphens, has a maximum length of 64 characters, and must be unique.
:param str source: The source that the routing rule is to be applied to, such as DeviceMessages.
:param str condition: The condition that is evaluated to apply the routing rule. If no condition is provided, it evaluates to true by default. For grammar, see: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language
"""
pulumi.set(__self__, "endpoint_names", endpoint_names)
pulumi.set(__self__, "is_enabled", is_enabled)
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "source", source)
if condition is not None:
pulumi.set(__self__, "condition", condition)
@property
@pulumi.getter(name="endpointNames")
def endpoint_names(self) -> Sequence[str]:
"""
The list of endpoints to which messages that satisfy the condition are routed. Currently only one endpoint is allowed.
"""
return pulumi.get(self, "endpoint_names")
@property
@pulumi.getter(name="isEnabled")
def is_enabled(self) -> bool:
"""
Used to specify whether a route is enabled.
"""
return pulumi.get(self, "is_enabled")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the route. The name can only include alphanumeric characters, periods, underscores, hyphens, has a maximum length of 64 characters, and must be unique.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def source(self) -> str:
"""
The source that the routing rule is to be applied to, such as DeviceMessages.
"""
return pulumi.get(self, "source")
@property
@pulumi.getter
def condition(self) -> Optional[str]:
"""
The condition that is evaluated to apply the routing rule. If no condition is provided, it evaluates to true by default. For grammar, see: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language
"""
return pulumi.get(self, "condition")
@pulumi.output_type
class RoutingEndpointsResponse(dict):
"""
The properties related to the custom endpoints to which your IoT hub routes messages based on the routing rules. A maximum of 10 custom endpoints are allowed across all endpoint types for paid hubs and only 1 custom endpoint is allowed across all endpoint types for free hubs.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "eventHubs":
suggest = "event_hubs"
elif key == "serviceBusQueues":
suggest = "service_bus_queues"
elif key == "serviceBusTopics":
suggest = "service_bus_topics"
elif key == "storageContainers":
suggest = "storage_containers"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in RoutingEndpointsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
RoutingEndpointsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
RoutingEndpointsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
event_hubs: Optional[Sequence['outputs.RoutingEventHubPropertiesResponse']] = None,
service_bus_queues: Optional[Sequence['outputs.RoutingServiceBusQueueEndpointPropertiesResponse']] = None,
service_bus_topics: Optional[Sequence['outputs.RoutingServiceBusTopicEndpointPropertiesResponse']] = None,
storage_containers: Optional[Sequence['outputs.RoutingStorageContainerPropertiesResponse']] = None):
"""
The properties related to the custom endpoints to which your IoT hub routes messages based on the routing rules. A maximum of 10 custom endpoints are allowed across all endpoint types for paid hubs and only 1 custom endpoint is allowed across all endpoint types for free hubs.
:param Sequence['RoutingEventHubPropertiesResponse'] event_hubs: The list of Event Hubs endpoints that IoT hub routes messages to, based on the routing rules. This list does not include the built-in Event Hubs endpoint.
:param Sequence['RoutingServiceBusQueueEndpointPropertiesResponse'] service_bus_queues: The list of Service Bus queue endpoints that IoT hub routes the messages to, based on the routing rules.
:param Sequence['RoutingServiceBusTopicEndpointPropertiesResponse'] service_bus_topics: The list of Service Bus topic endpoints that the IoT hub routes the messages to, based on the routing rules.
:param Sequence['RoutingStorageContainerPropertiesResponse'] storage_containers: The list of storage container endpoints that IoT hub routes messages to, based on the routing rules.
"""
if event_hubs is not None:
pulumi.set(__self__, "event_hubs", event_hubs)
if service_bus_queues is not None:
pulumi.set(__self__, "service_bus_queues", service_bus_queues)
if service_bus_topics is not None:
pulumi.set(__self__, "service_bus_topics", service_bus_topics)
if storage_containers is not None:
pulumi.set(__self__, "storage_containers", storage_containers)
@property
@pulumi.getter(name="eventHubs")
def event_hubs(self) -> Optional[Sequence['outputs.RoutingEventHubPropertiesResponse']]:
"""
The list of Event Hubs endpoints that IoT hub routes messages to, based on the routing rules. This list does not include the built-in Event Hubs endpoint.
"""
return pulumi.get(self, "event_hubs")
@property
@pulumi.getter(name="serviceBusQueues")
def service_bus_queues(self) -> Optional[Sequence['outputs.RoutingServiceBusQueueEndpointPropertiesResponse']]:
"""
The list of Service Bus queue endpoints that IoT hub routes the messages to, based on the routing rules.
"""
return pulumi.get(self, "service_bus_queues")
@property
@pulumi.getter(name="serviceBusTopics")
def service_bus_topics(self) -> Optional[Sequence['outputs.RoutingServiceBusTopicEndpointPropertiesResponse']]:
"""
The list of Service Bus topic endpoints that the IoT hub routes the messages to, based on the routing rules.
"""
return pulumi.get(self, "service_bus_topics")
@property
@pulumi.getter(name="storageContainers")
def storage_containers(self) -> Optional[Sequence['outputs.RoutingStorageContainerPropertiesResponse']]:
"""
The list of storage container endpoints that IoT hub routes messages to, based on the routing rules.
"""
return pulumi.get(self, "storage_containers")
@pulumi.output_type
class RoutingEventHubPropertiesResponse(dict):
"""
The properties related to an event hub endpoint.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "authenticationType":
suggest = "authentication_type"
elif key == "connectionString":
suggest = "connection_string"
elif key == "endpointUri":
suggest = "endpoint_uri"
elif key == "entityPath":
suggest = "entity_path"
elif key == "resourceGroup":
suggest = "resource_group"
elif key == "subscriptionId":
suggest = "subscription_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in RoutingEventHubPropertiesResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
RoutingEventHubPropertiesResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
RoutingEventHubPropertiesResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
name: str,
authentication_type: Optional[str] = None,
connection_string: Optional[str] = None,
endpoint_uri: Optional[str] = None,
entity_path: Optional[str] = None,
id: Optional[str] = None,
identity: Optional['outputs.ManagedIdentityResponse'] = None,
resource_group: Optional[str] = None,
subscription_id: Optional[str] = None):
"""
The properties related to an event hub endpoint.
:param str name: The name that identifies this endpoint. The name can only include alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64 characters. The following names are reserved: events, fileNotifications, $default. Endpoint names must be unique across endpoint types.
:param str authentication_type: Method used to authenticate against the event hub endpoint
:param str connection_string: The connection string of the event hub endpoint.
:param str endpoint_uri: The url of the event hub endpoint. It must include the protocol sb://
:param str entity_path: Event hub name on the event hub namespace
:param str id: Id of the event hub endpoint
:param 'ManagedIdentityResponse' identity: Managed identity properties of routing event hub endpoint.
:param str resource_group: The name of the resource group of the event hub endpoint.
:param str subscription_id: The subscription identifier of the event hub endpoint.
"""
pulumi.set(__self__, "name", name)
if authentication_type is not None:
pulumi.set(__self__, "authentication_type", authentication_type)
if connection_string is not None:
pulumi.set(__self__, "connection_string", connection_string)
if endpoint_uri is not None:
pulumi.set(__self__, "endpoint_uri", endpoint_uri)
if entity_path is not None:
pulumi.set(__self__, "entity_path", entity_path)
if id is not None:
pulumi.set(__self__, "id", id)
if identity is not None:
pulumi.set(__self__, "identity", identity)
if resource_group is not None:
pulumi.set(__self__, "resource_group", resource_group)
if subscription_id is not None:
pulumi.set(__self__, "subscription_id", subscription_id)
@property
@pulumi.getter
def name(self) -> str:
"""
The name that identifies this endpoint. The name can only include alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64 characters. The following names are reserved: events, fileNotifications, $default. Endpoint names must be unique across endpoint types.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="authenticationType")
def authentication_type(self) -> Optional[str]:
"""
Method used to authenticate against the event hub endpoint
"""
return pulumi.get(self, "authentication_type")
@property
@pulumi.getter(name="connectionString")
def connection_string(self) -> Optional[str]:
"""
The connection string of the event hub endpoint.
"""
return pulumi.get(self, "connection_string")
@property
@pulumi.getter(name="endpointUri")
def endpoint_uri(self) -> Optional[str]:
"""
The url of the event hub endpoint. It must include the protocol sb://
"""
return pulumi.get(self, "endpoint_uri")
@property
@pulumi.getter(name="entityPath")
def entity_path(self) -> Optional[str]:
"""
Event hub name on the event hub namespace
"""
return pulumi.get(self, "entity_path")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Id of the event hub endpoint
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.ManagedIdentityResponse']:
"""
Managed identity properties of routing event hub endpoint.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter(name="resourceGroup")
def resource_group(self) -> Optional[str]:
"""
The name of the resource group of the event hub endpoint.
"""
return pulumi.get(self, "resource_group")
@property
@pulumi.getter(name="subscriptionId")
def subscription_id(self) -> Optional[str]:
"""
The subscription identifier of the event hub endpoint.
"""
return pulumi.get(self, "subscription_id")
@pulumi.output_type
class RoutingPropertiesResponse(dict):
"""
The routing related properties of the IoT hub. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "fallbackRoute":
suggest = "fallback_route"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in RoutingPropertiesResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
RoutingPropertiesResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
RoutingPropertiesResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
endpoints: Optional['outputs.RoutingEndpointsResponse'] = None,
enrichments: Optional[Sequence['outputs.EnrichmentPropertiesResponse']] = None,
fallback_route: Optional['outputs.FallbackRoutePropertiesResponse'] = None,
routes: Optional[Sequence['outputs.RoutePropertiesResponse']] = None):
"""
The routing related properties of the IoT hub. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging
:param 'RoutingEndpointsResponse' endpoints: The properties related to the custom endpoints to which your IoT hub routes messages based on the routing rules. A maximum of 10 custom endpoints are allowed across all endpoint types for paid hubs and only 1 custom endpoint is allowed across all endpoint types for free hubs.
:param Sequence['EnrichmentPropertiesResponse'] enrichments: The list of user-provided enrichments that the IoT hub applies to messages to be delivered to built-in and custom endpoints. See: https://aka.ms/telemetryoneventgrid
:param 'FallbackRoutePropertiesResponse' fallback_route: The properties of the route that is used as a fall-back route when none of the conditions specified in the 'routes' section are met. This is an optional parameter. When this property is not set, the messages which do not meet any of the conditions specified in the 'routes' section get routed to the built-in eventhub endpoint.
:param Sequence['RoutePropertiesResponse'] routes: The list of user-provided routing rules that the IoT hub uses to route messages to built-in and custom endpoints. A maximum of 100 routing rules are allowed for paid hubs and a maximum of 5 routing rules are allowed for free hubs.
"""
if endpoints is not None:
pulumi.set(__self__, "endpoints", endpoints)
if enrichments is not None:
pulumi.set(__self__, "enrichments", enrichments)
if fallback_route is not None:
pulumi.set(__self__, "fallback_route", fallback_route)
if routes is not None:
pulumi.set(__self__, "routes", routes)
@property
@pulumi.getter
def endpoints(self) -> Optional['outputs.RoutingEndpointsResponse']:
"""
The properties related to the custom endpoints to which your IoT hub routes messages based on the routing rules. A maximum of 10 custom endpoints are allowed across all endpoint types for paid hubs and only 1 custom endpoint is allowed across all endpoint types for free hubs.
"""
return pulumi.get(self, "endpoints")
@property
@pulumi.getter
def enrichments(self) -> Optional[Sequence['outputs.EnrichmentPropertiesResponse']]:
"""
The list of user-provided enrichments that the IoT hub applies to messages to be delivered to built-in and custom endpoints. See: https://aka.ms/telemetryoneventgrid
"""
return pulumi.get(self, "enrichments")
@property
@pulumi.getter(name="fallbackRoute")
def fallback_route(self) -> Optional['outputs.FallbackRoutePropertiesResponse']:
"""
The properties of the route that is used as a fall-back route when none of the conditions specified in the 'routes' section are met. This is an optional parameter. When this property is not set, the messages which do not meet any of the conditions specified in the 'routes' section get routed to the built-in eventhub endpoint.
"""
return pulumi.get(self, "fallback_route")
@property
@pulumi.getter
def routes(self) -> Optional[Sequence['outputs.RoutePropertiesResponse']]:
"""
The list of user-provided routing rules that the IoT hub uses to route messages to built-in and custom endpoints. A maximum of 100 routing rules are allowed for paid hubs and a maximum of 5 routing rules are allowed for free hubs.
"""
return pulumi.get(self, "routes")
@pulumi.output_type
class RoutingServiceBusQueueEndpointPropertiesResponse(dict):
"""
The properties related to service bus queue endpoint types.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "authenticationType":
suggest = "authentication_type"
elif key == "connectionString":
suggest = "connection_string"
elif key == "endpointUri":
suggest = "endpoint_uri"
elif key == "entityPath":
suggest = "entity_path"
elif key == "resourceGroup":
suggest = "resource_group"
elif key == "subscriptionId":
suggest = "subscription_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in RoutingServiceBusQueueEndpointPropertiesResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
RoutingServiceBusQueueEndpointPropertiesResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
RoutingServiceBusQueueEndpointPropertiesResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
name: str,
authentication_type: Optional[str] = None,
connection_string: Optional[str] = None,
endpoint_uri: Optional[str] = None,
entity_path: Optional[str] = None,
id: Optional[str] = None,
identity: Optional['outputs.ManagedIdentityResponse'] = None,
resource_group: Optional[str] = None,
subscription_id: Optional[str] = None):
"""
The properties related to service bus queue endpoint types.
:param str name: The name that identifies this endpoint. The name can only include alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64 characters. The following names are reserved: events, fileNotifications, $default. Endpoint names must be unique across endpoint types. The name need not be the same as the actual queue name.
:param str authentication_type: Method used to authenticate against the service bus queue endpoint
:param str connection_string: The connection string of the service bus queue endpoint.
:param str endpoint_uri: The url of the service bus queue endpoint. It must include the protocol sb://
:param str entity_path: Queue name on the service bus namespace
:param str id: Id of the service bus queue endpoint
:param 'ManagedIdentityResponse' identity: Managed identity properties of routing service bus queue endpoint.
:param str resource_group: The name of the resource group of the service bus queue endpoint.
:param str subscription_id: The subscription identifier of the service bus queue endpoint.
"""
pulumi.set(__self__, "name", name)
if authentication_type is not None:
pulumi.set(__self__, "authentication_type", authentication_type)
if connection_string is not None:
pulumi.set(__self__, "connection_string", connection_string)
if endpoint_uri is not None:
pulumi.set(__self__, "endpoint_uri", endpoint_uri)
if entity_path is not None:
pulumi.set(__self__, "entity_path", entity_path)
if id is not None:
pulumi.set(__self__, "id", id)
if identity is not None:
pulumi.set(__self__, "identity", identity)
if resource_group is not None:
pulumi.set(__self__, "resource_group", resource_group)
if subscription_id is not None:
pulumi.set(__self__, "subscription_id", subscription_id)
@property
@pulumi.getter
def name(self) -> str:
"""
The name that identifies this endpoint. The name can only include alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64 characters. The following names are reserved: events, fileNotifications, $default. Endpoint names must be unique across endpoint types. The name need not be the same as the actual queue name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="authenticationType")
def authentication_type(self) -> Optional[str]:
"""
Method used to authenticate against the service bus queue endpoint
"""
return pulumi.get(self, "authentication_type")
@property
@pulumi.getter(name="connectionString")
def connection_string(self) -> Optional[str]:
"""
The connection string of the service bus queue endpoint.
"""
return pulumi.get(self, "connection_string")
@property
@pulumi.getter(name="endpointUri")
def endpoint_uri(self) -> Optional[str]:
"""
The url of the service bus queue endpoint. It must include the protocol sb://
"""
return pulumi.get(self, "endpoint_uri")
@property
@pulumi.getter(name="entityPath")
def entity_path(self) -> Optional[str]:
"""
Queue name on the service bus namespace
"""
return pulumi.get(self, "entity_path")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Id of the service bus queue endpoint
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.ManagedIdentityResponse']:
"""
Managed identity properties of routing service bus queue endpoint.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter(name="resourceGroup")
def resource_group(self) -> Optional[str]:
"""
The name of the resource group of the service bus queue endpoint.
"""
return pulumi.get(self, "resource_group")
@property
@pulumi.getter(name="subscriptionId")
def subscription_id(self) -> Optional[str]:
"""
The subscription identifier of the service bus queue endpoint.
"""
return pulumi.get(self, "subscription_id")
@pulumi.output_type
class RoutingServiceBusTopicEndpointPropertiesResponse(dict):
"""
The properties related to service bus topic endpoint types.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "authenticationType":
suggest = "authentication_type"
elif key == "connectionString":
suggest = "connection_string"
elif key == "endpointUri":
suggest = "endpoint_uri"
elif key == "entityPath":
suggest = "entity_path"
elif key == "resourceGroup":
suggest = "resource_group"
elif key == "subscriptionId":
suggest = "subscription_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in RoutingServiceBusTopicEndpointPropertiesResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
RoutingServiceBusTopicEndpointPropertiesResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
RoutingServiceBusTopicEndpointPropertiesResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
name: str,
authentication_type: Optional[str] = None,
connection_string: Optional[str] = None,
endpoint_uri: Optional[str] = None,
entity_path: Optional[str] = None,
id: Optional[str] = None,
identity: Optional['outputs.ManagedIdentityResponse'] = None,
resource_group: Optional[str] = None,
subscription_id: Optional[str] = None):
"""
The properties related to service bus topic endpoint types.
:param str name: The name that identifies this endpoint. The name can only include alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64 characters. The following names are reserved: events, fileNotifications, $default. Endpoint names must be unique across endpoint types. The name need not be the same as the actual topic name.
:param str authentication_type: Method used to authenticate against the service bus topic endpoint
:param str connection_string: The connection string of the service bus topic endpoint.
:param str endpoint_uri: The url of the service bus topic endpoint. It must include the protocol sb://
:param str entity_path: Queue name on the service bus topic
:param str id: Id of the service bus topic endpoint
:param 'ManagedIdentityResponse' identity: Managed identity properties of routing service bus topic endpoint.
:param str resource_group: The name of the resource group of the service bus topic endpoint.
:param str subscription_id: The subscription identifier of the service bus topic endpoint.
"""
pulumi.set(__self__, "name", name)
if authentication_type is not None:
pulumi.set(__self__, "authentication_type", authentication_type)
if connection_string is not None:
pulumi.set(__self__, "connection_string", connection_string)
if endpoint_uri is not None:
pulumi.set(__self__, "endpoint_uri", endpoint_uri)
if entity_path is not None:
pulumi.set(__self__, "entity_path", entity_path)
if id is not None:
pulumi.set(__self__, "id", id)
if identity is not None:
pulumi.set(__self__, "identity", identity)
if resource_group is not None:
pulumi.set(__self__, "resource_group", resource_group)
if subscription_id is not None:
pulumi.set(__self__, "subscription_id", subscription_id)
@property
@pulumi.getter
def name(self) -> str:
"""
The name that identifies this endpoint. The name can only include alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64 characters. The following names are reserved: events, fileNotifications, $default. Endpoint names must be unique across endpoint types. The name need not be the same as the actual topic name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="authenticationType")
def authentication_type(self) -> Optional[str]:
"""
Method used to authenticate against the service bus topic endpoint
"""
return pulumi.get(self, "authentication_type")
@property
@pulumi.getter(name="connectionString")
def connection_string(self) -> Optional[str]:
"""
The connection string of the service bus topic endpoint.
"""
return pulumi.get(self, "connection_string")
@property
@pulumi.getter(name="endpointUri")
def endpoint_uri(self) -> Optional[str]:
"""
The url of the service bus topic endpoint. It must include the protocol sb://
"""
return pulumi.get(self, "endpoint_uri")
@property
@pulumi.getter(name="entityPath")
def entity_path(self) -> Optional[str]:
"""
Queue name on the service bus topic
"""
return pulumi.get(self, "entity_path")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Id of the service bus topic endpoint
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.ManagedIdentityResponse']:
"""
Managed identity properties of routing service bus topic endpoint.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter(name="resourceGroup")
def resource_group(self) -> Optional[str]:
"""
The name of the resource group of the service bus topic endpoint.
"""
return pulumi.get(self, "resource_group")
@property
@pulumi.getter(name="subscriptionId")
def subscription_id(self) -> Optional[str]:
"""
The subscription identifier of the service bus topic endpoint.
"""
return pulumi.get(self, "subscription_id")
@pulumi.output_type
class RoutingStorageContainerPropertiesResponse(dict):
"""
The properties related to a storage container endpoint.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "containerName":
suggest = "container_name"
elif key == "authenticationType":
suggest = "authentication_type"
elif key == "batchFrequencyInSeconds":
suggest = "batch_frequency_in_seconds"
elif key == "connectionString":
suggest = "connection_string"
elif key == "endpointUri":
suggest = "endpoint_uri"
elif key == "fileNameFormat":
suggest = "file_name_format"
elif key == "maxChunkSizeInBytes":
suggest = "max_chunk_size_in_bytes"
elif key == "resourceGroup":
suggest = "resource_group"
elif key == "subscriptionId":
suggest = "subscription_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in RoutingStorageContainerPropertiesResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
RoutingStorageContainerPropertiesResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
RoutingStorageContainerPropertiesResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
container_name: str,
name: str,
authentication_type: Optional[str] = None,
batch_frequency_in_seconds: Optional[int] = None,
connection_string: Optional[str] = None,
encoding: Optional[str] = None,
endpoint_uri: Optional[str] = None,
file_name_format: Optional[str] = None,
id: Optional[str] = None,
identity: Optional['outputs.ManagedIdentityResponse'] = None,
max_chunk_size_in_bytes: Optional[int] = None,
resource_group: Optional[str] = None,
subscription_id: Optional[str] = None):
"""
The properties related to a storage container endpoint.
:param str container_name: The name of storage container in the storage account.
:param str name: The name that identifies this endpoint. The name can only include alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64 characters. The following names are reserved: events, fileNotifications, $default. Endpoint names must be unique across endpoint types.
:param str authentication_type: Method used to authenticate against the storage endpoint
:param int batch_frequency_in_seconds: Time interval at which blobs are written to storage. Value should be between 60 and 720 seconds. Default value is 300 seconds.
:param str connection_string: The connection string of the storage account.
:param str encoding: Encoding that is used to serialize messages to blobs. Supported values are 'avro', 'avrodeflate', and 'JSON'. Default value is 'avro'.
:param str endpoint_uri: The url of the storage endpoint. It must include the protocol https://
:param str file_name_format: File name format for the blob. Default format is {iothub}/{partition}/{YYYY}/{MM}/{DD}/{HH}/{mm}. All parameters are mandatory but can be reordered.
:param str id: Id of the storage container endpoint
:param 'ManagedIdentityResponse' identity: Managed identity properties of routing storage endpoint.
:param int max_chunk_size_in_bytes: Maximum number of bytes for each blob written to storage. Value should be between 10485760(10MB) and 524288000(500MB). Default value is 314572800(300MB).
:param str resource_group: The name of the resource group of the storage account.
:param str subscription_id: The subscription identifier of the storage account.
"""
pulumi.set(__self__, "container_name", container_name)
pulumi.set(__self__, "name", name)
if authentication_type is not None:
pulumi.set(__self__, "authentication_type", authentication_type)
if batch_frequency_in_seconds is not None:
pulumi.set(__self__, "batch_frequency_in_seconds", batch_frequency_in_seconds)
if connection_string is not None:
pulumi.set(__self__, "connection_string", connection_string)
if encoding is not None:
pulumi.set(__self__, "encoding", encoding)
if endpoint_uri is not None:
pulumi.set(__self__, "endpoint_uri", endpoint_uri)
if file_name_format is not None:
pulumi.set(__self__, "file_name_format", file_name_format)
if id is not None:
pulumi.set(__self__, "id", id)
if identity is not None:
pulumi.set(__self__, "identity", identity)
if max_chunk_size_in_bytes is not None:
pulumi.set(__self__, "max_chunk_size_in_bytes", max_chunk_size_in_bytes)
if resource_group is not None:
pulumi.set(__self__, "resource_group", resource_group)
if subscription_id is not None:
pulumi.set(__self__, "subscription_id", subscription_id)
@property
@pulumi.getter(name="containerName")
def container_name(self) -> str:
"""
The name of storage container in the storage account.
"""
return pulumi.get(self, "container_name")
@property
@pulumi.getter
def name(self) -> str:
"""
The name that identifies this endpoint. The name can only include alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64 characters. The following names are reserved: events, fileNotifications, $default. Endpoint names must be unique across endpoint types.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="authenticationType")
def authentication_type(self) -> Optional[str]:
"""
Method used to authenticate against the storage endpoint
"""
return pulumi.get(self, "authentication_type")
@property
@pulumi.getter(name="batchFrequencyInSeconds")
def batch_frequency_in_seconds(self) -> Optional[int]:
"""
Time interval at which blobs are written to storage. Value should be between 60 and 720 seconds. Default value is 300 seconds.
"""
return pulumi.get(self, "batch_frequency_in_seconds")
@property
@pulumi.getter(name="connectionString")
def connection_string(self) -> Optional[str]:
"""
The connection string of the storage account.
"""
return pulumi.get(self, "connection_string")
@property
@pulumi.getter
def encoding(self) -> Optional[str]:
"""
Encoding that is used to serialize messages to blobs. Supported values are 'avro', 'avrodeflate', and 'JSON'. Default value is 'avro'.
"""
return pulumi.get(self, "encoding")
@property
@pulumi.getter(name="endpointUri")
def endpoint_uri(self) -> Optional[str]:
"""
The url of the storage endpoint. It must include the protocol https://
"""
return pulumi.get(self, "endpoint_uri")
@property
@pulumi.getter(name="fileNameFormat")
def file_name_format(self) -> Optional[str]:
"""
File name format for the blob. Default format is {iothub}/{partition}/{YYYY}/{MM}/{DD}/{HH}/{mm}. All parameters are mandatory but can be reordered.
"""
return pulumi.get(self, "file_name_format")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Id of the storage container endpoint
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.ManagedIdentityResponse']:
"""
Managed identity properties of routing storage endpoint.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter(name="maxChunkSizeInBytes")
def max_chunk_size_in_bytes(self) -> Optional[int]:
"""
Maximum number of bytes for each blob written to storage. Value should be between 10485760(10MB) and 524288000(500MB). Default value is 314572800(300MB).
"""
return pulumi.get(self, "max_chunk_size_in_bytes")
@property
@pulumi.getter(name="resourceGroup")
def resource_group(self) -> Optional[str]:
"""
The name of the resource group of the storage account.
"""
return pulumi.get(self, "resource_group")
@property
@pulumi.getter(name="subscriptionId")
def subscription_id(self) -> Optional[str]:
"""
The subscription identifier of the storage account.
"""
return pulumi.get(self, "subscription_id")
@pulumi.output_type
class SharedAccessSignatureAuthorizationRuleResponse(dict):
"""
The properties of an IoT hub shared access policy.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "keyName":
suggest = "key_name"
elif key == "primaryKey":
suggest = "primary_key"
elif key == "secondaryKey":
suggest = "secondary_key"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in SharedAccessSignatureAuthorizationRuleResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
SharedAccessSignatureAuthorizationRuleResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
SharedAccessSignatureAuthorizationRuleResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
key_name: str,
rights: str,
primary_key: Optional[str] = None,
secondary_key: Optional[str] = None):
"""
The properties of an IoT hub shared access policy.
:param str key_name: The name of the shared access policy.
:param str rights: The permissions assigned to the shared access policy.
:param str primary_key: The primary key.
:param str secondary_key: The secondary key.
"""
pulumi.set(__self__, "key_name", key_name)
pulumi.set(__self__, "rights", rights)
if primary_key is not None:
pulumi.set(__self__, "primary_key", primary_key)
if secondary_key is not None:
pulumi.set(__self__, "secondary_key", secondary_key)
@property
@pulumi.getter(name="keyName")
def key_name(self) -> str:
"""
The name of the shared access policy.
"""
return pulumi.get(self, "key_name")
@property
@pulumi.getter
def rights(self) -> str:
"""
The permissions assigned to the shared access policy.
"""
return pulumi.get(self, "rights")
@property
@pulumi.getter(name="primaryKey")
def primary_key(self) -> Optional[str]:
"""
The primary key.
"""
return pulumi.get(self, "primary_key")
@property
@pulumi.getter(name="secondaryKey")
def secondary_key(self) -> Optional[str]:
"""
The secondary key.
"""
return pulumi.get(self, "secondary_key")
@pulumi.output_type
class StorageEndpointPropertiesResponse(dict):
"""
The properties of the Azure Storage endpoint for file upload.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "connectionString":
suggest = "connection_string"
elif key == "containerName":
suggest = "container_name"
elif key == "authenticationType":
suggest = "authentication_type"
elif key == "sasTtlAsIso8601":
suggest = "sas_ttl_as_iso8601"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in StorageEndpointPropertiesResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
StorageEndpointPropertiesResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
StorageEndpointPropertiesResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
connection_string: str,
container_name: str,
authentication_type: Optional[str] = None,
identity: Optional['outputs.ManagedIdentityResponse'] = None,
sas_ttl_as_iso8601: Optional[str] = None):
"""
The properties of the Azure Storage endpoint for file upload.
:param str connection_string: The connection string for the Azure Storage account to which files are uploaded.
:param str container_name: The name of the root container where you upload files. The container need not exist but should be creatable using the connectionString specified.
:param str authentication_type: Specifies authentication type being used for connecting to the storage account.
:param 'ManagedIdentityResponse' identity: Managed identity properties of storage endpoint for file upload.
:param str sas_ttl_as_iso8601: The period of time for which the SAS URI generated by IoT Hub for file upload is valid. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload#file-upload-notification-configuration-options.
"""
pulumi.set(__self__, "connection_string", connection_string)
pulumi.set(__self__, "container_name", container_name)
if authentication_type is not None:
pulumi.set(__self__, "authentication_type", authentication_type)
if identity is not None:
pulumi.set(__self__, "identity", identity)
if sas_ttl_as_iso8601 is not None:
pulumi.set(__self__, "sas_ttl_as_iso8601", sas_ttl_as_iso8601)
@property
@pulumi.getter(name="connectionString")
def connection_string(self) -> str:
"""
The connection string for the Azure Storage account to which files are uploaded.
"""
return pulumi.get(self, "connection_string")
@property
@pulumi.getter(name="containerName")
def container_name(self) -> str:
"""
The name of the root container where you upload files. The container need not exist but should be creatable using the connectionString specified.
"""
return pulumi.get(self, "container_name")
@property
@pulumi.getter(name="authenticationType")
def authentication_type(self) -> Optional[str]:
"""
Specifies authentication type being used for connecting to the storage account.
"""
return pulumi.get(self, "authentication_type")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.ManagedIdentityResponse']:
"""
Managed identity properties of storage endpoint for file upload.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter(name="sasTtlAsIso8601")
def sas_ttl_as_iso8601(self) -> Optional[str]:
"""
The period of time for which the SAS URI generated by IoT Hub for file upload is valid. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload#file-upload-notification-configuration-options.
"""
return pulumi.get(self, "sas_ttl_as_iso8601")
avg_line_length: 41.769261 | max_line_length: 452 | alphanum_fraction: 0.656534

hexsha: 8ba0aabb7996ee351c685bcd22750de541da6dac | size: 2218 | ext: py | lang: Python
max_stars_repo_path: chemistry.py | max_stars_repo_name: clede/chemistry | max_stars_repo_head_hexsha: 5fb4141054734f4372a196dae6ef77d27e85ca3e | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: chemistry.py | max_issues_repo_name: clede/chemistry | max_issues_repo_head_hexsha: 5fb4141054734f4372a196dae6ef77d27e85ca3e | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: chemistry.py | max_forks_repo_name: clede/chemistry | max_forks_repo_head_hexsha: 5fb4141054734f4372a196dae6ef77d27e85ca3e | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
import initialization
class Unit(object):
"""A unit of measurement for a substance or supplement. e.g. capsule,
tablet, drop, or milligram, etc."""
# In the future we will need to track relationships between different units.
# e.g. a 'gram' consists of 1000 'milligrams'.
def __init__(self, singular, plural=None, abbrev=None):
self.singular = singular
if plural:
self.plural = plural
# If plural is not specified, just tack an 's' on to the singular.
else:
self.plural = singular + 's'
if abbrev:
self.abbrev = abbrev
else:
self.abbrev = self.plural
def __str__(self):
return self.abbrev
def __repr__(self):
return self.abbrev
class Amount(object):
"""An amount consists of a numeric qty and a unit.
This could be used to represent a dose (e.g. 2 tablets),
or a quantity of a substance in a dose (e.g. a pill contains 50 mg)"""
def __init__(self, qty, unit):
self.qty = float(qty)
assert unit.__class__.__name__ == 'Unit', 'Specified Unit is invalid.'
self.unit = unit
def __str__(self):
return str(self.qty) + ' ' + self.unit.abbrev
def __repr__(self):
return str(self.qty) + ' ' + self.unit.abbrev
class Substance(object):
"""A specific substance. e.g. Vitamin D, or Biotin."""
    def __init__(self, name):
        # NOTE: the original initializer was left unfinished; storing a plain
        # name is an assumed minimal implementation so the class is usable.
        self.name = name
class Supplement(object):
"""A specific supplement product. e.g. """
# To start, this will be generic. We'll track a single object for 'Vitamin
# D'. However, ultimately, we'll want to track different brands, etc.
def __init__(self, name, brand, units, amt_per_unit=None):
self.name = name
self.brand = brand
        assert units.__class__.__name__ == 'Unit', str(units) + ' is not a Unit object.'
self.units = units
self.description = ''
if amt_per_unit:
assert amt_per_unit.__class__.__name__ == 'Amount', """amt_per_unit
is not valid."""
self.amt_per_unit = amt_per_unit
else:
self.amt_per_unit = None
def __str__(self):
return self.name + ' ' + str(self.amt_per_unit)
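# --- Illustrative usage sketch (assumed product/brand names) ---
# How the pieces above are intended to fit together: a Unit describes the
# dosage form, an Amount pairs a quantity with that unit, and a Supplement
# ties a product name and brand to its per-unit amount.
def _example_supplement():
    mg = Unit('milligram', 'milligrams', 'mg')
    capsule = Unit('capsule')
    vitamin_d = Supplement('Vitamin D3', 'Acme Vitamins', capsule,
                           amt_per_unit=Amount(50, mg))
    return vitamin_d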
avg_line_length: 30.805556 | max_line_length: 81 | alphanum_fraction: 0.613165

hexsha: 4896d85f9e37ebf1025231690559aa4f8b50303a | size: 8627 | ext: py | lang: Python
max_stars_repo_path: pyflux/gas/tests/gas_llev_tests_poisson.py | max_stars_repo_name: ThomasHoppe/pyflux | max_stars_repo_head_hexsha: 297f2afc2095acd97c12e827dd500e8ea5da0c0f | max_stars_repo_licenses: ["BSD-3-Clause"] | max_stars_count: 2091 | max_stars_repo_stars_event_min_datetime: 2016-04-01T02:52:10.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-29T11:38:15.000Z
max_issues_repo_path: pyflux/gas/tests/gas_llev_tests_poisson.py | max_issues_repo_name: EricSchles/pyflux | max_issues_repo_head_hexsha: 297f2afc2095acd97c12e827dd500e8ea5da0c0f | max_issues_repo_licenses: ["BSD-3-Clause"] | max_issues_count: 160 | max_issues_repo_issues_event_min_datetime: 2016-04-26T14:52:18.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-15T02:09:07.000Z
max_forks_repo_path: pyflux/gas/tests/gas_llev_tests_poisson.py | max_forks_repo_name: EricSchles/pyflux | max_forks_repo_head_hexsha: 297f2afc2095acd97c12e827dd500e8ea5da0c0f | max_forks_repo_licenses: ["BSD-3-Clause"] | max_forks_count: 264 | max_forks_repo_forks_event_min_datetime: 2016-05-02T14:03:31.000Z | max_forks_repo_forks_event_max_datetime: 2022-03-29T07:48:20.000Z
import numpy as np
import pyflux as pf
import pandas as pd
countdata = pd.read_csv('http://www.pyflux.com/notebooks/leicester_goals_scored.csv')
countdata.columns= ["Time","Goals","Season2"]
data = countdata['Goals'].values
def test_poisson_couple_terms():
"""
Tests latent variable list length is correct, and that the estimated
latent variables are not nan
"""
model = pf.GASLLEV(data=data, family=pf.Poisson())
x = model.fit()
assert(len(model.latent_variables.z_list) == 1)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_poisson_bbvi():
"""
Tests an GAS model estimated with BBVI and that the length of the latent variable
list is correct, and that the estimated latent variables are not nan
"""
model = pf.GASLLEV(data=data, family=pf.Poisson())
x = model.fit('BBVI',iterations=100)
assert(len(model.latent_variables.z_list) == 1)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_poisson_bbvi_mini_batch():
"""
    Tests a GAS model estimated with mini-batch BBVI and that the length of the latent variable
    list is correct, and that the estimated latent variables are not nan
"""
model = pf.GASLLEV(data=data, family=pf.Poisson())
x = model.fit('BBVI',iterations=300, mini_batch=32, map_start=False)
assert(len(model.latent_variables.z_list) == 1)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_poisson_bbvi_elbo():
"""
Tests that the ELBO increases
"""
model = pf.GASLLEV(data=data, family=pf.Poisson())
x = model.fit('BBVI',iterations=300, record_elbo=True, map_start=False)
assert(x.elbo_records[-1]>x.elbo_records[0])
def test_poisson_bbvi_mini_batch_elbo():
"""
Tests that the ELBO increases
"""
model = pf.GASLLEV(data=data, family=pf.Poisson())
x = model.fit('BBVI',iterations=200, mini_batch=32, record_elbo=True, map_start=False)
assert(x.elbo_records[-1]>x.elbo_records[0])
def test_poisson_mh():
"""
    Tests a GAS model estimated with Metropolis-Hastings and that the length of the
    latent variable list is correct, and that the estimated latent variables are not nan
"""
model = pf.GASLLEV(data=countdata, family=pf.Poisson())
x = model.fit('PML')
x = model.fit('M-H', nsims=300)
assert(len(model.latent_variables.z_list) == 1)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_poisson_laplace():
"""
    Tests a GAS model estimated with a Laplace approximation and that the length of the
    latent variable list is correct, and that the estimated latent variables are not nan
"""
model = pf.GASLLEV(data=countdata, family=pf.Poisson())
x = model.fit('Laplace')
assert(len(model.latent_variables.z_list) == 1)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_poisson_pml():
"""
    Tests a GAS model estimated with PML and that the length of the
    latent variable list is correct, and that the estimated latent variables are not nan
"""
model = pf.GASLLEV(data=countdata, family=pf.Poisson())
x = model.fit('PML')
assert(len(model.latent_variables.z_list) == 1)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_poisson_predict_length():
"""
Tests that the prediction dataframe length is equal to the number of steps h
"""
model = pf.GASLLEV(data=countdata, family=pf.Poisson())
x = model.fit()
x.summary()
assert(model.predict(h=5).shape[0] == 5)
def test_poisson_predict_is_length():
"""
Tests that the prediction IS dataframe length is equal to the number of steps h
"""
model = pf.GASLLEV(data=countdata, family=pf.Poisson())
x = model.fit()
assert(model.predict_is(h=5).shape[0] == 5)
def test_poisson_predict_nans():
"""
Tests that the predictions are not nans
"""
model = pf.GASLLEV(data=countdata, family=pf.Poisson())
x = model.fit()
x.summary()
assert(len(model.predict(h=5).values[np.isnan(model.predict(h=5).values)]) == 0)
def test_poisson_predict_is_nans():
"""
Tests that the in-sample predictions are not nans
"""
model = pf.GASLLEV(data=countdata, family=pf.Poisson())
x = model.fit()
x.summary()
assert(len(model.predict_is(h=5).values[np.isnan(model.predict_is(h=5).values)]) == 0)
def test_poisson_predict_intervals():
"""
Tests prediction intervals are ordered correctly
"""
model = pf.GASLLEV(data=data, family=pf.Poisson())
x = model.fit()
predictions = model.predict(h=10, intervals=True)
assert(np.all(predictions['99% Prediction Interval'].values >= predictions['95% Prediction Interval'].values))
assert(np.all(predictions['95% Prediction Interval'].values >= predictions['5% Prediction Interval'].values))
assert(np.all(predictions['5% Prediction Interval'].values >= predictions['1% Prediction Interval'].values))
def test_poisson_predict_is_intervals():
"""
Tests prediction intervals are ordered correctly
"""
model = pf.GASLLEV(data=data, family=pf.Poisson())
x = model.fit()
predictions = model.predict_is(h=10, intervals=True)
assert(np.all(predictions['99% Prediction Interval'].values >= predictions['95% Prediction Interval'].values))
assert(np.all(predictions['95% Prediction Interval'].values >= predictions['5% Prediction Interval'].values))
assert(np.all(predictions['5% Prediction Interval'].values >= predictions['1% Prediction Interval'].values))
def test_poisson_predict_intervals_bbvi():
"""
Tests prediction intervals are ordered correctly
"""
model = pf.GASLLEV(data=data, family=pf.Poisson())
x = model.fit('BBVI', iterations=100)
predictions = model.predict(h=10, intervals=True)
assert(np.all(predictions['99% Prediction Interval'].values >= predictions['95% Prediction Interval'].values))
assert(np.all(predictions['95% Prediction Interval'].values >= predictions['5% Prediction Interval'].values))
assert(np.all(predictions['5% Prediction Interval'].values >= predictions['1% Prediction Interval'].values))
def test_poisson_predict_is_intervals_bbvi():
"""
Tests prediction intervals are ordered correctly
"""
model = pf.GASLLEV(data=data, family=pf.Poisson())
x = model.fit('BBVI', iterations=100)
predictions = model.predict_is(h=10, intervals=True)
assert(np.all(predictions['99% Prediction Interval'].values >= predictions['95% Prediction Interval'].values))
assert(np.all(predictions['95% Prediction Interval'].values >= predictions['5% Prediction Interval'].values))
assert(np.all(predictions['5% Prediction Interval'].values >= predictions['1% Prediction Interval'].values))
def test_poisson_predict_intervals_mh():
"""
Tests prediction intervals are ordered correctly
"""
"""
model = pf.GASLLEV(data=data, family=pf.Poisson())
x = model.fit('PML')
x = model.fit('M-H', nsims=400)
predictions = model.predict(h=10, intervals=True)
assert(np.all(predictions['99% Prediction Interval'].values >= predictions['95% Prediction Interval'].values))
assert(np.all(predictions['95% Prediction Interval'].values >= predictions['5% Prediction Interval'].values))
assert(np.all(predictions['5% Prediction Interval'].values >= predictions['1% Prediction Interval'].values))
"""
def test_poisson_predict_is_intervals_mh():
"""
Tests prediction intervals are ordered correctly
"""
"""
model = pf.GASLLEV(data=data, family=pf.Poisson())
x = model.fit('PML')
x = model.fit('M-H', nsims=400)
predictions = model.predict_is(h=2, intervals=True)
assert(np.all(predictions['99% Prediction Interval'].values >= predictions['95% Prediction Interval'].values))
assert(np.all(predictions['95% Prediction Interval'].values >= predictions['5% Prediction Interval'].values))
assert(np.all(predictions['5% Prediction Interval'].values >= predictions['1% Prediction Interval'].values))
"""
def test_poisson_sample_model():
"""
Tests sampling function
"""
model = pf.GASLLEV(data=data, family=pf.Poisson())
x = model.fit('BBVI', iterations=100)
sample = model.sample(nsims=100)
assert(sample.shape[0]==100)
assert(sample.shape[1]==len(data)-1)
def test_poisson_ppc():
"""
Tests PPC value
"""
model = pf.GASLLEV(data=data, family=pf.Poisson())
x = model.fit('BBVI', iterations=100)
p_value = model.ppc()
assert(0.0 <= p_value <= 1.0)
| 39.392694
| 114
| 0.706503
|
287ce8f88b8e9be8c3bfacdde841b638a724ac14
| 1,458
|
py
|
Python
|
www/lib/components/data/network_interfaces_io.py
|
cripplet/ipfire-material-design
|
972dac352a097bbfe556bf4a6cad900c037cca21
|
[
"MIT"
] | null | null | null |
www/lib/components/data/network_interfaces_io.py
|
cripplet/ipfire-material-design
|
972dac352a097bbfe556bf4a6cad900c037cca21
|
[
"MIT"
] | 3
|
2019-07-13T08:03:22.000Z
|
2019-07-13T08:06:03.000Z
|
www/lib/components/data/network_interfaces_io.py
|
cripplet/ipfire-material-design
|
972dac352a097bbfe556bf4a6cad900c037cca21
|
[
"MIT"
] | null | null | null |
from typing import AnyStr
import json
from lib.components import shared
from lib.components.data import shared as shared_data
_NETWORK_DATA_SUBPATH_PATTERN = '/interface/if_octets-'
def get_network_interfaces():
return [
i.rsplit('.', 1)[0] for i in
shared_data.get_logged_members(
'{root}/{subpath_pattern}'.format(
root=shared_data.LOG_ROOT_DIRECTORY,
subpath_pattern=_NETWORK_DATA_SUBPATH_PATTERN)
)
]
class _NetworkInterfaceIOData(shared_data.MonitoringShim):
UNIT = 'b'
def FromEngine(self, interface: AnyStr) -> shared.ConfigType:
query = [
'DEF:rx={root}/{subpath_pattern}{interface}.rrd:rx:AVERAGE'.format(
interface=interface,
root=shared_data.LOG_ROOT_DIRECTORY,
subpath_pattern=_NETWORK_DATA_SUBPATH_PATTERN),
'DEF:tx={root}/{subpath_pattern}{interface}.rrd:tx:AVERAGE'.format(
interface=interface,
root=shared_data.LOG_ROOT_DIRECTORY,
subpath_pattern=_NETWORK_DATA_SUBPATH_PATTERN),
'XPORT:rx:rx',
'XPORT:tx:tx',
]
return super(_NetworkInterfaceIOData, self).FromEngine(query=query)
def get_network_interfaces_io_data(interface):
if interface not in set(get_network_interfaces()):
raise KeyError(
'Cannot find specified interface \'{i}\''.format(
i=interface))
return _NetworkInterfaceIOData().FromEngine(interface=interface)
| 31.021277
| 75
| 0.691358
|
c5d284c36f82b92b2116b8d04acc617d6392aca4
| 8,129
|
py
|
Python
|
examples/adwords/v201802/reporting/parallel_report_download.py
|
stelaxi/googleads-python-lib
|
7830f303a63217e7c7190f9b085bce9c4f677727
|
[
"Apache-2.0"
] | 1
|
2018-06-25T18:44:30.000Z
|
2018-06-25T18:44:30.000Z
|
examples/adwords/v201802/reporting/parallel_report_download.py
|
stelaxi/googleads-python-lib
|
7830f303a63217e7c7190f9b085bce9c4f677727
|
[
"Apache-2.0"
] | null | null | null |
examples/adwords/v201802/reporting/parallel_report_download.py
|
stelaxi/googleads-python-lib
|
7830f303a63217e7c7190f9b085bce9c4f677727
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example downloads an adgroup performance report for all child accounts.
To get report fields, run get_report_fields.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
import logging
import multiprocessing
from Queue import Empty
import time
import googleads
logging.basicConfig(level=logging.INFO)
logging.getLogger('suds.transport').setLevel(logging.DEBUG)
# Timeout between retries in seconds.
BACKOFF_FACTOR = 5
# Maximum number of processes to spawn.
MAX_PROCESSES = multiprocessing.cpu_count()
# Maximum number of retries for 500 errors.
MAX_RETRIES = 5
# Maximum number of items to be sent in a single API response.
PAGE_SIZE = 100
# Directory to download the reports to.
REPORT_DOWNLOAD_DIRECTORY = 'INSERT_REPORT_DOWNLOAD_DIRECTORY'
def main(client, report_download_directory):
# Determine list of customer IDs to retrieve report for.
input_queue = GetCustomerIDs(client)
reports_succeeded = multiprocessing.Queue()
reports_failed = multiprocessing.Queue()
# Create report definition.
report_definition = {
'reportName': 'Custom ADGROUP_PERFORMANCE_REPORT',
'dateRangeType': 'LAST_7_DAYS',
'reportType': 'ADGROUP_PERFORMANCE_REPORT',
'downloadFormat': 'CSV',
'selector': {
'fields': ['CampaignId', 'AdGroupId', 'Impressions', 'Clicks',
'Cost'],
# Predicates are optional.
'predicates': {
'field': 'AdGroupStatus',
'operator': 'IN',
'values': ['ENABLED', 'PAUSED']
}
},
}
queue_size = input_queue.qsize()
num_processes = min(queue_size, MAX_PROCESSES)
print 'Retrieving %d reports with %d processes:' % (queue_size, num_processes)
# Start all the processes.
processes = [ReportWorker(client, report_download_directory,
report_definition, input_queue, reports_succeeded,
reports_failed)
for _ in range(num_processes)]
for process in processes:
process.start()
for process in processes:
process.join()
print 'Finished downloading reports with the following results:'
while True:
try:
success = reports_succeeded.get(timeout=0.01)
except Empty:
break
print '\tReport for CustomerId "%d" succeeded.' % success['customerId']
while True:
try:
failure = reports_failed.get(timeout=0.01)
except Empty:
break
print ('\tReport for CustomerId "%d" failed with error code "%s" and '
'message: %s.' % (failure['customerId'], failure['code'],
failure['message']))
class ReportWorker(multiprocessing.Process):
"""A worker Process used to download reports for a set of customer IDs."""
_FILENAME_TEMPLATE = 'adgroup_%d.csv'
_FILEPATH_TEMPLATE = '%s/%s'
def __init__(self, client, report_download_directory, report_definition,
input_queue, success_queue, failure_queue):
"""Initializes a ReportWorker.
Args:
client: An AdWordsClient instance.
report_download_directory: A string indicating the directory where you
would like to download the reports.
report_definition: A dict containing the report definition that you would
like to run against all customer IDs in the input_queue.
input_queue: A Queue instance containing all of the customer IDs that
the report_definition will be run against.
success_queue: A Queue instance that the details of successful report
downloads will be saved to.
failure_queue: A Queue instance that the details of failed report
downloads will be saved to.
"""
super(ReportWorker, self).__init__()
self.report_downloader = client.GetReportDownloader(version='v201802')
self.report_download_directory = report_download_directory
self.report_definition = report_definition
self.input_queue = input_queue
self.success_queue = success_queue
self.failure_queue = failure_queue
def _DownloadReport(self, customer_id):
filepath = self._FILEPATH_TEMPLATE % (self.report_download_directory,
self._FILENAME_TEMPLATE % customer_id)
retry_count = 0
while True:
print ('[%d/%d] Loading report for customer ID "%s" into "%s"...'
% (self.ident, retry_count, customer_id, filepath))
try:
with open(filepath, 'wb') as handler:
self.report_downloader.DownloadReport(
self.report_definition, output=handler,
client_customer_id=customer_id)
return (True, {'customerId': customer_id})
except googleads.errors.AdWordsReportError, e:
if e.code == 500 and retry_count < MAX_RETRIES:
time.sleep(retry_count * BACKOFF_FACTOR)
else:
print ('Report failed for customer ID "%s" with code "%d" after "%d" '
'retries.' % (customer_id, e.code, retry_count+1))
return (False, {'customerId': customer_id, 'code': e.code,
'message': e.message})
except Exception, e:
print 'Report failed for customer ID "%s".' % customer_id
print 'e: %s' % e.__class__
return (False, {'customerId': customer_id, 'code': None,
'message': e.message})
def run(self):
while True:
try:
customer_id = self.input_queue.get(timeout=0.01)
except Empty:
break
result = self._DownloadReport(customer_id)
(self.success_queue if result[0] else self.failure_queue).put(result[1])
def GetCustomerIDs(client):
"""Retrieves all CustomerIds in the account hierarchy.
Note that your configuration file must specify a client_customer_id belonging
to an AdWords manager account.
Args:
client: an AdWordsClient instance.
Raises:
Exception: if no CustomerIds could be found.
Returns:
A Queue instance containing all CustomerIds in the account hierarchy.
"""
# For this example, we will use ManagedCustomerService to get all IDs in
# hierarchy that do not belong to MCC accounts.
managed_customer_service = client.GetService('ManagedCustomerService',
version='v201802')
offset = 0
# Get the account hierarchy for this account.
selector = {'fields': ['CustomerId'],
'predicates': [{
'field': 'CanManageClients',
'operator': 'EQUALS',
'values': [False]
}],
'paging': {
'startIndex': str(offset),
'numberResults': str(PAGE_SIZE)}}
# Using Queue to balance load between processes.
queue = multiprocessing.Queue()
more_pages = True
while more_pages:
page = managed_customer_service.get(selector)
if page and 'entries' in page and page['entries']:
for entry in page['entries']:
queue.put(entry['customerId'])
else:
raise Exception('Can\'t retrieve any customer ID.')
offset += PAGE_SIZE
selector['paging']['startIndex'] = str(offset)
more_pages = offset < int(page['totalNumEntries'])
return queue
if __name__ == '__main__':
adwords_client = googleads.adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, REPORT_DOWNLOAD_DIRECTORY)
| 35.190476
| 80
| 0.672038
|
edd0ec043c1b4e5b49f85a5a85f5c32aa6a40983
| 1,321
|
py
|
Python
|
behavior_regularized_offline_rl/brac/train_online_test.py
|
gunpowder78/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | 1
|
2022-03-13T21:48:52.000Z
|
2022-03-13T21:48:52.000Z
|
behavior_regularized_offline_rl/brac/train_online_test.py
|
gunpowder78/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | null | null | null |
behavior_regularized_offline_rl/brac/train_online_test.py
|
gunpowder78/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | 1
|
2022-03-30T07:20:29.000Z
|
2022-03-30T07:20:29.000Z
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for behavior_regularized_offline_rl.brac.train_online."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
import tensorflow.compat.v1 as tf
from behavior_regularized_offline_rl.brac import train_online
class TrainOnlineTest(tf.test.TestCase):
def test_train_online(self):
flags.FLAGS.sub_dir = '0'
flags.FLAGS.env_name = 'HalfCheetah-v2'
flags.FLAGS.eval_target = 4000
flags.FLAGS.agent_name = 'sac'
flags.FLAGS.total_train_steps = 100 # Short training.
flags.FLAGS.n_eval_episodes = 1
train_online.main(None) # Just test that it runs.
if __name__ == '__main__':
tf.test.main()
| 31.452381
| 74
| 0.763815
|
b4ce968bee28a92439ae50122067c63d37f5b6a0
| 779
|
py
|
Python
|
fgs.py
|
trejsu/ensemble-adv-training
|
0d385fbd87c86fc7826cb04a75c9a6bd219e5d69
|
[
"MIT"
] | 133
|
2017-06-01T01:52:35.000Z
|
2021-12-14T03:08:11.000Z
|
fgs.py
|
trejsu/ensemble-adv-training
|
0d385fbd87c86fc7826cb04a75c9a6bd219e5d69
|
[
"MIT"
] | 5
|
2017-09-04T07:18:40.000Z
|
2020-04-14T15:19:39.000Z
|
fgs.py
|
trejsu/ensemble-adv-training
|
0d385fbd87c86fc7826cb04a75c9a6bd219e5d69
|
[
"MIT"
] | 42
|
2017-06-04T01:16:40.000Z
|
2020-10-20T08:11:21.000Z
|
import keras.backend as K
from attack_utils import gen_grad
def symbolic_fgs(x, grad, eps=0.3, clipping=True):
"""
FGSM attack.
"""
# signed gradient
normed_grad = K.sign(grad)
# Multiply by constant epsilon
scaled_grad = eps * normed_grad
# Add perturbation to original example to obtain adversarial example
adv_x = K.stop_gradient(x + scaled_grad)
if clipping:
adv_x = K.clip(adv_x, 0, 1)
return adv_x
def iter_fgs(model, x, y, steps, eps):
"""
I-FGSM attack.
"""
adv_x = x
# iteratively apply the FGSM with small step size
for i in range(steps):
logits = model(adv_x)
grad = gen_grad(adv_x, logits, y)
adv_x = symbolic_fgs(adv_x, grad, eps, True)
return adv_x
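# Hypothetical usage sketch (illustrative only): `model`, `x` and `y` are assumed to be
# a compiled Keras model, a symbolic input tensor and one-hot labels supplied by the
# surrounding training/evaluation scripts; none of them are defined in this file, so
# the lines below are shown as comments rather than executable code.
#
#   logits = model(x)
#   grad = gen_grad(x, logits, y)                             # loss gradient w.r.t. the input
#   x_adv = symbolic_fgs(x, grad, eps=0.3)                    # one-step FGSM perturbation
#   x_adv_iter = iter_fgs(model, x, y, steps=10, eps=0.03)    # iterative variant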
| 19.974359
| 72
| 0.631579
|
8f31b1ec2bacc22c66607c767971b71c038a359e
| 16,082
|
py
|
Python
|
intersight/apis/workflow_build_task_meta_owner_api.py
|
gumpcraca/intersight-python
|
780e6703c739f329084beacbbf2ad7a6a2e59b2b
|
[
"Apache-2.0"
] | null | null | null |
intersight/apis/workflow_build_task_meta_owner_api.py
|
gumpcraca/intersight-python
|
780e6703c739f329084beacbbf2ad7a6a2e59b2b
|
[
"Apache-2.0"
] | null | null | null |
intersight/apis/workflow_build_task_meta_owner_api.py
|
gumpcraca/intersight-python
|
780e6703c739f329084beacbbf2ad7a6a2e59b2b
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Intersight REST API
This is Intersight REST API
OpenAPI spec version: 1.0.9-255
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class WorkflowBuildTaskMetaOwnerApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def workflow_build_task_meta_owners_get(self, **kwargs):
"""
Get a list of 'workflowBuildTaskMetaOwner' instances
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.workflow_build_task_meta_owners_get(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param bool count: The $count query option allows clients to request a count of the matching resources.
:param str inlinecount: The $inlinecount query option allows clients to request an inline count of the matching resources included with the resources in the response
:param int top: The max number of records to return
:param int skip: The number of records to skip
:param str filter: Filter criteria for records to return. A URI with a $filter System Query Option identifies a subset of the Entries from the Collection of Entries identified by the Resource Path section of the URI. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the query option. The expression language that is used in $filter operators supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false) or any of the additional literal representations shown in the Abstract Type System section. Query examples: $filter=Name eq 'Bob' $filter=Tags/any(t: t/Key eq 'Site') $filter=Tags/any(t: t/Key eq 'Site' and t/Value eq 'London')
:param str select: Specifies a subset of properties to return
:param str orderby: Determines what values are used to order a collection of records
:param str expand: Specify additional attributes or related records to return. Supports only 'DisplayNames' attribute now. Query examples: $expand=DisplayNames
:param str apply: Specify one or more transformation operations to perform aggregation on records. The transformations are processed in order with the output from a transformation being used as input for the subsequent transformation. Query examples: $apply=groupby((Model), aggregate($count as Total)) $apply=groupby((Model), aggregate(AvailableMemory with average as AverageAvailableMemory))
:param str at: Similar to \"$filter\", but \"at\" is specifically used to filter versioning information properties for records to return. A URI with an \"at\" Query Option identifies a subset of the Entries from the Collection of Entries identified by the Resource Path section of the URI. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the query option. The expression language that is used in at operators supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false) or any of the additional literal representations shown in the Abstract Type System section. Query examples: at=VersionType eq 'Configured' at=InterestedMos.Moid eq '5b5877e56c6730367acf46cd'
:return: WorkflowBuildTaskMetaOwnerList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.workflow_build_task_meta_owners_get_with_http_info(**kwargs)
else:
(data) = self.workflow_build_task_meta_owners_get_with_http_info(**kwargs)
return data
def workflow_build_task_meta_owners_get_with_http_info(self, **kwargs):
"""
Get a list of 'workflowBuildTaskMetaOwner' instances
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.workflow_build_task_meta_owners_get_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param bool count: The $count query option allows clients to request a count of the matching resources.
:param str inlinecount: The $inlinecount query option allows clients to request an inline count of the matching resources included with the resources in the response
:param int top: The max number of records to return
:param int skip: The number of records to skip
:param str filter: Filter criteria for records to return. A URI with a $filter System Query Option identifies a subset of the Entries from the Collection of Entries identified by the Resource Path section of the URI. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the query option. The expression language that is used in $filter operators supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false) or any of the additional literal representations shown in the Abstract Type System section. Query examples: $filter=Name eq 'Bob' $filter=Tags/any(t: t/Key eq 'Site') $filter=Tags/any(t: t/Key eq 'Site' and t/Value eq 'London')
:param str select: Specifies a subset of properties to return
:param str orderby: Determines what values are used to order a collection of records
:param str expand: Specify additional attributes or related records to return. Supports only 'DisplayNames' attribute now. Query examples: $expand=DisplayNames
:param str apply: Specify one or more transformation operations to perform aggregation on records. The transformations are processed in order with the output from a transformation being used as input for the subsequent transformation. Query examples: $apply=groupby((Model), aggregate($count as Total)) $apply=groupby((Model), aggregate(AvailableMemory with average as AverageAvailableMemory))
:param str at: Similar to \"$filter\", but \"at\" is specifically used to filter versioning information properties for records to return. A URI with an \"at\" Query Option identifies a subset of the Entries from the Collection of Entries identified by the Resource Path section of the URI. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the query option. The expression language that is used in at operators supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false) or any of the additional literal representations shown in the Abstract Type System section. Query examples: at=VersionType eq 'Configured' at=InterestedMos.Moid eq '5b5877e56c6730367acf46cd'
:return: WorkflowBuildTaskMetaOwnerList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['count', 'inlinecount', 'top', 'skip', 'filter', 'select', 'orderby', 'expand', 'apply', 'at']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method workflow_build_task_meta_owners_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'count' in params:
query_params.append(('$count', params['count']))
if 'inlinecount' in params:
query_params.append(('$inlinecount', params['inlinecount']))
if 'top' in params:
query_params.append(('$top', params['top']))
if 'skip' in params:
query_params.append(('$skip', params['skip']))
if 'filter' in params:
query_params.append(('$filter', params['filter']))
if 'select' in params:
query_params.append(('$select', params['select']))
if 'orderby' in params:
query_params.append(('$orderby', params['orderby']))
if 'expand' in params:
query_params.append(('$expand', params['expand']))
if 'apply' in params:
query_params.append(('$apply', params['apply']))
if 'at' in params:
query_params.append(('at', params['at']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api('/workflow/BuildTaskMetaOwners', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='WorkflowBuildTaskMetaOwnerList',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def workflow_build_task_meta_owners_moid_get(self, moid, **kwargs):
"""
Get a specific instance of 'workflowBuildTaskMetaOwner'
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.workflow_build_task_meta_owners_moid_get(moid, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str moid: The moid of the workflowBuildTaskMetaOwner instance. (required)
:return: WorkflowBuildTaskMetaOwner
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.workflow_build_task_meta_owners_moid_get_with_http_info(moid, **kwargs)
else:
(data) = self.workflow_build_task_meta_owners_moid_get_with_http_info(moid, **kwargs)
return data
def workflow_build_task_meta_owners_moid_get_with_http_info(self, moid, **kwargs):
"""
Get a specific instance of 'workflowBuildTaskMetaOwner'
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.workflow_build_task_meta_owners_moid_get_with_http_info(moid, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str moid: The moid of the workflowBuildTaskMetaOwner instance. (required)
:return: WorkflowBuildTaskMetaOwner
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['moid']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method workflow_build_task_meta_owners_moid_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'moid' is set
if ('moid' not in params) or (params['moid'] is None):
raise ValueError("Missing the required parameter `moid` when calling `workflow_build_task_meta_owners_moid_get`")
collection_formats = {}
path_params = {}
if 'moid' in params:
path_params['moid'] = params['moid']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api('/workflow/BuildTaskMetaOwners/{moid}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='WorkflowBuildTaskMetaOwner',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 56.826855
| 818
| 0.652096
|
71e2146809a8acd688ab14fcc199ec5f954e5f16
| 1,055
|
py
|
Python
|
AC09/Server/Model/usuario.py
|
lucaskurata/API
|
7f67ad3babf525d3caeab7b58bff59abbfef5615
|
[
"Apache-2.0"
] | null | null | null |
AC09/Server/Model/usuario.py
|
lucaskurata/API
|
7f67ad3babf525d3caeab7b58bff59abbfef5615
|
[
"Apache-2.0"
] | null | null | null |
AC09/Server/Model/usuario.py
|
lucaskurata/API
|
7f67ad3babf525d3caeab7b58bff59abbfef5615
|
[
"Apache-2.0"
] | null | null | null |
class Usuario():
def __init__(self, id_Usuario, nome, segredo):
self.id_Usuario = id_Usuario
self.nome = nome
self.segredo = segredo
def __dict__(self):
dicio = dict()
dicio['id_Usuario'] = self.id_Usuario
dicio['nome'] = self.nome
dicio['segredo'] = self.segredo
return dicio
@staticmethod
def cria(dados):
try:
id_Usuario = dados["id_Usuario"]
nome = dados["nome"]
segredo = dados["segredo"]
return Usuario(id_Usuario = id_Usuario, nome = nome, segredo = segredo)
except Exception as e:
print("Problema ao criar Usuario!")
print(e)
@staticmethod
def cria_de_tupla(dados):
try:
id_Usuario = dados[0]
nome = dados[1]
segredo = dados[2]
return Usuario(id_Usuario = id_Usuario, nome = nome, segredo = segredo)
except Exception as e:
print("Problema ao criar Usuario!")
print(e)
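# A small usage sketch (illustrative only; the field values are hypothetical) exercising
# both factory methods defined above.
if __name__ == "__main__":
    u = Usuario.cria({"id_Usuario": 1, "nome": "Ana", "segredo": "s3nha"})
    v = Usuario.cria_de_tupla((2, "Bruno", "outro_segredo"))
    print(u.nome)           # -> Ana
    print(v.id_Usuario)     # -> 2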
| 30.142857
| 83
| 0.546919
|
45362447ee575e9c471259c9b80047711573631a
| 543
|
py
|
Python
|
bgp/tables.py
|
maznu/peering-manager
|
d249fcf530f4cc48b39429badb79bc203e0148ba
|
[
"Apache-2.0"
] | 127
|
2017-10-12T00:27:45.000Z
|
2020-08-07T11:13:55.000Z
|
bgp/tables.py
|
maznu/peering-manager
|
d249fcf530f4cc48b39429badb79bc203e0148ba
|
[
"Apache-2.0"
] | 247
|
2017-12-26T12:55:34.000Z
|
2020-08-08T11:57:35.000Z
|
bgp/tables.py
|
maznu/peering-manager
|
d249fcf530f4cc48b39429badb79bc203e0148ba
|
[
"Apache-2.0"
] | 63
|
2017-10-13T06:46:05.000Z
|
2020-08-08T00:41:57.000Z
|
import django_tables2 as tables
from bgp.models import Relationship
from utils.tables import BaseTable, ButtonsColumn, ColourColumn, SelectColumn
class RelationshipTable(BaseTable):
pk = SelectColumn()
name = tables.Column(linkify=True)
color = ColourColumn()
actions = ButtonsColumn(Relationship, buttons=("edit", "delete"))
class Meta(BaseTable.Meta):
model = Relationship
fields = ("pk", "name", "slug", "description", "color", "actions")
default_columns = ("pk", "name", "color", "actions")
| 31.941176
| 77
| 0.690608
|
7b235c7b944b91fc209f09f8852ca84b908f87f4
| 4,123
|
py
|
Python
|
locations/spiders/perkins.py
|
mfjackson/alltheplaces
|
37c90b4041c80a574e6e4c2f886883e97df4b636
|
[
"MIT"
] | null | null | null |
locations/spiders/perkins.py
|
mfjackson/alltheplaces
|
37c90b4041c80a574e6e4c2f886883e97df4b636
|
[
"MIT"
] | null | null | null |
locations/spiders/perkins.py
|
mfjackson/alltheplaces
|
37c90b4041c80a574e6e4c2f886883e97df4b636
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import scrapy
import json
from locations.items import GeojsonPointItem
class PerkinsSpider(scrapy.Spider):
name = "perkins"
item_attributes = {"brand": "Perkins"}
allowed_domains = ["stores.perkinsrestaurants.com"]
start_urls = ("https://stores.perkinsrestaurants.com/sitemap.xml",)
def store_hours(self, store_hours):
day_groups = []
this_day_group = None
for day_info in store_hours:
day = day_info["day"][:2].title()
hour_intervals = []
for interval in day_info["intervals"]:
f_time = str(interval["start"]).zfill(4)
t_time = str(interval["end"]).zfill(4)
hour_intervals.append(
"{}:{}-{}:{}".format(
f_time[0:2],
f_time[2:4],
t_time[0:2],
t_time[2:4],
)
)
hours = ",".join(hour_intervals)
if not this_day_group:
this_day_group = {"from_day": day, "to_day": day, "hours": hours}
elif this_day_group["hours"] != hours:
day_groups.append(this_day_group)
this_day_group = {"from_day": day, "to_day": day, "hours": hours}
elif this_day_group["hours"] == hours:
this_day_group["to_day"] = day
day_groups.append(this_day_group)
opening_hours = ""
if len(day_groups) == 1 and day_groups[0]["hours"] in (
"00:00-23:59",
"00:00-00:00",
):
opening_hours = "24/7"
else:
for day_group in day_groups:
if day_group["from_day"] == day_group["to_day"]:
opening_hours += "{from_day} {hours}; ".format(**day_group)
elif day_group["from_day"] == "Su" and day_group["to_day"] == "Sa":
opening_hours += "{hours}; ".format(**day_group)
else:
opening_hours += "{from_day}-{to_day} {hours}; ".format(**day_group)
opening_hours = opening_hours[:-2]
return opening_hours
def parse(self, response):
response.selector.remove_namespaces()
city_urls = response.xpath("//url/loc/text()").extract()
for path in city_urls:
if path.count("/") == 6:
yield scrapy.Request(
path,
callback=self.parse_store,
)
def parse_store(self, response):
properties = {
"addr_full": response.xpath(
'//span[@itemprop="streetAddress"]/span/text()'
).extract_first(),
"city": response.xpath(
'//span[@itemprop="addressLocality"]/text()'
).extract_first(),
"state": response.xpath(
'//abbr[@itemprop="addressRegion"]/text()'
).extract_first(),
"postcode": response.xpath('//span[@itemprop="postalCode"]/text()')
.extract_first()
.strip(),
"ref": response.url,
"website": response.url,
"lon": float(
response.xpath(
'//span/meta[@itemprop="longitude"]/@content'
).extract_first()
),
"lat": float(
response.xpath(
'//span/meta[@itemprop="latitude"]/@content'
).extract_first()
),
}
phone = response.xpath(
'//a[@class="c-phone-number-link c-phone-main-number-link"]/text()'
).extract_first()
if phone:
properties["phone"] = phone
hours = json.loads(
response.xpath(
'//div[@class="c-location-hours-details-wrapper js-location-hours"]/@data-days'
).extract_first()
)
opening_hours = self.store_hours(hours) if hours else None
if opening_hours:
properties["opening_hours"] = opening_hours
yield GeojsonPointItem(**properties)
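# Illustrative check of store_hours() (hypothetical data, not fetched from the site):
# when every day shares the same interval, the day groups collapse to a single range.
if __name__ == "__main__":
    sample = [
        {"day": d, "intervals": [{"start": 700, "end": 2200}]}
        for d in ("MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY",
                  "FRIDAY", "SATURDAY", "SUNDAY")
    ]
    print(PerkinsSpider().store_hours(sample))   # -> Mo-Su 07:00-22:00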
| 35.239316
| 95
| 0.502304
|
ff197bd8e4b69272f42af3474e9210866bacd8c5
| 1,963
|
py
|
Python
|
kindle-notes/utils/html/base.py
|
Drinkey/kindle-notes
|
35300164d12a63264b318b778880d124a8f68e0c
|
[
"MIT"
] | null | null | null |
kindle-notes/utils/html/base.py
|
Drinkey/kindle-notes
|
35300164d12a63264b318b778880d124a8f68e0c
|
[
"MIT"
] | 9
|
2019-02-22T09:02:19.000Z
|
2019-02-24T14:51:55.000Z
|
kindle-notes/utils/html/base.py
|
Drinkey/kindle-notes
|
35300164d12a63264b318b778880d124a8f68e0c
|
[
"MIT"
] | null | null | null |
import re
from enum import Enum
import requests_html
from utils.objects import KindleNotes, NoteElement
def get_note_heading_text(heading):
rex = re.search(r'Highlight.*\-\s(.*)\s\> Location.*', heading)
if rex:
return rex.group(1)
return ''
def html_parser(html, selector):
return html.find(selector, first=True)
def html_line_parser(html, selector):
line = html_parser(html, selector)
if line:
return line.text
return ''
class KindleNotesHtml(KindleNotes):
def __init__(self, selector: Enum):
self._html = None
self._selector = selector
def parse(self, html_doc:str):
self._html = requests_html.HTML(html=html_doc)
self.title = html_parser(self._html, self._selector.TITLE.value).text
self.author = html_parser(self._html, self._selector.AUTHOR.value).text
self.notes = self._extract_notes()
return self
def _extract_notes(self):
notes = list()
for section in self._html.find(self._selector.SECTIONS.value):
note = NoteElement()
inner_html = requests_html.HTML(html=section.html)
note.section = html_line_parser(inner_html, self._selector.S_HEADING.value)
note.heading = get_note_heading_text(
html_line_parser(inner_html, self._selector.S_NOTE_HEADING.value)
)
# Notes and note heading already in order, just need to check if the heading already
# exists in previous notes. If exists, set current note.heading to none
if note.heading in [n.heading for n in notes]:
note.heading = ''
note.text = html_line_parser(inner_html, self._selector.S_NOTE_TEXT.value)
notes.append(note)
return notes
def __repr__(self):
return "<title: ('{}'), author: ('{}'), notes: ('{}')>".format(
self.title, self.author, '\n'.join(map(str, self.notes)))
| 33.844828
| 96
| 0.641875
|
df89bdba14d3c5677079ef5ef1c63a79bfbea18e
| 2,015
|
py
|
Python
|
networkx/algorithms/centrality/harmonic.py
|
AaronOpfer/networkx
|
f04ca835c3503f04f9b3e933270575980e44205b
|
[
"BSD-3-Clause"
] | 3
|
2020-08-04T20:29:41.000Z
|
2020-11-09T09:28:19.000Z
|
networkx/algorithms/centrality/harmonic.py
|
AaronOpfer/networkx
|
f04ca835c3503f04f9b3e933270575980e44205b
|
[
"BSD-3-Clause"
] | 30
|
2020-04-15T19:37:40.000Z
|
2020-04-22T21:19:35.000Z
|
networkx/algorithms/centrality/harmonic.py
|
AaronOpfer/networkx
|
f04ca835c3503f04f9b3e933270575980e44205b
|
[
"BSD-3-Clause"
] | 2
|
2020-03-12T23:20:22.000Z
|
2021-02-15T21:54:02.000Z
|
# Copyright (C) 2015 by
# Alessandro Luongo
# BSD license.
#
# Authors:
# Alessandro Luongo <alessandro.luongo@studenti.unimi.it>
#
"""Functions for computing the harmonic centrality of a graph."""
from functools import partial
import networkx as nx
__all__ = ['harmonic_centrality']
def harmonic_centrality(G, nbunch=None, distance=None):
r"""Compute harmonic centrality for nodes.
Harmonic centrality [1]_ of a node `u` is the sum of the reciprocal
of the shortest path distances from all other nodes to `u`
.. math::
C(u) = \sum_{v \neq u} \frac{1}{d(v, u)}
where `d(v, u)` is the shortest-path distance between `v` and `u`.
Notice that higher values indicate higher centrality.
Parameters
----------
G : graph
A NetworkX graph
nbunch : container
Container of nodes. If provided harmonic centrality will be computed
only over the nodes in nbunch.
distance : edge attribute key, optional (default=None)
Use the specified edge attribute as the edge distance in shortest
path calculations. If `None`, then each edge will have distance equal to 1.
Returns
-------
nodes : dictionary
Dictionary of nodes with harmonic centrality as the value.
See Also
--------
betweenness_centrality, load_centrality, eigenvector_centrality,
degree_centrality, closeness_centrality
Notes
-----
If the 'distance' keyword is set to an edge attribute key then the
shortest-path length will be computed using Dijkstra's algorithm with
that edge attribute as the edge weight.
References
----------
.. [1] Boldi, Paolo, and Sebastiano Vigna. "Axioms for centrality."
Internet Mathematics 10.3-4 (2014): 222-262.
"""
if G.is_directed():
G = G.reverse()
spl = partial(nx.shortest_path_length, G, weight=distance)
return {u: sum(1 / d if d > 0 else 0 for v, d in spl(source=u).items())
for u in G.nbunch_iter(nbunch)}
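if __name__ == "__main__":
    # Quick illustrative check (not part of the original module): on a path graph
    # 0-1-2-3 the interior nodes are closer to everyone else, so they score higher.
    G = nx.path_graph(4)
    print(harmonic_centrality(G))
    # -> {0: 1.8333..., 1: 2.5, 2: 2.5, 3: 1.8333...}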
| 29.202899
| 82
| 0.664516
|
f715517d413224bd0e232c087a3dc3de8fac5148
| 2,409
|
py
|
Python
|
DATA/10_64_64_64_1E7/analy.py
|
Aieener/SUS_3D
|
8fc5a768a2339238939522baf96bce98bf61902e
|
[
"MIT"
] | null | null | null |
DATA/10_64_64_64_1E7/analy.py
|
Aieener/SUS_3D
|
8fc5a768a2339238939522baf96bce98bf61902e
|
[
"MIT"
] | null | null | null |
DATA/10_64_64_64_1E7/analy.py
|
Aieener/SUS_3D
|
8fc5a768a2339238939522baf96bce98bf61902e
|
[
"MIT"
] | null | null | null |
# analy.py
# A python program to analyze the SUS weighting function in order to reach the following goals:
# 1. plot the weight function
# 2. generate the normalized distribution for Z=1
# 3. extrapolate the N distribution for different Zs given by the user.
# Author: Yuding Ai
# Date: 2015 Oct 23
import math
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from matplotlib import rc
# rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
def PN():
WF = [] # a list of my target Weighting function
PN = [] # a list of number distribution
with open("SUSWeight_function.txt","r") as file:
for line in file:
words = line.split()
n = float(words[0]) #take the value
WF.append(n); #append value into my WF list
maxi = max(WF)
if maxi > 500:
for i in range(len(WF)):
WF[i] = WF[i]-maxi +500
PN.append(math.exp(WF[i]));
PN = [float(i)/sum(PN) for i in PN]
return WF,PN
def Pplot(PN,z):
fig = plt.figure()
plt.plot(PN,'+b',markersize=3)
Z = str(z)
ylabel = 'P(N;Z='+ Z + ')'
plt.ylabel(ylabel)
plt.xlabel('N')
title = 'P(N;Z='+ Z + ').png'
fig.savefig(title, dpi=300, bbox_inches='tight')
def enlargePplot(PN,z):
fig = plt.figure()
plt.plot(PN,'+b-',markersize=3,linewidth = 0.1)
plt.xlim(8600,9600)
plt.ylim(0,0.007)
Z = str(z)
ylabel = 'P(N;Z='+ Z + ')'
plt.ylabel(ylabel)
plt.xlabel('N')
title = 'ENLP(N;Z='+ Z + ').png'
fig.savefig(title, dpi=300, bbox_inches='tight')
def Wplot(WN):
fig = plt.figure()
plt.plot(WN,'+r',markersize=1,)
plt.ylabel('Weighting Function')
plt.xlabel('N')
title = 'WeightingFunc.png'
fig.savefig(title, dpi=300, bbox_inches='tight')
def exploPN(W,z):
P = [] # a list of number distribution
for i in range(len(W)):
W[i] = W[i] + i*math.log(z)
maxi = max(W)
if maxi > 500:
for j in range(len(W)):
W[j] = W[j]-maxi +500
P.append(math.exp(W[j]));
P = [float(k)/sum(P) for k in P]
return P
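# Note on exploPN (illustrative): the weights sampled at activity z = 1 are shifted by
# N*ln(z), i.e.
#     P(N; z)  ~  exp( W(N) + N*ln z ),
# which is what the loop `W[i] = W[i] + i*math.log(z)` implements (the list index i
# plays the role of N) before the values are shifted for numerical stability and
# renormalised.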
def main():
P = PN()[1] # take the P(N;z=1)
W = PN()[0] # take the original weighting function
Wplot(W)
# Pplot(P,"1")
# Pe = exploPN(W,4.44)
# enlargePplot(Pe,4.44)
# for i in range(10):
# W = PN()[0] # take the original weighting function
# t = 3.83 + 0.02*i
# Pe = exploPN(W,t)
# # Pplot(Pe,t)
# enlargePplot(Pe,t)
main()
| 23.38835
| 95
| 0.632213
|
d897337abce84a3f711b6e51390d7a815fa7b388
| 581
|
py
|
Python
|
acmgnyr2014/e.py
|
AnAverageHuman/competitive
|
4c4b9bdbe91fde1c52f731426f9a53bff97796e1
|
[
"BSD-3-Clause"
] | null | null | null |
acmgnyr2014/e.py
|
AnAverageHuman/competitive
|
4c4b9bdbe91fde1c52f731426f9a53bff97796e1
|
[
"BSD-3-Clause"
] | null | null | null |
acmgnyr2014/e.py
|
AnAverageHuman/competitive
|
4c4b9bdbe91fde1c52f731426f9a53bff97796e1
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
P = int(input())
for _ in range(P):
K, N = map(int, input().split())
b = []
while N > 0:
empty = len(b)
for (i, x) in enumerate(b):
if x == 0:
empty = i
break
for i in range(empty):
b[i] -= 1
if empty == len(b):
b.append(empty + 1)
else:
b[empty] = empty + 1
N -= 1
print(f"{K} {len(b)}")
for (i, x) in enumerate(b):
print(x, end="\n" if i % 10 == 9 else " ")
if len(b) % 10:
print()
| 18.741935
| 50
| 0.387263
|
e22b6747bdc779a95fc9a672afa26d7f663d52c1
| 1,148
|
py
|
Python
|
test/vanilla/Expected/AcceptanceTests/Xml/setup.py
|
tasherif-msft/autorest.python
|
5b0121bcfa802aedaeda36990e8bcaa2b7e26b14
|
[
"MIT"
] | null | null | null |
test/vanilla/Expected/AcceptanceTests/Xml/setup.py
|
tasherif-msft/autorest.python
|
5b0121bcfa802aedaeda36990e8bcaa2b7e26b14
|
[
"MIT"
] | null | null | null |
test/vanilla/Expected/AcceptanceTests/Xml/setup.py
|
tasherif-msft/autorest.python
|
5b0121bcfa802aedaeda36990e8bcaa2b7e26b14
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
# coding: utf-8
from setuptools import setup, find_packages
NAME = "autorestswaggerbatxmlservice"
VERSION = "0.1.0"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = ["msrest>=0.6.18", "azure-core<2.0.0,>=1.8.0"]
setup(
name=NAME,
version=VERSION,
description="AutoRestSwaggerBATXMLService",
author_email="",
url="",
keywords=["Swagger", "AutoRestSwaggerBATXMLService"],
install_requires=REQUIRES,
packages=find_packages(),
include_package_data=True,
long_description="""\
Test Infrastructure for AutoRest Swagger BAT.
"""
)
| 30.210526
| 94
| 0.630662
|
cb80f3125c3a61420af5aaf2477b4df6a0cf0a80
| 87
|
py
|
Python
|
mypics/apps.py
|
ngetichnicholas/My-Gallery
|
0b5e86b648c488d3d6c6ff86e4a094cc42e8a30b
|
[
"MIT"
] | 1
|
2021-07-31T08:01:11.000Z
|
2021-07-31T08:01:11.000Z
|
mypics/apps.py
|
ngetichnicholas/My-Gallery
|
0b5e86b648c488d3d6c6ff86e4a094cc42e8a30b
|
[
"MIT"
] | null | null | null |
mypics/apps.py
|
ngetichnicholas/My-Gallery
|
0b5e86b648c488d3d6c6ff86e4a094cc42e8a30b
|
[
"MIT"
] | 1
|
2022-03-29T19:01:07.000Z
|
2022-03-29T19:01:07.000Z
|
from django.apps import AppConfig
class MypicsConfig(AppConfig):
name = 'mypics'
| 14.5
| 33
| 0.747126
|
f1f03e3f0be8934083020c10afc5563cb3179072
| 269
|
py
|
Python
|
aiocogeo/errors.py
|
vincentsarago/aiocogeo
|
bda7334cfc2c761b92563a6cd952cf31c54ca495
|
[
"MIT"
] | null | null | null |
aiocogeo/errors.py
|
vincentsarago/aiocogeo
|
bda7334cfc2c761b92563a6cd952cf31c54ca495
|
[
"MIT"
] | null | null | null |
aiocogeo/errors.py
|
vincentsarago/aiocogeo
|
bda7334cfc2c761b92563a6cd952cf31c54ca495
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass
@dataclass
class CogReadError(Exception):
message: str
@dataclass
class InvalidTiffError(CogReadError):
...
@dataclass
class TileNotFoundError(CogReadError):
...
@dataclass
class MissingAssets(CogReadError):
...
| 12.809524
| 38
| 0.739777
|
74272a7fe1e9f313d6253fc51ff385ac70aa1bd8
| 31,233
|
py
|
Python
|
stateRepresentation.py
|
xc-liu/pacman-rl-agent
|
5372f8c479b1a1f25132602978f9eb908fce774e
|
[
"Unlicense"
] | null | null | null |
stateRepresentation.py
|
xc-liu/pacman-rl-agent
|
5372f8c479b1a1f25132602978f9eb908fce774e
|
[
"Unlicense"
] | null | null | null |
stateRepresentation.py
|
xc-liu/pacman-rl-agent
|
5372f8c479b1a1f25132602978f9eb908fce774e
|
[
"Unlicense"
] | null | null | null |
import math
import time
import numpy as np
from score_keeper import get_timesteps
symmetric = True
def kalman(v, r, q):
"""
v - array to filter
r - how much we trust our path def=0.1
q - decreasing it makes it smoother def=0.01
"""
p = 12
x = 0
for i in range(len(v)):
p = p + q
K = p * (1 / (p + r))
x = x + K * (v[i] - x)
p = (1 - K) * p
return x
def find_nearest(array, value):
array = np.asarray(array)
return (np.abs(array - value)).argmin()
def add_players(idx1, idx2):
idx1 = int(idx1)
idx2 = int(idx2)
bigger_idx = max(idx1, idx2)
smaller_idx = min(idx1, idx2)
return int(bigger_idx * 10 + smaller_idx)
def split_players(number):
number = int(number)
if (len(str(number))) == 1:
return False
if (len(str(number))) == 2:
idx1 = int(str(number)[0])
idx2 = int(str(number)[1])
return idx1, idx2
if (len(str(number))) == 3:
idx1 = int(str(number)[0])
idx2 = int(str(number)[1])
idx3 = int(str(number)[2])
return idx1, idx2, idx3
else:
idx1 = int(str(number)[0])
idx2 = int(str(number)[1])
idx3 = int(str(number)[2])
idx4 = int(str(number)[3])
return idx1, idx2, idx3, idx4
def minus_players(number, idx):
number = int(number)
idx = int(idx)
if (len(str(number))) == 1:
return False
idx1 = int(number / 10)
if idx1 != 0:
idx2 = number - idx1 * 10
else:
idx2 = int(number / 10)
idx1 = number - idx1 * 10
if idx1 == idx:
return idx2
elif idx2 == idx:
return idx1
else:
return False
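# Illustrative sketch (not part of the original module) of the module-level helpers
# above: two agent indices are packed into a single integer and recovered, and
# kalman() collapses a noisy series into one smoothed estimate.
if __name__ == "__main__":
    packed = add_players(3, 2)
    print(packed)                     # -> 32
    print(split_players(packed))      # -> (3, 2)
    print(minus_players(packed, 3))   # -> 2
    print(kalman([3.1, 2.9, 3.2, 3.0], r=0.1, q=0.01))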
class stateRepresentation:
def __init__(self, agent, gameState, index, red):
self.agent = agent
self.gameState = gameState
self.index = index
self.indices = set(self.agent.getTeam(gameState))
indices = set(self.agent.getTeam(gameState))
indices.discard(self.index)
self.index_second_agent = indices.pop()
self.red = red
self.dist_history = {}
self.dist_history_second_agent = {}
self.initial_enemy_pos = {}
self.initial_team_pos = {}
self.last_enemy_pos = {}
self.last_team_pos = {}
self.last_player_state = {}
self.corresponding_index = {}
self.initial_food = 0
self.digital_state = self.initialise_digital_state(gameState)
self.next_pos1 = self.initialise_next_pos(steps=1)
self.next_pos2 = self.initialise_next_pos(steps=2)
self.score = agent.getScore(gameState)
self.time_left = gameState.data.timeleft
self.total_time = gameState.data.timeleft
self.flipping_action = {0: 1, 1: 0, 2: 3, 3: 2}
def initialise_digital_state(self, gameState):
# digital state has 7 layers:
# 0. walls
# 1. food
# 2. capsule
# 3. players
# 4. directions (ignored for now)
# 5. pacman players (number = food carried)
# 6. scared players (number = timer)
layout = str(gameState.data.layout).split("\n")
digital_state = np.zeros(shape=(6, len(layout), len(layout[0])))
for i in range(len(layout)):
for j in range(len(layout[0])):
if self.red:
idx1 = i
idx2 = j
else:
idx1 = len(layout) - i - 1
idx2 = len(layout[0]) - j - 1
if layout[idx1][idx2] == "%":
digital_state[0][i][j] = 1
elif layout[idx1][idx2] == ".":
digital_state[1][i][j] = 1
elif layout[idx1][idx2] == "o":
digital_state[2][i][j] = 1
elif layout[idx1][idx2] != " ":
idx = int(layout[idx1][idx2])
# our player should always be red team, 1, 3
if self.red:
digital_state[3][i][j] = idx
elif idx in [1, 3]:
digital_state[3][i][j] = idx + 1
else:
digital_state[3][i][j] = idx - 1
self.corresponding_index[idx - 1] = digital_state[3][i][j]
if digital_state[3][i][j] % 2 == 0:
self.last_enemy_pos[int(digital_state[3][i][j])] = (j, len(layout) - i - 1)
self.initial_food = np.sum(digital_state[1])
myPos = gameState.getAgentState(self.index).getPosition()
secondPos = gameState.getAgentState(self.index_second_agent).getPosition()
if not self.red:
myPos = (len(layout[0]) - myPos[0] - 1, len(layout) - myPos[1] - 1)
secondPos = (len(layout[0]) - secondPos[0] - 1, len(layout) - secondPos[1] - 1)
self.last_team_pos[self.index] = myPos
self.last_team_pos[self.index_second_agent] = secondPos
self.initial_team_pos[self.index] = myPos
self.initial_team_pos[self.index_second_agent] = secondPos
for i in [2, 4]:
self.initial_enemy_pos[i] = self.last_enemy_pos[i]
self.dist_history[i] = [self.agent.distancer.getDistance(pos1=self.last_enemy_pos[i], pos2=myPos)]
self.dist_history_second_agent[i] = [
self.agent.distancer.getDistance(pos1=self.last_enemy_pos[i], pos2=secondPos)]
for i in range(1, 5):
self.last_player_state[i] = "ghost"
return digital_state
def initialise_next_pos(self, steps=2):
next_pos = {}
h = len(self.digital_state[0])
w = len(self.digital_state[0][0])
for i in range(h):
for j in range(w):
if self.digital_state[0][h - 1 - i][j] == 0:
pos = (i, j)
next_pos[(pos[1], pos[0])] = []
possible_pos = [(i + p, j + q)
for p in range(-steps, steps + 1)
for q in range(-steps, steps + 1)
if 0 <= i + p < h and 0 <= j + q < w]
for p in possible_pos:
if self.digital_state[0][h - 1 - p[0]][p[1]] == 0:
if self.agent.distancer.getDistance((pos[1], pos[0]), (p[1], p[0])) <= steps:
next_pos[(pos[1], pos[0])].append((p[1], p[0]))
return next_pos
def update_state(self, gameState, acting_agent):
self.gameState = gameState
self.score = self.agent.getScore(gameState)
self.time_left = gameState.data.timeleft
height = len(self.digital_state[0])
width = len(self.digital_state[0][0])
r_food = gameState.getRedFood()
b_food = gameState.getBlueFood()
r_capsule = gameState.getRedCapsules()
b_capsule = gameState.getBlueCapsules()
my_prev_food = np.copy(self.digital_state[1, :, :int(width / 2)])
my_prev_capsule = np.copy(self.digital_state[2, :, :int(width / 2)])
self.digital_state[1:, :, :] = 0
# update food
for i in range(height):
for j in range(width):
if self.red:
idx1 = j
idx2 = height - i - 1
else:
idx1 = width - j - 1
idx2 = i
if r_food[idx1][idx2] or b_food[idx1][idx2]:
self.digital_state[1][i][j] = 1
# if the current food layer is different from the previous one, update the previous position of the enemy
my_food_change = my_prev_food - self.digital_state[1, :, :int(width / 2)]
change_food = np.nonzero(my_food_change)
change_pos = [(b, height - a - 1) for a in change_food[0] for b in change_food[1]]
# update capsule
if self.red:
if len(r_capsule) > 0:
for cap in r_capsule:
self.digital_state[2][height - 1 - cap[1]][cap[0]] = 1
if len(b_capsule) > 0:
for cap in b_capsule:
self.digital_state[2][height - 1 - cap[1]][cap[0]] = 1
else:
if len(r_capsule) > 0:
for cap in r_capsule:
self.digital_state[2][cap[1]][width - 1 - cap[0]] = 1
if len(b_capsule) > 0:
for cap in b_capsule:
self.digital_state[2][cap[1]][width - 1 - cap[0]] = 1
my_capsule_change = my_prev_capsule - self.digital_state[2, :, :int(width / 2)]
change_capsule = np.nonzero(my_capsule_change)
change_pos += [(b, height - a - 1) for a in change_capsule[0] for b in change_capsule[1]]
# update player states
myPos = gameState.getAgentState(self.index).getPosition()
secondPos = gameState.getAgentState(self.index_second_agent).getPosition()
if not self.red:
myPos = (width - myPos[0] - 1, height - myPos[1] - 1)
secondPos = (width - secondPos[0] - 1, height - secondPos[1] - 1)
for idx in self.agent.getTeam(gameState) + self.agent.getOpponents(gameState):
enemy = False
if idx in self.agent.getOpponents(gameState):
enemy = True
i_state = gameState.getAgentState(idx)
pos = i_state.getPosition()
pacman = i_state.isPacman
food_carrying = i_state.numCarrying
scared_timer = i_state.scaredTimer
# direction = i_state.configuration.direction
original_idx = idx
if self.red:
idx += 1
else:
if idx in [0, 2]:
idx += 2
if enemy:
if pos is None and self.near_last_time(agent_idx=self.index, enemy_idx=idx, distance=2):
# if the enemy was right next to us the previous time step but suddenly disappears
# it is eaten and back to initial position
my_idx = self.corresponding_index[self.index]
if (self.last_player_state[my_idx] == "ghost" and self.last_player_state[idx] == "pacman") or \
self.last_player_state[idx] == "scared":
self.reinitialise_enemy_position(idx, myPos, secondPos)
else:
self.last_team_pos[self.index] = self.initial_team_pos[self.index]
self.dist_history[idx] = [self.agent.distancer.getDistance(self.last_enemy_pos[idx],
self.initial_team_pos[self.index])]
elif pos is None and self.near_last_time(agent_idx=self.index_second_agent, enemy_idx=idx, distance=2):
second_idx = self.corresponding_index[self.index_second_agent]
if (self.last_player_state[second_idx] == "ghost" and self.last_player_state[idx] == "pacman") or \
self.last_player_state[idx] == "scared":
self.reinitialise_enemy_position(idx, myPos, secondPos)
else:
self.last_team_pos[self.index_second_agent] = self.initial_team_pos[self.index_second_agent]
self.dist_history_second_agent[idx] = [
self.agent.distancer.getDistance(self.last_enemy_pos[idx],
self.initial_team_pos[self.index_second_agent])]
if pos is not None:
if not self.red:
pos = [width - 1 - pos[0], height - 1 - pos[1]]
self.dist_history[idx].append(self.agent.distancer.getDistance(myPos, pos))
# self.dist_history[idx] = [self.agent.distancer.getDistance(myPos, pos)]
self.last_enemy_pos[idx] = pos
else:
changed = False
if len(change_pos) == 1:
indices = list(self.last_enemy_pos.keys())
distance_to_food = [self.agent.distancer.getDistance(change_pos[0], p)
for p in self.last_enemy_pos.values()]
if distance_to_food[indices.index(idx)] == min(distance_to_food):
pos = change_pos[0]
changed = True
# self.dist_history[idx].append(self.agent.distancer.getDistance(myPos, pos))
self.dist_history[idx] = [self.agent.distancer.getDistance(myPos, pos)]
self.last_enemy_pos[idx] = pos
elif len(change_pos) == 2:
indices = list(self.last_enemy_pos.keys())
cost1 = self.agent.distancer.getDistance(self.last_enemy_pos[indices[0]], change_pos[0]) \
+ self.agent.distancer.getDistance(self.last_enemy_pos[indices[1]], change_pos[1])
cost2 = self.agent.distancer.getDistance(self.last_enemy_pos[indices[0]], change_pos[1]) \
+ self.agent.distancer.getDistance(self.last_enemy_pos[indices[1]], change_pos[0])
if cost1 < cost2:
corresponding_pos = {0: 0, 1: 1}
else:
corresponding_pos = {0: 1, 1: 0}
pos = change_pos[corresponding_pos[indices.index(idx)]]
changed = True
self.dist_history[idx].append(self.agent.distancer.getDistance(myPos, pos))
# self.dist_history[idx] = [self.agent.distancer.getDistance(myPos, pos)]
self.last_enemy_pos[idx] = pos
if not changed:
noisy_dist = np.clip(self.gameState.getAgentDistances()[original_idx], a_min=5, a_max=None)
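                        # clamp the sonar reading at 5, presumably because enemies closer
                        # than the sight range would have been observed directly (pos not None)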
if acting_agent == self.index:
pos = self.computeOpponentPosition(idx, pacman, noisy_dist, myPos)
else:
pos = self.computeOpponentPosition(idx, pacman, noisy_dist, myPos, 'second')
else:
if not self.red:
pos = [width - 1 - pos[0], height - 1 - pos[1]]
# if self.digital_state[3][height - 1 - int(pos[1])][int(pos[0])] == 0:
# self.digital_state[3][height - 1 - int(pos[1])][int(pos[0])] = idx
# else:
# self.digital_state[3][height - 1 - int(pos[1])][int(pos[0])] += idx + 1
if self.digital_state[3][height - 1 - int(pos[1])][int(pos[0])] == 0:
self.digital_state[3][height - 1 - int(pos[1])][int(pos[0])] = idx
else:
self.digital_state[3][height - 1 - int(pos[1])][int(pos[0])] = add_players(
self.digital_state[3][height - 1 - int(pos[1])][int(pos[0])], idx)
# digital_state[4][height - int(pos[1])][int(pos[0])] = actions_idx[direction]
self.digital_state[4][height - 1 - int(pos[1])][int(pos[0])] += food_carrying if pacman else 0
self.digital_state[5][height - 1 - int(pos[1])][int(pos[0])] += scared_timer
if pacman:
self.last_player_state[idx] = "pacman"
elif scared_timer > 0:
self.last_player_state[idx] = "scared"
else:
self.last_player_state[idx] = "ghost"
self.last_team_pos[self.index] = myPos
self.last_team_pos[self.index_second_agent] = secondPos
return self.digital_state
def near_last_time(self, agent_idx, enemy_idx, distance):
return self.agent.distancer.getDistance(self.last_enemy_pos[enemy_idx],
self.last_team_pos[agent_idx]) <= distance
def reinitialise_enemy_position(self, enemy_idx, myPos, secondPos):
self.last_enemy_pos[enemy_idx] = self.initial_enemy_pos[enemy_idx]
self.dist_history[enemy_idx] = [self.agent.distancer.getDistance(self.last_enemy_pos[enemy_idx], myPos)]
self.dist_history_second_agent[enemy_idx] = [
self.agent.distancer.getDistance(self.last_enemy_pos[enemy_idx], secondPos)]
    def update_last_enemy_positions(self, agentDistances, enemy_pacman):
# this function is used for the second agent to update the last enemy positions
# when the enemies are too far to detect true positions
for idx in self.agent.getOpponents(self.gameState):
original_idx = idx
if self.red:
idx += 1
else:
if idx in [0, 2]:
idx += 2
            noisy_dist = np.clip(agentDistances[original_idx], a_min=5, a_max=None)
secondPos = self.gameState.getAgentState(self.index_second_agent).getPosition()
self.computeOpponentPosition(idx, enemy_pacman[original_idx], noisy_dist, secondPos, "second")
def computeOpponentPosition(self, enemy_idx, enemy_pacman, noisy_dist, agent_pos, agent="first"):
# use Kalman filter to correct the noisy distance
if agent == "first":
self.dist_history[enemy_idx].append(noisy_dist)
dist_history = self.dist_history[enemy_idx]
else:
self.dist_history_second_agent[enemy_idx].append(noisy_dist)
dist_history = self.dist_history_second_agent[enemy_idx]
corrected_dist = np.clip(kalman(dist_history, 0.01, 0.01), a_min=5, a_max=None)
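        # kalman() (presumably defined earlier in this module) smooths the history of noisy
        # sonar readings with the given process/measurement noise; the result is clamped
        # back to the minimum sonar range of 5.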
# corrected_dist = noisy_dist
# sample around the last enemy state, find the one with closest distance to corrected_dist
if agent == "first":
possible_enemy_pos = self.next_pos1[
(int(self.last_enemy_pos[enemy_idx][0]), int(self.last_enemy_pos[enemy_idx][1]))]
else:
possible_enemy_pos = self.next_pos1[
(int(self.last_enemy_pos[enemy_idx][0]), int(self.last_enemy_pos[enemy_idx][1]))]
possible_distances = []
for p in possible_enemy_pos:
possible_distances.append(self.agent.distancer.getDistance(agent_pos, p))
best_enemy_pos = possible_enemy_pos[find_nearest(possible_distances, corrected_dist)]
if symmetric:
            # both colour cases reduce to the same check: an enemy Pacman must be on our
            # half of the board, an enemy ghost on theirs, and never inside a wall
            if enemy_pacman:
                best_enemy_pos = (int(min(best_enemy_pos[0], len(self.digital_state[0][0]) / 2)), best_enemy_pos[1])
                while self.digital_state[0][best_enemy_pos[1]][len(self.digital_state[0][0]) - 1 - best_enemy_pos[0]] == 1:
                    best_enemy_pos = (best_enemy_pos[0] - 1, best_enemy_pos[1])
            else:
                best_enemy_pos = (int(max(best_enemy_pos[0], int(len(self.digital_state[0][0]) / 2 + 1))), best_enemy_pos[1])
                while self.digital_state[0][best_enemy_pos[1]][len(self.digital_state[0][0]) - 1 - best_enemy_pos[0]] == 1:
                    best_enemy_pos = (best_enemy_pos[0] + 1, best_enemy_pos[1])
self.last_enemy_pos[enemy_idx] = best_enemy_pos
return self.last_enemy_pos[enemy_idx]
def get_state_info(self, reshape=False):
if reshape:
digital_state = self.reshape_state(self.digital_state)
else:
digital_state = self.digital_state
return digital_state, self.score, self.time_left
def get_dense_state_representation(self, agent_idx):
dense_state = np.zeros(shape=self.digital_state[0].shape)
dense_state += -self.digital_state[0] # wall: -1
dense_state += self.digital_state[1] # food: 1
dense_state += 2 * self.digital_state[2] # capsule: 2
player_loc = np.nonzero(self.digital_state[3])
player_loc = [(player_loc[0][i], player_loc[1][i]) for i in range(len(player_loc[0]))]
indices = self.indices.copy()
indices.discard(agent_idx)
agent_idx2 = indices.pop()
enemy_indices = self.agent.getOpponents(self.gameState)
info_idx_map = {int(self.corresponding_index[agent_idx]): 0, int(self.corresponding_index[agent_idx2]): 1, 2: 2,
4: 3}
info_original_idx_map = {agent_idx: 0, agent_idx2: 1, int(min(enemy_indices)): 2, int(max(enemy_indices)): 3}
player_pos = [0 for _ in range(4 * 2)]
food_carrying = [0, 0, 0, 0]
scared_timer = [0, 0, 0, 0]
for loc in player_loc:
player = int(self.digital_state[3][loc[0]][loc[1]])
if player < 10:
player_pos[info_idx_map[player] * 2: info_idx_map[player] * 2 + 2] = loc
food_carrying[info_idx_map[player]] = self.digital_state[4][loc[0]][loc[1]] / (self.initial_food / 2)
scared_timer[info_idx_map[player]] = self.digital_state[5][loc[0]][loc[1]] / self.total_time
else:
players = split_players(player)
for p in players:
player_pos[info_idx_map[p] * 2: info_idx_map[p] * 2 + 2] = loc
food_carrying[info_idx_map[p]] = self.digital_state[4][loc[0]][loc[1]] / 2 / (
self.initial_food / 2)
scared_timer[info_idx_map[p]] = self.digital_state[5][loc[0]][loc[1]] / 2 / self.total_time
objective_dense = np.zeros(shape=(dense_state.shape[0] * 2, dense_state.shape[1] * 2))
height = len(dense_state)
width = len(dense_state[0])
mid_height = int(len(objective_dense) / 2)
mid_width = int(len(objective_dense[0]) / 2)
objective_dense[mid_height - player_pos[0]:mid_height + height - player_pos[0], mid_width - player_pos[1]:mid_width + width - player_pos[1]] = dense_state
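        # Embed the map in a canvas twice its size, centred on this agent, so the dense
        # representation is egocentric (translation-invariant with respect to our position).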
# pacman
pacman = [0 for _ in range(4)]
for p in range(4):
agent_state = self.gameState.getAgentState(p)
if agent_state.isPacman:
pacman[info_original_idx_map[p]] = 1
other_player_pos = [player_pos[i] - player_pos[i % 2] for i in range(2, 8)]
return list(objective_dense.flatten()) + other_player_pos + pacman + food_carrying + scared_timer + \
[self.score / (self.initial_food / 2) * 10, self.time_left / self.total_time]
def reshape_state(self, state):
# reshape the state into 3 * 32 * 32
reshaped = np.zeros(shape=(3, 32, 32))
for i in range(3):
reshaped[i, :16, :] = state[2 * i, :, :]
reshaped[i, 16:, :] = state[2 * i + 1, :, :]
return reshaped
def visualise_reshaped_state(self, reshaped):
digital_state = np.zeros(shape=(6, 16, 32))
for i in range(3):
digital_state[2 * i, :, :] = reshaped[i, :16, :]
digital_state[2 * i + 1, :, :] = reshaped[i, 16:, :]
self.visualise_digital_state(digital_state)
def visualise_digital_state(self, digital_state):
st = ""
food_carrying = {}
scared = {}
for i in range(len(digital_state[0])):
for j in range(len(digital_state[0][0])):
if digital_state[3][i][j] != 0:
st += str(int(digital_state[3][i][j]))
if digital_state[4][i][j] != 0:
food_carrying[int(digital_state[3][i][j])] = digital_state[4][i][j]
if digital_state[5][i][j] != 0:
scared[int(digital_state[3][i][j])] = digital_state[5][i][j]
elif digital_state[1][i][j] == 1:
st += "."
elif digital_state[0][i][j] == 1:
st += "%"
elif digital_state[2][i][j] == 1:
st += "o"
else:
st += " "
st += "\n"
st = st[:-1]
info = ""
if bool(food_carrying):
info += "Food carrying: "
for k in food_carrying.keys():
info += "%d - %d " % (k, food_carrying[k])
if bool(scared):
info += "Scared timer: "
for k in scared.keys():
info += "%d - %d " % (k, scared[k])
print(st)
print(info)
print()
def visualise_state(self):
print("Time left %s, score %s. " % (self.time_left, self.score))
self.visualise_digital_state(self.digital_state)
def visualise_one_layer(self, layer):
st = ""
for i in range(len(layer)):
for j in range(len(layer[0])):
if layer[i][j] == 0:
st += " "
else:
st += str(int(layer[i][j])) + " "
st += "\n"
print(st)
    def check_eaten(self, old_state, new_state, prev_agent_state, agent_state, enemy_idx):
        enemy_state = old_state.getAgentState(enemy_idx)
if self.agent.distancer.getDistance(prev_agent_state.getPosition(), agent_state.getPosition()) <= 1:
if enemy_state.getPosition() is not None and self.agent.distancer.getDistance(
prev_agent_state.getPosition(), enemy_state.getPosition()) <= 2:
new_enemy_state = new_state.getAgentState(enemy_idx)
if new_enemy_state.getPosition() is None:
return True
return False
def get_reward(self, new_state, old_state, mode="individual", agent_idx=None):
if mode == "individual":
assert agent_idx is not None
indices = [agent_idx]
else:
indices = self.agent.getTeam(old_state)
reward = 0
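        # Reward shaping, read off the terms below: food picked up counts 0.8x and food
        # banked 1.2x its fraction of the initial supply; symmetric terms penalise the
        # enemies' gains, with small bonuses/penalties for eating an enemy, being sent
        # back to spawn, and capsule (scared-timer) events.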
for idx in indices:
agent_reward = 0
prev_agent_state = old_state.getAgentState(idx)
agent_state = new_state.getAgentState(idx)
if (agent_state.numReturned - prev_agent_state.numReturned) <= 0:
agent_reward += ((agent_state.numCarrying - prev_agent_state.numCarrying) / self.initial_food)*0.8
agent_reward += ((agent_state.numReturned - prev_agent_state.numReturned) / self.initial_food)*1.2
powered = False
enemy_food = 0
for enemy_idx in self.agent.getOpponents(old_state):
enemy_state = new_state.getAgentState(enemy_idx)
enemy_food += enemy_state.numCarrying
prev_enemy_state = old_state.getAgentState(enemy_idx)
if (enemy_state.numReturned - prev_enemy_state.numReturned)<=0:
food_carrying_diff = enemy_state.numCarrying - prev_enemy_state.numCarrying
agent_reward -= (food_carrying_diff / self.initial_food) / len(indices)
if enemy_state.scaredTimer > 0 and prev_enemy_state.scaredTimer == 0:
powered = True
if self.check_eaten(old_state, new_state, prev_agent_state, agent_state, enemy_idx):
agent_reward += 0.2
agent_reward -= ((enemy_state.numReturned - prev_enemy_state.numReturned) / self.initial_food) / len(
indices)
if self.agent.distancer.getDistance(prev_agent_state.getPosition(), agent_state.getPosition()) > 1:
agent_reward -= 0.2
if powered:
agent_reward += 0.2 / len(indices) / 10
if agent_state.scaredTimer > 0 and prev_agent_state.scaredTimer == 0:
agent_reward -= 0.2 / len(indices) / 10
            reward += agent_reward  # accumulate over the team, as in get_positional_reward
return reward
def get_positional_reward(self, new_state, old_state, distancer, mode="individual", agent_idx=None,
annealing_start=None, current_timestep=None):
if mode == "individual":
assert agent_idx is not None
indices = [agent_idx]
else:
indices = self.agent.getTeam(old_state)
# current_timestep = get_timesteps()
reward = 0
for idx in indices:
agent_reward = 0
prev_agent_state = old_state.getAgentState(idx)
agent_state = new_state.getAgentState(idx)
prev_agent_state_pos = prev_agent_state.getPosition()
agent_state_pos = agent_state.getPosition()
if self.red:
food = old_state.getBlueFood()
else:
food = old_state.getRedFood()
min_dist = 1e4
agent_state_pos = (int(agent_state_pos[0]), int(agent_state_pos[1]))
prev_agent_state_pos = (int(prev_agent_state_pos[0]), int(prev_agent_state_pos[1]))
prev_min_dist = 1e4
for i in range(food.width):
for j in range(food.height):
if food[i][j]:
if distancer.getDistance(agent_state_pos, (i, j)) < min_dist:
min_dist = distancer.getDistance(agent_state_pos, (i, j))
if distancer.getDistance(prev_agent_state_pos, (i, j)) < prev_min_dist:
prev_min_dist = distancer.getDistance(prev_agent_state_pos, (i, j))
if agent_state.numCarrying == 0:
dist_diff = (prev_min_dist - min_dist)
go_to_food_reward = dist_diff * (37 * prev_min_dist ** 2 + 1975 * prev_min_dist + 242200) / 12650000
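                # the quadratic factor grows with how far the nearest food was, so closing one
                # step of distance is rewarded slightly more when the target is still far away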
if annealing_start is None:
agent_reward += go_to_food_reward
else:
assert current_timestep is not None and current_timestep >= annealing_start
agent_reward += go_to_food_reward * 0.9997 ** (current_timestep - annealing_start)
if agent_state_pos == prev_agent_state_pos:
if annealing_start is None:
agent_reward -= 0.5
else:
assert current_timestep is not None and current_timestep >= annealing_start
agent_reward -= 0.5 * 0.99977 ** (current_timestep - annealing_start)
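            # both shaping terms decay geometrically (factor ~0.9997 per step) once annealing
            # starts, so the hand-crafted positional rewards fade out as training progresses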
# prev_agent_distances = old_state.getAgentDistances()
# agent_distances = new_state.getAgentDistances()
# for enemy_idx in self.agent.getOpponents(old_state):
# enemy_state = new_state.getAgentState(enemy_idx)
#
# dist = agent_distances[enemy_idx]
# prev_dist = prev_agent_distances[enemy_idx]
# if agent_state.isPacman and not enemy_state.isPacman and enemy_state.scaredTimer == 0:
# agent_reward += dist - prev_dist
# if not agent_state.isPacman and enemy_state.isPacman and agent_state.scaredTimer == 0:
# agent_reward += prev_dist - dist
# if not agent_state.isPacman and enemy_state.isPacman and agent_state.scaredTimer > 0:
# if dist < agent_state.scaredTimer + 2:
# agent_reward += dist - prev_dist
reward += agent_reward
return reward
| 45.59562 | 162 | 0.554862 |
7bafd531f0ca42d80bcb7e40a1a93322746fc122 | 13,685 | py | Python | train_refinedet.py | HaoIrving/RefineDet.PyTorch | ae453a7b701aeb0cb82b46f9c72f23ae4520ddcd | ["MIT"] | null | null | null | train_refinedet.py | HaoIrving/RefineDet.PyTorch | ae453a7b701aeb0cb82b46f9c72f23ae4520ddcd | ["MIT"] | null | null | null | train_refinedet.py | HaoIrving/RefineDet.PyTorch | ae453a7b701aeb0cb82b46f9c72f23ae4520ddcd | ["MIT"] | null | null | null |
from data import *
from utils.augmentations import SSDAugmentation
from layers.modules import RefineDetMultiBoxLoss
#from ssd import build_ssd
from models.refinedet import build_refinedet
# from models.refinedet_bn import build_refinedet
import os
import sys
import time
import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torch.nn.init as init
import torch.utils.data as data
import numpy as np
import argparse
from utils.logger import Logger
import math
import datetime
from mmdet.models import build_loss
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
parser = argparse.ArgumentParser(
description='Single Shot MultiBox Detector Training With Pytorch')
train_set = parser.add_mutually_exclusive_group()
parser.add_argument('--dataset', default='COCO', choices=['VOC', 'COCO'],
type=str, help='VOC or COCO')
parser.add_argument('--input_size', default='512', choices=['320', '512'],
type=str, help='RefineDet320 or RefineDet512')
parser.add_argument('--dataset_root', default=VOC_ROOT,
help='Dataset root directory path')
parser.add_argument('--basenet', default='./weights/vgg16_reducedfc.pth',
help='Pretrained base model')
parser.add_argument('--batch_size', default=32, type=int,
help='Batch size for training')
parser.add_argument('--resume', default=None, type=str,
help='Checkpoint state_dict file to resume training from')
# parser.add_argument('--start_iter', default=0, type=int,
# help='Resume training at this iter')
parser.add_argument('--num_workers', default=8, type=int,
help='Number of workers used in dataloading')
parser.add_argument('--cuda', default=True, type=str2bool,
help='Use CUDA to train model')
parser.add_argument('--lr', '--learning-rate', default=1e-3, type=float,
help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float,
help='Momentum value for optim')
parser.add_argument('--weight_decay', default=5e-4, type=float,
help='Weight decay for SGD')
parser.add_argument('--gamma', default=0.1, type=float,
help='Gamma update for SGD')
parser.add_argument('--visdom', default=False, type=str2bool,
help='Use visdom for loss visualization')
parser.add_argument('--save_folder', default='weights/',
help='Directory for saving checkpoint models')
parser.add_argument('--resume_epoch', default=0,
                    type=int, help='resume epoch for retraining')
parser.add_argument('-max','--max_epoch', default=300,
type=int, help='max epoch for retraining')
parser.add_argument('--ngpu', default=4, type=int, help='gpus')
args = parser.parse_args()
if torch.cuda.is_available():
if args.cuda:
torch.set_default_tensor_type('torch.cuda.FloatTensor')
if not args.cuda:
print("WARNING: It looks like you have a CUDA device, but aren't " +
"using CUDA.\nRun with --cuda for optimal training speed.")
torch.set_default_tensor_type('torch.FloatTensor')
else:
torch.set_default_tensor_type('torch.FloatTensor')
if not os.path.exists(args.save_folder):
os.mkdir(args.save_folder)
sys.stdout = Logger(os.path.join(args.save_folder, 'log.txt'))
# os.environ['CUDA_VISIBLE_DEVICES'] = '2'
# os.system('CUDA_VISIBLE__DEVICES = 2')
# args.lr = 1e-5
# args.batch_size = 4
# args.ngpu = 2
# args.num_workers = 0
args.input_size = str(512)
args.max_epoch = 300
negpos_ratio = 3
initial_lr = args.lr
pretrained = None
# pretrained = args.basenet
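# NOTE: the assignments above override the command-line values for input size, training
# length and the hard-negative mining ratio; with `pretrained = None` the VGG backbone is
# not loaded and the network is Kaiming-initialised instead (see weights_init_relu below).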
def train():
if args.visdom:
import visdom
viz = visdom.Visdom()
print('Loading the dataset...')
if args.dataset == 'COCO':
if args.dataset_root == VOC_ROOT:
if not os.path.exists(COCOroot):
parser.error('Must specify dataset_root if specifying dataset')
print("WARNING: Using default COCO dataset_root because " +
"--dataset_root was not specified.")
args.dataset_root = COCOroot
cfg = coco_refinedet[args.input_size]
# dataset = COCODetection(root=args.dataset_root, image_set='train',
# transform=SSDAugmentation(cfg['min_dim'],
# MEANS))
train_sets = [('sarship', 'train')]
dataset = COCODetection(COCOroot, train_sets, SSDAugmentation(cfg['min_dim'],
MEANS))
elif args.dataset == 'VOC':
'''if args.dataset_root == COCO_ROOT:
parser.error('Must specify dataset if specifying dataset_root')'''
cfg = voc_refinedet[args.input_size]
dataset = VOCDetection(root=args.dataset_root,
transform=SSDAugmentation(cfg['min_dim'],
MEANS))
print('Training RefineDet on:', dataset.name)
print('Using the specified args:')
print(args)
device = torch.device('cuda:0' if args.cuda else 'cpu')
refinedet_net = build_refinedet('train', cfg['min_dim'], cfg['num_classes'])
# refinedet_net = build_s2rn('train', cfg['min_dim'], cfg['num_classes'])
net = refinedet_net
print(net)
if args.ngpu > 1 and args.cuda:
net = torch.nn.DataParallel(refinedet_net, device_ids=list(range(args.ngpu)))
cudnn.benchmark = True
net = net.to(device)
if args.resume:
print('Resuming training, loading {}...'.format(args.resume))
refinedet_net.load_weights(args.resume)
else:
from weights_init import kaiming_init, constant_init, normal_init
def weights_init_relu(m):
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, nn.BatchNorm2d):
constant_init(m, 1)
elif isinstance(m, nn.Linear):
normal_init(m, std=0.01)
print('Initializing weights...')
refinedet_net.vgg.apply(weights_init_relu)
# vgg_weights = torch.load(args.basenet)
# print('Loading base network...')
# refinedet_net.vgg.load_state_dict(vgg_weights)
# initialize newly added layers' weights with xavier method
# refinedet_net.conv4_3_Norm.apply(weights_init_relu)
# refinedet_net.conv5_3_Norm.apply(weights_init_relu)
# refinedet_net.conv7_Norm.apply(weights_init_relu)
# refinedet_net.conv_extra_Norm.apply(weights_init_relu)
refinedet_net.extras.apply(weights_init_relu)
# refinedet_net.extras.apply(weights_init)
refinedet_net.arm_loc.apply(weights_init)
refinedet_net.arm_conf.apply(weights_init)
refinedet_net.odm_loc.apply(weights_init)
refinedet_net.odm_conf.apply(weights_init)
refinedet_net.tcb0.apply(weights_init)
refinedet_net.tcb1.apply(weights_init)
refinedet_net.tcb2.apply(weights_init)
# refinedet_net.init_weights(pretrained=pretrained)
optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=args.momentum,
weight_decay=args.weight_decay)
arm_criterion = RefineDetMultiBoxLoss( 2, 0.5, True, 0, True, negpos_ratio, 0.5,
False, args.cuda)
odm_criterion = RefineDetMultiBoxLoss(cfg['num_classes'], 0.5, True, 0, True, negpos_ratio, 0.5,
False, args.cuda, use_ARM=True)
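    # RefineDet optimises two losses jointly: the ARM (anchor refinement module) does binary
    # objectness filtering and coarse box regression, while the ODM (object detection module)
    # classifies the refined anchors into all num_classes categories; use_ARM=True makes the
    # ODM loss match targets against the ARM-adjusted anchors.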
net.train()
# loss counters
arm_loc_loss = 0
arm_conf_loss = 0
odm_loc_loss = 0
odm_conf_loss = 0
epoch = 0 + args.resume_epoch
epoch_size = math.ceil(len(dataset) / args.batch_size)
max_iter = args.max_epoch * epoch_size
stepvalues = (args.max_epoch * 2 // 3 * epoch_size, args.max_epoch * 8 // 9 * epoch_size, args.max_epoch * epoch_size)
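    # LR schedule: a warmup over the first 5 epochs (see adjust_learning_rate), then a decay
    # by `gamma` at 2/3 and 8/9 of the total epoch budget.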
step_index = 0
if args.resume_epoch > 0:
start_iter = args.resume_epoch * epoch_size
else:
start_iter = 0
if args.visdom:
vis_title = 'RefineDet.PyTorch on ' + dataset.name
vis_legend = ['Loc Loss', 'Conf Loss', 'Total Loss']
iter_plot = create_vis_plot(viz, 'Iteration', 'Loss', vis_title, vis_legend)
epoch_plot = create_vis_plot(viz, 'Epoch', 'Loss', vis_title, vis_legend)
data_loader = data.DataLoader(dataset, args.batch_size,
num_workers=args.num_workers,
shuffle=True, collate_fn=detection_collate,
pin_memory=True)
for iteration in range(start_iter, max_iter):
if iteration % epoch_size == 0:
if args.visdom and iteration != 0:
update_vis_plot(viz, epoch, arm_loc_loss, arm_conf_loss, epoch_plot, None,
'append', epoch_size)
# reset epoch loss counters
arm_loc_loss = 0
arm_conf_loss = 0
odm_loc_loss = 0
odm_conf_loss = 0
# create batch iterator
batch_iterator = iter(data_loader)
if (epoch % 10 == 0 and epoch > 0) or (epoch % 5 ==0 and epoch > 200):
torch.save(net.state_dict(), args.save_folder+'RefineDet'+ args.input_size +'_'+args.dataset + '_epoches_'+
repr(epoch) + '.pth')
epoch += 1
t0 = time.time()
if iteration in stepvalues:
step_index += 1
lr = adjust_learning_rate(optimizer, args.gamma, epoch, step_index, iteration, epoch_size)
# load train data
images, targets = next(batch_iterator)
images = images.to(device)
targets = [ann.to(device) for ann in targets]
for an in targets:
for instance in an:
for cor in instance[:-1]:
if cor < 0 or cor > 1:
raise StopIteration
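        # sanity check above: any normalised box coordinate outside [0, 1] raises and aborts
        # the run rather than feeding a corrupt target to the loss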
# forward
out = net(images)
# backprop
optimizer.zero_grad()
arm_loss_l, arm_loss_c = arm_criterion(out, targets)
odm_loss_l, odm_loss_c = odm_criterion(out, targets)
arm_loss = arm_loss_l + arm_loss_c
odm_loss = odm_loss_l + odm_loss_c
loss = arm_loss + odm_loss
loss.backward()
optimizer.step()
arm_loc_loss += arm_loss_l.item()
arm_conf_loss += arm_loss_c.item()
odm_loc_loss += odm_loss_l.item()
odm_conf_loss += odm_loss_c.item()
t1 = time.time()
batch_time = t1 - t0
eta = int(batch_time * (max_iter - iteration))
print('Epoch:{}/{} || Epochiter: {}/{} || Iter: {}/{} || ARM_L Loss: {:.4f} ARM_C Loss: {:.4f} ODM_L Loss: {:.4f} ODM_C Loss: {:.4f} loss: {:.4f} || LR: {:.8f} || Batchtime: {:.4f} s || ETA: {}'.\
format(epoch, args.max_epoch, (iteration % epoch_size) + 1, epoch_size, iteration + 1, max_iter, arm_loss_l.item(), arm_loss_c.item(), odm_loss_l.item(), odm_loss_c.item(), loss.item(), lr, batch_time, str(datetime.timedelta(seconds=eta))))
# if iteration % 10 == 0:
# print('Batch time: %.4f sec. ||' % (batch_time) + 'Eta: {}'.format(str(datetime.timedelta(seconds=eta))))
# print('iter ' + repr(iteration) + ' || ARM_L Loss: %.4f ARM_C Loss: %.4f ODM_L Loss: %.4f ODM_C Loss: %.4f loss: %.4f ||' \
# % (arm_loss_l.item(), arm_loss_c.item(), odm_loss_l.item(), odm_loss_c.item(), loss.item()), end=' ')
if args.visdom:
update_vis_plot(viz, iteration, arm_loss_l.item(), arm_loss_c.item(),
iter_plot, epoch_plot, 'append')
torch.save(refinedet_net.state_dict(), args.save_folder + '/RefineDet{}_{}_final.pth'.format(args.input_size, args.dataset))
def adjust_learning_rate(optimizer, gamma, epoch, step_index, iteration, epoch_size):
"""Sets the learning rate
# Adapted from PyTorch Imagenet example:
# https://github.com/pytorch/examples/blob/master/imagenet/main.py
"""
warmup_epoch = 5
if epoch <= warmup_epoch:
lr = 1e-6 + (initial_lr-1e-6) * iteration / (epoch_size * warmup_epoch)
else:
lr = initial_lr * (gamma ** (step_index))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
def xavier(param):
init.xavier_uniform_(param)
def weights_init(m):
if isinstance(m, nn.Conv2d):
xavier(m.weight.data)
m.bias.data.zero_()
elif isinstance(m, nn.ConvTranspose2d):
xavier(m.weight.data)
m.bias.data.zero_()
def create_vis_plot(viz, _xlabel, _ylabel, _title, _legend):
return viz.line(
X=torch.zeros((1,)).cpu(),
Y=torch.zeros((1, 3)).cpu(),
opts=dict(
xlabel=_xlabel,
ylabel=_ylabel,
title=_title,
legend=_legend
)
)
def update_vis_plot(viz, iteration, loc, conf, window1, window2, update_type,
epoch_size=1):
viz.line(
X=torch.ones((1, 3)).cpu() * iteration,
Y=torch.Tensor([loc, conf, loc + conf]).unsqueeze(0).cpu() / epoch_size,
win=window1,
update=update_type
)
# initialize epoch plot on first iteration
if iteration == 0:
viz.line(
X=torch.zeros((1, 3)).cpu(),
Y=torch.Tensor([loc, conf, loc + conf]).unsqueeze(0).cpu(),
win=window2,
update=True
)
if __name__ == '__main__':
train()
| 40.131965 | 252 | 0.612934 |
a01aa5afdf9beda1e93d25cbfa3280ed982f5c0d | 4,063 | py | Python | old/parameterisation.py | abyrne55/osvr-review | 777884dcbcb2283247ffe256da51254aa3d6896b | ["MIT"] | null | null | null | old/parameterisation.py | abyrne55/osvr-review | 777884dcbcb2283247ffe256da51254aa3d6896b | ["MIT"] | null | null | null | old/parameterisation.py | abyrne55/osvr-review | 777884dcbcb2283247ffe256da51254aa3d6896b | ["MIT"] | null | null | null |
# Direct Port of constructParams.m
import numpy as np
def construct_admm_parameters(dataset, labelset, epsilon, bias, flag):
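    # Builds the constraint matrix A, offset vector c and per-row weights for the OSVR
    # (ordinal support vector regression) problem solved by ADMM, mirroring the original
    # MATLAB constructParams.m. Rows come in two groups: epsilon-insensitive intensity
    # constraints on labelled frames, followed by ordinal constraints on frame pairs.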
datacells = dataset.T
labelcells = labelset.T
N = datacells.size
T = np.zeros((N,1))
    num_pairs_max = 0
    num_intensity = 0
# Collect Statistics of the Dataset
for j in range(N):
D = T[j] = datacells[j].size
num_pairs_max += T[j]*(T[j]+1)/2
num_intensity += 2*labelcells[j].size
# initialize the components for OSVR problem
# pre-allocate storage for A and e for efficiency
# TODO: Bug? Seems to use last value assigned to D in for loop
num_pairs_max = int(num_pairs_max.flat[0])
#num_intensity = num_intensity.flat[0]
A = np.zeros((num_intensity+num_pairs_max, D+bias))
c = np.ones((num_intensity+num_pairs_max, 1))
weight = np.ones((num_intensity+num_pairs_max, 1))
idx_row_I = 0
idx_row_P = num_intensity
num_pairs = 0
for n in range(N):
data = datacells[n]
label = labelcells[n]
nframe = label.shape[0] #PN1i
# index of apex frame
peak = label[0].max(0)[1] #PN1i
# all the indices with peak intensity
idx = np.array([np.argmax(label[0])]).flatten()
# select apx to be the median one of all the peak frames
# TODO: Verify this line. Probably buggy
import ipdb; ipdb.set_trace()
apx = label[idx[int(max(0, np.ceil(len(idx) / 2.0))-1)]]
# based on apex frame, create the ordinal set
# number of ordinal pair
        pairs = np.zeros((int(T[n] * (T[n] + 1) / 2), 2))  # numpy shapes must be integers
        dist = np.ones((int(T[n] * (T[n] + 1) / 2), 1))
count = 0
#TODO PN1i conv probs needed
for i in range(apx, 2, -1):
pairs[count+1:count+i-1,1] = i
pairs[count+1:count+i-1,2] = np.concatenate(np.arange(i - 1,1,- 1)).T
dist[count+1:count+i-1] = np.concatenate(np.arange(1,i - 1)).T
count += i-1
if apx < T[n]:
for i in np.arange(apx,T[n]).reshape(-1):
pairs[count+1:count+T[n] - i,1]=i
pairs[count+1:count+T[n] - i,2]=np.concatenate(np.arange(i + 1,T[n])).T
dist[count+1:count+T[n] - i]=np.concatenate(np.arange(1,T[n] - i)).T
count=count + T[n] - i
pairs=pairs[1:count,:]
dist=dist[1:count]
num_pairs=num_pairs + count
# compute objective function value and gradient of objective function
dat=data[:,label[:,1]] # D*num_labels
tij=data[:,pairs[:,1]] - data[:,pairs[:,2]] # D*num_pairs
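        # dat: features of the labelled frames (intensity constraints); tij: feature
        # differences of the ordinal pairs built around the apex frame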
# assign values to parameters
# TODO: PN1i probs needed for the next 10 lines
A[idx_row_I + 1:idx_row_I + nframe,1:D]=dat.T
A[idx_row_I + 1 + num_intensity / 2:idx_row_I + nframe + num_intensity / 2,1:D]=- dat.T
A[idx_row_P + 1:idx_row_P + count,1:D]=- tij.T
c[idx_row_I + 1:idx_row_I + nframe]=np.dot(- epsilon[1],np.ones(nframe,1)) - label[:,2]
c[idx_row_I + 1 + num_intensity / 2:idx_row_I + nframe + num_intensity / 2]= np.dot(- epsilon[1],np.ones(nframe,1)) + label[:,2]
c[idx_row_P + 1:idx_row_P + count]=epsilon[2]
weight[idx_row_P + 1:idx_row_P + count]=1.0 / dist
idx_row_I=idx_row_I + nframe
idx_row_P=idx_row_P + count
# truncate to the actual number of rows
A=A[1:num_intensity + num_pairs,:]
if bias: # augment A for including bias term
A[1:num_intensity / 2,D + 1]=1
A[1 + num_intensity / 2:num_intensity,D + 1]=- 1
c=c[1:num_intensity + num_pairs,:]
weight=weight[1:num_intensity + num_pairs]
    # unsupervised flag to exclude all the rows associated with intensity labels
# TODO: Below probably real buggy
if flag:
A=A[num_intensity + 1:,:]
c=c[num_intensity + 1:,:]
weight=weight[num_intensity + 1:,:]
num_intensity=0
return A,c,D,num_intensity,num_pairs,weight
| 37.275229 | 136 | 0.578144 |