Dataset schema (one record per source file):

| column | dtype | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 – 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 3–972 |
| max_stars_repo_name | string | lengths 6–130 |
| max_stars_repo_head_hexsha | string | lengths 40–78 |
| max_stars_repo_licenses | list | lengths 1–10 |
| max_stars_count | int64 | 1 – 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | lengths 3–972 |
| max_issues_repo_name | string | lengths 6–130 |
| max_issues_repo_head_hexsha | string | lengths 40–78 |
| max_issues_repo_licenses | list | lengths 1–10 |
| max_issues_count | int64 | 1 – 116k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | lengths 3–972 |
| max_forks_repo_name | string | lengths 6–130 |
| max_forks_repo_head_hexsha | string | lengths 40–78 |
| max_forks_repo_licenses | list | lengths 1–10 |
| max_forks_count | int64 | 1 – 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | lengths 3 – 1.03M |
| avg_line_length | float64 | 1.13 – 941k |
| max_line_length | int64 | 2 – 941k |
| alphanum_fraction | float64 | 0 – 1 |

In each record below, the `max_stars_*`, `max_issues_*` and `max_forks_*` blocks repeat the same repo path, name, head hexsha and licenses, so these are listed once per record (the single record where the names differ is noted inline); missing counts and event dates are written as `null`. Each record shows the metadata, then the file `content`, then its line statistics.

---
hexsha: dd648f873c9e0b7a1a62eebe81b3f595a28bde5d | size: 2,944 | ext: py | lang: Python
repo: justinsousa/integrations-core @ 223e337492a04de517bc35ec85ddf921108fd8d2 | path: datadog_checks_dev/datadog_checks/dev/tooling/github.py | licenses: ["BSD-3-Clause"]
stars: 2 (2019-05-28T03:48:29.000Z – 2019-07-05T07:05:58.000Z) | issues: 4 (2019-07-03T02:53:19.000Z – 2019-07-10T14:52:14.000Z) | forks: 1 (2020-01-15T16:58:51.000Z)
content:
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
import re
import requests
from .constants import CHANGELOG_LABEL_PREFIX
API_URL = 'https://api.github.com'
PR_ENDPOINT = API_URL + '/repos/DataDog/{}/pulls/{}'
DEFAULT_REPO = 'integrations-core'
PR_PATTERN = re.compile(r'\(#(\d+)\)') # match something like `(#1234)` and return `1234` in a group
def get_auth_info(config=None):
"""
See if a personal access token was passed
"""
gh_config = (config or {}).get('github', {})
user = gh_config.get('user') or os.getenv('DD_GITHUB_USER')
token = gh_config.get('token') or os.getenv('DD_GITHUB_TOKEN')
if user and token:
return user, token
def get_pr_labels(pr_payload):
labels = []
for label in pr_payload.get('labels') or []:
name = label.get('name')
if name:
labels.append(name)
return labels
def get_pr_milestone(pr_payload):
return (pr_payload.get('milestone') or {}).get('title', '')
def get_changelog_types(pr_payload):
"""
Fetch the labels from the PR and process the ones related to the changelog.
"""
changelog_labels = []
for name in get_pr_labels(pr_payload):
if name.startswith(CHANGELOG_LABEL_PREFIX):
# only add the name, e.g. for `changelog/Added` it's just `Added`
changelog_labels.append(name.split(CHANGELOG_LABEL_PREFIX)[1])
return changelog_labels
def get_pr(pr_num, config=None, repo=DEFAULT_REPO, raw=False):
"""
Get the payload for the given PR number. Let exceptions bubble up.
"""
response = requests.get(PR_ENDPOINT.format(repo, pr_num), auth=get_auth_info(config))
if raw:
return response
else:
response.raise_for_status()
return response.json()
def get_pr_from_hash(commit_hash, repo, config=None, raw=False):
response = requests.get(
'https://api.github.com/search/issues?q=sha:{}+repo:DataDog/{}'.format(commit_hash, repo),
auth=get_auth_info(config),
)
if raw:
return response
else:
response.raise_for_status()
return response.json()
def from_contributor(pr_payload):
"""
If the PR comes from a fork, we can safely assumed it's from an
external contributor.
"""
try:
return pr_payload.get('head', {}).get('repo', {}).get('fork') is True
except Exception:
return False
def parse_pr_number(log_line):
match = re.search(PR_PATTERN, log_line)
if match:
return match.group(1)
def parse_pr_numbers(git_log_lines):
"""
Parse PR numbers from commit messages. At GitHub those have the format:
`here is the message (#1234)`
being `1234` the PR number.
"""
prs = []
for line in git_log_lines:
pr_number = parse_pr_number(line)
if pr_number:
prs.append(pr_number)
return prs
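
A minimal, self-contained sketch of the PR-number extraction that `parse_pr_number`/`parse_pr_numbers` perform above; the commit subjects are made up for illustration.

```python
import re

# Same pattern as PR_PATTERN above: match "(#1234)" and capture "1234".
PR_PATTERN = re.compile(r'\(#(\d+)\)')

# Hypothetical git log subjects (not taken from the repository).
git_log_lines = [
    'Bump base package version (#4124)',
    'Fix typo in docs',              # no PR reference, skipped
    'Add changelog entry (#4139)',
]

prs = [m.group(1) for line in git_log_lines if (m := PR_PATTERN.search(line))]
print(prs)  # ['4124', '4139']
```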
avg_line_length: 26.285714 | max_line_length: 101 | alphanum_fraction: 0.650815

---
hexsha: 233ed42d7a88f5cc5491fcb61f22fc58088f76b6 | size: 809 | ext: py | lang: Python
repo: NicolasdeLimaAlves/Curso-de-Python @ 4987a2c8075a76f676aa69bfd968fdf8d1c7fa52 | path: Mundos/Mundo 2/Aulas/Aula 15/ex069.py | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
dezoito = homens = mulher = cont = 0
while True:
print('=' * 20)
idade = int(input('Idade: '))
sexo = str(input('Sexo [M/F] ')).upper().strip()
while sexo != 'F' and sexo != 'M':
sexo = str(input('Sexo [M/F] ')).upper().strip()
print('=' * 20)
continuar = str(input('Quer continuar? [S/N] ')).upper().strip()
while continuar != 'S' and continuar != 'N':
continuar = str(input('Quer continuar? [S/N] ')).upper().strip()
if idade > 18:
dezoito += 1
if sexo == 'M':
homens += 1
if sexo == 'F' and idade < 20:
mulher += 1
if continuar == 'N':
break
print('=' * 32)
print(f'{dezoito} pessoas tem mais de 18 anos')
print(f'{homens} homens foram cadastrados')
print(f'{mulher} mulheres tem menos de 20 anos')
print('=' * 32)
avg_line_length: 33.708333 | max_line_length: 72 | alphanum_fraction: 0.546354

---
hexsha: c36cb162b80a75b78ee916804eb325df2bfc0bde | size: 1,535 | ext: py | lang: Python
repo: Vini-Dev-Py/Bot-ML @ f1dfda7a43940a7ada707ccaa9dde486b3c5ddd3 | path: src/components/TesteLista.py | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
from tkinter import *
from tkinter import messagebox
from tkinter import ttk
import functools
jan = Tk()
jan.title("Bot Mercado Envios")
jan.geometry("800x300")
jan.configure(background="#2b2b2b")
jan.resizable(width=False, height=False)
jan.iconbitmap(default="C:\programas\Programaçâo\GitHub\Bot-ML\Bot-ML\images\LogoIcon.ico")
logo = PhotoImage(file="C:\programas\Programaçâo\GitHub\Bot-ML\Bot-ML\images\logo.png")
messagebox.showinfo("Hello World !", "Seja Bem-Vindo ")
LeftFrame = Frame(jan, width=220, height=500, bg="#FF8C00", relief="raise")
LeftFrame.pack(side=LEFT)
RightFrame = Frame(jan, width=575, height=500, bg="#4f4f4f", relief="raise")
RightFrame.pack(side=RIGHT)
Caixas = Label(RightFrame, text="Total De Caixas:", font=("Century Gothic", 20), bg="#4f4f4f", fg="Black")
Caixas.place(x=5, y=10)
CaixasEntry = ttk.Entry(RightFrame, width=53)
CaixasEntry.place(x=230, y=25)
Lote = Label(RightFrame, text="Nº Do Lote:", font=("Century Gothic", 20), bg="#4f4f4f", fg="Black")
Lote.place(x=5, y=75)
LoteEntry = ttk.Entry(RightFrame, width=53)
LoteEntry.place(x=230, y=90)
Valores = Label(RightFrame, text="Codigos Lidos: ", font=("Century Gothic", 20), bg="#4f4f4f", fg="Black")
Valores.place(x=5, y=140)
ValoresEntry = Text(RightFrame, width=40, height=5)
# ValoresEntry.config(state=state)
ValoresEntry.place(x=230, y=155)
def lote():
Lote = LoteEntry.get()
print(Lote)
ConfButton = ttk.Button(RightFrame, text="Adicionar Lista", width= 30, command=lote)
ConfButton.place(x=5, y=190)
jan.mainloop()
avg_line_length: 30.7 | max_line_length: 106 | alphanum_fraction: 0.725081

---
hexsha: e91183e5e27770e36ca47c2edda313ffafc57452 | size: 924 | ext: py | lang: Python
repo: mrdulin/django-codelab @ b7ff9746fb3be9dc5de90e81e1c604b3aeea692d | path: codelab/urls.py | licenses: ["MIT"]
stars: null | issues: 2 (2021-06-09T18:43:26.000Z – 2021-06-10T19:25:38.000Z) | forks: 1 (2021-09-27T10:55:07.000Z)
content:
"""codelab URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
# path('polls/', include('polls.urls')),
path('learning_logs/', include('learning_logs.urls')),
path('users/', include('users.urls', namespace='users')),
path('admin/', admin.site.urls),
]
avg_line_length: 36.96 | max_line_length: 77 | alphanum_fraction: 0.695887

---
hexsha: 96145eecbc5783606145557ab731fa5f91eb42f9 | size: 429 | ext: py | lang: Python
repo: djjh/reinforcement-learning-labs @ 22706dab9e7f16e364ee4ed79c0bd67a343e5b08 | path: src/rl/core/recording_policy.py | licenses: ["MIT"]
stars: 1 (2019-10-06T11:45:52.000Z) | issues: null | forks: null
content:
class RecordingPolicy:
def __init__(self, policy):
self._policy = policy
self._probabilities = []
def action(self, observation, deterministic):
action, probability_distribution = self._policy.step(observation, deterministic)
self._probabilities.append(probability_distribution.probabilities())
return action
def get_probabilities(self):
return self._probabilities
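
A hedged usage sketch for `RecordingPolicy`, assuming the class above is in scope; the dummy policy and uniform distribution below are illustrative stand-ins for the `step()` interface it expects (an `(action, distribution)` pair whose distribution exposes `probabilities()`).

```python
class UniformDistribution:
    """Illustrative stand-in for a probability distribution object."""
    def __init__(self, n):
        self._n = n

    def probabilities(self):
        return [1.0 / self._n] * self._n


class DummyPolicy:
    """Illustrative stand-in policy: always picks action 0 over 4 actions."""
    def step(self, observation, deterministic):
        return 0, UniformDistribution(4)


recorder = RecordingPolicy(DummyPolicy())
action = recorder.action(observation=None, deterministic=True)
print(action)                        # 0
print(recorder.get_probabilities())  # [[0.25, 0.25, 0.25, 0.25]]
```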
avg_line_length: 26.8125 | max_line_length: 88 | alphanum_fraction: 0.706294

---
hexsha: 298b1dbf4c8e09777df2474ffe5966e6df40ad58 | size: 4,670 | ext: py | lang: Python
repo: cometa/cometa-dronekit @ 63b4da12200a9cbcadd08523062df85ca423ff41 (the max_stars_repo_name field reads "cometa/cometa-dronek") | path: utils.py | licenses: ["Apache-2.0"]
stars: 4 (2016-11-16T18:06:21.000Z – 2022-03-22T00:05:04.000Z) | issues: null | forks: null
content:
""" Utility functions Cometa agent for DroneKit.
Author: Marco Graziano
"""
__license__ = """
Copyright 2016 Visible Energy Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import math
from dronekit import LocationGlobal, LocationGlobalRelative
def check_rpc_msg(req):
ret = False
id = None
k = req.keys()
# check presence of required id attribute
if 'id' in k:
id = req['id']
else:
return ret, id
# check object length
if (len(k) != 4):
return ret, id
# check presence of required attributes
if (not 'jsonrpc' in k) or (not 'method' in k) or (not 'params' in k):
return ret, id
# check for version
if req['jsonrpc'] != "2.0":
return ret, id
# valid request
return True,id
def isanumber(x):
try:
int(x)
except ValueError:
try:
float(x)
except ValueError:
return False
return True
"""
Functions to make it easy to convert between the different frames-of-reference. In particular these
make it easy to navigate in terms of "metres from the current position" when using commands that take
absolute positions in decimal degrees.
The methods are approximations only, and may be less accurate over longer distances, and when close
to the Earth's poles.
Specifically, it provides:
* get_location_meters - Get LocationGlobal (decimal degrees) at distance (m) North & East of a given LocationGlobal.
* get_distance_meters - Get the distance between two LocationGlobal objects in metres
* get_bearing - Get the bearing in degrees to a LocationGlobal
"""
def get_location_meters(original_location, dNorth, dEast):
"""
Returns a LocationGlobal object containing the latitude/longitude `dNorth` and `dEast` meters from the
specified `original_location`. The returned LocationGlobal has the same `alt` value
as `original_location`.
The function is useful when you want to move the vehicle around specifying locations relative to
the current vehicle position.
The algorithm is relatively accurate over small distances (10m within 1km) except close to the poles.
For more information see:
http://gis.stackexchange.com/questions/2951/algorithm-for-offsetting-a-latitude-longitude-by-some-amount-of-meters
"""
earth_radius = 6378137.0 #Radius of "spherical" earth
#Coordinate offsets in radians
dLat = dNorth/earth_radius
dLon = dEast/(earth_radius*math.cos(math.pi*original_location.lat/180))
#New position in decimal degrees
newlat = original_location.lat + (dLat * 180/math.pi)
newlon = original_location.lon + (dLon * 180/math.pi)
if type(original_location) is LocationGlobal:
targetlocation=LocationGlobal(newlat, newlon,original_location.alt)
elif type(original_location) is LocationGlobalRelative:
targetlocation=LocationGlobalRelative(newlat, newlon,original_location.alt)
else:
raise Exception("Invalid Location object passed")
return targetlocation;
def get_distance_meters(aLocation1, aLocation2):
"""
Returns the ground distance in meters between two LocationGlobal objects.
This method is an approximation, and will not be accurate over large distances and close to the
earth's poles. It comes from the ArduPilot test code:
https://github.com/diydrones/ardupilot/blob/master/Tools/autotest/common.py
"""
dlat = aLocation2.lat - aLocation1.lat
dlong = aLocation2.lon - aLocation1.lon
return math.sqrt((dlat*dlat) + (dlong*dlong)) * 1.113195e5
def get_bearing(aLocation1, aLocation2):
"""
Returns the bearing between the two LocationGlobal objects passed as parameters.
This method is an approximation, and may not be accurate over large distances and close to the
earth's poles. It comes from the ArduPilot test code:
https://github.com/diydrones/ardupilot/blob/master/Tools/autotest/common.py
"""
off_x = aLocation2.lon - aLocation1.lon
off_y = aLocation2.lat - aLocation1.lat
bearing = 90.00 + math.atan2(-off_y, off_x) * 57.2957795
if bearing < 0:
bearing += 360.00
return bearing;
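
A standalone sketch of the flat-earth approximation that `get_distance_meters` applies above (no dronekit import needed); the two coordinates are made up for illustration.

```python
import math
from collections import namedtuple

Point = namedtuple('Point', 'lat lon')  # minimal stand-in with .lat/.lon attributes

def approx_ground_distance_meters(a, b):
    dlat = b.lat - a.lat
    dlong = b.lon - a.lon
    # 1.113195e5 is roughly the number of metres per degree of latitude.
    return math.sqrt(dlat * dlat + dlong * dlong) * 1.113195e5

home = Point(lat=37.7749, lon=-122.4194)
target = Point(lat=37.7849, lon=-122.4094)
print(round(approx_ground_distance_meters(home, target)))  # ~1574 m
```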
avg_line_length: 37.36 | max_line_length: 118 | alphanum_fraction: 0.721842

---
hexsha: 3bd5d3d72b33aebfee289e904c35da2b81d68355 | size: 6,930 | ext: py | lang: Python
repo: crowdbotics-apps/t1d-regimen-29628 @ 531beffb60980649a8a3a2a48c62ac37143c2f18 | path: backend/t1d_regimen_29628/settings.py | licenses: ["FTL", "AML", "RSA-MD"]
stars: null | issues: 26 (2021-08-15T19:19:10.000Z – 2022-01-30T15:31:33.000Z) | forks: null
content:
"""
Django settings for t1d_regimen_29628 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
from modules.manifest import get_modules
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
]
MODULES_APPS = get_modules()
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS + MODULES_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 't1d_regimen_29628.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 't1d_regimen_29628.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
avg_line_length: 29.615385 | max_line_length: 112 | alphanum_fraction: 0.731025

---
hexsha: 28fbb87e51c6b6aa8255dc8105338490fdfa27ae | size: 1,591 | ext: py | lang: Python
repo: jeremybennett/force-riscv @ a5222a3b3fa8a0b9464204056ddca148f16b7e49 | path: utils/builder/register_builder/riscv/BootPriority.py | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: 1 (2020-06-17T09:37:45.000Z)
content:
#
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
# FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#**************************************************************************************************
# BootPriority.py
#
# This file defines the BootPriority helper class.
#**************************************************************************************************
#**************************************************************************************************
# The boot priority class defines helper methods associated with boot priority.
#**************************************************************************************************
class BootPriority:
## Returns the appropriate boot priority based on the name and type of register provided along
# with if the register is write only
def getBootPriority(aName = None, aType = None, aWriteOnly = 0):
#if aType is this_particular_type:
#return a_particular_boot_priority
#if aName is this_particular_name:
#return a_particular_boot_priority
return 1
avg_line_length: 45.457143 | max_line_length: 99 | alphanum_fraction: 0.553111

---
hexsha: ee81e4de860e7f4394b6a08e73c8dd0408db2e08 | size: 4,914 | ext: py | lang: Python
repo: ninjapapa/SMV2 @ 42cf9f176c3ec0bed61f66fbf859c18d97027dd6 | path: setup.py | licenses: ["Apache-2.0"]
stars: null | issues: 34 (2022-02-26T04:27:34.000Z – 2022-03-29T23:05:47.000Z) | forks: null
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Template from: https://github.com/kennethreitz/setup.py/blob/master/setup.py
# Local test -- inside of a clean virtualenv:
# pip install .
# pip install .[pyspark] -- to test with pyspark support
# Packing Background
#
# 1) the "name" section indicates the name of our package as referenced on pypi and is
# referred to in the documentation as the "root package"
# 2) the MANIFEST.in file indicates what files should be uploaded to PyPi during the upload
# upload step. They dictate the contents of the wheel and source distribution (i.e. .zip file)
# that forms the distribution. This behavior is enabled by "include_package_data=True"
# 3) the "packages" list indicates what packages -- i.e. directories -- should be created inside
# python's site-packages directory. Any entry with a "." indicates to create a subdirectory
# (e.g. 'smv.target' means create a target directory inside of the site-packages/smv directory)
# 3) the "packages_data" dict ties together the MANIFEST to the "packages" section.
# the keys of the "packages_data" dict indicate the location (i.e. directory or subdirectory)
# to copy (i.e. install) files into. An empty section ('') indicates the "root package" as specifed
# by the "name" section. Any name with a dot indicates a sub-directory (e.g. smv.target
# means copy into the the subdirectory smv/target).
# The values of the packages dict indicate which files should be copies from the MANIFEST into the
# directory specified by the "key"
# Instructions for adding new files to the distribution:
#
# 1. If the directory is to be within an existing sub-package, then the only thing you need
# to do is make sure the contents are refernced inside of the MANIFEST.in file, and ignore
# the rest of these instructions
# 2. If the addition is of a NEW directory in the release "root", then declare
# it as an smv sub-package in the "packages" section. Then add the contents you
# want to appear in the release to the MANIFEST.in file. Finally, add an entry
# to the package_dir section indicating which smv sub-package should receive the
# new files added to the MANIFEST.in
import io
import os
import sys
import setuptools
here = os.path.abspath(os.path.dirname(__file__))
def read_file(path_from_root):
with io.open(os.path.join(here, path_from_root), encoding='utf-8') as f:
return f.read().strip()
# Package meta-data.
NAME = 'smv'
DESCRIPTION = 'A modular framework for creating applications in Apache Spark'
URL = 'https://github.com/TresAmigosSD/SMV'
EMAIL = 'bzhangusc@live.com'
AUTHOR = 'Bo Zhang, Ali Tajeldin, Kai Chen, Lane Barlow, Guangning Yu'
REQUIRES_PYTHON = '>=2.7'
VERSION = read_file('.smv_version')
# Sounds like wheel or twine don't like our README
# README_CONTENTS = read_file('README.md')
# What packages are required for this module to be executed?
requirements_file_path = os.path.join("tools", "requirements.txt")
requirements_file_as_list = read_file(requirements_file_path).split('\n')
# What packages are optional?
# this is that powers the pip install smv[pyspark] syntax
EXTRAS = {
'pyspark': ['pyspark'],
}
# Where the magic happens:
setuptools.setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
# long_description=README_CONTENTS,
# long_description_content_type='text/markdown',
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
tests_require=['sphinx', 'tox'],
# Need to call find_packages so that newly introduced
# sub-packages will be included
packages=setuptools.find_packages(
"src/main/python",
exclude=['test_support', 'scripts']
) + [
'smv.target',
'smv.docker',
'smv.docs',
'smv.tools',
'smv.src',
],
# https://docs.python.org/2/distutils/setupscript.html#listing-whole-packages
package_dir={
'':'src/main/python',
'smv.target': 'target/scala-2.11',
'smv.docker': 'docker',
'smv.docs': 'docs',
'smv.tools': 'tools',
'smv.src': 'src',
},
include_package_data=True,
scripts=[
'tools/smv-shell',
'tools/smv-server',
'tools/smv-init',
'tools/smv-jupyter',
'tools/smv-pytest',
'tools/spark-install',
'tools/_env.sh',
],
install_requires=requirements_file_as_list,
extras_require=EXTRAS,
license='Apache License, Version 2.0',
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: Implementation :: CPython',
],
)
avg_line_length: 39 | max_line_length: 102 | alphanum_fraction: 0.691494

---
hexsha: 4e6782fee2b4ab668818ec5a146cb452f4d16ab8 | size: 32 | ext: py | lang: Python
repo: zoloypzuo/ZeloPy @ 43d9242a509737fe1bb66deba73aa9e749b53c62 | path: prototype/test/pythonvm_book/test_var.py | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
a = 1
b = a + 1
print a
print b
avg_line_length: 6.4 | max_line_length: 9 | alphanum_fraction: 0.53125

---
hexsha: a410e0c7908a60d6d35ee95578adbd994db73052 | size: 1,059 | ext: py | lang: Python
repo: Ir1d/datasets @ 2a04ce5208e53bcf9b5acacf690bb7446285176e | path: tensorflow_datasets/image/cassava_test.py | licenses: ["Apache-2.0"]
stars: 2 (2020-06-09T10:44:36.000Z – 2020-06-09T10:44:46.000Z) | issues: null | forks: null
content:
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for cassava leaf dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_datasets import testing
from tensorflow_datasets.image import cassava
class CassavaTest(testing.DatasetBuilderTestCase):
DATASET_CLASS = cassava.Cassava
SPLITS = {
"train": 5,
"test": 5,
"validation": 5,
}
if __name__ == "__main__":
testing.test_main()
avg_line_length: 28.621622 | max_line_length: 74 | alphanum_fraction: 0.75543

---
hexsha: 42159a9181a336e965306fbe80d3cb84bb3829c2 | size: 624 | ext: py | lang: Python
repo: peterdemin/cerberus @ 895d8ba16c48d7f6d0f95229f8443b4793324ca0 | path: cerberus/__init__.py | licenses: ["0BSD"]
stars: null | issues: null | forks: null
content:
"""
Extensible validation for Python dictionaries.
:copyright: 2012-2016 by Nicola Iarocci.
:license: ISC, see LICENSE for more details.
Full documentation is available at http://python-cerberus.org/
"""
from __future__ import absolute_import
from cerberus.validator import Validator, DocumentError
from cerberus.schema import (rules_set_registry, schema_registry, Registry,
SchemaError)
__version__ = "1.1"
__all__ = [
DocumentError.__name__,
Registry.__name__,
SchemaError.__name__,
Validator.__name__,
'schema_registry',
'rules_set_registry'
]
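
A short usage sketch for the exported `Validator` (the schema and documents are illustrative, not taken from this repository).

```python
from cerberus import Validator

schema = {
    'name': {'type': 'string', 'required': True},
    'age': {'type': 'integer', 'min': 0},
}
v = Validator(schema)

print(v.validate({'name': 'Ada', 'age': 36}))  # True
print(v.validate({'age': -1}))                 # False
print(v.errors)                                # describes the failed rules (missing 'name', 'age' below min)
```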
avg_line_length: 22.285714 | max_line_length: 75 | alphanum_fraction: 0.711538

---
hexsha: 9b88bf47000e0cbaf15c3a966db8f252e88735cf | size: 5,202 | ext: py | lang: Python
repo: rahulmangalampalli/UniverseNet @ 49622e0e8a9672829ebb57979fbae89c36f15c6c | path: configs/swin/cascade_mask_rcnn_swin_small_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_3x_coco.py | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
content:
_base_ = [
'../_base_/models/cascade_mask_rcnn_swin_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
embed_dim=96,
depths=[2, 2, 18, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
ape=False,
drop_path_rate=0.2,
patch_norm=True,
use_checkpoint=False),
neck=dict(in_channels=[96, 192, 384, 768]),
roi_head=dict(bbox_head=[
dict(
type='ConvFCBBoxHead',
num_shared_convs=4,
num_shared_fcs=1,
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
reg_decoded_bbox=True,
norm_cfg=dict(type='SyncBN', requires_grad=True),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=10.0)),
dict(
type='ConvFCBBoxHead',
num_shared_convs=4,
num_shared_fcs=1,
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=False,
reg_decoded_bbox=True,
norm_cfg=dict(type='SyncBN', requires_grad=True),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=10.0)),
dict(
type='ConvFCBBoxHead',
num_shared_convs=4,
num_shared_fcs=1,
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=False,
reg_decoded_bbox=True,
norm_cfg=dict(type='SyncBN', requires_grad=True),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=10.0))
]))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='AutoAugment',
policies=[[
dict(
type='Resize',
img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
multiscale_mode='value',
keep_ratio=True)
],
[
dict(
type='Resize',
img_scale=[(400, 1333), (500, 1333), (600, 1333)],
multiscale_mode='value',
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='Resize',
img_scale=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
multiscale_mode='value',
override=True,
keep_ratio=True)
]]),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(pipeline=train_pipeline))
optimizer = dict(
_delete_=True,
type='AdamW',
lr=0.0001,
betas=(0.9, 0.999),
weight_decay=0.05,
paramwise_cfg=dict(
custom_keys={
'absolute_pos_embed': dict(decay_mult=0.),
'relative_position_bias_table': dict(decay_mult=0.),
'norm': dict(decay_mult=0.)
}))
lr_config = dict(step=[27, 33])
runner = dict(type='EpochBasedRunner', max_epochs=36)
fp16 = dict(loss_scale='dynamic')
avg_line_length: 36.893617 | max_line_length: 78 | alphanum_fraction: 0.502499

---
hexsha: dba8f04dfbdd5d15a40c5f9de42e68bc2676ac2d | size: 3,447 | ext: py | lang: Python
repo: nikhil153/codecarbon @ b3cc0cc29abd4b82e39699695d535351e86f1ec0 | path: carbonserver/tests/api/routers/test_users.py | licenses: ["MIT"]
stars: 346 (2020-11-11T02:27:03.000Z – 2022-03-30T12:43:21.000Z) | issues: 163 (2020-11-10T18:57:38.000Z – 2022-03-30T14:44:53.000Z) | forks: 57 (2020-11-11T02:26:15.000Z – 2022-03-30T14:36:39.000Z)
content:
from unittest import mock
import pytest
from container import ServerContainer
from fastapi import FastAPI, status
from fastapi.testclient import TestClient
from carbonserver.api.infra.repositories.repository_users import SqlAlchemyRepository
from carbonserver.api.routers import users
from carbonserver.api.schemas import User
API_KEY = "U5W0EUP9y6bBENOnZWJS0g"
USER_ID_1 = "f52fe339-164d-4c2b-a8c0-f562dfce066d"
USER_ID_2 = "e52fe339-164d-4c2b-a8c0-f562dfce066d"
USER_1 = {
"id": USER_ID_1,
"name": "Gontran Bonheur",
"email": "xyz@email.com",
"api_key": API_KEY,
"organizations": [],
"teams": [],
"is_active": True,
}
USER_2 = {
"id": USER_ID_2,
"name": "Jonnhy Monnay",
"email": "1234+1@email.fr",
"api_key": API_KEY,
"organizations": [],
"teams": [],
"is_active": True,
}
USER_TO_CREATE = {
"name": "Gontran Bonheur",
"email": "xyz@email.com",
"password": "pwd",
}
USER_WITH_BAD_EMAIL = {
"name": "Gontran Bonheur",
"email": "xyz",
"password": "pwd",
}
@pytest.fixture
def custom_test_server():
container = ServerContainer()
container.wire(modules=[users])
app = FastAPI()
app.container = container
app.include_router(users.router)
yield app
@pytest.fixture
def client(custom_test_server):
yield TestClient(custom_test_server)
def test_create_user(client, custom_test_server):
repository_mock = mock.Mock(spec=SqlAlchemyRepository)
expected_user = USER_1
repository_mock.create_user.return_value = User(**expected_user)
with custom_test_server.container.user_repository.override(repository_mock):
response = client.post("/user", json=USER_TO_CREATE)
actual_user = response.json()
assert response.status_code == status.HTTP_201_CREATED
assert actual_user == expected_user
def test_create_user_with_bad_email_fails_at_http_layer(client):
response = client.post("/user", json=USER_WITH_BAD_EMAIL)
actual_response = response.json()
assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
assert actual_response["detail"][0]["type"] == "value_error.email"
def test_list_users_list_all_existing_users_with_200(client, custom_test_server):
repository_mock = mock.Mock(spec=SqlAlchemyRepository)
expected_user = USER_1
expected_user_2 = USER_2
expected_user_list = [expected_user, expected_user_2]
repository_mock.list_users.return_value = [
User(**expected_user),
User(**expected_user_2),
]
with custom_test_server.container.user_repository.override(repository_mock):
response = client.get("/users")
actual_user_list = response.json()
assert response.status_code == status.HTTP_200_OK
assert actual_user_list == expected_user_list
def test_get_user_by_id_returns_correct_user_with_correct_id(
client, custom_test_server
):
repository_mock = mock.Mock(spec=SqlAlchemyRepository)
expected_user = USER_1
repository_mock.get_user_by_id.return_value = User(**expected_user)
container_mock = mock.Mock(spec=ServerContainer)
container_mock.db.return_value = True
with custom_test_server.container.user_repository.override(repository_mock):
response = client.get("/user/get_user_by_id/", params={"user_id": USER_ID_1})
actual_user = response.json()
assert response.status_code == status.HTTP_200_OK
assert actual_user == expected_user
avg_line_length: 28.966387 | max_line_length: 85 | alphanum_fraction: 0.731651

---
hexsha: d4b45b772bdc865fc91f9d58a59bb76869bb2a20 | size: 1,589 | ext: py | lang: Python
repo: thehyve/python_claml @ 60126f8cea2ec9aaf0e8292a584ac3316514e241 | path: tests/test_python_claml.py | licenses: ["MIT"]
stars: 7 (2019-05-14T07:57:53.000Z – 2021-06-07T08:00:04.000Z) | issues: 1 (2021-02-21T13:10:41.000Z) | forks: null
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tests for `python_claml` module.
"""
import os
import time
import pytest
from python_claml import claml
from python_claml.claml_types import ClaML
class TestPythonClaml(object):
@classmethod
def setup_class(cls):
pass
def test_small_file(self):
start = time.perf_counter()
contents = open('resources/test.xml', 'r').read()
mid = time.perf_counter()
classification: ClaML = claml.CreateFromDocument(contents)
end = time.perf_counter()
with open('test.log', 'a') as log_file:
log_file.write('Test 1 took {} s, reading: {}, parsing: {}\n'.format(
end - start,
mid - start,
end - mid
))
assert classification is not None
large_test_file = 'resources/icd10gm2019syst_claml_20180921.xml'
@pytest.mark.skipif(not os.path.isfile(large_test_file), reason="large test file not available")
def test_large_file(self):
start = time.perf_counter()
contents = open(TestPythonClaml.large_test_file, 'r').read()
mid = time.perf_counter()
classification = claml.CreateFromDocument(contents)
end = time.perf_counter()
with open('test.log', 'a') as log_file:
log_file.write('Test 2 took {} s, reading: {}, parsing: {}\n'.format(
end - start,
mid - start,
end - mid
))
assert classification is not None
@classmethod
def teardown_class(cls):
pass
avg_line_length: 28.375 | max_line_length: 100 | alphanum_fraction: 0.599119

---
hexsha: 48013279547f576b5b3e116cb96874164aa5a7d9 | size: 50 | ext: py | lang: Python
repo: reBiocoder/my_hubu @ 19dbfc12d9e5ee509b3cc70826eafa6a5014f21a | path: topic/tests.py | licenses: ["MIT"]
stars: 6 (2020-05-02T11:08:39.000Z – 2021-07-20T02:55:47.000Z) | issues: 8 (2020-06-06T01:45:02.000Z – 2022-03-12T00:24:54.000Z) | forks: 3 (2020-05-04T00:36:46.000Z – 2021-02-15T09:49:44.000Z)
content:
import datetime
print(datetime.datetime.now())
avg_line_length: 16.666667 | max_line_length: 30 | alphanum_fraction: 0.76

---
hexsha: 89657b38b6b80349e44070a879f64149a26c0931 | size: 59 | ext: py | lang: Python
repo: glos/ioos_qc @ 17e69ad582275be7ad0f5a2af40c11d810b344e8 | path: ioos_qc/__init__.py | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
content:
#!/usr/bin/env python
# coding=utf-8
__version__ = "2.0.0"
avg_line_length: 14.75 | max_line_length: 21 | alphanum_fraction: 0.661017

---
hexsha: 177b3a6ad417be25d5ae7917714c90e30a7e2dde | size: 5,012 | ext: py | lang: Python
repo: fridex/mlcube @ 5e11557d39066b3fc0b90eee360580ad74c925c7 | path: runners/mlcube_docker/mlcube_docker/docker_run.py | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
content:
import logging
import os
import typing
from mlcube.common import mlcube_metadata
logger = logging.getLogger(__name__)
class DockerRun(object):
def __init__(self, mlcube: mlcube_metadata.MLCube):
"""Docker Runner.
Args:
mlcube (mlcube_metadata.MLCube): MLCube specification including platform configuration for Docker.
"""
self.mlcube: mlcube_metadata.MLCube = mlcube
@staticmethod
def get_env_variables() -> dict:
env_vars = {}
for proxy_var in ('http_proxy', 'https_proxy'):
if os.environ.get(proxy_var, None) is not None:
env_vars[proxy_var] = os.environ[proxy_var]
return env_vars
def image_exists(self, image_name: str) -> bool:
"""Check if docker image exists.
Args:
image_name (str): Name of a docker image.
Returns:
True if image exists, else false.
"""
return self._run_or_die(f"docker inspect --type=image {image_name} > /dev/null 2>&1", die_on_error=False) == 0
def configure(self):
"""Build Docker Image on a current host."""
image_name: str = self.mlcube.platform.container.image
# According to MLCube specs (?), build directory is {mlcube.root}/build that contains all files to build MLCube.
# Dockerfiles are built taking into account that {mlcube.root}/build is the context (build) directory.
build_path: str = self.mlcube.build_path
docker_file: str = os.path.join(build_path, 'Dockerfile')
if not os.path.exists(docker_file):
cmd: str = f"docker pull {image_name}"
else:
env_args = ' '.join([f"--build-arg {var}={name}" for var, name in DockerRun.get_env_variables().items()])
cmd: str = f"docker build {env_args} -t {image_name} -f {docker_file} {build_path}"
logger.info(cmd)
self._run_or_die(cmd)
def run(self):
"""Run a cube."""
image_name: str = self.mlcube.platform.container.image
if not self.image_exists(image_name):
logger.warning("Docker image (%s) does not exist. Running 'configure' phase.", image_name)
self.configure()
# The 'mounts' dictionary maps host path to container path
mounts, args = self._generate_mounts_and_args()
print(f"mounts={mounts}, args={args}")
volumes_str = ' '.join(['--volume {}:{}'.format(t[0], t[1]) for t in mounts.items()])
runtime: str = self.mlcube.platform.container.runtime
runtime_arg = "--runtime=" + runtime if runtime is not None else ""
env_args = ' '.join([f"-e {var}={name}" for var, name in DockerRun.get_env_variables().items()])
# Let's assume singularity containers provide entry point in the right way.
args = ' '.join(args)
cmd = f"docker run --rm {runtime_arg} --net=host --privileged=true {volumes_str} {env_args} {image_name} {args}"
logger.info(cmd)
self._run_or_die(cmd)
def _generate_mounts_and_args(self) -> typing.Tuple[dict, list]:
mounts, args = {}, [self.mlcube.invoke.task_name]
def _create(binding_: dict, input_specs_: dict):
# name: parameter name, path: parameter value
for name, path in binding_.items():
path = path.replace('$WORKSPACE', self.mlcube.workspace_path)
path_type = input_specs_[name]
if path_type == 'directory':
os.makedirs(path, exist_ok=True)
mounts[path] = mounts.get(
path,
'/mlcube_io{}/{}'.format(len(mounts), os.path.basename(path))
)
args.append('--{}={}'.format(name, mounts[path]))
elif path_type == 'file':
file_path, file_name = os.path.split(path)
os.makedirs(file_path, exist_ok=True)
mounts[file_path] = mounts.get(
file_path,
'/mlcube_io{}/{}'.format(len(mounts), file_path)
)
args.append('--{}={}'.format(name, mounts[file_path] + '/' + file_name))
else:
raise RuntimeError(f"Invalid path type: '{path_type}'")
_create(self.mlcube.invoke.input_binding, self.mlcube.task.inputs)
_create(self.mlcube.invoke.output_binding, self.mlcube.task.outputs)
return mounts, args
def _run_or_die(self, cmd: str, die_on_error: bool = True) -> int:
"""Execute shell command.
Args:
cmd(str): Command to execute.
die_on_error (bool): If true and shell returns non-zero exit status, raise RuntimeError.
Returns:
Exit code.
"""
print(cmd)
return_code: int = os.system(cmd)
if return_code != 0 and die_on_error:
raise RuntimeError('Command failed: {}'.format(cmd))
return return_code
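
A standalone sketch of the host-path to `/mlcube_io{N}` mapping built by `_generate_mounts_and_args` above; the task name and directory bindings are made up, and only the directory case is shown.

```python
import os

def generate_mounts_and_args(task_name, directory_bindings):
    """Map each distinct host directory to a /mlcube_io{N} container path."""
    mounts, args = {}, [task_name]
    for name, host_path in directory_bindings.items():
        if host_path not in mounts:
            mounts[host_path] = '/mlcube_io{}/{}'.format(len(mounts), os.path.basename(host_path))
        args.append('--{}={}'.format(name, mounts[host_path]))
    return mounts, args

mounts, args = generate_mounts_and_args(
    'evaluate',
    {'data_dir': '/home/user/workspace/data', 'log_dir': '/home/user/workspace/logs'},
)
print(mounts)  # {'/home/user/workspace/data': '/mlcube_io0/data', '/home/user/workspace/logs': '/mlcube_io1/logs'}
print(args)    # ['evaluate', '--data_dir=/mlcube_io0/data', '--log_dir=/mlcube_io1/logs']
```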
avg_line_length: 42.117647 | max_line_length: 120 | alphanum_fraction: 0.591181

---
hexsha: 3abdd6d1b07fa4cb7e009702de5ed680b15a5cdf | size: 1,174 | ext: py | lang: Python
repo: tadatoshi/solar-data-tools @ 51b3e1ac2d1daefef88b8f110c42db42da3ba952 | path: solardatatools/plotting.py | licenses: ["BSD-2-Clause"]
stars: null | issues: null | forks: null
content:
# -*- coding: utf-8 -*-
''' Plotting Module
'''
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
def plot_2d(D, figsize=(12, 6), units='kW', clear_days=None):
if D is not None:
with sns.axes_style("white"):
fig, ax = plt.subplots(nrows=1, figsize=figsize)
foo = ax.imshow(D, cmap='hot', interpolation='none', aspect='auto', vmin=0)
ax.set_title('Measured power')
plt.colorbar(foo, ax=ax, label=units)
ax.set_xlabel('Day number')
ax.set_yticks([])
ax.set_ylabel('(sunset) Time of day (sunrise)')
if clear_days is not None:
xlim = ax.get_xlim()
ylim = ax.get_ylim()
use_day = clear_days
days = np.arange(D.shape[1])
y1 = np.ones_like(days[use_day]) * D.shape[0] * .99
ax.scatter(days[use_day], y1, marker='|', color='yellow', s=2)
ax.scatter(days[use_day], .995*y1, marker='|', color='yellow', s=2)
ax.set_xlim(*xlim)
ax.set_ylim(*ylim)
return fig
else:
return
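
A hedged usage sketch for `plot_2d`, assuming the module above is importable as `solardatatools.plotting`; the power matrix is synthetic.

```python
import numpy as np
from solardatatools.plotting import plot_2d

rng = np.random.default_rng(0)
D = rng.random((288, 365))     # 288 five-minute slots per day x 365 days of fake power data
fig = plot_2d(D, units='kW')   # returns the matplotlib Figure
fig.savefig('power_heatmap.png')
```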
avg_line_length: 33.542857 | max_line_length: 87 | alphanum_fraction: 0.524702

---
hexsha: 50efc70f361d69fb5b9bf5423a84622027145947 | size: 372 | ext: py | lang: Python
repo: skugele/hrl-algorithms @ 5a444132b9a3e4ccfbdc15a10a9e1311ae0bf228 | path: src/hrl/__init__.py | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
# -*- coding: utf-8 -*-
from pkg_resources import get_distribution, DistributionNotFound
try:
# Change here if project is renamed and does not equal the package name
dist_name = 'hrl-algorithms/'
__version__ = get_distribution(dist_name).version
except DistributionNotFound:
__version__ = 'unknown'
finally:
del get_distribution, DistributionNotFound
avg_line_length: 31 | max_line_length: 75 | alphanum_fraction: 0.763441

---
hexsha: a0fb5c17c53881cc5c1fdd4a3fd5dc49b72e4048 | size: 193 | ext: py | lang: Python
repo: Animenosekai/yuno @ bcc48f7ceda022e26392e653c03606d3f5f66806 | path: yuno/objects/__init__.py | licenses: ["MIT"]
stars: 1 (2022-02-25T13:39:18.000Z) | issues: null | forks: null
content:
"""
Objects
Defining objects imitating the behavior of Python's built-in objects but linked to the database.
"""
from yuno.objects.dict import YunoDict
from yuno.objects.list import YunoList
avg_line_length: 21.444444 | max_line_length: 96 | alphanum_fraction: 0.792746

---
hexsha: 89d658331720148606f0b15c150f54b1cbd68fe7 | size: 1,885 | ext: py | lang: Python
repo: bcgov/foi-reporting @ 25856ce87b668df964ddd16ac7459fae4aa6a7c5 | path: datawarehouse/edw_migrations/versions/101cfb715b9a_dimrequesttypes.py | licenses: ["Apache-2.0"]
stars: null | issues: 3 (2022-01-05T18:01:41.000Z – 2022-02-08T21:51:32.000Z) | forks: null
content:
"""dimRequestTypes
Revision ID: 101cfb715b9a
Revises: 2e97ccf78659
Create Date: 2022-01-26 23:07:13.900897
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '101cfb715b9a'
down_revision = '2e97ccf78659'
branch_labels = None
depends_on = None
def upgrade():
op.create_table('dimRequestTypes',
sa.Column('requesttypeid', sa.Integer(), nullable=False),
sa.Column('requesttypename', sa.VARCHAR(length=3000)),
sa.Column('requesttypecode', sa.VARCHAR(length=4)),
sa.Column('createddate', sa.DateTime()),
sa.Column('modifieddate', sa.DateTime()),
sa.Column('priority', sa.VARCHAR(length=3000)),
sa.Column('casetype', sa.CHAR(length=1)),
sa.Column('acttype', sa.CHAR(length=1)),
sa.Column('acttypename', sa.VARCHAR(length=50)),
sa.Column('cdelete', sa.CHAR(length=1)),
sa.Column('siretentionpolicyid', sa.Integer()),
sa.Column('cIsconsultation', sa.CHAR(length=1)),
sa.Column('cIslitigation', sa.CHAR(length=1)),
sa.Column('timultitracktype', sa.Integer()),
sa.Column('tiprocessingdays', sa.Integer()),
sa.Column('tisimpleprocessingdays', sa.Integer()),
sa.Column('ticomplexprocessingdays', sa.Integer()),
sa.Column('tiexpediteprocessingdays', sa.Integer()),
sa.Column('csimpleprocessingdays', sa.CHAR(length=1)),
sa.Column('ccomplexprocessingdays', sa.CHAR(length=1)),
sa.Column('cexpediteprocessingdays', sa.CHAR(length=1)),
sa.Column('cshowinhomepage', sa.CHAR(length=1)),
sa.Column('cactsubtype', sa.CHAR(length=1)),
sa.Column('tiapplicablesection', sa.Integer()),
sa.Column('cactive', sa.CHAR(length=1)),
sa.Column('cconsultationtype', sa.CHAR(length=1)),
sa.Column('cenableapprovalpackage', sa.CHAR(length=1)),
sa.Column('iduplicatesearchoptions', sa.Integer()),
sa.PrimaryKeyConstraint('requesttypeid')
)
def downgrade():
op.drop_table('dimRequestTypes')
avg_line_length: 34.272727 | max_line_length: 59 | alphanum_fraction: 0.720955

---
hexsha: 1b34338a697d3b7abe747c0737bb60ed45835918 | size: 69 | ext: py | lang: Python
repo: HrishikV/Ineuron_bankbot_internship @ eb044d0047e2ad3cb6c9e69476e23bab7f6074be | path: variables.py | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
i = 0
intent = ""
output_type = -2
cust_id = None
check = False
c = 0
avg_line_length: 11.5 | max_line_length: 16 | alphanum_fraction: 0.623188

---
hexsha: a9f2ede825260d8cca809432410dbc8c47f836f7 | size: 634 | ext: py | lang: Python
repo: leonolan2020/phoenix @ b5956a7003e548f01255cbd5d0d76cfd0ac77a81 | path: projectmanager/migrations/0004_auto_20200924_1549.py | licenses: ["MIT"]
stars: 1 (2020-09-19T21:56:40.000Z) | issues: null | forks: 5 (2020-09-18T18:53:03.000Z – 2020-10-21T14:42:00.000Z)
content:
# Generated by Django 3.1 on 2020-09-24 12:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projectmanager', '0003_auto_20200924_1537'),
]
operations = [
migrations.AlterField(
model_name='project',
name='status',
field=models.CharField(choices=[('پیش فرض', 'پیش فرض'), ('در حال انجام', 'در حال انجام'), ('آماده سازی اولیه', 'آماده سازی اولیه'), ('انجام شده', 'انجام شده'), ('تحویل شده', 'تحویل شده'), ('درحال آنالیز', 'درحال آنالیز')], default='پیش فرض', max_length=50, verbose_name='status'),
),
]
avg_line_length: 33.368421 | max_line_length: 292 | alphanum_fraction: 0.615142

---
hexsha: 56a4fcf895d52c24d1b1d984d9354c0efb86dda1 | size: 449 | ext: py | lang: Python
repo: memoherreraacosta/robot_vision @ fa73ab234bcea0b0a7b2411364dbb4cca27c3d56 | path: practicas/implementaciones/02/practice2.py | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('elephants.png', 0)
f = np.fft.fft2(img)
fshift = np.fft.fftshift(f)
magnitude_spectrum = 20*np.log(np.abs(fshift))
plt.subplot(121), plt.imshow(img, cmap='gray')
plt.title('ElephantsDream.mp4'), plt.xticks([]), plt.yticks([])
plt.subplot(122), plt.imshow(magnitude_spectrum, cmap='gray')
plt.title('Magnitude Spectrum'), plt.xticks([]), plt.yticks([])
plt.show()
avg_line_length: 32.071429 | max_line_length: 63 | alphanum_fraction: 0.719376

---
hexsha: 72da0ab2e1f4688cc4a22343a790ff7b7445e24f | size: 4,454 | ext: py | lang: Python
repo: bopopescu/sage-5 @ 9d85b34956ca2edd55af307f99c5d3859acd30bf | path: src/sage/quadratic_forms/quadratic_form__local_density_interfaces.py | licenses: ["BSL-1.0"]
stars: 5 (2015-01-04T07:15:06.000Z – 2022-03-04T15:15:18.000Z) | issues: null | forks: 10 (2016-09-28T13:12:40.000Z – 2022-02-12T09:28:34.000Z)
content:
"""
Local Density Interfaces
"""
## // This is needed in the filter for primitivity...
## #include "../max-min.h"
from sage.rings.arith import valuation
from sage.rings.rational_field import QQ
def local_density(self, p, m):
"""
Gives the local density -- should be called by the user. =)
NOTE: This screens for imprimitive forms, and puts the quadratic
form in local normal form, which is a *requirement* of the
routines performing the computations!
INPUT:
`p` -- a prime number > 0
`m` -- an integer
OUTPUT:
a rational number
EXAMPLES::
sage: Q = DiagonalQuadraticForm(ZZ, [1,1,1,1]) ## NOTE: This is already in local normal form for *all* primes p!
sage: Q.local_density(p=2, m=1)
1
sage: Q.local_density(p=3, m=1)
8/9
sage: Q.local_density(p=5, m=1)
24/25
sage: Q.local_density(p=7, m=1)
48/49
sage: Q.local_density(p=11, m=1)
120/121
"""
n = self.dim()
if (n == 0):
raise TypeError, "Oops! We currently don't handle 0-dim'l forms. =("
## Find the local normal form and p-scale of Q -- Note: This uses the valuation ordering of local_normal_form.
## TO DO: Write a separate p-scale and p-norm routines!
Q_local = self.local_normal_form(p)
if n == 1:
p_valuation = valuation(Q_local[0,0], p)
else:
p_valuation = min(valuation(Q_local[0,0], p), valuation(Q_local[0,1], p))
## If m is less p-divisible than the matrix, return zero
if ((m != 0) and (valuation(m,p) < p_valuation)): ## Note: The (m != 0) condition protects taking the valuation of zero.
return QQ(0)
## If the form is imprimitive, rescale it and call the local density routine
p_adjustment = QQ(1) / p**p_valuation
m_prim = QQ(m) / p**p_valuation
Q_prim = Q_local.scale_by_factor(p_adjustment)
## Return the densities for the reduced problem
return Q_prim.local_density_congruence(p, m_prim)
def local_primitive_density(self, p, m):
"""
Gives the local primitive density -- should be called by the user. =)
NOTE: This screens for imprimitive forms, and puts the
quadratic form in local normal form, which is a *requirement* of
the routines performing the computations!
INPUT:
`p` -- a prime number > 0
`m` -- an integer
OUTPUT:
a rational number
EXAMPLES::
sage: Q = QuadraticForm(ZZ, 4, range(10))
sage: Q[0,0] = 5
sage: Q[1,1] = 10
sage: Q[2,2] = 15
sage: Q[3,3] = 20
sage: Q
Quadratic form in 4 variables over Integer Ring with coefficients:
[ 5 1 2 3 ]
[ * 10 5 6 ]
[ * * 15 8 ]
[ * * * 20 ]
sage: Q.theta_series(20)
1 + 2*q^5 + 2*q^10 + 2*q^14 + 2*q^15 + 2*q^16 + 2*q^18 + O(q^20)
sage: Q.local_normal_form(2)
Quadratic form in 4 variables over Integer Ring with coefficients:
[ 0 1 0 0 ]
[ * 0 0 0 ]
[ * * 0 1 ]
[ * * * 0 ]
sage: Q.local_primitive_density(2, 1)
3/4
sage: Q.local_primitive_density(5, 1)
24/25
sage: Q.local_primitive_density(2, 5)
3/4
sage: Q.local_density(2, 5)
3/4
"""
n = self.dim()
if (n == 0):
raise TypeError, "Oops! We currently don't handle 0-dim'l forms. =("
## Find the local normal form and p-scale of Q -- Note: This uses the valuation ordering of local_normal_form.
## TO DO: Write a separate p-scale and p-norm routines!
Q_local = self.local_normal_form(p)
if n == 1:
p_valuation = valuation(Q_local[0,0], p)
else:
p_valuation = min(valuation(Q_local[0,0], p), valuation(Q_local[0,1], p))
## If m is less p-divisible than the matrix, return zero
if ((m != 0) and (valuation(m,p) < p_valuation)): ## Note: The (m != 0) condition protects taking the valuation of zero.
return QQ(0)
## If the form is imprimitive, rescale it and call the local density routine
p_adjustment = QQ(1) / p**p_valuation
m_prim = QQ(m) / p**p_valuation
Q_prim = Q_local.scale_by_factor(p_adjustment)
## Return the densities for the reduced problem
return Q_prim.local_primitive_density_congruence(p, m_prim)
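# Both routines above strip out the p-content of the form and of m before delegating to the
# congruence routines. The snippet below is a minimal plain-Python sketch of that screening
# arithmetic only; it does not use Sage, and the helper name p_valuation and the example
# numbers are illustrative rather than part of this module.
from fractions import Fraction

def p_valuation(n, p):
    # largest e such that p**e divides the nonzero integer n
    e = 0
    while n % p == 0:
        n //= p
        e += 1
    return e

# Example at p = 2 for a form whose relevant entries are 2 and 6 and target m = 4:
v = min(p_valuation(2, 2), p_valuation(6, 2))  # p-valuation of the form = 1
p_adjustment = Fraction(1, 2 ** v)             # the form is rescaled by 1/2
m_prim = Fraction(4, 2 ** v)                   # m = 4 becomes 2 for the reduced problem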
| 30.29932
| 126
| 0.585092
|
71e214cbb7ea76158e7a4602445ff09fc18fc5e6
| 953
|
py
|
Python
|
framework/framework/branch/standard_branch.py
|
StepaTa/vkbottle
|
3b04a5343380cbabe782151e7cb1c1645a9fa9ce
|
[
"MIT"
] | null | null | null |
framework/framework/branch/standard_branch.py
|
StepaTa/vkbottle
|
3b04a5343380cbabe782151e7cb1c1645a9fa9ce
|
[
"MIT"
] | null | null | null |
framework/framework/branch/standard_branch.py
|
StepaTa/vkbottle
|
3b04a5343380cbabe782151e7cb1c1645a9fa9ce
|
[
"MIT"
] | null | null | null |
from vkbottle.framework.framework.rule import AbstractMessageRule
from enum import Enum
class Branch:
branch_name: str
branch_kwargs: dict
def __init__(self, branch_name: str, **pass_to_branch):
self.branch_name = branch_name
self.branch_kwargs = pass_to_branch
class ExitBranch:
pass
class BranchCheckupKey(Enum):
PEER_ID = "peer_id"
FROM_ID = "from_id"
class ImmutableBranchData:
def __init__(self, name: str, **kwargs):
self.name: str = name
self.data = kwargs
def __call__(self) -> dict:
return {"name": self.name, **self.data}
def __repr__(self):
return f"<Branch ImmutableBranchData name={self.name} data={self.data}>"
def rule_disposal(*rules: AbstractMessageRule):
disposal = []
def wrapper(func):
for rule in rules:
rule.create(func)
disposal.append(rule)
return func, disposal
return wrapper
| 21.177778
| 80
| 0.657922
|
848ca830045512f85e1693ff67b0d2f5a13d8bb2
| 32,615
|
py
|
Python
|
test/unit/obj/test_ssync_sender.py
|
ucsc-hp-group/swift
|
d6f1cb851d256ceffdff6d61513f42005e7ddcec
|
[
"Apache-2.0"
] | 2
|
2016-01-26T14:31:04.000Z
|
2016-01-26T14:31:08.000Z
|
test/unit/obj/test_ssync_sender.py
|
ucsc-hp-group/swift
|
d6f1cb851d256ceffdff6d61513f42005e7ddcec
|
[
"Apache-2.0"
] | null | null | null |
test/unit/obj/test_ssync_sender.py
|
ucsc-hp-group/swift
|
d6f1cb851d256ceffdff6d61513f42005e7ddcec
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import os
import shutil
import StringIO
import tempfile
import time
import unittest
import eventlet
import mock
from swift.common import exceptions, utils
from swift.obj import ssync_sender, diskfile
from test.unit import DebugLogger
class FakeReplicator(object):
def __init__(self, testdir):
self.logger = mock.MagicMock()
self.conn_timeout = 1
self.node_timeout = 2
self.http_timeout = 3
self.network_chunk_size = 65536
self.disk_chunk_size = 4096
conf = {
'devices': testdir,
'mount_check': 'false',
}
self._diskfile_mgr = diskfile.DiskFileManager(conf, DebugLogger())
class NullBufferedHTTPConnection(object):
def __init__(*args, **kwargs):
pass
def putrequest(*args, **kwargs):
pass
def putheader(*args, **kwargs):
pass
def endheaders(*args, **kwargs):
pass
def getresponse(*args, **kwargs):
pass
class FakeResponse(object):
def __init__(self, chunk_body=''):
self.status = 200
self.close_called = False
if chunk_body:
self.fp = StringIO.StringIO(
'%x\r\n%s\r\n0\r\n\r\n' % (len(chunk_body), chunk_body))
def close(self):
self.close_called = True
class FakeConnection(object):
def __init__(self):
self.sent = []
self.closed = False
def send(self, data):
self.sent.append(data)
def close(self):
self.closed = True
class TestSender(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
self.testdir = os.path.join(self.tmpdir, 'tmp_test_ssync_sender')
self.replicator = FakeReplicator(self.testdir)
self.sender = ssync_sender.Sender(self.replicator, None, None, None)
def tearDown(self):
shutil.rmtree(self.tmpdir, ignore_errors=1)
def _make_open_diskfile(self, device='dev', partition='9',
account='a', container='c', obj='o', body='test',
extra_metadata=None):
object_parts = account, container, obj
req_timestamp = utils.normalize_timestamp(time.time())
df = self.sender.daemon._diskfile_mgr.get_diskfile(device, partition,
*object_parts)
content_length = len(body)
etag = hashlib.md5(body).hexdigest()
with df.create() as writer:
writer.write(body)
metadata = {
'X-Timestamp': req_timestamp,
'Content-Length': content_length,
'ETag': etag,
}
if extra_metadata:
metadata.update(extra_metadata)
writer.put(metadata)
df.open()
return df
def test_call_catches_MessageTimeout(self):
def connect(self):
exc = exceptions.MessageTimeout(1, 'test connect')
# Cancels Eventlet's raising of this since we're about to do it.
exc.cancel()
raise exc
with mock.patch.object(ssync_sender.Sender, 'connect', connect):
node = dict(ip='1.2.3.4', port=5678, device='sda1')
job = dict(partition='9')
self.sender = ssync_sender.Sender(self.replicator, node, job, None)
self.sender.suffixes = ['abc']
self.assertFalse(self.sender())
call = self.replicator.logger.error.mock_calls[0]
self.assertEqual(
call[1][:-1], ('%s:%s/%s/%s %s', '1.2.3.4', 5678, 'sda1', '9'))
self.assertEqual(str(call[1][-1]), '1 second: test connect')
def test_call_catches_ReplicationException(self):
def connect(self):
raise exceptions.ReplicationException('test connect')
with mock.patch.object(ssync_sender.Sender, 'connect', connect):
node = dict(ip='1.2.3.4', port=5678, device='sda1')
job = dict(partition='9')
self.sender = ssync_sender.Sender(self.replicator, node, job, None)
self.sender.suffixes = ['abc']
self.assertFalse(self.sender())
call = self.replicator.logger.error.mock_calls[0]
self.assertEqual(
call[1][:-1], ('%s:%s/%s/%s %s', '1.2.3.4', 5678, 'sda1', '9'))
self.assertEqual(str(call[1][-1]), 'test connect')
def test_call_catches_other_exceptions(self):
node = dict(ip='1.2.3.4', port=5678, device='sda1')
job = dict(partition='9')
self.sender = ssync_sender.Sender(self.replicator, node, job, None)
self.sender.suffixes = ['abc']
self.sender.connect = 'cause exception'
self.assertFalse(self.sender())
call = self.replicator.logger.exception.mock_calls[0]
self.assertEqual(
call[1],
('%s:%s/%s/%s EXCEPTION in replication.Sender', '1.2.3.4', 5678,
'sda1', '9'))
def test_call_catches_exception_handling_exception(self):
node = dict(ip='1.2.3.4', port=5678, device='sda1')
job = None # Will cause inside exception handler to fail
self.sender = ssync_sender.Sender(self.replicator, node, job, None)
self.sender.suffixes = ['abc']
self.sender.connect = 'cause exception'
self.assertFalse(self.sender())
self.replicator.logger.exception.assert_called_once_with(
'EXCEPTION in replication.Sender')
def test_call_calls_others(self):
self.sender.suffixes = ['abc']
self.sender.connect = mock.MagicMock()
self.sender.missing_check = mock.MagicMock()
self.sender.updates = mock.MagicMock()
self.sender.disconnect = mock.MagicMock()
self.assertTrue(self.sender())
self.sender.connect.assert_called_once_with()
self.sender.missing_check.assert_called_once_with()
self.sender.updates.assert_called_once_with()
self.sender.disconnect.assert_called_once_with()
def test_call_calls_others_returns_failure(self):
self.sender.suffixes = ['abc']
self.sender.connect = mock.MagicMock()
self.sender.missing_check = mock.MagicMock()
self.sender.updates = mock.MagicMock()
self.sender.disconnect = mock.MagicMock()
self.sender.failures = 1
self.assertFalse(self.sender())
self.sender.connect.assert_called_once_with()
self.sender.missing_check.assert_called_once_with()
self.sender.updates.assert_called_once_with()
self.sender.disconnect.assert_called_once_with()
def test_connect_send_timeout(self):
self.replicator.conn_timeout = 0.01
node = dict(ip='1.2.3.4', port=5678, device='sda1')
job = dict(partition='9')
self.sender = ssync_sender.Sender(self.replicator, node, job, None)
self.sender.suffixes = ['abc']
def putrequest(*args, **kwargs):
eventlet.sleep(0.1)
with mock.patch.object(
ssync_sender.bufferedhttp.BufferedHTTPConnection,
'putrequest', putrequest):
self.assertFalse(self.sender())
call = self.replicator.logger.error.mock_calls[0]
self.assertEqual(
call[1][:-1], ('%s:%s/%s/%s %s', '1.2.3.4', 5678, 'sda1', '9'))
self.assertEqual(str(call[1][-1]), '0.01 seconds: connect send')
def test_connect_receive_timeout(self):
self.replicator.node_timeout = 0.02
node = dict(ip='1.2.3.4', port=5678, device='sda1')
job = dict(partition='9')
self.sender = ssync_sender.Sender(self.replicator, node, job, None)
self.sender.suffixes = ['abc']
class FakeBufferedHTTPConnection(NullBufferedHTTPConnection):
def getresponse(*args, **kwargs):
eventlet.sleep(0.1)
with mock.patch.object(
ssync_sender.bufferedhttp, 'BufferedHTTPConnection',
FakeBufferedHTTPConnection):
self.assertFalse(self.sender())
call = self.replicator.logger.error.mock_calls[0]
self.assertEqual(
call[1][:-1], ('%s:%s/%s/%s %s', '1.2.3.4', 5678, 'sda1', '9'))
self.assertEqual(str(call[1][-1]), '0.02 seconds: connect receive')
def test_connect_bad_status(self):
self.replicator.node_timeout = 0.02
node = dict(ip='1.2.3.4', port=5678, device='sda1')
job = dict(partition='9')
self.sender = ssync_sender.Sender(self.replicator, node, job, None)
self.sender.suffixes = ['abc']
class FakeBufferedHTTPConnection(NullBufferedHTTPConnection):
def getresponse(*args, **kwargs):
response = FakeResponse()
response.status = 503
return response
with mock.patch.object(
ssync_sender.bufferedhttp, 'BufferedHTTPConnection',
FakeBufferedHTTPConnection):
self.assertFalse(self.sender())
call = self.replicator.logger.error.mock_calls[0]
self.assertEqual(
call[1][:-1], ('%s:%s/%s/%s %s', '1.2.3.4', 5678, 'sda1', '9'))
self.assertEqual(str(call[1][-1]), 'Expected status 200; got 503')
def test_readline_newline_in_buffer(self):
self.sender.response_buffer = 'Has a newline already.\r\nOkay.'
self.assertEqual(self.sender.readline(), 'Has a newline already.\r\n')
self.assertEqual(self.sender.response_buffer, 'Okay.')
def test_readline_buffer_exceeds_network_chunk_size_somehow(self):
self.replicator.network_chunk_size = 2
self.sender.response_buffer = '1234567890'
self.assertEqual(self.sender.readline(), '1234567890')
self.assertEqual(self.sender.response_buffer, '')
def test_readline_at_start_of_chunk(self):
self.sender.response = FakeResponse()
self.sender.response.fp = StringIO.StringIO('2\r\nx\n\r\n')
self.assertEqual(self.sender.readline(), 'x\n')
def test_readline_chunk_with_extension(self):
self.sender.response = FakeResponse()
self.sender.response.fp = StringIO.StringIO(
'2 ; chunk=extension\r\nx\n\r\n')
self.assertEqual(self.sender.readline(), 'x\n')
def test_readline_broken_chunk(self):
self.sender.response = FakeResponse()
self.sender.response.fp = StringIO.StringIO('q\r\nx\n\r\n')
self.assertRaises(
exceptions.ReplicationException, self.sender.readline)
self.assertTrue(self.sender.response.close_called)
def test_readline_terminated_chunk(self):
self.sender.response = FakeResponse()
self.sender.response.fp = StringIO.StringIO('b\r\nnot enough')
self.assertRaises(
exceptions.ReplicationException, self.sender.readline)
self.assertTrue(self.sender.response.close_called)
def test_readline_all(self):
self.sender.response = FakeResponse()
self.sender.response.fp = StringIO.StringIO('2\r\nx\n\r\n0\r\n\r\n')
self.assertEqual(self.sender.readline(), 'x\n')
self.assertEqual(self.sender.readline(), '')
self.assertEqual(self.sender.readline(), '')
def test_readline_all_trailing_not_newline_termed(self):
self.sender.response = FakeResponse()
self.sender.response.fp = StringIO.StringIO(
'2\r\nx\n\r\n3\r\n123\r\n0\r\n\r\n')
self.assertEqual(self.sender.readline(), 'x\n')
self.assertEqual(self.sender.readline(), '123')
self.assertEqual(self.sender.readline(), '')
self.assertEqual(self.sender.readline(), '')
def test_missing_check_timeout(self):
self.sender.connection = FakeConnection()
self.sender.connection.send = lambda d: eventlet.sleep(1)
self.sender.daemon.node_timeout = 0.01
self.assertRaises(exceptions.MessageTimeout, self.sender.missing_check)
def test_missing_check_has_empty_suffixes(self):
def yield_hashes(device, partition, suffixes=None):
if device != 'dev' or partition != '9' or suffixes != [
'abc', 'def']:
yield # Just here to make this a generator
raise Exception(
'No match for %r %r %r' % (device, partition, suffixes))
self.sender.connection = FakeConnection()
self.sender.job = {'device': 'dev', 'partition': '9'}
self.sender.suffixes = ['abc', 'def']
self.sender.response = FakeResponse(
chunk_body=(
':MISSING_CHECK: START\r\n'
':MISSING_CHECK: END\r\n'))
self.sender.daemon._diskfile_mgr.yield_hashes = yield_hashes
self.sender.missing_check()
self.assertEqual(
''.join(self.sender.connection.sent),
'17\r\n:MISSING_CHECK: START\r\n\r\n'
'15\r\n:MISSING_CHECK: END\r\n\r\n')
self.assertEqual(self.sender.send_list, [])
def test_missing_check_has_suffixes(self):
def yield_hashes(device, partition, suffixes=None):
if device == 'dev' and partition == '9' and suffixes == [
'abc', 'def']:
yield (
'/srv/node/dev/objects/9/abc/'
'9d41d8cd98f00b204e9800998ecf0abc',
'9d41d8cd98f00b204e9800998ecf0abc',
'1380144470.00000')
yield (
'/srv/node/dev/objects/9/def/'
'9d41d8cd98f00b204e9800998ecf0def',
'9d41d8cd98f00b204e9800998ecf0def',
'1380144472.22222')
yield (
'/srv/node/dev/objects/9/def/'
'9d41d8cd98f00b204e9800998ecf1def',
'9d41d8cd98f00b204e9800998ecf1def',
'1380144474.44444')
else:
raise Exception(
'No match for %r %r %r' % (device, partition, suffixes))
self.sender.connection = FakeConnection()
self.sender.job = {'device': 'dev', 'partition': '9'}
self.sender.suffixes = ['abc', 'def']
self.sender.response = FakeResponse(
chunk_body=(
':MISSING_CHECK: START\r\n'
':MISSING_CHECK: END\r\n'))
self.sender.daemon._diskfile_mgr.yield_hashes = yield_hashes
self.sender.missing_check()
self.assertEqual(
''.join(self.sender.connection.sent),
'17\r\n:MISSING_CHECK: START\r\n\r\n'
'33\r\n9d41d8cd98f00b204e9800998ecf0abc 1380144470.00000\r\n\r\n'
'33\r\n9d41d8cd98f00b204e9800998ecf0def 1380144472.22222\r\n\r\n'
'33\r\n9d41d8cd98f00b204e9800998ecf1def 1380144474.44444\r\n\r\n'
'15\r\n:MISSING_CHECK: END\r\n\r\n')
self.assertEqual(self.sender.send_list, [])
def test_missing_check_far_end_disconnect(self):
def yield_hashes(device, partition, suffixes=None):
if device == 'dev' and partition == '9' and suffixes == ['abc']:
yield (
'/srv/node/dev/objects/9/abc/'
'9d41d8cd98f00b204e9800998ecf0abc',
'9d41d8cd98f00b204e9800998ecf0abc',
'1380144470.00000')
else:
raise Exception(
'No match for %r %r %r' % (device, partition, suffixes))
self.sender.connection = FakeConnection()
self.sender.job = {'device': 'dev', 'partition': '9'}
self.sender.suffixes = ['abc']
self.sender.daemon._diskfile_mgr.yield_hashes = yield_hashes
self.sender.response = FakeResponse(chunk_body='\r\n')
exc = None
try:
self.sender.missing_check()
except exceptions.ReplicationException as err:
exc = err
self.assertEqual(str(exc), 'Early disconnect')
self.assertEqual(
''.join(self.sender.connection.sent),
'17\r\n:MISSING_CHECK: START\r\n\r\n'
'33\r\n9d41d8cd98f00b204e9800998ecf0abc 1380144470.00000\r\n\r\n'
'15\r\n:MISSING_CHECK: END\r\n\r\n')
def test_missing_check_far_end_disconnect2(self):
def yield_hashes(device, partition, suffixes=None):
if device == 'dev' and partition == '9' and suffixes == ['abc']:
yield (
'/srv/node/dev/objects/9/abc/'
'9d41d8cd98f00b204e9800998ecf0abc',
'9d41d8cd98f00b204e9800998ecf0abc',
'1380144470.00000')
else:
raise Exception(
'No match for %r %r %r' % (device, partition, suffixes))
self.sender.connection = FakeConnection()
self.sender.job = {'device': 'dev', 'partition': '9'}
self.sender.suffixes = ['abc']
self.sender.daemon._diskfile_mgr.yield_hashes = yield_hashes
self.sender.response = FakeResponse(
chunk_body=':MISSING_CHECK: START\r\n')
exc = None
try:
self.sender.missing_check()
except exceptions.ReplicationException as err:
exc = err
self.assertEqual(str(exc), 'Early disconnect')
self.assertEqual(
''.join(self.sender.connection.sent),
'17\r\n:MISSING_CHECK: START\r\n\r\n'
'33\r\n9d41d8cd98f00b204e9800998ecf0abc 1380144470.00000\r\n\r\n'
'15\r\n:MISSING_CHECK: END\r\n\r\n')
def test_missing_check_far_end_unexpected(self):
def yield_hashes(device, partition, suffixes=None):
if device == 'dev' and partition == '9' and suffixes == ['abc']:
yield (
'/srv/node/dev/objects/9/abc/'
'9d41d8cd98f00b204e9800998ecf0abc',
'9d41d8cd98f00b204e9800998ecf0abc',
'1380144470.00000')
else:
raise Exception(
'No match for %r %r %r' % (device, partition, suffixes))
self.sender.connection = FakeConnection()
self.sender.job = {'device': 'dev', 'partition': '9'}
self.sender.suffixes = ['abc']
self.sender.daemon._diskfile_mgr.yield_hashes = yield_hashes
self.sender.response = FakeResponse(chunk_body='OH HAI\r\n')
exc = None
try:
self.sender.missing_check()
except exceptions.ReplicationException as err:
exc = err
self.assertEqual(str(exc), "Unexpected response: 'OH HAI'")
self.assertEqual(
''.join(self.sender.connection.sent),
'17\r\n:MISSING_CHECK: START\r\n\r\n'
'33\r\n9d41d8cd98f00b204e9800998ecf0abc 1380144470.00000\r\n\r\n'
'15\r\n:MISSING_CHECK: END\r\n\r\n')
def test_missing_check_send_list(self):
def yield_hashes(device, partition, suffixes=None):
if device == 'dev' and partition == '9' and suffixes == ['abc']:
yield (
'/srv/node/dev/objects/9/abc/'
'9d41d8cd98f00b204e9800998ecf0abc',
'9d41d8cd98f00b204e9800998ecf0abc',
'1380144470.00000')
else:
raise Exception(
'No match for %r %r %r' % (device, partition, suffixes))
self.sender.connection = FakeConnection()
self.sender.job = {'device': 'dev', 'partition': '9'}
self.sender.suffixes = ['abc']
self.sender.response = FakeResponse(
chunk_body=(
':MISSING_CHECK: START\r\n'
'0123abc\r\n'
':MISSING_CHECK: END\r\n'))
self.sender.daemon._diskfile_mgr.yield_hashes = yield_hashes
self.sender.missing_check()
self.assertEqual(
''.join(self.sender.connection.sent),
'17\r\n:MISSING_CHECK: START\r\n\r\n'
'33\r\n9d41d8cd98f00b204e9800998ecf0abc 1380144470.00000\r\n\r\n'
'15\r\n:MISSING_CHECK: END\r\n\r\n')
self.assertEqual(self.sender.send_list, ['0123abc'])
def test_updates_timeout(self):
self.sender.connection = FakeConnection()
self.sender.connection.send = lambda d: eventlet.sleep(1)
self.sender.daemon.node_timeout = 0.01
self.assertRaises(exceptions.MessageTimeout, self.sender.updates)
def test_updates_empty_send_list(self):
self.sender.connection = FakeConnection()
self.sender.send_list = []
self.sender.response = FakeResponse(
chunk_body=(
':UPDATES: START\r\n'
':UPDATES: END\r\n'))
self.sender.updates()
self.assertEqual(
''.join(self.sender.connection.sent),
'11\r\n:UPDATES: START\r\n\r\n'
'f\r\n:UPDATES: END\r\n\r\n')
def test_updates_unexpected_response_lines1(self):
self.sender.connection = FakeConnection()
self.sender.send_list = []
self.sender.response = FakeResponse(
chunk_body=(
'abc\r\n'
':UPDATES: START\r\n'
':UPDATES: END\r\n'))
exc = None
try:
self.sender.updates()
except exceptions.ReplicationException as err:
exc = err
self.assertEqual(str(exc), "Unexpected response: 'abc'")
self.assertEqual(
''.join(self.sender.connection.sent),
'11\r\n:UPDATES: START\r\n\r\n'
'f\r\n:UPDATES: END\r\n\r\n')
def test_updates_unexpected_response_lines2(self):
self.sender.connection = FakeConnection()
self.sender.send_list = []
self.sender.response = FakeResponse(
chunk_body=(
':UPDATES: START\r\n'
'abc\r\n'
':UPDATES: END\r\n'))
exc = None
try:
self.sender.updates()
except exceptions.ReplicationException as err:
exc = err
self.assertEqual(str(exc), "Unexpected response: 'abc'")
self.assertEqual(
''.join(self.sender.connection.sent),
'11\r\n:UPDATES: START\r\n\r\n'
'f\r\n:UPDATES: END\r\n\r\n')
def test_updates_is_deleted(self):
device = 'dev'
part = '9'
object_parts = ('a', 'c', 'o')
df = self._make_open_diskfile(device, part, *object_parts)
object_hash = utils.hash_path(*object_parts)
delete_timestamp = utils.normalize_timestamp(time.time())
df.delete(delete_timestamp)
self.sender.connection = FakeConnection()
self.sender.job = {'device': device, 'partition': part}
self.sender.node = {}
self.sender.send_list = [object_hash]
self.sender.send_delete = mock.MagicMock()
self.sender.send_put = mock.MagicMock()
self.sender.response = FakeResponse(
chunk_body=(
':UPDATES: START\r\n'
':UPDATES: END\r\n'))
self.sender.updates()
self.sender.send_delete.assert_called_once_with(
'/a/c/o', delete_timestamp)
self.assertEqual(self.sender.send_put.mock_calls, [])
# note that the delete line isn't actually sent since we mock
# send_delete; send_delete is tested separately.
self.assertEqual(
''.join(self.sender.connection.sent),
'11\r\n:UPDATES: START\r\n\r\n'
'f\r\n:UPDATES: END\r\n\r\n')
def test_updates_put(self):
device = 'dev'
part = '9'
object_parts = ('a', 'c', 'o')
df = self._make_open_diskfile(device, part, *object_parts)
object_hash = utils.hash_path(*object_parts)
expected = df.get_metadata()
self.sender.connection = FakeConnection()
self.sender.job = {'device': device, 'partition': part}
self.sender.node = {}
self.sender.send_list = [object_hash]
self.sender.send_delete = mock.MagicMock()
self.sender.send_put = mock.MagicMock()
self.sender.response = FakeResponse(
chunk_body=(
':UPDATES: START\r\n'
':UPDATES: END\r\n'))
self.sender.updates()
self.assertEqual(self.sender.send_delete.mock_calls, [])
self.assertEqual(1, len(self.sender.send_put.mock_calls))
args, _kwargs = self.sender.send_put.call_args
path, df = args
self.assertEqual(path, '/a/c/o')
self.assert_(isinstance(df, diskfile.DiskFile))
self.assertEqual(expected, df.get_metadata())
# note that the put line isn't actually sent since we mock send_put;
# send_put is tested separately.
self.assertEqual(
''.join(self.sender.connection.sent),
'11\r\n:UPDATES: START\r\n\r\n'
'f\r\n:UPDATES: END\r\n\r\n')
def test_updates_read_response_timeout_start(self):
self.sender.connection = FakeConnection()
self.sender.send_list = []
self.sender.response = FakeResponse(
chunk_body=(
':UPDATES: START\r\n'
':UPDATES: END\r\n'))
orig_readline = self.sender.readline
def delayed_readline():
eventlet.sleep(1)
return orig_readline()
self.sender.readline = delayed_readline
self.sender.daemon.http_timeout = 0.01
self.assertRaises(exceptions.MessageTimeout, self.sender.updates)
def test_updates_read_response_disconnect_start(self):
self.sender.connection = FakeConnection()
self.sender.send_list = []
self.sender.response = FakeResponse(chunk_body='\r\n')
exc = None
try:
self.sender.updates()
except exceptions.ReplicationException as err:
exc = err
self.assertEqual(str(exc), 'Early disconnect')
self.assertEqual(
''.join(self.sender.connection.sent),
'11\r\n:UPDATES: START\r\n\r\n'
'f\r\n:UPDATES: END\r\n\r\n')
def test_updates_read_response_unexp_start(self):
self.sender.connection = FakeConnection()
self.sender.send_list = []
self.sender.response = FakeResponse(
chunk_body=(
'anything else\r\n'
':UPDATES: START\r\n'
':UPDATES: END\r\n'))
exc = None
try:
self.sender.updates()
except exceptions.ReplicationException as err:
exc = err
self.assertEqual(str(exc), "Unexpected response: 'anything else'")
self.assertEqual(
''.join(self.sender.connection.sent),
'11\r\n:UPDATES: START\r\n\r\n'
'f\r\n:UPDATES: END\r\n\r\n')
def test_updates_read_response_timeout_end(self):
self.sender.connection = FakeConnection()
self.sender.send_list = []
self.sender.response = FakeResponse(
chunk_body=(
':UPDATES: START\r\n'
':UPDATES: END\r\n'))
orig_readline = self.sender.readline
def delayed_readline():
rv = orig_readline()
if rv == ':UPDATES: END\r\n':
eventlet.sleep(1)
return rv
self.sender.readline = delayed_readline
self.sender.daemon.http_timeout = 0.01
self.assertRaises(exceptions.MessageTimeout, self.sender.updates)
def test_updates_read_response_disconnect_end(self):
self.sender.connection = FakeConnection()
self.sender.send_list = []
self.sender.response = FakeResponse(
chunk_body=(
':UPDATES: START\r\n'
'\r\n'))
exc = None
try:
self.sender.updates()
except exceptions.ReplicationException as err:
exc = err
self.assertEqual(str(exc), 'Early disconnect')
self.assertEqual(
''.join(self.sender.connection.sent),
'11\r\n:UPDATES: START\r\n\r\n'
'f\r\n:UPDATES: END\r\n\r\n')
def test_updates_read_response_unexp_end(self):
self.sender.connection = FakeConnection()
self.sender.send_list = []
self.sender.response = FakeResponse(
chunk_body=(
':UPDATES: START\r\n'
'anything else\r\n'
':UPDATES: END\r\n'))
exc = None
try:
self.sender.updates()
except exceptions.ReplicationException as err:
exc = err
self.assertEqual(str(exc), "Unexpected response: 'anything else'")
self.assertEqual(
''.join(self.sender.connection.sent),
'11\r\n:UPDATES: START\r\n\r\n'
'f\r\n:UPDATES: END\r\n\r\n')
def test_send_delete_timeout(self):
self.sender.connection = FakeConnection()
self.sender.connection.send = lambda d: eventlet.sleep(1)
self.sender.daemon.node_timeout = 0.01
exc = None
try:
self.sender.send_delete('/a/c/o', '1381679759.90941')
except exceptions.MessageTimeout as err:
exc = err
self.assertEqual(str(exc), '0.01 seconds: send_delete')
def test_send_delete(self):
self.sender.connection = FakeConnection()
self.sender.send_delete('/a/c/o', '1381679759.90941')
self.assertEqual(
''.join(self.sender.connection.sent),
'30\r\n'
'DELETE /a/c/o\r\n'
'X-Timestamp: 1381679759.90941\r\n'
'\r\n\r\n')
def test_send_put_initial_timeout(self):
df = self._make_open_diskfile()
df._disk_chunk_size = 2
self.sender.connection = FakeConnection()
self.sender.connection.send = lambda d: eventlet.sleep(1)
self.sender.daemon.node_timeout = 0.01
exc = None
try:
self.sender.send_put('/a/c/o', df)
except exceptions.MessageTimeout as err:
exc = err
self.assertEqual(str(exc), '0.01 seconds: send_put')
def test_send_put_chunk_timeout(self):
df = self._make_open_diskfile()
self.sender.connection = FakeConnection()
self.sender.daemon.node_timeout = 0.01
one_shot = [None]
def mock_send(data):
try:
one_shot.pop()
except IndexError:
eventlet.sleep(1)
self.sender.connection.send = mock_send
exc = None
try:
self.sender.send_put('/a/c/o', df)
except exceptions.MessageTimeout as err:
exc = err
self.assertEqual(str(exc), '0.01 seconds: send_put chunk')
def test_send_put(self):
body = 'test'
extra_metadata = {'Some-Other-Header': 'value'}
df = self._make_open_diskfile(body=body,
extra_metadata=extra_metadata)
expected = dict(df.get_metadata())
expected['body'] = body
expected['chunk_size'] = len(body)
self.sender.connection = FakeConnection()
self.sender.send_put('/a/c/o', df)
self.assertEqual(
''.join(self.sender.connection.sent),
'82\r\n'
'PUT /a/c/o\r\n'
'Content-Length: %(Content-Length)s\r\n'
'ETag: %(ETag)s\r\n'
'Some-Other-Header: value\r\n'
'X-Timestamp: %(X-Timestamp)s\r\n'
'\r\n'
'\r\n'
'%(chunk_size)s\r\n'
'%(body)s\r\n' % expected)
def test_disconnect_timeout(self):
self.sender.connection = FakeConnection()
self.sender.connection.send = lambda d: eventlet.sleep(1)
self.sender.daemon.node_timeout = 0.01
self.sender.disconnect()
self.assertEqual(''.join(self.sender.connection.sent), '')
self.assertTrue(self.sender.connection.closed)
def test_disconnect(self):
self.sender.connection = FakeConnection()
self.sender.disconnect()
self.assertEqual(''.join(self.sender.connection.sent), '0\r\n\r\n')
self.assertTrue(self.sender.connection.closed)
if __name__ == '__main__':
unittest.main()
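# The literal byte strings asserted throughout these tests (for example
# '17\r\n:MISSING_CHECK: START\r\n\r\n') follow the same HTTP chunked framing that
# FakeResponse builds with '%x\r\n%s\r\n0\r\n\r\n': a hexadecimal payload length, CRLF,
# the payload, CRLF. The helper below is an illustrative sketch, not part of the Swift code.
def frame_chunk(payload):
    # one chunk of a chunked-transfer body: hex length, CRLF, payload, CRLF
    return '%x\r\n%s\r\n' % (len(payload), payload)

assert frame_chunk(':MISSING_CHECK: START\r\n') == '17\r\n:MISSING_CHECK: START\r\n\r\n'
assert frame_chunk(':UPDATES: END\r\n') == 'f\r\n:UPDATES: END\r\n\r\n'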
| 39.247894
| 79
| 0.591384
|
502cf0af7c80f1a285d57c93338c3a09afdf4eb5
| 11,270
|
py
|
Python
|
framework/CodeInterfaces/PHISICS/FissionYieldParser.py
|
sonatsen/raven
|
30764491e7ecaa16de2a4e0ddab3bc9e169e5f95
|
[
"Apache-2.0"
] | 2
|
2019-10-11T15:59:10.000Z
|
2021-04-08T18:23:57.000Z
|
framework/CodeInterfaces/PHISICS/FissionYieldParser.py
|
sonatsen/raven
|
30764491e7ecaa16de2a4e0ddab3bc9e169e5f95
|
[
"Apache-2.0"
] | 1
|
2018-03-27T13:06:00.000Z
|
2018-03-27T13:06:00.000Z
|
framework/CodeInterfaces/PHISICS/FissionYieldParser.py
|
sonatsen/raven
|
30764491e7ecaa16de2a4e0ddab3bc9e169e5f95
|
[
"Apache-2.0"
] | 1
|
2017-08-29T16:09:13.000Z
|
2017-08-29T16:09:13.000Z
|
"""
Created on June 25th, 2017
@author: rouxpn
"""
from __future__ import division, print_function, unicode_literals, absolute_import
import warnings
warnings.simplefilter('default', DeprecationWarning)
import os
import re
from decimal import Decimal
class FissionYieldParser():
"""
Parses the PHISICS xml fission yield file and replaces the nominal values by the perturbed values.
"""
def __init__(self, inputFiles, workingDir, **pertDict):
"""
Constructor.
@ In, inputFiles, string, xml fission yield file.
@ In, workingDir, string, absolute path to the working directory
@ In, pertDict, dictionary, dictionary of perturbed variables
@ Out, None
"""
self.allYieldList = [] # all the fis. yield families in fast and thermal spectrum
self.inputFiles = inputFiles
self.spectrum = ['Thermal', 'Fast'] # Possible spectrum found in the library.
        self.typeOfSpectrum = None # Flag. Takes the value of one of the possible spectra, depending on which line of the file is parsed
self.isotopeList = [] # Fission products having a fission yield defined
self.spectrumNumbering = {
} # Keys: type of spectrum (fast or thermal), values: numbering dictionary
self.listedYieldDict = {} # Nested dictionary of perturbed variables
self.pertYieldDict = self.scientificNotation(
pertDict) # Perturbed variables
# open the unperturbed file
openInputFile = open(self.inputFiles, "r")
lines = openInputFile.readlines()
openInputFile.close()
self.characterizeLibrary(lines)
self.isotopeList = list(set(
        self.isotopeList)) # Removes all repetitions from the isotope list
self.numberOfIsotopes = len(self.isotopeList)
self.fileReconstruction() # Puts the perturbed variables in a dictionary
self.printInput(
workingDir, lines
        ) # Replaces the nominal values with the perturbed ones and prints them to a file
def scientificNotation(self, pertDict):
"""
        Converts the numerical values into scientific notation.
@ In, pertDict, dictionary, perturbed variables
@ Out, pertDict, dictionary, perturbed variables in scientific format
"""
for key, value in pertDict.items():
pertDict[key] = '%.3E' % Decimal(str(value))
return pertDict
def characterizeLibrary(self,lines):
"""
        Characterizes the structure of the library and determines the types of decay available for the fast and thermal spectrum families.
@ In, lines, list, unperturbed input file lines
@ Out, None
"""
concatenateYieldList = []
for line in lines:
if re.match(r'(.*?)Thermal Fission Yield', line):
self.typeOfSpectrum = self.spectrum[0]
elif re.match(r'(.*?)Fast Fission Yield', line):
self.typeOfSpectrum = self.spectrum[1]
if (re.match(r'(.*?)\w+(-?)\d+\s+\w+\s+\w(-?)\d+\s+\w', line)
and any(s in "FY" for s in line)): # create dynamic column detector
count = 0
FYgroup = [
                ] # reset the counter and the dictionary numbering if a new column sequence is detected
numbering = {}
line = re.sub(r'FY', r'', line)
splitStringYieldType = line.upper().split()
for i in splitStringYieldType:
FYgroup.append(
i.replace('-', '')
) # get the fission yield group's names (U235, Pu239 etc.) and remove the dash in those IDs
concatenateYieldList = concatenateYieldList + FYgroup # concatenate all the possible yield type (including repetition among actinides and FP)
self.allYieldList = list(set(concatenateYieldList))
for i in range(
len(splitStringYieldType
)): # assign the column position of the given yield types
count = count + 1
numbering[FYgroup[
i]] = count # assign the column position of the given Yield types
splitStringYieldType[i] = re.sub(r'(.*?)(\w+)(-)(\d+M?)', r'\1\2\4',
splitStringYieldType[i])
if self.typeOfSpectrum == self.spectrum[0]:
self.spectrumNumbering[self.spectrum[0]] = numbering
if self.typeOfSpectrum == self.spectrum[1]:
self.spectrumNumbering[self.spectrum[1]] = numbering
numbering[splitStringYieldType[i]] = count
if re.match(r'(.*?)\s+\D+(-?)\d+(M?)\s+\d+.\d', line) or re.match(
r'(.*?)ALPHA\s+\d+.\d', line):
isotopeLines = line.split()
self.isotopeList.append(isotopeLines[0])
def matrixPrinter(self, line, outfile, spectra):
"""
Prints the perturbed decay matrix in the outfile.
        @ In, line, string, current line of the unperturbed input file
        @ In, outfile, file object, output file in file object format
        @ In, spectra, string, indicates whether the yields relate to the thermal spectrum ('Thermal') or the fast spectrum ('Fast')
@ Out, None
"""
isotopeCounter = 0
if re.search(r'END\s+', line):
return
line = line.strip()
line = line.upper().split()
line[0] = re.sub(r'(.*?)(\w+)(-)(\d+M?)', r'\1\2\4',
line[0]) # remove the dashes in isotope names
spectraUpper = spectra.upper()
try:
for fissionProductID in self.listedYieldDict[spectraUpper].keys():
for actinideID in self.listedYieldDict[spectraUpper][
fissionProductID].keys():
if line[0] == fissionProductID:
typeOfYieldPerturbed = []
self.spectrumUpperCase = [x.upper() for x in self.spectrum]
typeOfYieldPerturbed = self.listedYieldDict.get(
spectraUpper).get(fissionProductID).keys()
for i in range(len(typeOfYieldPerturbed)):
try:
if self.listedYieldDict.get(spectraUpper).get(
fissionProductID).get(typeOfYieldPerturbed[i]) != {}:
line[self.spectrumNumbering.get(spectra).get(
typeOfYieldPerturbed[i])] = str(
self.listedYieldDict.get(spectraUpper).get(
fissionProductID).get(typeOfYieldPerturbed[i]))
print (line[self.spectrumNumbering.get(spectra).get(
typeOfYieldPerturbed[i])])
except TypeError:
raise Exception(
'Make sure the fission yields you are perturbing have existing values in the unperturbed fission yield library'
)
except KeyError:
            pass # only the spectra actually perturbed appear in self.listedYieldDict; e.g. if only 'FAST': {u'ZN67': {u'U235': '5.659E+00'}} was requested, the 'THERMAL' key does not exist and the resulting KeyError is simply ignored here
try:
isotopeCounter = isotopeCounter + 1
line[0] = "{0:<7s}".format(line[0])
i = 1
while i <= len(
self.spectrumNumbering.get(spectra)
): # while i is smaller than the number of columns that represents the number of fission yield families
try:
line[i] = "{0:<11s}".format(line[i])
i = i + 1
except IndexError:
i = i + 1
outfile.writelines(' ' + ''.join(
line[0:len(self.spectrumNumbering.get(spectra)) + 1]) + "\n")
if isotopeCounter == self.numberOfIsotopes:
for lineInput in lines:
lineStripped = lineInput.strip()
except KeyError:
raise Exception(
'Make sure the fission yields you are perturbing have existing values in the unperturbed fission yield library'
)
def hardcopyPrinter(self, spectra, lines):
"""
        Prints the hardcopied information at the beginning of the xml file.
        @ In, spectra, string, indicates whether the yields relate to the thermal spectrum ('Thermal') or the fast spectrum ('Fast')
@ In, lines, list, unperturbed input file lines
@ Out, None
"""
flag = 0
matrixFlag = 0
with open(self.inputFiles, 'a+') as outfile:
for line in lines:
if not line.split():
continue
if re.search(r'' + self.spectrum[1] + ' Fission Yield ' , line.strip()) and spectra == self.spectrum[1]: # find the line- END Fast Fission Yield (2)
flag = 2
if flag == 2:
if re.match(r'(.*?)\w+(-?)\d+\s+\w+\s+\w(-?)\d+\s+\w',line.strip()) and spectra == self.spectrum[1] and matrixFlag == 0:
outfile.writelines(line)
matrixFlag = 4
elif matrixFlag == 4:
self.matrixPrinter(line, outfile, spectra)
else:
outfile.writelines(line)
if (re.match(r'(.*?)' + self.spectrum[0], line.strip()) and spectra == self.spectrum[0]): # find the line- Thermal Fission Yield (1)
flag = 1
if flag == 1:
if re.search(r'Fast Fission Yield ', line) : # find the line- END Fast Fission Yield (2)
outfile.writelines('END ')
flag = 2
break
if re.match(r'(.*?)\w+(-?)\d+\s+\w+\s+\w(-?)\d+\s+\w', line.strip()) and spectra == self.spectrum[0] and matrixFlag == 0: # find the line U-235 FY U-238 FY (last hardcopied line)
outfile.writelines(line)
matrixFlag = 3
elif matrixFlag == 3:
self.matrixPrinter(line, outfile, spectra)
else:
outfile.writelines(line)
outfile.close()
def fileReconstruction(self):
"""
        Converts the flat perturbation dictionary pertDict -> {'FY|THERMAL|U235|XE135': 1.30}
        into a nested dictionary keyed spectrum -> fission product -> actinide, i.e. {'THERMAL': {'XE135': {'U235': 1.30}}}
@ In, None
@ Out, None
"""
fissioningActinide = []
resultingFP = []
spectrumType = []
for key in self.pertYieldDict.keys():
splittedYieldKeywords = key.split('|')
spectrumType.append(splittedYieldKeywords[1])
fissioningActinide.append(splittedYieldKeywords[2])
resultingFP.append(splittedYieldKeywords[3])
for i in range(len(spectrumType)):
self.listedYieldDict[spectrumType[i]] = {}
for j in range(len(resultingFP)):
self.listedYieldDict[spectrumType[i]][resultingFP[j]] = {
} # declare all the dictionaries
for k in range(len(fissioningActinide)):
self.listedYieldDict[spectrumType[i]][resultingFP[j]][
fissioningActinide[k]] = {}
for yieldTypeKey, yieldValue in self.pertYieldDict.items():
self.listedYieldDict[yieldTypeKey.split('|')[1]][yieldTypeKey.split('|')[
3]][yieldTypeKey.split('|')[2]] = yieldValue
def printInput(self, workingDir, lines):
"""
        Prints out the perturbed fission yield library into a .dat file. The workflow is:
        open a new file with a dummy name; parse the unperturbed library; print each line into the dummy file,
        replacing it with perturbed variables if necessary; then change the name of the dummy file.
@ In, workingDir, string, path to working directory
@ In, lines, list, unperturbed input file lines
@ Out, None
"""
if os.path.exists(self.inputFiles):
os.remove(self.inputFiles) # remove the file if was already existing
for spectra in self.spectrum:
self.hardcopyPrinter(spectra, lines)
with open(self.inputFiles, 'a') as outfile:
outfile.writelines(' end')
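# The key-splitting performed by fileReconstruction can be reproduced in isolation; the
# standalone sketch below (function name illustrative, not part of this parser) builds the
# same spectrum -> fission product -> actinide nesting from the flat 'FY|...' keys.
def reconstruct(pert_dict):
    # {'FY|THERMAL|U235|XE135': '1.300E+00'} -> {'THERMAL': {'XE135': {'U235': '1.300E+00'}}}
    nested = {}
    for key, value in pert_dict.items():
        _, spectrum, actinide, fission_product = key.split('|')
        nested.setdefault(spectrum, {}).setdefault(fission_product, {})[actinide] = value
    return nested
# reconstruct({'FY|THERMAL|U235|XE135': '1.300E+00'})
#   -> {'THERMAL': {'XE135': {'U235': '1.300E+00'}}}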
| 44.545455
| 240
| 0.622981
|
5dad84f28380998f22f560c059b6ad7b404bf737
| 978
|
py
|
Python
|
devgram/users/models.py
|
smc0210/devgram
|
de7dd2b00f546e3c0ef69a14388870d8f1edfee8
|
[
"MIT"
] | null | null | null |
devgram/users/models.py
|
smc0210/devgram
|
de7dd2b00f546e3c0ef69a14388870d8f1edfee8
|
[
"MIT"
] | 16
|
2020-02-12T00:51:19.000Z
|
2022-03-11T23:20:09.000Z
|
devgram/users/models.py
|
smc0210/devgram
|
de7dd2b00f546e3c0ef69a14388870d8f1edfee8
|
[
"MIT"
] | 1
|
2018-04-18T04:32:53.000Z
|
2018-04-18T04:32:53.000Z
|
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
class User(AbstractUser):
""" User Model """
GENDER_CHOICES = (
        ('male', 'Male'),
('female', 'Female'),
('not-specified', 'Not specified')
)
# First Name and Last Name do not cover name patterns
# around the globe.
name = models.CharField(_("Name of User"), blank=True, max_length=255)
website = models.URLField(null=True)
bio = models.TextField(null=True)
phone = models.CharField(max_length=140, null=True)
gender = models.CharField(max_length=80, choices=GENDER_CHOICES, null=True)
followers = models.ManyToManyField("self")
following = models.ManyToManyField("self")
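    # Note: ManyToManyField("self") is symmetrical by default in Django (adding A to B also adds B to A),
    # which is usually not what a followers/following model wants; symmetrical=False with distinct
    # related_name values is the common pattern.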
def __str__(self):
return self.username
def get_absolute_url(self):
return reverse("users:detail", kwargs={"username": self.username})
| 30.5625
| 79
| 0.686094
|
e3927f66b99afcb8db3d8333e0b424c1b3b3e2e5
| 2,105
|
py
|
Python
|
connect.py
|
staticfloat/SidewalkWebpage
|
6138d24101a8d4288b0126c008314bfd8592f29f
|
[
"MIT"
] | 60
|
2016-02-12T14:33:05.000Z
|
2022-03-11T21:33:52.000Z
|
connect.py
|
staticfloat/SidewalkWebpage
|
6138d24101a8d4288b0126c008314bfd8592f29f
|
[
"MIT"
] | 2,292
|
2016-02-24T18:07:04.000Z
|
2022-03-28T22:51:08.000Z
|
connect.py
|
staticfloat/SidewalkWebpage
|
6138d24101a8d4288b0126c008314bfd8592f29f
|
[
"MIT"
] | 26
|
2016-04-08T20:55:09.000Z
|
2022-03-09T08:40:17.000Z
|
import os
from os.path import expanduser
import boto3
import psycopg2
import psycopg2.extras
from sqlalchemy import create_engine
def connect_to_mturk():
# Get key from an external file
secret_key = {}
with open("rootkey.csv") as myfile:
for line in myfile:
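            # partition("=")[::2] keeps only the text before and after the first "=", e.g. "AWSAccessKeyId=XYZ" -> ("AWSAccessKeyId", "XYZ")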
name, var = line.partition("=")[::2]
secret_key[name.strip()] = str(var.strip())
# Setup mTurk parameters
#host = 'https://mturk-requester-sandbox.us-east-1.amazonaws.com/'
host = 'https://mturk-requester.us-east-1.amazonaws.com'
region_name = 'us-east-1'
aws_access_key_id = secret_key["AWSAccessKeyId"]
aws_secret_access_key = secret_key["AWSSecretKey"]
mturk = boto3.client('mturk',
endpoint_url=host,
region_name=region_name,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
)
# Sample line of code to get account balance [$10,000.00]
print mturk.get_account_balance()['AvailableBalance']
return mturk
def connect_to_db():
home = expanduser("~")
file_path = home + "/.pgpass"
with open(file_path) as filename:
arr = []
for line in filename:
l = line.strip()
if l[0] != '#':
arr = l.split(":")
dbhost = arr[0]
dbport = arr[1]
dbname = arr[2]
dbuser = arr[3]
dbpass = arr[4]
# Format of the connection string: dialect+driver://username:password@host:port/database
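    # e.g. 'postgresql://dbuser:dbpass@localhost:5432/dbname' (values illustrative only)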
connection_str = ('postgresql://' + dbuser + ':' + dbpass +
'@' + dbhost + ':' + dbport + '/' + dbname)
print connection_str
engine = create_engine(connection_str)
conn = psycopg2.connect("dbname=" + dbname +
" user=" + dbuser +
" host=" + dbhost +
" port=" + dbport +
" password=" + dbpass + "")
return conn, engine
| 31.893939
| 96
| 0.542043
|
fcf6b6d29e9e0b700c9287948ee666b3d44edc80
| 1,972
|
py
|
Python
|
BRUCE_SENSE/util/test_visual_dumper.py
|
Westwood-Robotics/BRUCE_SENSE
|
b44f5c9f6dc9c76fee94d0cdc9e7eb11159a2220
|
[
"Apache-2.0"
] | 2
|
2022-02-04T03:21:47.000Z
|
2022-02-07T19:00:04.000Z
|
BRUCE_SENSE/util/test_visual_dumper.py
|
Westwood-Robotics/BRUCE_SENSE
|
b44f5c9f6dc9c76fee94d0cdc9e7eb11159a2220
|
[
"Apache-2.0"
] | null | null | null |
BRUCE_SENSE/util/test_visual_dumper.py
|
Westwood-Robotics/BRUCE_SENSE
|
b44f5c9f6dc9c76fee94d0cdc9e7eb11159a2220
|
[
"Apache-2.0"
] | 1
|
2022-03-03T01:04:34.000Z
|
2022-03-03T01:04:34.000Z
|
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import time
from bruce_sense import Manager
s = Manager.SENSOR(port='COM7', baudrate=2000000)
fig = plt.figure()
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223)
ax4 = fig.add_subplot(224)
ts = []
rs = []
ps = []
ys = []
rxs = []
rys = []
rzs = []
ddxs = []
ddys = []
ddzs = []
ddxs_nog = []
ddys_nog = []
ddzs_nog = []
t0 = time.time()
data = {}
def animate(i, ts, rs, ps, ys, rxs, rys, rzs, ddxs, ddys, ddzs, ddxs_nog, ddys_nog, ddzs_nog):
data = s.get_dump()[0]
ts.append(time.time() - t0)
ddxs_nog.append(data[6])
ddys_nog.append(data[7])
ddzs_nog.append(data[8])
ddxs.append(data[0])
ddys.append(data[1])
ddzs.append(data[2])
rxs.append(data[3])
rys.append(data[4])
rzs.append(data[5])
rs.append(data[9])
ps.append(data[10])
ys.append(data[11])
ax1.clear()
ax2.clear()
ax3.clear()
ax4.clear()
ax1.plot(ts[-100:], rs[-100:], label='roll (x)')
ax1.plot(ts[-100:], ps[-100:], label='pitch (y)')
#ax1.plot(ts[-100:], ys[-100:], label='yaw (z)')
ax1.legend(loc='upper left')
ax2.plot(ts[-100:], rxs[-100:], label='du (x)')
ax2.plot(ts[-100:], rys[-100:], label='dv (y)')
ax2.plot(ts[-100:], rzs[-100:], label='dw (z)')
ax2.legend(loc='upper left')
ax3.plot(ts[-100:], ddxs[-100:], label='ddx')
ax3.plot(ts[-100:], ddys[-100:], label='ddy')
ax3.plot(ts[-100:], ddzs[-100:], label='ddz')
ax3.legend(loc='upper left')
ax4.plot(ts[-100:], ddxs_nog[-100:], label='ddx_no_g')
ax4.plot(ts[-100:], ddys_nog[-100:], label='ddy_no_g')
ax4.plot(ts[-100:], ddzs_nog[-100:], label='ddz_no_g')
ax4.legend(loc='upper left')
ani = animation.FuncAnimation(fig, animate, fargs = (ts, rs, ps, ys, rxs, rys, rzs, ddxs, ddys, ddzs, ddxs_nog, ddys_nog,
ddzs_nog), interval = 1)
plt.show()
| 26.293333
| 121
| 0.583671
|
9a84ef271873d86499080f43e7975fbb2eafccc2
| 6,217
|
py
|
Python
|
tutorials/Tutorial8_Preprocessing.py
|
SjSnowball/haystack
|
bb066c0a2c10253cf2bf7eb8cc829f1a0edde84d
|
[
"Apache-2.0"
] | 4,544
|
2019-11-14T11:57:49.000Z
|
2022-03-31T17:41:18.000Z
|
tutorials/Tutorial8_Preprocessing.py
|
SjSnowball/haystack
|
bb066c0a2c10253cf2bf7eb8cc829f1a0edde84d
|
[
"Apache-2.0"
] | 1,679
|
2020-01-14T15:55:58.000Z
|
2022-03-31T20:55:25.000Z
|
tutorials/Tutorial8_Preprocessing.py
|
SjSnowball/haystack
|
bb066c0a2c10253cf2bf7eb8cc829f1a0edde84d
|
[
"Apache-2.0"
] | 820
|
2019-11-27T13:01:42.000Z
|
2022-03-31T12:54:34.000Z
|
"""
Preprocessing
Haystack includes a suite of tools to extract text from different file types, normalize white space
and split text into smaller pieces to optimize retrieval.
These data preprocessing steps can have a big impact on the system's performance, and effective handling of data is key to getting the most out of Haystack.
Ultimately, Haystack pipelines expect data to be provided as a list of documents in the following dictionary format:
docs = [
{
'text': DOCUMENT_TEXT_HERE,
'meta': {'name': DOCUMENT_NAME, ...}
}, ...
]
This tutorial will show you all the tools that Haystack provides to help you cast your data into the right format.
"""
# Here are the imports we need
from haystack.file_converter.txt import TextConverter
from haystack.file_converter.pdf import PDFToTextConverter
from haystack.file_converter.docx import DocxToTextConverter
from haystack.preprocessor.utils import convert_files_to_dicts, fetch_archive_from_http
from haystack.preprocessor.preprocessor import PreProcessor
def tutorial8_preprocessing():
# This fetches some sample files to work with
doc_dir = "data/preprocessing_tutorial"
s3_url = "https://s3.eu-central-1.amazonaws.com/deepset.ai-farm-qa/datasets/documents/preprocessing_tutorial.zip"
fetch_archive_from_http(url=s3_url, output_dir=doc_dir)
"""
## Converters
Haystack's converter classes are designed to help you turn files on your computer into the documents
that can be processed by the Haystack pipeline.
There are file converters for txt, pdf, docx files as well as a converter that is powered by Apache Tika.
    The parameter `valid_languages` does not convert files to the target language, but checks if the conversion worked as expected.
For converting PDFs, try changing the encoding to UTF-8 if the conversion isn't great.
"""
# Here are some examples of how you would use file converters
converter = TextConverter(remove_numeric_tables=True, valid_languages=["en"])
doc_txt = converter.convert(file_path="data/preprocessing_tutorial/classics.txt", meta=None)
converter = PDFToTextConverter(remove_numeric_tables=True, valid_languages=["en"])
doc_pdf = converter.convert(file_path="data/preprocessing_tutorial/bert.pdf", meta=None)
converter = DocxToTextConverter(remove_numeric_tables=False, valid_languages=["en"])
doc_docx = converter.convert(file_path="data/preprocessing_tutorial/heavy_metal.docx", meta=None)
# Haystack also has a convenience function that will automatically apply the right converter to each file in a directory.
all_docs = convert_files_to_dicts(dir_path="data/preprocessing_tutorial")
"""
## PreProcessor
The PreProcessor class is designed to help you clean text and split text into sensible units.
File splitting can have a very significant impact on the system's performance.
Have a look at the [Preprocessing](https://haystack.deepset.ai/docs/latest/preprocessingmd)
and [Optimization](https://haystack.deepset.ai/docs/latest/optimizationmd) pages on our website for more details.
"""
# This is a default usage of the PreProcessor.
# Here, it performs cleaning of consecutive whitespaces
# and splits a single large document into smaller documents.
# Each document is up to 1000 words long and document breaks cannot fall in the middle of sentences
    # Note how the single document passed into the PreProcessor gets split into 5 smaller documents
preprocessor = PreProcessor(
clean_empty_lines=True,
clean_whitespace=True,
clean_header_footer=False,
split_by="word",
split_length=1000,
split_respect_sentence_boundary=True
)
docs_default = preprocessor.process(doc_txt)
print(f"n_docs_input: 1\nn_docs_output: {len(docs_default)}")
"""
## Cleaning
    - `clean_empty_lines` will normalize 3 or more consecutive empty lines to just two empty lines
- `clean_whitespace` will remove any whitespace at the beginning or end of each line in the text
- `clean_header_footer` will remove any long header or footer texts that are repeated on each page
## Splitting
By default, the PreProcessor will respect sentence boundaries, meaning that documents will not start or end
midway through a sentence.
This will help reduce the possibility of answer phrases being split between two documents.
This feature can be turned off by setting `split_respect_sentence_boundary=False`.
"""
# Not respecting sentence boundary vs respecting sentence boundary
preprocessor_nrsb = PreProcessor(split_respect_sentence_boundary=False)
docs_nrsb = preprocessor_nrsb.process(doc_txt)
print("RESPECTING SENTENCE BOUNDARY")
end_text = docs_default[0]["content"][-50:]
print("End of document: \"..." + end_text + "\"")
print()
print("NOT RESPECTING SENTENCE BOUNDARY")
end_text_nrsb = docs_nrsb[0]["content"][-50:]
print("End of document: \"..." + end_text_nrsb + "\"")
"""
A commonly used strategy to split long documents, especially in the field of Question Answering,
is the sliding window approach. If `split_length=10` and `split_overlap=3`, your documents will look like this:
- doc1 = words[0:10]
- doc2 = words[7:17]
- doc3 = words[14:24]
- ...
You can use this strategy by following the code below.
"""
# Sliding window approach
preprocessor_sliding_window = PreProcessor(
split_overlap=3,
split_length=10,
split_respect_sentence_boundary=False
)
docs_sliding_window = preprocessor_sliding_window.process(doc_txt)
doc1 = docs_sliding_window[0]["content"][:200]
doc2 = docs_sliding_window[1]["content"][:100]
doc3 = docs_sliding_window[2]["content"][:100]
print("Document 1: \"" + doc1 + "...\"")
print("Document 2: \"" + doc2 + "...\"")
print("Document 3: \"" + doc3 + "...\"")
if __name__ == "__main__":
tutorial8_preprocessing()
# This Haystack script was made with love by deepset in Berlin, Germany
# Haystack: https://github.com/deepset-ai/haystack
# deepset: https://deepset.ai/
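# The sliding-window arithmetic described above (split_length=10, split_overlap=3 giving
# words[0:10], words[7:17], words[14:24], ...) can be sketched independently of Haystack.
# The helper below is illustrative only and is not Haystack's internal implementation.
def sliding_windows(words, split_length=10, split_overlap=3):
    # each window starts (split_length - split_overlap) words after the previous one
    step = split_length - split_overlap
    return [words[start:start + split_length] for start in range(0, len(words), step)]

chunks = sliding_windows(list(range(24)))
# chunks[0] == [0..9], chunks[1] == [7..16], chunks[2] == [14..23], ...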
| 40.633987
| 154
| 0.731703
|
d3286bf109363d2a4fa05797bcba3dd0fd23443d
| 1,445
|
py
|
Python
|
setup.py
|
wayfair-tremor/tremor-mkdocs-lexer
|
6fcaf67d4aac42eb5f65840095937fc6bfaa2808
|
[
"Apache-2.0"
] | 1
|
2021-02-06T23:16:42.000Z
|
2021-02-06T23:16:42.000Z
|
setup.py
|
wayfair-tremor/tremor-mkdocs-lexer
|
6fcaf67d4aac42eb5f65840095937fc6bfaa2808
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
wayfair-tremor/tremor-mkdocs-lexer
|
6fcaf67d4aac42eb5f65840095937fc6bfaa2808
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""Setup tremor-mkdocs-lexer."""
from setuptools import setup, find_packages
entry_points = '''
[pygments.lexers]
tremor=tremor_mkdocs_lexer:TremorLexer
trickle=tremor_mkdocs_lexer:TrickleLexer
'''
setup(
name='tremor-mkdocs-lexer',
version='0.8.0',
description='Pygments lexer package for tremor-script v0.8+ and tremor-query v0.8+',
author='Darach Ennis',
author_email='dennis[at]wayfair.com',
url='https://github.com/tremor-rs/tremor-mkdocs-lexer',
packages=find_packages(),
entry_points=entry_points,
install_requires=[
'Pygments>=2.3.1'
],
zip_safe=True,
license='Apache License',
classifiers=[
        'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Text Processing :: Filters',
'Topic :: Text Processing :: Markup :: HTML'
]
)
| 33.604651
| 88
| 0.631834
|
1379ad8d81d3c415732257ed30ac2123aeb19444
| 257
|
py
|
Python
|
output/models/nist_data/atomic/any_uri/schema_instance/nistschema_sv_iv_atomic_any_uri_min_length_3_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 1
|
2021-08-14T17:59:21.000Z
|
2021-08-14T17:59:21.000Z
|
output/models/nist_data/atomic/any_uri/schema_instance/nistschema_sv_iv_atomic_any_uri_min_length_3_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 4
|
2020-02-12T21:30:44.000Z
|
2020-04-15T20:06:46.000Z
|
output/models/nist_data/atomic/any_uri/schema_instance/nistschema_sv_iv_atomic_any_uri_min_length_3_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | null | null | null |
from output.models.nist_data.atomic.any_uri.schema_instance.nistschema_sv_iv_atomic_any_uri_min_length_3_xsd.nistschema_sv_iv_atomic_any_uri_min_length_3 import NistschemaSvIvAtomicAnyUriMinLength3
__all__ = [
"NistschemaSvIvAtomicAnyUriMinLength3",
]
| 42.833333
| 197
| 0.894942
|
5040c221b5d40e6ad3df7af83c4bb29f3c334626
| 10,171
|
py
|
Python
|
models/mnist_lenet_fs.py
|
IsaacChanghau/DeepNaryECOC
|
08981309e21cf4d7519a36b034d35741c7fe0bf5
|
[
"MIT"
] | 3
|
2020-12-14T02:21:02.000Z
|
2021-03-17T14:23:04.000Z
|
models/mnist_lenet_fs.py
|
IsaacChanghau/DeepNaryECOC
|
08981309e21cf4d7519a36b034d35741c7fe0bf5
|
[
"MIT"
] | null | null | null |
models/mnist_lenet_fs.py
|
IsaacChanghau/DeepNaryECOC
|
08981309e21cf4d7519a36b034d35741c7fe0bf5
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
import tensorflow as tf
from utils.logger import get_logger, Progbar
from utils.prepro_text import remap_labels, dense_to_one_hot
from utils.data_funcs import compute_ensemble_accuracy
def mnist_lenet(inputs, training=False, reuse=tf.AUTO_REUSE, name="lenet"): # used for mnist
with tf.variable_scope(name, reuse=reuse, dtype=tf.float32):
x = tf.reshape(inputs, shape=[-1, 28, 28, 1])
# first convolutional layer
conv1 = tf.layers.conv2d(x, filters=32, kernel_size=(3, 3), padding="same", activation=tf.nn.relu,
use_bias=True, kernel_initializer=tf.glorot_uniform_initializer(), name="conv1",
reuse=tf.AUTO_REUSE, bias_initializer=tf.constant_initializer(0.05))
pool1 = tf.layers.max_pooling2d(conv1, pool_size=(2, 2), strides=(2, 2), padding="same", name="max_pool1")
drop1 = tf.layers.dropout(pool1, rate=0.5, training=training, name="dropout1")
# second convolutional layer
conv2 = tf.layers.conv2d(drop1, filters=64, kernel_size=(3, 3), padding="same", activation=tf.nn.relu,
use_bias=True, kernel_initializer=tf.glorot_uniform_initializer(), name="conv2",
reuse=tf.AUTO_REUSE, bias_initializer=tf.constant_initializer(0.05))
pool2 = tf.layers.max_pooling2d(conv2, pool_size=(2, 2), strides=(2, 2), padding="same", name="max_pool2")
drop2 = tf.layers.dropout(pool2, rate=0.5, training=training, name="dropout2")
# third convolutional layer
conv3 = tf.layers.conv2d(drop2, filters=128, kernel_size=(3, 3), padding="same", activation=tf.nn.relu,
use_bias=True, kernel_initializer=tf.glorot_uniform_initializer(), name="conv3",
reuse=tf.AUTO_REUSE, bias_initializer=tf.constant_initializer(0.05))
pool3 = tf.layers.max_pooling2d(conv3, pool_size=(2, 2), strides=(2, 2), padding="same", name="max_pool3")
drop3 = tf.layers.dropout(pool3, rate=0.5, training=training, name="dropout3")
# flatten
features = tf.layers.flatten(drop3, name="flatten")
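        # with 28x28 inputs and three stride-2 poolings (28 -> 14 -> 7 -> 4), this
        # flatten produces a feature vector of 4 * 4 * 128 = 2048 values per image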
return features
def fc_layer(features, labels, label_size, training=False, reuse=tf.AUTO_REUSE, name="fc_layer"):
with tf.variable_scope(name, reuse=reuse, dtype=tf.float32):
# first dense layer
dense1 = tf.layers.dense(features, units=512, activation=tf.nn.relu, use_bias=True, reuse=tf.AUTO_REUSE,
name="dense1")
dense1_drop = tf.layers.dropout(dense1, rate=0.5, training=training, name="dense_dropout")
# second dense layer
logits = tf.layers.dense(dense1_drop, units=label_size, activation=None, use_bias=True, reuse=tf.AUTO_REUSE,
name="logits")
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=labels))
pred_labels = tf.argmax(logits, 1)
return pred_labels, cost
class MnistLeNet:
def __init__(self, num_class, num_classifier, nary_ecoc, ckpt_path):
self.ckpt_path, self.num_class = ckpt_path, num_class
self.num_classifier = num_classifier
self.nary_ecoc = nary_ecoc
if not os.path.exists(self.ckpt_path):
os.makedirs(self.ckpt_path)
self.logger = get_logger(self.ckpt_path + "log.txt")
self.batch_size, self.epochs, self.lr, self.lr_decay = 200, 10, 0.001, 0.9
with tf.Graph().as_default():
self._build_model()
self.logger.info("total params: {}".format(np.sum([np.prod(v.get_shape().as_list()) for v in
tf.trainable_variables()])))
self._init_session()
def _init_session(self):
sess_config = tf.ConfigProto()
sess_config.gpu_options.allow_growth = True
self.sess = tf.Session(config=sess_config)
self.saver = tf.train.Saver(max_to_keep=1)
self.sess.run(tf.global_variables_initializer())
def save_session(self, steps):
self.saver.save(self.sess, self.ckpt_path + "mnist_lenet", global_step=steps)
def restore_last_session(self):
ckpt = tf.train.get_checkpoint_state(self.ckpt_path)
if ckpt and ckpt.model_checkpoint_path:
self.saver.restore(self.sess, ckpt.model_checkpoint_path)
else:
raise ValueError("No pre-trained model at {}".format(self.ckpt_path))
def close_session(self):
self.sess.close()
def _get_feed_dict(self, images, labels, training=False):
feed_dict = {self.inputs: images}
for i in range(self.num_classifier):
feed_dict[self.labels[i]] = labels[i]
feed_dict[self.training] = training
return feed_dict
def _build_model(self):
# add placeholders
self.inputs = tf.placeholder(tf.float32, shape=[None, 784], name="input_images")
self.labels = []
for i in range(self.num_classifier):
self.labels.append(tf.placeholder(tf.float32, shape=(None, self.num_class), name="labels_%d" % i))
self.training = tf.placeholder(tf.bool, shape=[], name="is_training")
# build model
x = tf.reshape(self.inputs, shape=[-1, 28, 28, 1])
features = mnist_lenet(x, self.training)
self.pred_labels = []
self.cost = []
for i in range(self.num_classifier):
pred_labels, loss = fc_layer(features, self.labels[i], self.num_class, self.training, name="fc_%d" % i)
pred_labels = tf.expand_dims(pred_labels, axis=-1)
self.pred_labels.append(pred_labels)
self.cost.append(loss)
self.pred_labels = tf.concat(self.pred_labels, axis=-1)
self.cost = tf.add_n(self.cost)
# build optimizer and training operation
optimizer = tf.train.RMSPropOptimizer(self.lr, decay=self.lr_decay)
self.train_op = optimizer.minimize(self.cost)
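        # design note: each of the `num_classifier` heads learns one column of the N-ary
        # ECOC matrix (labels are remapped per column via `remap_labels` in train()/test()),
        # and `compute_ensemble_accuracy` decodes the concatenated per-head predictions
        # back to the original labels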
def train(self, train_dataset, test_dataset):
global_test_acc = 0.0
global_step = 0
test_imgs, test_labels = test_dataset.images, test_dataset.labels
self.logger.info("start training...")
for epoch in range(1, self.epochs + 1):
self.logger.info("Epoch {}/{}:".format(epoch, self.epochs))
num_batches = train_dataset.num_examples // self.batch_size
prog = Progbar(target=num_batches)
prog.update(0, [("Global Step", 0), ("Train Loss", 0.0), ("Train Acc", 0.0), ("Test Loss", 0.0),
("Test Acc", 0.0)])
for i in range(num_batches):
global_step += 1
train_imgs, train_labels = train_dataset.next_batch(self.batch_size)
b_labels = []
for j in range(self.num_classifier):
ecoc_array = self.nary_ecoc[:, j]
b_lbs = remap_labels(train_labels.copy(), ecoc_array)
b_lbs = dense_to_one_hot(b_lbs, self.num_class)
b_labels.append(b_lbs)
feed_dict = self._get_feed_dict(train_imgs, b_labels, True)
_, pred_labels, loss = self.sess.run([self.train_op, self.pred_labels, self.cost], feed_dict=feed_dict)
acc = compute_ensemble_accuracy(pred_labels, self.nary_ecoc, train_labels)
if global_step % 100 == 0:
y_labels = []
for j in range(self.num_classifier):
ecoc_array = self.nary_ecoc[:, j]
b_lbs = remap_labels(test_labels.copy(), ecoc_array)
b_lbs = dense_to_one_hot(b_lbs, self.num_class)
y_labels.append(b_lbs)
feed_dict = self._get_feed_dict(test_imgs, y_labels)
test_pred_labels, test_loss = self.sess.run([self.pred_labels, self.cost], feed_dict=feed_dict)
test_acc = compute_ensemble_accuracy(test_pred_labels, self.nary_ecoc, test_labels)
prog.update(i + 1, [("Global Step", int(global_step)), ("Train Loss", loss), ("Train Acc", acc),
("Test Loss", test_loss), ("Test Acc", test_acc)])
if test_acc > global_test_acc:
global_test_acc = test_acc
self.save_session(global_step)
else:
prog.update(i + 1, [("Global Step", int(global_step)), ("Train Loss", loss), ("Train Acc", acc)])
y_labels = []
for j in range(self.num_classifier):
ecoc_array = self.nary_ecoc[:, j]
b_lbs = remap_labels(test_labels.copy(), ecoc_array)
b_lbs = dense_to_one_hot(b_lbs, self.num_class)
y_labels.append(b_lbs)
feed_dict = self._get_feed_dict(test_imgs, y_labels)
test_pred_labels, test_loss = self.sess.run([self.pred_labels, self.cost], feed_dict=feed_dict)
test_acc = compute_ensemble_accuracy(test_pred_labels, self.nary_ecoc, test_labels)
self.logger.info("Epoch: {}, Global Step: {}, Test Loss: {}, Test Accuracy: {}".format(
epoch, global_step, test_loss, test_acc))
def test(self, test_dataset, print_info=True):
self.restore_last_session()
test_imgs, test_labels = test_dataset.images, test_dataset.labels
y_labels = []
for j in range(self.num_classifier):
ecoc_array = self.nary_ecoc[:, j]
b_lbs = remap_labels(test_labels.copy(), ecoc_array)
b_lbs = dense_to_one_hot(b_lbs, self.num_class)
y_labels.append(b_lbs)
feed_dict = self._get_feed_dict(test_imgs, y_labels)
pred_labels, test_loss = self.sess.run([self.pred_labels, self.cost], feed_dict=feed_dict)
test_acc = compute_ensemble_accuracy(pred_labels, self.nary_ecoc, test_dataset.labels)
if print_info:
self.logger.info(" -- Test Loss: {}, Test Accuracy: {}".format(test_loss, test_acc))
return pred_labels
| 55.884615
| 119
| 0.618818
|
6a2d74f7cb79c48ea083ae55b989865d95e197eb
| 55
|
py
|
Python
|
lotes/urls.py
|
20191-ads/glotes_serve
|
ed48a1b69f051e9f4f017aeef879a782e28be346
|
[
"MIT"
] | null | null | null |
lotes/urls.py
|
20191-ads/glotes_serve
|
ed48a1b69f051e9f4f017aeef879a782e28be346
|
[
"MIT"
] | null | null | null |
lotes/urls.py
|
20191-ads/glotes_serve
|
ed48a1b69f051e9f4f017aeef879a782e28be346
|
[
"MIT"
] | null | null | null |
from django.urls import path
urlpatterns = [
]
| 6.875
| 28
| 0.636364
|
aa811f4e6f71a05f3c54c626af3b1711f61ebc40
| 1,134
|
py
|
Python
|
Active/CodeForces/ContestFetcher.py
|
pawan-nirpal-031/Algorithms-And-ProblemSolving
|
24ce9649345dabe7275920f6912e410efc2c8e84
|
[
"Apache-2.0"
] | 2
|
2021-03-05T08:40:01.000Z
|
2021-04-25T13:58:42.000Z
|
Active/CodeForces/ContestFetcher.py
|
pawan-nirpal-031/Algorithms-And-ProblemSolving
|
24ce9649345dabe7275920f6912e410efc2c8e84
|
[
"Apache-2.0"
] | null | null | null |
Active/CodeForces/ContestFetcher.py
|
pawan-nirpal-031/Algorithms-And-ProblemSolving
|
24ce9649345dabe7275920f6912e410efc2c8e84
|
[
"Apache-2.0"
] | null | null | null |
import requests
url = "http://codeforces.com/api"
options = '/contest.list'
link = 'http://codeforces.com/contest/'
data = requests.get(url+options)
data = data.json()
Div3 = 'Div. 3'
Div2 = 'Div. 2'
Educational = 'Educational'
Contests = {Div3:"Divison_3.txt",Div2:"Divison_2.txt",Educational:"Educational_Round.txt"}
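# maps each contest-name filter to the text file that will hold its contest links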
def CodeforcesContestFetcher(contest_name,contest_links,data):
if(data['status']=='OK'):
data = data['result']
for cont in data:
if (contest_name in cont['name']):
contest_links.append(int(cont['id']))
contest_links.sort()
links_file = open(Contests[contest_name],"w+")
num = 1
if(len(contest_links)==0):
print("Unable to fetch links, aborting... ")
return
for cont in contest_links:
links_file.write(str(num)+" : "+link+str(cont)+'\n')
num+=1
links_file.close()
    else:
        print("Unexpected error occurred, try after a few moments")
Div3Contest = []
Div2Contest = []
EducationalContest = []
CodeforcesContestFetcher(Div2,Div3Contest,data)
| 22.235294
| 90
| 0.614638
|
117139075a55fb54b4e4798bfec70734e858c36e
| 36
|
py
|
Python
|
datasets/__init__.py
|
tonysy/clockwork-fcn
|
19a413e705df091344d1136df1389646be76001a
|
[
"BSD-4-Clause-UC"
] | 162
|
2016-08-24T05:46:45.000Z
|
2022-03-14T08:16:42.000Z
|
datasets/__init__.py
|
tonysy/clockwork-fcn
|
19a413e705df091344d1136df1389646be76001a
|
[
"BSD-4-Clause-UC"
] | 11
|
2016-11-10T03:42:58.000Z
|
2019-12-10T19:59:56.000Z
|
datasets/__init__.py
|
tonysy/clockwork-fcn
|
19a413e705df091344d1136df1389646be76001a
|
[
"BSD-4-Clause-UC"
] | 61
|
2016-08-24T05:46:46.000Z
|
2021-01-09T19:47:42.000Z
|
# Datasets for clockwork train/eval
| 18
| 35
| 0.805556
|
62e98a7dee3461912c6cc7c7b9f82bb96f830013
| 50,191
|
py
|
Python
|
angr/state_plugins/symbolic_memory.py
|
AkshaySG14/582proj-angr
|
8c17fcd31332ec115ed01666a6c8229620d1faa1
|
[
"BSD-2-Clause"
] | 1
|
2020-05-09T13:11:41.000Z
|
2020-05-09T13:11:41.000Z
|
angr/state_plugins/symbolic_memory.py
|
AkshaySG14/582proj-angr
|
8c17fcd31332ec115ed01666a6c8229620d1faa1
|
[
"BSD-2-Clause"
] | null | null | null |
angr/state_plugins/symbolic_memory.py
|
AkshaySG14/582proj-angr
|
8c17fcd31332ec115ed01666a6c8229620d1faa1
|
[
"BSD-2-Clause"
] | 1
|
2020-05-09T13:15:26.000Z
|
2020-05-09T13:15:26.000Z
|
from collections import defaultdict
import logging
import itertools
l = logging.getLogger(name=__name__)
import claripy
from ..storage.memory import SimMemory, DUMMY_SYMBOLIC_READ_VALUE
from ..storage.paged_memory import SimPagedMemory
from ..storage.memory_object import SimMemoryObject
from ..sim_state_options import SimStateOptions
from ..misc.ux import once
DEFAULT_MAX_SEARCH = 8
class MultiwriteAnnotation(claripy.Annotation):
@property
def eliminatable(self):
return False
@property
def relocateable(self):
return True
def _multiwrite_filter(mem, ast): #pylint:disable=unused-argument
# this is a huge hack, but so is the whole multiwrite crap
return any(isinstance(a, MultiwriteAnnotation) for a in ast._uneliminatable_annotations)
class SimSymbolicMemory(SimMemory): #pylint:disable=abstract-method
_CONCRETIZATION_STRATEGIES = [ 'symbolic', 'symbolic_approx', 'any', 'any_approx', 'max', 'max_approx',
'symbolic_nonzero', 'symbolic_nonzero_approx', 'norepeats' ]
_SAFE_CONCRETIZATION_STRATEGIES = [ 'symbolic', 'symbolic_approx' ]
def __init__(
self, memory_backer=None, permissions_backer=None, mem=None, memory_id="mem",
endness=None, abstract_backer=False, check_permissions=None,
read_strategies=None, write_strategies=None, stack_region_map=None, generic_region_map=None
):
SimMemory.__init__(self,
endness=endness,
abstract_backer=abstract_backer,
stack_region_map=stack_region_map,
generic_region_map=generic_region_map
)
self.id = memory_id
if check_permissions is None:
check_permissions = self.category == 'mem'
self.mem = SimPagedMemory(
memory_backer=memory_backer,
permissions_backer=permissions_backer,
check_permissions=check_permissions
) if mem is None else mem
# set up the strategies
self.read_strategies = read_strategies
self.write_strategies = write_strategies
#
# Lifecycle management
#
@SimMemory.memo
def copy(self, _):
"""
Return a copy of the SimMemory.
"""
#l.debug("Copying %d bytes of memory with id %s." % (len(self.mem), self.id))
c = type(self)(
mem=self.mem.branch(),
memory_id=self.id,
endness=self.endness,
abstract_backer=self._abstract_backer,
read_strategies=[ s.copy() for s in self.read_strategies ],
write_strategies=[ s.copy() for s in self.write_strategies ],
stack_region_map=self._stack_region_map,
generic_region_map=self._generic_region_map
)
return c
#
# Merging stuff
#
def _changes_to_merge(self, others):
changed_bytes = set()
for o in others: # pylint:disable=redefined-outer-name
changed_bytes |= self.changed_bytes(o)
return changed_bytes
def merge(self, others, merge_conditions, common_ancestor=None): # pylint: disable=unused-argument
"""
Merge this SimMemory with the other SimMemory
"""
changed_bytes = self._changes_to_merge(others)
l.info("Merging %d bytes", len(changed_bytes))
l.info("... %s has changed bytes %s", self.id, changed_bytes)
self.read_strategies = self._merge_strategies(self.read_strategies, *[
o.read_strategies for o in others
])
self.write_strategies = self._merge_strategies(self.write_strategies, *[
o.write_strategies for o in others
])
merged_bytes = self._merge(others, changed_bytes, merge_conditions=merge_conditions)
return len(merged_bytes) > 0
@staticmethod
def _merge_strategies(*strategy_lists):
if len(set(len(sl) for sl in strategy_lists)) != 1:
raise SimMergeError("unable to merge memories with amounts of strategies")
merged_strategies = [ ]
for strategies in zip(*strategy_lists):
if len(set(s.__class__ for s in strategies)) != 1:
raise SimMergeError("unable to merge memories with different types of strategies")
unique = list(set(strategies))
if len(unique) > 1:
unique[0].merge(unique[1:])
merged_strategies.append(unique[0])
return merged_strategies
def widen(self, others):
changed_bytes = self._changes_to_merge(others)
l.info("Memory %s widening bytes %s", self.id, changed_bytes)
self._merge(others, changed_bytes, is_widening=True)
return len(changed_bytes) > 0
def _merge(self, others, changed_bytes, merge_conditions=None, is_widening=False):
all_memories = [self] + others
if merge_conditions is None:
merge_conditions = [ None ] * len(all_memories)
merged_to = None
merged_objects = set()
merged_bytes = set()
for b in sorted(changed_bytes):
if merged_to is not None and not b >= merged_to:
l.info("merged_to = %d ... already merged byte 0x%x", merged_to, b)
continue
l.debug("... on byte 0x%x", b)
memory_objects = []
unconstrained_in = []
# first get a list of all memory objects at that location, and
# all memories that don't have those bytes
for sm, fv in zip(all_memories, merge_conditions):
if b in sm.mem:
l.info("... present in %s", fv)
memory_objects.append((sm.mem[b], fv))
else:
l.info("... not present in %s", fv)
unconstrained_in.append((sm, fv))
mos = set(mo for mo,_ in memory_objects)
mo_bases = set(mo.base for mo, _ in memory_objects)
mo_lengths = set(mo.length for mo, _ in memory_objects)
if not unconstrained_in and not (mos - merged_objects):
continue
# first, optimize the case where we are dealing with the same-sized memory objects
if len(mo_bases) == 1 and len(mo_lengths) == 1 and not unconstrained_in:
our_mo = self.mem[b]
to_merge = [(mo.object, fv) for mo, fv in memory_objects]
# Update `merged_to`
mo_base = list(mo_bases)[0]
merged_to = mo_base + list(mo_lengths)[0]
merged_val = self._merge_values(
to_merge, memory_objects[0][0].length, is_widening=is_widening
)
if options.ABSTRACT_MEMORY in self.state.options:
# merge check for abstract memory
if not to_merge[0][0].uninitialized and self.state.solver.backends.vsa.identical(merged_val, to_merge[0][0]):
continue
# do the replacement
new_object = self.mem.replace_memory_object(our_mo, merged_val)
merged_objects.add(new_object)
merged_objects.update(mos)
merged_bytes.add(b)
else:
# get the size that we can merge easily. This is the minimum of
# the size of all memory objects and unallocated spaces.
min_size = min([mo.length - (b - mo.base) for mo, _ in memory_objects])
for um, _ in unconstrained_in:
for i in range(0, min_size):
if b + i in um:
min_size = i
break
merged_to = b + min_size
l.info("... determined minimum size of %d", min_size)
# Now, we have the minimum size. We'll extract/create expressions of that
# size and merge them
extracted = [(mo.bytes_at(b, min_size), fv) for mo, fv in memory_objects] if min_size != 0 else []
created = [
(self.get_unconstrained_bytes("merge_uc_%s_%x" % (uc.id, b), min_size * self.state.arch.byte_width), fv) for
uc, fv in unconstrained_in
]
to_merge = extracted + created
merged_val = self._merge_values(to_merge, min_size, is_widening=is_widening)
if options.ABSTRACT_MEMORY in self.state.options:
# merge check for abstract memory
if (not unconstrained_in or not unconstrained_in[0][0] is self) \
and self.state.solver.backends.vsa.identical(merged_val, to_merge[0][0]):
continue
self.store(b, merged_val, endness='Iend_BE', inspect=False) # do not convert endianness again
merged_bytes.add(b)
return merged_bytes
def set_state(self, state):
super(SimSymbolicMemory, self).set_state(state)
self.mem.state = state._get_weakref()
if self.state is not None:
if self.read_strategies is None:
self._create_default_read_strategies()
if self.write_strategies is None:
self._create_default_write_strategies()
def _create_default_read_strategies(self):
self.read_strategies = [ ]
if options.APPROXIMATE_MEMORY_INDICES in self.state.options:
# first, we try to resolve the read address by approximation
self.read_strategies.append(
concretization_strategies.SimConcretizationStrategyRange(1024, exact=False),
)
# then, we try symbolic reads, with a maximum width of a kilobyte
self.read_strategies.append(
concretization_strategies.SimConcretizationStrategyRange(1024)
)
if options.CONSERVATIVE_READ_STRATEGY not in self.state.options:
# finally, we concretize to any one solution
self.read_strategies.append(
concretization_strategies.SimConcretizationStrategyAny(),
)
def _create_default_write_strategies(self):
self.write_strategies = [ ]
if options.APPROXIMATE_MEMORY_INDICES in self.state.options:
if options.SYMBOLIC_WRITE_ADDRESSES not in self.state.options:
# we try to resolve a unique solution by approximation
self.write_strategies.append(
concretization_strategies.SimConcretizationStrategySingle(exact=False),
)
else:
# we try a solution range by approximation
self.write_strategies.append(
concretization_strategies.SimConcretizationStrategyRange(128, exact=False)
)
if options.SYMBOLIC_WRITE_ADDRESSES in self.state.options:
# we try to find a range of values
self.write_strategies.append(
concretization_strategies.SimConcretizationStrategyRange(128)
)
else:
# we try to find a range of values, but only for ASTs annotated with the multiwrite annotation
self.write_strategies.append(concretization_strategies.SimConcretizationStrategyRange(
128,
filter=_multiwrite_filter
))
# finally, we just grab the maximum solution
if options.CONSERVATIVE_WRITE_STRATEGY not in self.state.options:
self.write_strategies.append(
concretization_strategies.SimConcretizationStrategyMax()
)
#
# Symbolicizing!
#
def make_symbolic(self, name, addr, length=None):
"""
        Replaces `length` bytes starting at `addr` with a symbolic variable named `name`. Adds a constraint setting
        that symbolic variable equal to the value previously at `addr`, and returns the variable.
"""
l.debug("making %s bytes symbolic", length)
if isinstance(addr, str):
addr, length = self.state.arch.registers[addr]
else:
if length is None:
raise Exception("Unspecified length!")
r = self.load(addr, length)
v = self.get_unconstrained_bytes(name, r.size())
self.store(addr, v)
self.state.add_constraints(r == v)
l.debug("... eq constraints: %s", r == v)
return v
#
# Address concretization
#
def _resolve_size_range(self, size):
if not self.state.solver.symbolic(size):
i = self.state.solver.eval(size)
if i > self._maximum_concrete_size:
raise SimMemoryLimitError("Concrete size %d outside of allowable limits" % i)
return i, i
if options.APPROXIMATE_MEMORY_SIZES in self.state.options:
max_size_approx = self.state.solver.max_int(size, exact=True)
min_size_approx = self.state.solver.min_int(size, exact=True)
if max_size_approx < self._maximum_symbolic_size_approx:
return min_size_approx, max_size_approx
max_size = self.state.solver.max_int(size)
min_size = self.state.solver.min_int(size)
if min_size > self._maximum_symbolic_size:
self.state.history.add_event('memory_limit', message="Symbolic size %d outside of allowable limits" % min_size, size=size)
if options.BEST_EFFORT_MEMORY_STORING not in self.state.options:
raise SimMemoryLimitError("Symbolic size %d outside of allowable limits" % min_size)
else:
min_size = self._maximum_symbolic_size
return min_size, min(max_size, self._maximum_symbolic_size)
#
# Concretization strategies
#
def _apply_concretization_strategies(self, addr, strategies, action):
"""
Applies concretization strategies on the address until one of them succeeds.
"""
# we try all the strategies in order
for s in strategies:
# first, we trigger the SimInspect breakpoint and give it a chance to intervene
e = addr
self.state._inspect(
'address_concretization', BP_BEFORE, address_concretization_strategy=s,
address_concretization_action=action, address_concretization_memory=self,
address_concretization_expr=e, address_concretization_add_constraints=True
)
s = self.state._inspect_getattr('address_concretization_strategy', s)
e = self.state._inspect_getattr('address_concretization_expr', addr)
# if the breakpoint None'd out the strategy, we skip it
if s is None:
continue
# let's try to apply it!
try:
a = s.concretize(self, e)
except SimUnsatError:
a = None
# trigger the AFTER breakpoint and give it a chance to intervene
self.state._inspect(
'address_concretization', BP_AFTER,
address_concretization_result=a
)
a = self.state._inspect_getattr('address_concretization_result', a)
# return the result if not None!
if a is not None:
return a
# well, we tried
raise SimMemoryAddressError(
"Unable to concretize address for %s with the provided strategies." % action
)
def concretize_write_addr(self, addr, strategies=None):
"""
Concretizes an address meant for writing.
:param addr: An expression for the address.
:param strategies: A list of concretization strategies (to override the default).
:returns: A list of concrete addresses.
"""
if isinstance(addr, int):
return [ addr ]
elif not self.state.solver.symbolic(addr):
return [ self.state.solver.eval(addr) ]
strategies = self.write_strategies if strategies is None else strategies
return self._apply_concretization_strategies(addr, strategies, 'store')
def concretize_read_addr(self, addr, strategies=None):
"""
Concretizes an address meant for reading.
:param addr: An expression for the address.
:param strategies: A list of concretization strategies (to override the default).
:returns: A list of concrete addresses.
"""
if isinstance(addr, int):
return [ addr ]
elif not self.state.solver.symbolic(addr):
return [ self.state.solver.eval(addr) ]
strategies = self.read_strategies if strategies is None else strategies
return self._apply_concretization_strategies(addr, strategies, 'load')
def normalize_address(self, addr, is_write=False):
return self.concretize_read_addr(addr)
#
# Memory reading
#
def _fill_missing(self, addr, num_bytes, inspect=True, events=True):
if self.category == 'reg':
name = "reg_%s" % (self.state.arch.translate_register_name(addr))
else:
name = "%s_%x" % (self.id, addr)
all_missing = [
self.get_unconstrained_bytes(
name,
min(self.mem._page_size, num_bytes)*self.state.arch.byte_width,
source=i,
inspect=inspect,
events=events,
key=self.variable_key_prefix + (addr,),
eternal=False) # :(
for i in range(addr, addr+num_bytes, self.mem._page_size)
]
if all_missing:
is_mem = self.category == 'mem' and \
options.ZERO_FILL_UNCONSTRAINED_MEMORY not in self.state.options and \
options.SYMBOL_FILL_UNCONSTRAINED_MEMORY not in self.state.options
is_reg = self.category == 'reg' and \
options.ZERO_FILL_UNCONSTRAINED_REGISTERS not in self.state.options and \
options.SYMBOL_FILL_UNCONSTRAINED_REGISTERS not in self.state.options
if is_mem or is_reg:
if once('mem_fill_warning'):
l.warning("The program is accessing memory or registers with an unspecified value. "
"This could indicate unwanted behavior.")
l.warning("angr will cope with this by generating an unconstrained symbolic variable and continuing. "
"You can resolve this by:")
l.warning("1) setting a value to the initial state")
l.warning("2) adding the state option ZERO_FILL_UNCONSTRAINED_{MEMORY,REGISTERS}, "
"to make unknown regions hold null")
l.warning("3) adding the state option SYMBOL_FILL_UNCONSTRAINED_{MEMORY_REGISTERS}, "
"to suppress these messages.")
if is_mem:
refplace_str = "unknown"
if not self.state._ip.symbolic:
refplace_int = self.state.solver.eval(self.state._ip)
refplace_int_s = "%#x" % refplace_int
if self.state.project:
refplace_str = self.state.project.loader.describe_addr(refplace_int)
else:
refplace_int_s = repr(self.state._ip)
l.warning("Filling memory at %#x with %d unconstrained bytes referenced from %s (%s)", addr, num_bytes, refplace_int_s, refplace_str)
else:
if addr == self.state.arch.ip_offset:
refplace_int_s = "0"
refplace_str = "symbolic"
else:
refplace_str = "unknown"
if not self.state._ip.symbolic:
refplace_int = self.state.solver.eval(self.state._ip)
refplace_int_s = "%#x" % refplace_int
if self.state.project:
refplace_str = self.state.project.loader.describe_addr(refplace_int)
else:
refplace_int_s = repr(self.state._ip)
reg_str = self.state.arch.translate_register_name(addr, size=num_bytes)
l.warning("Filling register %s with %d unconstrained bytes referenced from %s (%s)", reg_str, num_bytes, refplace_int_s, refplace_str)
# this is an optimization to ensure most operations in the future will deal with leaf ASTs (instead of reversed
# ASTs)
if self.category == 'reg' and self.state.arch.register_endness == 'Iend_LE':
all_missing = [ a.reversed for a in all_missing ]
elif self.category == 'file':
# file data has to be big-endian
pass
elif self.category != 'reg' and self.state.arch.memory_endness == 'Iend_LE':
# endianness of memory data
all_missing = [ a.reversed for a in all_missing ]
b = self.state.solver.Concat(*all_missing) if len(all_missing) > 1 else all_missing[0]
if events:
self.state.history.add_event('uninitialized', memory_id=self.id, addr=addr, size=num_bytes)
default_mo = SimMemoryObject(b, addr, byte_width=self.state.arch.byte_width)
self.state.scratch.push_priv(True)
self.mem.store_memory_object(default_mo, overwrite=False)
self.state.scratch.pop_priv()
return default_mo
def _read_from(self, addr, num_bytes, inspect=True, events=True, ret_on_segv=False):
return self.mem.load(addr, num_bytes)
def _load(self, dst, size, condition=None, fallback=None, inspect=True, events=True, ret_on_segv=False):
if self.state.solver.symbolic(size):
l.warning("Concretizing symbolic length. Much sad; think about implementing.")
# for now, we always load the maximum size
_, max_size = self._resolve_size_range(size)
if options.ABSTRACT_MEMORY not in self.state.options and self.state.solver.symbolic(size):
self.state.add_constraints(size == max_size, action=True)
if max_size == 0:
self.state.history.add_event('memory_limit', message="0-length read")
size = max_size
if self.state.solver.symbolic(dst) and options.AVOID_MULTIVALUED_READS in self.state.options:
return [ ], self.get_unconstrained_bytes("symbolic_read_unconstrained", size*self.state.arch.byte_width), [ ]
if type(dst) is int:
dst = claripy.BVV(dst, 64)
dst = claripy.SignExt(64 - len(dst), dst)
read_value = self.mem.load(dst, size)
return [dst], read_value, []
def _find(self, start, what, max_search=None, max_symbolic_bytes=None, default=None, step=1,
disable_actions=False, inspect=True, chunk_size=None):
if max_search is None:
max_search = DEFAULT_MAX_SEARCH
if isinstance(start, int):
start = self.state.solver.BVV(start, self.state.arch.bits)
constraints = [ ]
remaining_symbolic = max_symbolic_bytes
seek_size = len(what)//self.state.arch.byte_width
symbolic_what = self.state.solver.symbolic(what)
l.debug("Search for %d bytes in a max of %d...", seek_size, max_search)
chunk_start = 0
if chunk_size is None:
chunk_size = max(0x100, seek_size + 0x80)
chunk = self.load(start, chunk_size, endness="Iend_BE",
disable_actions=disable_actions, inspect=inspect)
cases = [ ]
match_indices = [ ]
offsets_matched = [ ] # Only used in static mode
byte_width = self.state.arch.byte_width
no_singlevalue_opt = options.SYMBOLIC_MEMORY_NO_SINGLEVALUE_OPTIMIZATIONS in self.state.options
cond_prefix = [ ]
if options.MEMORY_FIND_STRICT_SIZE_LIMIT in self.state.options:
cond_falseness_test = self.state.solver.is_false
else:
cond_falseness_test = lambda cond: cond.is_false()
for i in itertools.count(step=step):
l.debug("... checking offset %d", i)
if i > max_search - seek_size:
l.debug("... hit max size")
break
if remaining_symbolic is not None and remaining_symbolic == 0:
l.debug("... hit max symbolic")
break
if i - chunk_start > chunk_size - seek_size:
l.debug("loading new chunk")
chunk_start += chunk_size - seek_size + 1
chunk = self.load(start+chunk_start, chunk_size,
endness="Iend_BE", ret_on_segv=True,
disable_actions=disable_actions, inspect=inspect)
chunk_off = i-chunk_start
b = chunk[chunk_size*byte_width - chunk_off*byte_width - 1 : chunk_size*byte_width - chunk_off*byte_width - seek_size*byte_width]
condition = b == what
if not cond_falseness_test(condition):
if no_singlevalue_opt and cond_prefix:
condition = claripy.And(*(cond_prefix + [condition]))
cases.append([condition, claripy.BVV(i, len(start))])
match_indices.append(i)
if b.symbolic and no_singlevalue_opt:
# in tracing mode, we need to make sure that all previous bytes are not equal to what
cond_prefix.append(b != what)
if self.state.mode == 'static':
si = b._model_vsa
what_si = what._model_vsa
if isinstance(si, claripy.vsa.StridedInterval):
if not si.intersection(what_si).is_empty:
offsets_matched.append(start + i)
if si.identical(what_si):
break
if si.cardinality != 1:
if remaining_symbolic is not None:
remaining_symbolic -= 1
else:
# Comparison with other types (like IfProxy or ValueSet) is not supported
if remaining_symbolic is not None:
remaining_symbolic -= 1
else:
# other modes (e.g. symbolic mode)
if not b.symbolic and not symbolic_what and self.state.solver.eval(b) == self.state.solver.eval(what):
l.debug("... found concrete")
break
else:
if b.symbolic and remaining_symbolic is not None:
remaining_symbolic -= 1
if self.state.mode == 'static':
r = self.state.solver.ESI(self.state.arch.bits)
for off in offsets_matched:
r = r.union(off)
constraints = [ ]
return r, constraints, match_indices
else:
if default is None:
l.debug("... no default specified")
default = 0
constraints += [ self.state.solver.Or(*[ c for c,_ in cases]) ]
#l.debug("running ite_cases %s, %s", cases, default)
r = self.state.solver.ite_cases(cases, default - start) + start
return r, constraints, match_indices
def __contains__(self, dst):
return dst in self.mem
def was_written_to(self, dst):
if isinstance(dst, int):
addr = dst
elif self.state.solver.symbolic(dst):
l.warning("Currently unable to do SimMemory.was_written_to on symbolic variables.")
return False
else:
addr = self.state.solver.eval(dst)
return self.mem.contains_no_backer(addr)
#
# Writes
#
def _store(self, req):
l.debug("Doing a store...")
req._adjust_condition(self.state)
max_bytes = req.data.length//self.state.arch.byte_width
if req.size is None:
req.size = max_bytes
if self.state.solver.symbolic(req.size):
if options.AVOID_MULTIVALUED_WRITES in self.state.options:
return req
if options.CONCRETIZE_SYMBOLIC_WRITE_SIZES in self.state.options:
new_size = self.state.solver.eval(req.size)
self.state.add_constraints(req.size == new_size)
req.size = new_size
if self.state.solver.symbolic(req.addr) and options.AVOID_MULTIVALUED_WRITES in self.state.options:
return req
if not self.state.solver.symbolic(req.size) and self.state.solver.eval(req.size) > req.data.length//self.state.arch.byte_width:
raise SimMemoryError("Not enough data for requested storage size (size: {}, data: {})".format(req.size, req.data))
if self.state.solver.symbolic(req.size):
self.state.add_constraints(self.state.solver.ULE(req.size, max_bytes))
#
# store it!!!
#
req.size = self.state.solver.eval(req.size)
req.stored_values = []
condition = req.condition
# if (self.category == 'mem' and options.SIMPLIFY_MEMORY_WRITES in self.state.options) or (
# self.category == 'reg' and options.SIMPLIFY_REGISTER_WRITES in self.state.options):
# req.data = self.state.solver.simplify(req.data)
if req.endness == "Iend_LE" or (req.endness is None and self.endness == "Iend_LE"):
req.data = req.data.reversed
if type(req.addr) is int:
req.addr = claripy.BVV(req.addr, 64)
req.addr = claripy.SignExt(64 - len(req.addr), req.addr)
req.stored_values = [req.data]
self.mem.store(req.addr, req.data, req.size)
l.debug("... done")
req.completed = True
return req
def _insert_memory_object(self, value, address, size):
if self.category == 'mem':
self.state.scratch.dirty_addrs.update(range(address, address+size))
self.mem.store(address, value, size)
def _store_fully_concrete(self, address, size, data, endness, condition):
if type(size) is not int:
size = self.state.solver.eval(size)
if size < data.length//self.state.arch.byte_width:
data = data[len(data)-1:len(data)-size*self.state.arch.byte_width:]
if condition is not None:
try:
original_value = self._read_from(address, size)
except Exception as ex:
raise ex
if endness == "Iend_LE" or (endness is None and self.endness == "Iend_LE"):
original_value = original_value.reversed
conditional_value = self.state.solver.If(condition, data, original_value)
else:
conditional_value = data
return [ dict(value=conditional_value, addr=address, size=size) ]
def _store_symbolic_size(self, address, size, data, endness, condition):
address = self.state.solver.eval(address)
max_bytes = data.length//self.state.arch.byte_width
original_value = self._read_from(address, max_bytes)
if endness == "Iend_LE" or (endness is None and self.endness == "Iend_LE"):
original_value = original_value.reversed
befores = original_value.chop(bits=self.state.arch.byte_width)
afters = data.chop(bits=self.state.arch.byte_width)
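        # byte i of the stored value comes from `data` when size > i and from the original
        # memory otherwise, so a symbolic size selects how long a prefix of the new data
        # overwrites the old contents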
stored_value = self.state.solver.Concat(*[
self.state.solver.If(self.state.solver.UGT(size, i), a, b)
for i, (a, b) in enumerate(zip(afters, befores))
])
conditional_value = self.state.solver.If(condition, stored_value, original_value) if condition is not None else stored_value
return [ dict(value=conditional_value, addr=address, size=max_bytes) ]
def _store_symbolic_addr(self, address, addresses, size, data, endness, condition):
size = self.state.solver.eval(size)
segments = self._get_segments(addresses, size)
if condition is None:
condition = claripy.BoolV(True)
original_values = [ self._read_from(segment['start'], segment['size']) for segment in segments ]
if endness == "Iend_LE" or (endness is None and self.endness == "Iend_LE"):
original_values = [ ov.reversed for ov in original_values ]
stored_values = []
for segment, original_value in zip(segments, original_values):
conditional_value = original_value
for opt in segment['options']:
if endness == "Iend_LE" or (endness is None and self.endness == "Iend_LE"):
high = ((opt['idx']+segment['size']) * self.state.arch.byte_width)-1
low = opt['idx']*self.state.arch.byte_width
else:
high = len(data) - 1 - (opt['idx']*self.state.arch.byte_width)
low = len(data) - ((opt['idx']+segment['size']) *self.state.arch.byte_width)
data_slice = data[high:low]
conditional_value = self.state.solver.If(self.state.solver.And(address == segment['start']-opt['idx'], condition), data_slice, conditional_value)
stored_values.append(dict(value=conditional_value, addr=segment['start'], size=segment['size']))
return stored_values
def flush_pages(self, whitelist):
flushed_regions = self.mem.flush_pages(whitelist)
if self.state.has_plugin('unicorn'):
for addr, length in flushed_regions:
self.state.unicorn.uncache_region(addr, length)
@staticmethod
def _create_segment(addr, size, s_options, idx, segments):
segment = dict(start=addr, size=size, options=s_options)
segments.insert(idx, segment)
@staticmethod
def _split_segment(addr, segments):
s_idx = SimSymbolicMemory._get_segment_index(addr, segments)
segment = segments[s_idx]
if segment['start'] == addr:
return s_idx
assert segment['start'] < addr < segment['start'] + segment['size']
size_prev = addr - segment['start']
size_next = segment['size'] - size_prev
assert size_prev != 0 and size_next != 0
segments.pop(s_idx)
SimSymbolicMemory._create_segment(segment['start'], size_prev, segment['options'], s_idx, segments)
SimSymbolicMemory._create_segment(addr, size_next, [{"idx": opt["idx"] + size_prev}
for opt in segment['options']], s_idx + 1, segments)
return s_idx + 1
@staticmethod
def _add_segments_overlap(idx, addr, segments):
for i in range(idx, len(segments)):
segment = segments[i]
if addr < segment['start'] + segment['size']:
segments[i]["options"].append({"idx": segment['start'] - addr})
@staticmethod
def _get_segment_index(addr, segments):
for i, segment in enumerate(segments):
if segment['start'] <= addr < segment['start'] + segment['size']:
return i
return -1
@staticmethod
def _get_segments(addrs, size):
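        # Builds non-overlapping segments covering all candidate write addresses.
        # Worked example (hypothetical values): for sorted addrs [0x100, 0x102] and
        # size 4, the first address creates segment [0x100, 0x104); the second splits
        # it at 0x102 and appends a tail segment [0x104, 0x106), and each segment's
        # "options" record the offsets ("idx") into the written data that can map onto it.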
segments = []
highest = 0
for addr in addrs:
if addr < highest:
idx = SimSymbolicMemory._split_segment(addr, segments)
SimSymbolicMemory._create_segment(highest, addr + size - highest, [], len(segments), segments)
SimSymbolicMemory._add_segments_overlap(idx, addr, segments)
else:
SimSymbolicMemory._create_segment(addr, size, [{'idx': 0}], len(segments), segments)
highest = addr + size
return segments
def _store_fully_symbolic(self, address, addresses, size, data, endness, condition):
stored_values = [ ]
byte_dict = defaultdict(list)
max_bytes = data.length//self.state.arch.byte_width
if condition is None:
condition = claripy.BoolV(True)
# chop data into byte-chunks
original_values = [self._read_from(a, max_bytes) for a in addresses]
if endness == "Iend_LE" or (endness is None and self.endness == "Iend_LE"):
original_values = [ ov.reversed for ov in original_values ]
data_bytes = data.chop(bits=self.state.arch.byte_width)
for a, fv in zip(addresses, original_values):
original_bytes = fv.chop(self.state.arch.byte_width)
for index, (d_byte, o_byte) in enumerate(zip(data_bytes, original_bytes)):
                # create a dict of all possible values for a certain address
byte_dict[a+index].append((a, index, d_byte, o_byte))
for byte_addr in sorted(byte_dict.keys()):
write_list = byte_dict[byte_addr]
# If this assertion fails something is really wrong!
assert all(v[3] is write_list[0][3] for v in write_list)
conditional_value = write_list[0][3]
for a, index, d_byte, o_byte in write_list:
# create the ast for each byte
conditional_value = self.state.solver.If(self.state.solver.And(address == a, size > index, condition), d_byte, conditional_value)
stored_values.append(dict(value=conditional_value, addr=byte_addr, size=1))
return stored_values
def _store_with_merge(self, req):
req._adjust_condition(self.state)
dst = req.addr
cnt = req.data
size = req.size
endness = req.endness
req.stored_values = [ ]
if options.ABSTRACT_MEMORY not in self.state.options:
raise SimMemoryError('store_with_merge is not supported without abstract memory.')
l.debug("Doing a store with merging...")
addrs = self.concretize_write_addr(dst)
if len(addrs) == 1:
l.debug("... concretized to 0x%x", addrs[0])
else:
l.debug("... concretized to %d values", len(addrs))
if size is None:
# Full length
length = len(cnt)
else:
raise NotImplementedError()
for addr in addrs:
# First we load old values
old_val = self._read_from(addr, length // self.state.arch.byte_width)
assert isinstance(old_val, claripy.Bits)
# FIXME: This is a big hack
def is_reversed(o):
if isinstance(o, claripy.Bits) and o.op == 'Reverse':
return True
return False
def can_be_reversed(o):
om = o._model_vsa
if isinstance(om, claripy.vsa.StridedInterval) and om.is_integer:
return True
return False
if endness == 'Iend_LE': cnt = cnt.reversed
reverse_it = False
if is_reversed(cnt):
if is_reversed(old_val):
cnt = cnt.args[0]
old_val = old_val.args[0]
reverse_it = True
elif can_be_reversed(old_val):
cnt = cnt.args[0]
reverse_it = True
if isinstance(old_val, (int, claripy.bv.BVV)):
merged_val = self.state.solver.SI(bits=len(old_val), to_conv=old_val)
else:
merged_val = old_val
merged_val = merged_val.union(cnt)
if reverse_it:
merged_val = merged_val.reversed
# Write the new value
self.store(addr, merged_val, size=size)
req.stored_values.append(merged_val)
req.completed = True
# TODO: revisit the following lines
req.constraints = [ ]
return req
def get_unconstrained_bytes(self, name, bits, source=None, key=None, inspect=True, events=True, **kwargs):
"""
Get some consecutive unconstrained bytes.
:param name: Name of the unconstrained variable
:param bits: Size of the unconstrained variable
:param source: Where those bytes are read from. Currently it is only used in under-constrained symbolic
execution so that we can track the allocation depth.
:return: The generated variable
"""
if self.category == 'mem' and options.ZERO_FILL_UNCONSTRAINED_MEMORY in self.state.options:
return self.state.solver.BVV(0, bits)
elif self.category == 'reg' and options.ZERO_FILL_UNCONSTRAINED_REGISTERS in self.state.options:
return self.state.solver.BVV(0, bits)
elif options.SPECIAL_MEMORY_FILL in self.state.options and self.state._special_memory_filler is not None:
return self.state._special_memory_filler(name, bits, self.state)
else:
if options.UNDER_CONSTRAINED_SYMEXEC in self.state.options:
if source is not None and type(source) is int:
alloc_depth = self.state.uc_manager.get_alloc_depth(source)
kwargs['uc_alloc_depth'] = 0 if alloc_depth is None else alloc_depth + 1
r = self.state.solver.Unconstrained(name, bits, key=key, inspect=inspect, events=events, **kwargs)
return r
# Unconstrain a byte
def unconstrain_byte(self, addr, inspect=True, events=True):
unconstrained_byte = self.get_unconstrained_bytes("%s_unconstrain_%#x" % (self.id, addr), self.state.arch.byte_width, inspect=inspect,
events=events, key=('manual_unconstrain', addr))
self.store(addr, unconstrained_byte)
# Replaces the differences between self and other with unconstrained bytes.
def unconstrain_differences(self, other):
changed_bytes = self.changed_bytes(other)
l.debug("Will unconstrain %d %s bytes", len(changed_bytes), self.id)
for b in changed_bytes:
self.unconstrain_byte(b)
@staticmethod
def _is_uninitialized(a):
return getattr(a._model_vsa, 'uninitialized', False)
def _merge_values(self, to_merge, merged_size, is_widening=False):
if options.ABSTRACT_MEMORY in self.state.options:
if self.category == 'reg' and self.state.arch.register_endness == 'Iend_LE':
should_reverse = True
elif self.state.arch.memory_endness == 'Iend_LE':
should_reverse = True
else:
should_reverse = False
merged_val = to_merge[0][0]
if should_reverse: merged_val = merged_val.reversed
for tm,_ in to_merge[1:]:
if should_reverse: tm = tm.reversed
if self._is_uninitialized(tm):
continue
if is_widening:
l.info("Widening %s %s...", merged_val, tm)
merged_val = merged_val.widen(tm)
l.info('... Widened to %s', merged_val)
else:
l.info("Merging %s %s...", merged_val, tm)
merged_val = merged_val.union(tm)
l.info("... Merged to %s", merged_val)
if should_reverse: merged_val = merged_val.reversed
else:
merged_val = self.state.solver.BVV(0, merged_size*self.state.arch.byte_width)
for tm,fv in to_merge:
merged_val = self.state.solver.If(fv, tm, merged_val)
return merged_val
def dbg_print(self, indent=0):
"""
Print out debugging information.
"""
lst = []
more_data = False
for i, addr in enumerate(self.mem.keys()):
lst.append(addr)
if i >= 20:
more_data = True
break
for addr in sorted(lst):
data = self.mem[addr]
if isinstance(data, SimMemoryObject):
memobj = data
print("%s%xh: (%s)[%d]" % (" " * indent, addr, memobj, addr - memobj.base))
else:
print("%s%xh: <default data>" % (" " * indent, addr))
if more_data:
print("%s..." % (" " * indent))
def _copy_contents(self, dst, src, size, condition=None, src_memory=None, dst_memory=None, inspect=True,
disable_actions=False):
src_memory = self if src_memory is None else src_memory
dst_memory = self if dst_memory is None else dst_memory
_,max_size = self._resolve_size_range(size)
if max_size == 0:
return None, [ ]
data = src_memory.load(src, max_size, inspect=inspect, disable_actions=disable_actions)
dst_memory.store(dst, data, size=size, condition=condition, inspect=inspect, disable_actions=disable_actions)
return data
#
# Things that are actually handled by SimPagedMemory
#
def changed_bytes(self, other):
"""
Gets the set of changed bytes between self and `other`.
:param other: The other :class:`SimSymbolicMemory`.
:returns: A set of differing bytes
"""
return self.mem.changed_bytes(other.mem)
def replace_all(self, old, new):
"""
Replaces all instances of expression old with expression new.
        :param old: A claripy expression. Must contain at least one named variable (to make
                    it possible to use the name index for speedup)
:param new: The new variable to replace it with
"""
return self.mem.replace_all(old, new)
def addrs_for_name(self, n):
"""
Returns addresses that contain expressions that contain a variable
named `n`.
"""
return self.mem.addrs_for_name(n)
def addrs_for_hash(self, h):
"""
Returns addresses that contain expressions that contain a variable
with the hash of `h`.
"""
return self.mem.addrs_for_hash(h)
def replace_memory_object(self, old, new_content):
"""
Replaces the memory object 'old' with a new memory object containing
'new_content'.
:param old: A SimMemoryObject (i.e., one from memory_objects_for_hash() or
memory_objects_for_name())
:param new_content: the content (claripy expression) for the new memory object
"""
return self.mem.replace_memory_object(old, new_content)
def memory_objects_for_name(self, n):
"""
Returns a set of SimMemoryObjects that contain expressions that contain a variable
with the name of n. This is useful for replacing those values, in one fell swoop,
with replace_memory_object(), even if they've been partially overwritten.
"""
return self.mem.memory_objects_for_name(n)
def memory_objects_for_hash(self, n):
"""
Returns a set of SimMemoryObjects that contain expressions that contain a variable
with the hash of h. This is useful for replacing those values, in one fell swoop,
with replace_memory_object(), even if they've been partially overwritten.
"""
return self.mem.memory_objects_for_hash(n)
def permissions(self, addr, permissions=None):
"""
Retrieve the permissions of the page at address `addr`.
:param addr: address to get the page permissions
:param permissions: Integer or BVV to optionally set page permissions to
:return: AST representing the permissions on the page
"""
out = self.mem.permissions(addr, permissions)
# if unicorn is in play and we've marked a page writable, it must be uncached
if permissions is not None and self.state.solver.is_true(permissions & 2 == 2):
if self.state.has_plugin('unicorn'):
p = self.mem._get_page(self.mem._page_id(addr))
self.state.unicorn.uncache_region(p._page_addr, p._page_size)
return out
def map_region(self, addr, length, permissions, init_zero=False):
"""
Map a number of pages at address `addr` with permissions `permissions`.
:param addr: address to map the pages at
:param length: length in bytes of region to map, will be rounded upwards to the page size
:param permissions: AST of permissions to map, will be a bitvalue representing flags
:param init_zero: Initialize page with zeros
"""
l.info("Mapping [%#x, %#x] as %s", addr, addr + length - 1, permissions)
return self.mem.map_region(addr, length, permissions, init_zero=init_zero)
def unmap_region(self, addr, length):
"""
Unmap a number of pages at address `addr`
:param addr: address to unmap the pages at
:param length: length in bytes of region to map, will be rounded upwards to the page size
"""
if self.state.has_plugin('unicorn'):
self.state.unicorn.uncache_region(addr, length)
return self.mem.unmap_region(addr, length)
# Register state options
SimStateOptions.register_option("symbolic_ip_max_targets", int,
default=256,
description="The maximum number of concrete addresses a symbolic instruction pointer "
"can be concretized to."
)
SimStateOptions.register_option("jumptable_symbolic_ip_max_targets", int,
default=16384,
description="The maximum number of concrete addresses a symbolic instruction pointer "
"can be concretized to if it is part of a jump table."
)
from angr.sim_state import SimState
SimState.register_default('sym_memory', SimSymbolicMemory)
from ..errors import SimUnsatError, SimMemoryError, SimMemoryLimitError, SimMemoryAddressError, SimMergeError
from .. import sim_options as options
from .inspect import BP_AFTER, BP_BEFORE
from .. import concretization_strategies
| 42.248316
| 161
| 0.601303
|
dc26cf8e04fb7f045bbea843193fbc83a61e4c91
| 7,696
|
py
|
Python
|
crae.py
|
scotty110/ANN-code
|
05ae6094dfe98c1c9fd0feb87ffb0c0c5206502a
|
[
"MIT"
] | null | null | null |
crae.py
|
scotty110/ANN-code
|
05ae6094dfe98c1c9fd0feb87ffb0c0c5206502a
|
[
"MIT"
] | null | null | null |
crae.py
|
scotty110/ANN-code
|
05ae6094dfe98c1c9fd0feb87ffb0c0c5206502a
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from tensorflow.contrib.rnn import *
import numpy as np
import functools
#import image_handler as image_handler
import image_split_loader as isl
import os
"""
THIS MODEL WORKS
"""
def lazy_property(function):
'''
Danijar Hafner:
https://danijar.com/
https://gist.github.com/danijar
'''
attribute = '_cache_' + function.__name__
@property
@functools.wraps(function)
def decorator(self):
if not hasattr(self, attribute):
setattr(self, attribute, function(self))
return getattr(self, attribute)
return decorator
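# Minimal illustration of lazy_property (names below are illustrative only, not used
# elsewhere in this file): the decorated method runs once per instance and its result
# is cached on the instance under '_cache_<name>'.
#
#     class _LazyDemo(object):
#         @lazy_property
#         def answer(self):
#             return 6 * 7   # computed on first access, returned from the cache afterwards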
class videoPredictor:
'''
    TensorFlow model for an RNN-based video patch predictor.
'''
def __init__(self, time_steps=9, patch_size=8, alpha=0.002, debug=False):
'''
Defining Tensorflow model properties
Inputs:
TODO
alpha - learning rate for optimizer
'''
#Feed parameters
self.time_steps = time_steps
self.patch_size = patch_size
self.input_rnn = tf.placeholder(tf.float32, shape=[None, self.time_steps, self.patch_size, self.patch_size], name='rnn_input')
self.true_image = tf.placeholder(tf.float32, shape=[None, self.patch_size, self.patch_size], name='uncompressed')
self.alpha = alpha
self.global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
self.debug = debug
#Model pieces
self.rnnNetwork
self.error
self.optimizer
#for Running
init = tf.global_variables_initializer()
self.saver = tf.train.Saver()
config = tf.ConfigProto(device_count = {'GPU': 0})
#self.supervisor = tf.train.Supervisor(logdir="/tmp/model_logs", saver=self.saver, save_model_secs=600)
self.sess = tf.Session(config=config)
#self.sess = (self.supervisor).managed_session()
self.sess.run(init)
@lazy_property
def rnnNetwork(self):
        '''
Our rnn portion of code
'''
with tf.name_scope('input_processing'):
#Orig input shape: [batch_size, time_step, pixel_values]
input_layer = self.input_rnn
#Reshape so patch become a vector
input_layer = tf.reshape( input_layer, shape=[-1,self.time_steps*self.patch_size**2,1] )
#input_layer = tf.transpose(input_layer, perm=[1,0,2] )
input_layer_shape = tf.shape(input_layer)
num_steps = tf.shape(input_layer)[1]
batch_size = tf.shape(input_layer)[0]
if(self.debug):
print "num steps: ", num_steps
print "batch size: ", batch_size
print "input layer shape: ", input_layer.get_shape().as_list()
with tf.name_scope('rnn_cell'):
#cell = BasicLSTMCell( self.patch_size**2, forget_bias=0.0, state_is_tuple=True, reuse=False)
            cell = BasicRNNCell( 1024, reuse=False) #will need to change in the future
state = cell.zero_state(batch_size,dtype=tf.float32)
rnn_output, state = tf.nn.dynamic_rnn(cell, input_layer, initial_state=state, time_major=False, dtype=tf.float32)
if self.debug:
print "rnn output shape: ", rnn_output.get_shape().as_list()
print "rnn output[0] shape: ", rnn_output[0].get_shape().as_list()
print "rnn state shape: ", state
with tf.name_scope("Reshape_final"):
output = tf.reshape(rnn_output[0], [batch_size, self.patch_size, self.patch_size] )
if(self.debug):
print "output shape: ", output.get_shape().as_list()
return output
@lazy_property
def optimizer(self):
'''
The optimizer to use for our autoencoder, using MSE as loss function
'''
# predictions - predicted output of model
# labels - ground truth output tensor, needs to be same dimension as predictions
loss = tf.losses.mean_squared_error( predictions=self.rnnNetwork, labels=self.true_image )
optimize = tf.train.AdamOptimizer( self.alpha )
optimizer = optimize.minimize(loss, global_step=self.global_step)
return optimizer
@lazy_property
def error(self):
'''
        Calculates the mean squared error of the model during training.
'''
# Function API: https://www.tensorflow.org/api_docs/python/tf/global_norm
error = tf.losses.mean_squared_error( predictions=self.rnnNetwork, labels=self.true_image )
return error
def train(self, image_compressed, image_raw, counter=0, batch_size=1, loop=1):
'''
Trains model on X data
'''
#Create training
X,Y,count = self.createTrainingData(image_compressed, image_raw, batch_size=batch_size, counter=counter)
for j in range(0,X.shape[0]):
for i in range(0,loop):
self.sess.run( self.optimizer, feed_dict={self.input_rnn:X[j][:], self.true_image:Y[j][:]} )
del X
del Y
#print "Done Training"
return count
def evaluate(self, image_compressed, image_raw, batch_size=1, counter=0):
'''
Calcs MSE for model on X data
'''
mse = 0
X,Y,count = self.createTrainingData(image_compressed, image_raw, batch_size=batch_size, counter=counter)
#print "made it this far"
for j in range(0,X.shape[0]):
mse = mse + self.sess.run( self.error, feed_dict={self.input_rnn:X[j][:], self.true_image:Y[j][:]} )
del X
del Y
return mse
def createTrainingData(self, image_compressed, image_raw, batch_size=1, counter=0):
#create training data
X,Y,count = isl.nextbatch(batch_size=batch_size, comp_file_array=image_compressed, raw_file_array=image_raw, starting_point=counter)
return X,Y,count
def save(self, checkpoint_directory):
'''
Saves tensorflow session
Inputs:
checkpoint_directory - directory and file where to save model information too (no file extensions)
'''
#saver = tf.train.Saver()
self.saver.save(self.sess, checkpoint_directory, global_step=self.global_step )
return True
def load(self, checkpoint_directory):
'''
Loads checkpoint file for tensorflow model.
Inputs:
checkpoint_directory - directory and file where to load model information from (no file extensions)
'''
#saver = tf.train.Saver()
ckpt = tf.train.get_checkpoint_state(os.path.dirname(checkpoint_directory))
if ckpt and ckpt.model_checkpoint_path:
self.saver.restore(self.sess, ckpt.model_checkpoint_path)
return True
#saver.restore(self.sess, checkpoint_directory )
return False
def runner(self, image_compressed_dir, image_raw_dir, model_loc="./test_chp", loop=1, batch_size=2, epochs=10):
'''
        Runs the model; includes checkpointing features
'''
loop = 1
comp_files = isl.processDir(dirname=image_compressed_dir)
raw_files = isl.processDir(dirname=image_raw_dir)
        #Need to add loop for training over whole data set
for count_i in range(epochs+1):
#print "Batch size: ", batch_size
#print "counter: ", count_i
#print "len of files: ", len(comp_files), len(raw_files)
for j in range(0, len(comp_files), batch_size):
self.train(image_compressed=comp_files, image_raw=raw_files, counter=j, batch_size=batch_size, loop=loop)
if count_i%5==0:
for j in range(0, len(comp_files), batch_size):
mse = self.evaluate(image_compressed=comp_files, image_raw=raw_files, batch_size=batch_size, counter=j)
print( "summed MSE is: ", (mse) )
self.save(checkpoint_directory=model_loc)
#model.load(checkpoint_directory="/home/scott/Documents/Code/checkpoint_test/tester")
return 1
'''
Other Help:
https://www.tensorflow.org/tutorials/layers
https://www.tensorflow.org/api_docs/python/tf/cond
https://stackoverflow.com/questions/34959853/how-to-make-an-if-statement-using-a-boolean-tensor
https://www.tensorflow.org/versions/r0.12/how_tos/supervisor/
'''
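# --- Illustrative usage sketch (not part of the original file) ---
# `model` is assumed to be an instance of the training class defined above
# (its constructor is not shown in this excerpt); the directory paths are
# placeholders.
def example_training_run(model, comp_dir="./compressed", raw_dir="./raw"):
    # Train for 10 epochs with batches of 2; runner() evaluates and
    # checkpoints every 5th epoch and prints the summed MSE.
    return model.runner(image_compressed_dir=comp_dir,
                        image_raw_dir=raw_dir,
                        model_loc="./test_chp",
                        batch_size=2,
                        epochs=10)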
| 35.62963
| 136
| 0.689969
|
2e75443b879718b3d2aeefb97db693a47728e297
| 11,185
|
py
|
Python
|
salt/utils/raetevent.py
|
l2ol33rt/salt
|
ff68bbd9f4bda992a3e039822fb32f141e94347c
|
[
"Apache-2.0"
] | 1
|
2022-02-09T06:40:14.000Z
|
2022-02-09T06:40:14.000Z
|
salt/utils/raetevent.py
|
dv-trading/salt
|
f5d4334178c50d0dfcd205d5a7fb9cfb27fd369e
|
[
"Apache-2.0"
] | null | null | null |
salt/utils/raetevent.py
|
dv-trading/salt
|
f5d4334178c50d0dfcd205d5a7fb9cfb27fd369e
|
[
"Apache-2.0"
] | 4
|
2020-11-04T06:28:05.000Z
|
2022-02-09T10:54:49.000Z
|
# -*- coding: utf-8 -*-
'''
Manage events
This module is used to manage events via RAET
'''
# pylint: disable=3rd-party-module-not-gated
# Import python libs
from __future__ import absolute_import
import os
import logging
import time
from collections import MutableMapping
# Import salt libs
import salt.payload
import salt.loader
import salt.state
import salt.utils.event
from salt.utils import kinds
from salt import transport
from salt import syspaths
from raet import raeting, nacling
from raet.lane.stacking import LaneStack
from raet.lane.yarding import RemoteYard
# Import 3rd-party libs
import salt.ext.six as six
log = logging.getLogger(__name__)
class RAETEvent(object):
'''
The base class used to manage salt events
'''
def __init__(self, node, sock_dir=None, listen=True, opts=None):
'''
Set up the stack and remote yard
'''
self.node = node # application kind see kinds.APPL_KIND_NAMES
self.sock_dir = sock_dir
if opts is None:
opts = {}
self.opts = opts
self.stack = None
self.ryn = 'manor' # remote yard name
self.connected = False
self.cpub = False
self.__prep_stack(listen)
def __prep_stack(self, listen):
'''
Prepare the stack objects
'''
if not self.stack:
if hasattr(transport, 'jobber_stack') and transport.jobber_stack:
self.stack = transport.jobber_stack
else:
self.stack = transport.jobber_stack = self._setup_stack(ryn=self.ryn)
log.debug("RAETEvent Using Jobber Stack at = {0}\n".format(self.stack.ha))
if listen:
self.subscribe()
def _setup_stack(self, ryn='manor'):
kind = self.opts.get('__role', '') # opts optional for master
        if kind:  # not all uses of Raet SaltEvent have opts defined
if kind not in kinds.APPL_KINDS:
emsg = ("Invalid application kind = '{0}' for RAET SaltEvent.".format(kind))
log.error(emsg + "\n")
raise ValueError(emsg)
if kind != self.node:
emsg = ("Mismatch between node = '{0}' and kind = '{1}' in "
"RAET SaltEvent.".format(self.node, kind))
log.error(emsg + '\n')
raise ValueError(emsg)
        if self.node in [kinds.APPL_KIND_NAMES[kinds.applKinds.master],
                         kinds.APPL_KIND_NAMES[kinds.applKinds.syndic]]:  # ['master', 'syndic']
lanename = 'master'
elif self.node in [kinds.APPL_KIND_NAMES[kinds.applKinds.minion],
kinds.APPL_KIND_NAMES[kinds.applKinds.caller]]: # ['minion', 'caller']
role = self.opts.get('id', '') # opts required for minion
if not role:
emsg = ("Missing role required to setup RAET SaltEvent.")
log.error(emsg + "\n")
raise ValueError(emsg)
if not kind:
emsg = "Missing kind required to setup RAET SaltEvent."
log.error(emsg + '\n')
raise ValueError(emsg)
lanename = "{0}_{1}".format(role, kind)
else:
emsg = ("Unsupported application node kind '{0}' for RAET SaltEvent.".format(self.node))
log.error(emsg + '\n')
raise ValueError(emsg)
name = 'event' + nacling.uuid(size=18)
cachedir = self.opts.get('cachedir', os.path.join(syspaths.CACHE_DIR, self.node))
stack = LaneStack(
name=name,
lanename=lanename,
sockdirpath=self.sock_dir)
stack.Pk = raeting.PackKind.pack.value
stack.addRemote(RemoteYard(stack=stack,
lanename=lanename,
name=ryn,
dirpath=self.sock_dir))
return stack
def subscribe(self, tag=None):
'''
Included for compat with zeromq events, not required
'''
if not self.connected:
self.connect_pub()
def unsubscribe(self, tag=None):
'''
Included for compat with zeromq events, not required
'''
return
def connect_pub(self):
'''
Establish the publish connection
'''
try:
route = {'dst': (None, self.ryn, 'event_req'),
'src': (None, self.stack.local.name, None)}
msg = {'route': route}
self.stack.transmit(msg, self.stack.nameRemotes[self.ryn].uid)
self.stack.serviceAll()
self.connected = True
self.cpub = True
except Exception:
pass
def connect_pull(self, timeout=1000):
'''
Included for compat with zeromq events, not required
'''
return
@classmethod
def unpack(cls, raw, serial=None):
'''
Included for compat with zeromq events, not required
'''
return raw
def get_event(self, wait=5, tag='', match_type=None, full=False, no_block=None,
auto_reconnect=False):
'''
Get a single publication.
IF no publication available THEN block for up to wait seconds
AND either return publication OR None IF no publication available.
IF wait is 0 then block forever.
'''
if not self.connected:
self.connect_pub()
start = time.time()
while True:
self.stack.serviceAll()
if self.stack.rxMsgs:
msg, sender = self.stack.rxMsgs.popleft()
if 'tag' not in msg and 'data' not in msg:
# Invalid event, how did this get here?
continue
if not msg['tag'].startswith(tag):
# Not what we are looking for, throw it away
continue
if full:
return msg
else:
return msg['data']
if start + wait < time.time():
return None
time.sleep(0.01)
def get_event_noblock(self):
'''
Get the raw event msg without blocking or any other niceties
'''
if not self.connected:
self.connect_pub()
self.stack.serviceAll()
if self.stack.rxMsgs:
msg, sender = self.stack.rxMsgs.popleft()
if 'tag' not in msg and 'data' not in msg:
# Invalid event, how did this get here?
return None
return msg
def iter_events(self, tag='', full=False, auto_reconnect=False):
'''
Creates a generator that continuously listens for events
'''
while True:
data = self.get_event(tag=tag, full=full, auto_reconnect=auto_reconnect)
if data is None:
continue
yield data
def fire_event(self, data, tag, timeout=1000):
'''
        Send a single event into the publisher with payload dict "data" and event
identifier "tag"
'''
# Timeout is retained for compat with zeromq events
if not str(tag): # no empty tags allowed
raise ValueError('Empty tag.')
if not isinstance(data, MutableMapping): # data must be dict
raise ValueError('Dict object expected, not \'{0}\'.'.format(data))
route = {'dst': (None, self.ryn, 'event_fire'),
'src': (None, self.stack.local.name, None)}
msg = {'route': route, 'tag': tag, 'data': data}
self.stack.transmit(msg, self.stack.nameRemotes[self.ryn].uid)
self.stack.serviceAll()
def fire_ret_load(self, load):
'''
Fire events based on information in the return load
'''
if load.get('retcode') and load.get('fun'):
# Minion fired a bad retcode, fire an event
if load['fun'] in salt.utils.event.SUB_EVENT:
try:
for tag, data in six.iteritems(load.get('return', {})):
data['retcode'] = load['retcode']
tags = tag.split('_|-')
if data.get('result') is False:
self.fire_event(
data,
'{0}.{1}'.format(tags[0], tags[-1])) # old dup event
data['jid'] = load['jid']
data['id'] = load['id']
data['success'] = False
data['return'] = 'Error: {0}.{1}'.format(tags[0], tags[-1])
data['fun'] = load['fun']
data['user'] = load['user']
self.fire_event(
data,
salt.utils.event.tagify([load['jid'],
'sub',
load['id'],
'error',
load['fun']],
'job'))
except Exception:
pass
def close_pub(self):
'''
        Here for compatibility
'''
return
def destroy(self):
if hasattr(self, 'stack'):
self.stack.server.close()
class MasterEvent(RAETEvent):
'''
Create a master event management object
'''
def __init__(self, opts, sock_dir, listen=True):
super(MasterEvent, self).__init__('master', opts=opts, sock_dir=sock_dir, listen=listen)
class PresenceEvent(MasterEvent):
def __init__(self, opts, sock_dir, listen=True, state=None):
self.state = state
super(PresenceEvent, self).__init__(opts=opts, sock_dir=sock_dir, listen=listen)
def connect_pub(self):
'''
Establish the publish connection
'''
try:
route = {'dst': (None, self.ryn, 'presence_req'),
'src': (None, self.stack.local.name, None)}
msg = {'route': route}
if self.state:
msg['data'] = {'state': self.state}
self.stack.transmit(msg, self.stack.nameRemotes[self.ryn].uid)
self.stack.serviceAll()
self.connected = True
except Exception:
pass
class StatsEvent(MasterEvent):
def __init__(self, opts, sock_dir, tag, estate=None, listen=True):
super(StatsEvent, self).__init__(opts=opts, sock_dir=sock_dir, listen=listen)
self.tag = tag
self.estate = estate
def connect_pub(self):
'''
Establish the publish connection
'''
try:
route = {'dst': (self.estate, None, 'stats_req'),
'src': (None, self.stack.local.name, None)}
msg = {'route': route, 'tag': self.tag}
self.stack.transmit(msg, self.stack.nameRemotes[self.ryn].uid)
self.stack.serviceAll()
self.connected = True
except Exception:
pass
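# --- Illustrative usage sketch (not part of the original module) ---
# Assumes a running RAET "manor" yard plus valid master opts and sock_dir,
# both of which are environment-specific and not provided here.
def example_fire_and_listen(opts, sock_dir):
    event = MasterEvent(opts, sock_dir, listen=True)
    # Publish a dict payload under a tag, then block for up to 5 seconds for
    # any event whose tag starts with the same prefix.
    event.fire_event({'result': 'ok'}, 'salt/custom/demo')
    return event.get_event(wait=5, tag='salt/custom')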
| 35.062696
| 100
| 0.526777
|
9f68c4e23ea59fefb1ab5f291793069bff71e8b3
| 5,921
|
py
|
Python
|
catkin_ws/devel/lib/python2.7/dist-packages/vision_msgs/msg/_BoundingBox2D.py
|
Colin1245/ROS-Theory-Application-Shenlan
|
49986c83a2c73c7ab4310fd3f010e1b6bc0de786
|
[
"Apache-2.0"
] | null | null | null |
catkin_ws/devel/lib/python2.7/dist-packages/vision_msgs/msg/_BoundingBox2D.py
|
Colin1245/ROS-Theory-Application-Shenlan
|
49986c83a2c73c7ab4310fd3f010e1b6bc0de786
|
[
"Apache-2.0"
] | null | null | null |
catkin_ws/devel/lib/python2.7/dist-packages/vision_msgs/msg/_BoundingBox2D.py
|
Colin1245/ROS-Theory-Application-Shenlan
|
49986c83a2c73c7ab4310fd3f010e1b6bc0de786
|
[
"Apache-2.0"
] | null | null | null |
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from vision_msgs/BoundingBox2D.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import geometry_msgs.msg
class BoundingBox2D(genpy.Message):
_md5sum = "9ab41e2a4c8627735e5091a9abd68b02"
_type = "vision_msgs/BoundingBox2D"
_has_header = False # flag to mark the presence of a Header object
_full_text = """# A 2D bounding box that can be rotated about its center.
# All dimensions are in pixels, but represented using floating-point
# values to allow sub-pixel precision. If an exact pixel crop is required
# for a rotated bounding box, it can be calculated using Bresenham's line
# algorithm.
# The 2D position (in pixels) and orientation of the bounding box center.
geometry_msgs/Pose2D center
# The size (in pixels) of the bounding box surrounding the object relative
# to the pose of its center.
float64 size_x
float64 size_y
================================================================================
MSG: geometry_msgs/Pose2D
# Deprecated
# Please use the full 3D pose.
# In general our recommendation is to use a full 3D representation of everything and for 2D specific applications make the appropriate projections into the plane for their calculations but optimally will preserve the 3D information during processing.
# If we have parallel copies of 2D datatypes every UI and other pipeline will end up needing to have dual interfaces to plot everything. And you will end up with not being able to use 3D tools for 2D use cases even if they're completely valid, as you'd have to reimplement it with different inputs and outputs. It's not particularly hard to plot the 2D pose or compute the yaw error for the Pose message and there are already tools and libraries that can do this for you.
# This expresses a position and orientation on a 2D manifold.
float64 x
float64 y
float64 theta
"""
__slots__ = ['center','size_x','size_y']
_slot_types = ['geometry_msgs/Pose2D','float64','float64']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommended
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
center,size_x,size_y
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(BoundingBox2D, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.center is None:
self.center = geometry_msgs.msg.Pose2D()
if self.size_x is None:
self.size_x = 0.
if self.size_y is None:
self.size_y = 0.
else:
self.center = geometry_msgs.msg.Pose2D()
self.size_x = 0.
self.size_y = 0.
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_5d().pack(_x.center.x, _x.center.y, _x.center.theta, _x.size_x, _x.size_y))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
if python3:
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.center is None:
self.center = geometry_msgs.msg.Pose2D()
end = 0
_x = self
start = end
end += 40
(_x.center.x, _x.center.y, _x.center.theta, _x.size_x, _x.size_y,) = _get_struct_5d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_5d().pack(_x.center.x, _x.center.y, _x.center.theta, _x.size_x, _x.size_y))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
if python3:
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.center is None:
self.center = geometry_msgs.msg.Pose2D()
end = 0
_x = self
start = end
end += 40
(_x.center.x, _x.center.y, _x.center.theta, _x.size_x, _x.size_y,) = _get_struct_5d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_5d = None
def _get_struct_5d():
global _struct_5d
if _struct_5d is None:
_struct_5d = struct.Struct("<5d")
return _struct_5d
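# --- Illustrative sketch (not part of the generated file) ---
# Serializes a BoundingBox2D into an in-memory buffer and reads it back;
# assumes the generated geometry_msgs/genpy Python packages are importable.
def example_bbox_round_trip():
    from io import BytesIO
    box = BoundingBox2D(size_x=30.0, size_y=20.0)
    box.center.x, box.center.y, box.center.theta = 5.0, 7.0, 0.0
    buff = BytesIO()
    box.serialize(buff)
    clone = BoundingBox2D()
    clone.deserialize(buff.getvalue())
    return clone.center.x, clone.size_x, clone.size_y  # (5.0, 30.0, 20.0)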
| 38.2
| 471
| 0.684344
|
74fdefa3206508648c8bf951faa05af38d303c11
| 3,603
|
py
|
Python
|
server/data/ingest_1000.py
|
OpenDataAnalytics/resonanteco
|
eb326e4af6d899ed5dc89d1b99e0f3a439c46959
|
[
"Apache-2.0"
] | 4
|
2019-07-02T13:52:36.000Z
|
2020-02-14T07:12:12.000Z
|
server/data/ingest_1000.py
|
OpenDataAnalytics/resonanteco
|
eb326e4af6d899ed5dc89d1b99e0f3a439c46959
|
[
"Apache-2.0"
] | 1
|
2019-07-03T00:00:00.000Z
|
2019-07-11T02:18:47.000Z
|
server/data/ingest_1000.py
|
OpenDataAnalytics/resonanteco
|
eb326e4af6d899ed5dc89d1b99e0f3a439c46959
|
[
"Apache-2.0"
] | null | null | null |
import csv
from girder_client import GirderClient
import requests
import json
import os
import re
import sys
def lookup_envo_number(envo):
with open('envo_lookup.json', 'r') as f:
lookup_table = json.load(f)
if envo in lookup_table:
return lookup_table[envo]
response = requests.get('https://www.ebi.ac.uk/ols/api/select',
params={'q': envo})
json_response = response.json()
try:
label = json_response['response']['docs'][0]['label']
lookup_table[envo] = label
return label
except IndexError:
return None
def thatFormatReader(taxon_oid, text):
dic = {'taxon_oid': taxon_oid}
for line in text.splitlines()[3:]:
splits = line.split(' ')
key = splits[0]
if key:
valueStr = splits[-1].strip()
try:
value = float(valueStr)
except ValueError:
value = valueStr
dic[key] = value
return dic
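# --- Illustrative sketch (not part of the original script) ---
# thatFormatReader() skips the first three lines of the table text, keeps the
# text before the first space on each remaining line as the key and the last
# token as the value (coerced to float when possible). The sample text below
# is made up for illustration only.
def example_that_format_reader():
    sample = ("header 1\n"
              "header 2\n"
              "header 3\n"
              "Genome_Size          4600000\n"
              "Gene_Count           4300\n")
    return thatFormatReader('1234567890', sample)
    # -> {'taxon_oid': '1234567890', 'Genome_Size': 4600000.0, 'Gene_Count': 4300.0}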
def parseTable(directory, filename):
taxon_oid = re.search('([0-9]{2,})', filename).groups()[0]
with open(os.path.join(directory, filename), 'r') as myfile:
return thatFormatReader(taxon_oid, myfile.read())
def get_table_list_for_oid(oid):
tables = []
with open("File_Name-SP_ID_Mapping.txt") as search:
for line in search:
if oid in line.strip():
tables.append(line.split()[0])
return tables
def create_item_from_row(row, directory, gc):
if not row[7]:
return
print("Ingesting {}".format(row[0]))
parent = gc.resourceLookup('collection/ResonantEco/datasets/JGI')
item = gc.createItem(parent['_id'], row[0], reuseExisting=True)
latitude = None
longitude = None
try:
latitude = float(row[60])
longitude = float(row[59])
    except (IndexError, TypeError, ValueError):  # row may be short or non-numeric
pass
metadata = {
'latitude': latitude,
'longitude': longitude,
'source': 'JGI'
}
metadata['timestamp'] = row[48]
metadata['name'] = row[1]
metadata['biome'] = lookup_envo_number(row[5])
metadata['feature'] = lookup_envo_number(row[6])
metadata['material'] = lookup_envo_number(row[7])
metadata['omics'] = row[20].lower()
table7 = {}
table8 = {}
table9 = {}
for filename in get_table_list_for_oid(row[8]):
if 'Table_7' in filename:
table7 = parseTable(directory, filename)
elif 'Table_8' in filename:
table8 = parseTable(directory, filename)
elif 'Table_9' in filename:
table9 = parseTable(directory, filename)
gc.addMetadataToItem(
item['_id'],
{
'meta': metadata,
'table7': table7,
'table8': table8,
'table9': table9
}
)
def create_items_from_csv(path, data_dir, gc):
with open(path) as csvfile:
reader = csv.reader(csvfile, delimiter=',')
next(reader)
for row in reader:
create_item_from_row(row, data_dir, gc)
if __name__ == '__main__':
    if len(sys.argv) < 6:  # script name plus five required arguments
sys.exit('Sample call: python ingest.py ./data localhost 8080 admin letmein')
data_dir = sys.argv[1]
host = sys.argv[2]
port = sys.argv[3]
user = sys.argv[4]
password = sys.argv[5]
gc = GirderClient(apiUrl='http://{}:{}/api/v1'.format(host, port))
gc.authenticate(user, password)
create_items_from_csv('./jgi_data/NMDC_metadata_datasets - NMDC_datasets_metadata.csv', data_dir, gc)
# with open("envo_lookup.json", "w") as f:
# f.write(json.dumps(lookup_table))
| 28.824
| 105
| 0.597558
|
da3673fc4521a9e6c0aabd973991ce535fa43349
| 416
|
py
|
Python
|
is_valid/is_decodable_json_where.py
|
nandoflorestan/transvalid
|
4e0adbaad35188312189112cac0c4f187116b4b9
|
[
"MIT"
] | 4
|
2017-10-11T14:04:35.000Z
|
2019-03-29T08:38:09.000Z
|
is_valid/is_decodable_json_where.py
|
nandoflorestan/transvalid
|
4e0adbaad35188312189112cac0c4f187116b4b9
|
[
"MIT"
] | 1
|
2021-06-17T19:12:15.000Z
|
2021-06-17T19:12:15.000Z
|
is_valid/is_decodable_json_where.py
|
Daanvdk/is_valid
|
615c5ae1999095cba398af6ae041a472769857f8
|
[
"MIT"
] | 1
|
2021-06-05T18:06:49.000Z
|
2021-06-05T18:06:49.000Z
|
import json
from .is_decodable_where import is_decodable_where
from .is_json_where import is_json_where
class is_decodable_json_where(is_decodable_where):
def __init__(
self, predicate, loader=json.loads, encoding='utf-8', errors='strict'
):
return super().__init__(
is_json_where(predicate, loader=loader),
encoding=encoding,
errors=errors,
)
| 24.470588
| 77
| 0.677885
|
32fd121d66dd69a9547689ee8c56200e3f51b4e4
| 2,936
|
py
|
Python
|
octavia/amphorae/backends/health_daemon/status_message.py
|
zjchao/octavia
|
e07031fa78604568c6e2112cb4cb147661bc57d7
|
[
"Apache-2.0"
] | null | null | null |
octavia/amphorae/backends/health_daemon/status_message.py
|
zjchao/octavia
|
e07031fa78604568c6e2112cb4cb147661bc57d7
|
[
"Apache-2.0"
] | null | null | null |
octavia/amphorae/backends/health_daemon/status_message.py
|
zjchao/octavia
|
e07031fa78604568c6e2112cb4cb147661bc57d7
|
[
"Apache-2.0"
] | 1
|
2021-12-27T13:18:38.000Z
|
2021-12-27T13:18:38.000Z
|
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import binascii
import hashlib
import hmac
import json
import zlib
from oslo_log import log as logging
from oslo_utils import secretutils
from octavia.common import exceptions
LOG = logging.getLogger(__name__)
hash_algo = hashlib.sha256
hash_len = 32
hex_hash_len = 64
def to_hex(byte_array):
return binascii.hexlify(byte_array).decode()
def encode_obj(obj):
json_bytes = json.dumps(obj).encode('utf-8')
binary_array = zlib.compress(json_bytes, 9)
return binary_array
def decode_obj(binary_array):
json_str = zlib.decompress(binary_array).decode('utf-8')
obj = json.loads(json_str)
return obj
def wrap_envelope(obj, key, hex=True):
payload = encode_obj(obj)
hmc = get_hmac(payload, key, hex=hex)
envelope = payload + hmc
return envelope
def unwrap_envelope(envelope, key):
"""A backward-compatible way to get data.
    We may still receive packets from amphorae that are using digest() instead
of hexdigest()
"""
try:
return get_payload(envelope, key, hex=True)
except Exception:
return get_payload(envelope, key, hex=False)
def get_payload(envelope, key, hex=True):
    hmac_len = hex_hash_len if hex else hash_len  # avoid shadowing the len() builtin
    payload = envelope[:-hmac_len]
    expected_hmc = envelope[-hmac_len:]
calculated_hmc = get_hmac(payload, key, hex=hex)
if not secretutils.constant_time_compare(expected_hmc, calculated_hmc):
LOG.warning(
'calculated hmac(hex=%(hex)s): %(s1)s not equal to msg hmac: '
'%(s2)s dropping packet',
{
'hex': hex,
's1': to_hex(calculated_hmc),
's2': to_hex(expected_hmc)
}
)
fmt = 'calculated hmac: {0} not equal to msg hmac: {1} dropping packet'
raise exceptions.InvalidHMACException(fmt.format(
to_hex(calculated_hmc), to_hex(expected_hmc)))
obj = decode_obj(payload)
return obj
def get_hmac(payload, key, hex=True):
"""Get digest for the payload.
    The hex param is for backward compatibility, so packet data sent from
    existing amphorae can still be checked using the previous approach.
"""
hmc = hmac.new(key.encode("utf-8"), payload, hashlib.sha256)
return hmc.hexdigest().encode("utf-8") if hex else hmc.digest()
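# --- Illustrative sketch (not part of the original module) ---
# Round-trips a health message through wrap_envelope()/unwrap_envelope() with
# a shared HMAC key; relies only on the functions defined above.
def example_round_trip():
    key = 'shared-hmac-key'
    msg = {'id': 'amphora-1', 'listeners': {}}
    envelope = wrap_envelope(msg, key)  # zlib-compressed JSON + hex HMAC tail
    assert unwrap_envelope(envelope, key) == msg
    return envelope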
| 29.959184
| 79
| 0.684946
|
ea7787caeef8cdf78dfed953bd54966edcd2ae6e
| 150
|
py
|
Python
|
deepstreampy/constants/call_state.py
|
sapid/deepstreampy-twisted
|
78025141bb0ac3aadc248d68f9273bf8993fc3d4
|
[
"MIT"
] | null | null | null |
deepstreampy/constants/call_state.py
|
sapid/deepstreampy-twisted
|
78025141bb0ac3aadc248d68f9273bf8993fc3d4
|
[
"MIT"
] | null | null | null |
deepstreampy/constants/call_state.py
|
sapid/deepstreampy-twisted
|
78025141bb0ac3aadc248d68f9273bf8993fc3d4
|
[
"MIT"
] | null | null | null |
INITIAL = 'INITIAL'
CONNECTING = 'CONNECTING'
ESTABLISHED = 'ESTABLISHED'
ACCEPTED = 'ACCEPTED'
DECLINED = 'DECLINED'
ENDED = 'ENDED'
ERROR = 'ERROR'
| 18.75
| 27
| 0.72
|
e8d5a54cad8dd99ce3f7dc275ba97f4ec74cba45
| 84,732
|
py
|
Python
|
azure_compute/komand_azure_compute/actions/info_vm/schema.py
|
xhennessy-r7/insightconnect-plugins
|
59268051313d67735b5dd3a30222eccb92aca8e9
|
[
"MIT"
] | null | null | null |
azure_compute/komand_azure_compute/actions/info_vm/schema.py
|
xhennessy-r7/insightconnect-plugins
|
59268051313d67735b5dd3a30222eccb92aca8e9
|
[
"MIT"
] | null | null | null |
azure_compute/komand_azure_compute/actions/info_vm/schema.py
|
xhennessy-r7/insightconnect-plugins
|
59268051313d67735b5dd3a30222eccb92aca8e9
|
[
"MIT"
] | null | null | null |
# GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Input:
MODE = "mode"
RESOURCEGROUP = "resourceGroup"
SUBSCRIPTIONID = "subscriptionId"
VM = "vm"
class Output:
ID = "id"
LOCATION = "location"
NAME = "name"
PROPERTIES = "properties"
TAGS = "tags"
TYPE = "type"
VMID = "vmId"
class InfoVmInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"mode": {
"type": "string",
"title": "Mode",
"description": "This mode get information of model view or instance view or both",
"default": "modelViewAndInstanceView",
"enum": [
"modelView",
"instanceView",
"modelViewAndInstanceView"
],
"order": 4
},
"resourceGroup": {
"type": "string",
"title": "Resource Group",
"description": "The resource group that will contain the virtual machine",
"order": 2
},
"subscriptionId": {
"type": "string",
"title": "Subscription ID",
"description": "The identifier of your subscription",
"order": 1
},
"vm": {
"type": "string",
"title": "Name of Virtual Machine",
"description": "The name of the virtual machine",
"order": 3
}
},
"required": [
"subscriptionId",
"resourceGroup",
"vm"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class InfoVmOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 7
},
"location": {
"type": "string",
"title": "Location",
"description": "Location",
"order": 5
},
"name": {
"type": "string",
"title": "Name",
"description": "Name",
"order": 1
},
"properties": {
"$ref": "#/definitions/properties",
"title": "Properties",
"description": "Properties",
"order": 4
},
"tags": {
"$ref": "#/definitions/tags",
"title": "Tags",
"description": "Tags",
"order": 2
},
"type": {
"type": "string",
"title": "Type",
"description": "Type",
"order": 6
},
"vmId": {
"type": "string",
"title": "VM ID",
"description": "VM ID",
"order": 3
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"availabilitySet": {
"type": "object",
"title": "availabilitySet",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource ID",
"order": 1
}
}
},
"bootDiagnostics": {
"type": "object",
"title": "bootDiagnostics",
"properties": {
"enabled": {
"type": "boolean",
"title": "Enabled",
"description": "Specifies if the boot diagnostics is enabled",
"order": 1
},
"storageUri": {
"type": "string",
"title": "Storage Uri",
"description": "Uri of the storage account to use for placing the console output and screenshot",
"order": 2
}
}
},
"diagnosticsProfile": {
"type": "object",
"title": "diagnosticsProfile",
"properties": {
"bootDiagnostics": {
"$ref": "#/definitions/bootDiagnostics",
"title": "Boot Diagnostics",
"description": "Boot diagnostics is a debugging feature which allows you to view console Output and screenshot to diagnose vm status",
"order": 1
}
},
"definitions": {
"bootDiagnostics": {
"type": "object",
"title": "bootDiagnostics",
"properties": {
"enabled": {
"type": "boolean",
"title": "Enabled",
"description": "Specifies if the boot diagnostics is enabled",
"order": 1
},
"storageUri": {
"type": "string",
"title": "Storage Uri",
"description": "Uri of the storage account to use for placing the console output and screenshot",
"order": 2
}
}
}
}
},
"hardwareProfile": {
"type": "object",
"title": "hardwareProfile",
"properties": {
"vmSize": {
"type": "string",
"title": "VM Size",
"description": "Specifies the size of the virtual machine",
"order": 1
}
}
},
"imageReference": {
"type": "object",
"title": "imageReference",
"properties": {
"id": {
"type": "string",
"title": "Image Reference",
"description": "Specifies the resource identifier of a virtual machine image in your subscription",
"order": 1
},
"offer": {
"type": "string",
"title": "Offer",
"description": "Specifies the offer of the platform image or marketplace image used to create the virtual machine",
"order": 2
},
"publisher": {
"type": "string",
"title": "Publisher",
"description": "Specifies the publisher of the platform image or marketplace image used to create the virtual machine",
"order": 3
},
"sku": {
"type": "string",
"title": "SKU",
"description": "Specifies the sku of the platform image or marketplace image used to create the virtual machine",
"order": 4
},
"version": {
"type": "string",
"title": "Version",
"description": "Specifies the version of the platform image or marketplace image used to create the virtual machine",
"order": 5
}
}
},
"linuxConfiguration": {
"type": "object",
"title": "linuxConfiguration",
"properties": {
"disablePasswordAuthentication": {
"type": "boolean",
"title": "Disable Password Authentication",
"description": "Specifies whether password authentication should be disabled",
"order": 1
},
"ssh": {
"$ref": "#/definitions/ssh",
"title": "SSH",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"order": 2
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"networkProfile": {
"type": "object",
"title": "networkProfile",
"properties": {
"networkInterfaces": {
"type": "array",
"title": "Network Interfaces",
"description": "Specifies the list of resource ids for the network interfaces associated with the virtual machine",
"items": {
"$ref": "#/definitions/availabilitySet"
},
"order": 1
}
},
"definitions": {
"availabilitySet": {
"type": "object",
"title": "availabilitySet",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource ID",
"order": 1
}
}
}
}
},
"osDisk": {
"type": "object",
"title": "osDisk",
"properties": {
"caching": {
"type": "string",
"title": "Caching",
"description": "Specifies the caching requirements",
"order": 1
},
"createOption": {
"type": "string",
"title": "Create Option",
"description": "Specifies how the virtual machine should be created",
"order": 2
},
"managedDisk": {
"$ref": "#/definitions/managedDisk",
"title": "Managed Disk",
"description": "Specified the identifier and optional storage account type for the disk",
"order": 3
},
"name": {
"type": "string",
"title": "Name",
"description": "Specifies the disk name",
"order": 4
},
"osType": {
"type": "string",
"title": "OS Type",
"description": "This property allows you to specify the type of the os that is included in the disk if creating a vm from user-image or a specialized vhd",
"order": 5
},
"vhd": {
"$ref": "#/definitions/vhd",
"title": "VHD",
"description": "Specifies the uri of the location in storage where the vhd for the virtual machine should be placed",
"order": 6
}
},
"definitions": {
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
}
}
},
"osProfile": {
"type": "object",
"title": "osProfile",
"properties": {
"adminPassword": {
"type": "string",
"title": "Admin Password",
"description": "Specifies the password of the administrator account",
"order": 1
},
"adminUsername": {
"type": "string",
"title": "Admin UserName",
"description": "Specifies the name of the administrator account",
"order": 2
},
"computerName": {
"type": "string",
"title": "Computer Name",
"description": "Specifies the host os name of the virtual machine",
"order": 3
},
"customData": {
"type": "string",
"title": "Custom Data",
"description": "Specifies a base-64 encoded string of custom data",
"order": 4
},
"linuxConfiguration": {
"$ref": "#/definitions/linuxConfiguration",
"title": "Linux Configuration",
"description": "Specifies the linux operating system settings on the virtual machine",
"order": 7
},
"secrets": {
"type": "array",
"title": "Secrets",
"description": "Specifies set of certificates that should be installed onto the virtual machine",
"items": {
"type": "object"
},
"order": 5
},
"windowsConfiguration": {
"$ref": "#/definitions/windowsConfiguration",
"title": "Windows Configuration",
"description": "Specifies windows operating system settings on the virtual machine",
"order": 6
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"linuxConfiguration": {
"type": "object",
"title": "linuxConfiguration",
"properties": {
"disablePasswordAuthentication": {
"type": "boolean",
"title": "Disable Password Authentication",
"description": "Specifies whether password authentication should be disabled",
"order": 1
},
"ssh": {
"$ref": "#/definitions/ssh",
"title": "SSH",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"order": 2
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
},
"windowsConfiguration": {
"type": "object",
"title": "windowsConfiguration",
"properties": {
"additionalUnattendContent": {
"$ref": "#/definitions/additionalUnattendContent",
"title": "Additional Unattend Content",
"description": "Specifies additional xml formatted information that can be included in the unattend.xml file, which is used by windows setup",
"order": 1
},
"enableAutomaticUpdates": {
"type": "boolean",
"title": "Enable Automatic Updates",
"description": "Indicates whether virtual machine is enabled for automatic updates",
"order": 2
},
"provisionVMAgent": {
"type": "boolean",
"title": "Provision VM Agent",
"description": "Indicates whether virtual machine agent should be provisioned on the virtual machine",
"order": 3
},
"winRM": {
"$ref": "#/definitions/winRM",
"title": "Win RM",
"description": "Specifies the windows remote management listeners, this enables remote windows powershell",
"order": 4
},
"winrRMListener": {
"$ref": "#/definitions/listeners",
"title": "WinrRM Listener",
"description": "Contains configuration settings for the windows remote management service on the virtual machine",
"order": 5
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
}
}
}
}
},
"properties": {
"type": "object",
"title": "properties",
"properties": {
"availabilitySet": {
"$ref": "#/definitions/availabilitySet",
"title": "Availability Set",
"description": "The availability set that contains the virtual machine",
"order": 1
},
"diagnosticsProfile": {
"$ref": "#/definitions/diagnosticsProfile",
"title": "Diagnostics Profile",
"description": "Specifies the boot diagnostic settings state",
"order": 2
},
"hardwareProfile": {
"$ref": "#/definitions/hardwareProfile",
"title": "Hardware Profile",
"description": "Specifies the hardware settings for the virtual machine",
"order": 3
},
"networkProfile": {
"$ref": "#/definitions/networkProfile",
"title": "Network Profile",
"description": "Specifies the network interfaces of the virtual machine",
"order": 4
},
"osProfile": {
"$ref": "#/definitions/osProfile",
"title": "OS Profile",
"description": "Specifies the operating system settings for the virtual machine",
"order": 5
},
"provisioningState": {
"type": "string",
"title": "Provisioning State",
"description": "Specifies the provisioned state of the virtual machine",
"order": 6
},
"storageProfile": {
"$ref": "#/definitions/storageProfile",
"title": "Storage Profile",
"description": "Specifies the storage settings for the virtual machine disks",
"order": 7
},
"vmId": {
"type": "string",
"title": "Virtual Machine ID",
"description": "The vm unique id",
"order": 8
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"availabilitySet": {
"type": "object",
"title": "availabilitySet",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource ID",
"order": 1
}
}
},
"bootDiagnostics": {
"type": "object",
"title": "bootDiagnostics",
"properties": {
"enabled": {
"type": "boolean",
"title": "Enabled",
"description": "Specifies if the boot diagnostics is enabled",
"order": 1
},
"storageUri": {
"type": "string",
"title": "Storage Uri",
"description": "Uri of the storage account to use for placing the console output and screenshot",
"order": 2
}
}
},
"diagnosticsProfile": {
"type": "object",
"title": "diagnosticsProfile",
"properties": {
"bootDiagnostics": {
"$ref": "#/definitions/bootDiagnostics",
"title": "Boot Diagnostics",
"description": "Boot diagnostics is a debugging feature which allows you to view console Output and screenshot to diagnose vm status",
"order": 1
}
},
"definitions": {
"bootDiagnostics": {
"type": "object",
"title": "bootDiagnostics",
"properties": {
"enabled": {
"type": "boolean",
"title": "Enabled",
"description": "Specifies if the boot diagnostics is enabled",
"order": 1
},
"storageUri": {
"type": "string",
"title": "Storage Uri",
"description": "Uri of the storage account to use for placing the console output and screenshot",
"order": 2
}
}
}
}
},
"hardwareProfile": {
"type": "object",
"title": "hardwareProfile",
"properties": {
"vmSize": {
"type": "string",
"title": "VM Size",
"description": "Specifies the size of the virtual machine",
"order": 1
}
}
},
"imageReference": {
"type": "object",
"title": "imageReference",
"properties": {
"id": {
"type": "string",
"title": "Image Reference",
"description": "Specifies the resource identifier of a virtual machine image in your subscription",
"order": 1
},
"offer": {
"type": "string",
"title": "Offer",
"description": "Specifies the offer of the platform image or marketplace image used to create the virtual machine",
"order": 2
},
"publisher": {
"type": "string",
"title": "Publisher",
"description": "Specifies the publisher of the platform image or marketplace image used to create the virtual machine",
"order": 3
},
"sku": {
"type": "string",
"title": "SKU",
"description": "Specifies the sku of the platform image or marketplace image used to create the virtual machine",
"order": 4
},
"version": {
"type": "string",
"title": "Version",
"description": "Specifies the version of the platform image or marketplace image used to create the virtual machine",
"order": 5
}
}
},
"linuxConfiguration": {
"type": "object",
"title": "linuxConfiguration",
"properties": {
"disablePasswordAuthentication": {
"type": "boolean",
"title": "Disable Password Authentication",
"description": "Specifies whether password authentication should be disabled",
"order": 1
},
"ssh": {
"$ref": "#/definitions/ssh",
"title": "SSH",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"order": 2
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"networkProfile": {
"type": "object",
"title": "networkProfile",
"properties": {
"networkInterfaces": {
"type": "array",
"title": "Network Interfaces",
"description": "Specifies the list of resource ids for the network interfaces associated with the virtual machine",
"items": {
"$ref": "#/definitions/availabilitySet"
},
"order": 1
}
},
"definitions": {
"availabilitySet": {
"type": "object",
"title": "availabilitySet",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource ID",
"order": 1
}
}
}
}
},
"osDisk": {
"type": "object",
"title": "osDisk",
"properties": {
"caching": {
"type": "string",
"title": "Caching",
"description": "Specifies the caching requirements",
"order": 1
},
"createOption": {
"type": "string",
"title": "Create Option",
"description": "Specifies how the virtual machine should be created",
"order": 2
},
"managedDisk": {
"$ref": "#/definitions/managedDisk",
"title": "Managed Disk",
"description": "Specified the identifier and optional storage account type for the disk",
"order": 3
},
"name": {
"type": "string",
"title": "Name",
"description": "Specifies the disk name",
"order": 4
},
"osType": {
"type": "string",
"title": "OS Type",
"description": "This property allows you to specify the type of the os that is included in the disk if creating a vm from user-image or a specialized vhd",
"order": 5
},
"vhd": {
"$ref": "#/definitions/vhd",
"title": "VHD",
"description": "Specifies the uri of the location in storage where the vhd for the virtual machine should be placed",
"order": 6
}
},
"definitions": {
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
}
}
},
"osProfile": {
"type": "object",
"title": "osProfile",
"properties": {
"adminPassword": {
"type": "string",
"title": "Admin Password",
"description": "Specifies the password of the administrator account",
"order": 1
},
"adminUsername": {
"type": "string",
"title": "Admin UserName",
"description": "Specifies the name of the administrator account",
"order": 2
},
"computerName": {
"type": "string",
"title": "Computer Name",
"description": "Specifies the host os name of the virtual machine",
"order": 3
},
"customData": {
"type": "string",
"title": "Custom Data",
"description": "Specifies a base-64 encoded string of custom data",
"order": 4
},
"linuxConfiguration": {
"$ref": "#/definitions/linuxConfiguration",
"title": "Linux Configuration",
"description": "Specifies the linux operating system settings on the virtual machine",
"order": 7
},
"secrets": {
"type": "array",
"title": "Secrets",
"description": "Specifies set of certificates that should be installed onto the virtual machine",
"items": {
"type": "object"
},
"order": 5
},
"windowsConfiguration": {
"$ref": "#/definitions/windowsConfiguration",
"title": "Windows Configuration",
"description": "Specifies windows operating system settings on the virtual machine",
"order": 6
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"linuxConfiguration": {
"type": "object",
"title": "linuxConfiguration",
"properties": {
"disablePasswordAuthentication": {
"type": "boolean",
"title": "Disable Password Authentication",
"description": "Specifies whether password authentication should be disabled",
"order": 1
},
"ssh": {
"$ref": "#/definitions/ssh",
"title": "SSH",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"order": 2
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
},
"windowsConfiguration": {
"type": "object",
"title": "windowsConfiguration",
"properties": {
"additionalUnattendContent": {
"$ref": "#/definitions/additionalUnattendContent",
"title": "Additional Unattend Content",
"description": "Specifies additional xml formatted information that can be included in the unattend.xml file, which is used by windows setup",
"order": 1
},
"enableAutomaticUpdates": {
"type": "boolean",
"title": "Enable Automatic Updates",
"description": "Indicates whether virtual machine is enabled for automatic updates",
"order": 2
},
"provisionVMAgent": {
"type": "boolean",
"title": "Provision VM Agent",
"description": "Indicates whether virtual machine agent should be provisioned on the virtual machine",
"order": 3
},
"winRM": {
"$ref": "#/definitions/winRM",
"title": "Win RM",
"description": "Specifies the windows remote management listeners, this enables remote windows powershell",
"order": 4
},
"winrRMListener": {
"$ref": "#/definitions/listeners",
"title": "WinrRM Listener",
"description": "Contains configuration settings for the windows remote management service on the virtual machine",
"order": 5
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
}
}
}
}
},
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
},
"storageProfile": {
"type": "object",
"title": "storageProfile",
"properties": {
"dataDisks": {
"type": "array",
"title": "Data Disks",
"description": "Specifies the parameters that are used to add a data disk to a virtual machine",
"items": {
"type": "object"
},
"order": 1
},
"imageReference": {
"$ref": "#/definitions/imageReference",
"title": "Image Reference",
"description": "Specifies information about the image to use",
"order": 2
},
"osDisk": {
"$ref": "#/definitions/osDisk",
"title": "OS Disk",
"description": "Specifies information about the operating system disk used by the virtual machine",
"order": 3
}
},
"definitions": {
"imageReference": {
"type": "object",
"title": "imageReference",
"properties": {
"id": {
"type": "string",
"title": "Image Reference",
"description": "Specifies the resource identifier of a virtual machine image in your subscription",
"order": 1
},
"offer": {
"type": "string",
"title": "Offer",
"description": "Specifies the offer of the platform image or marketplace image used to create the virtual machine",
"order": 2
},
"publisher": {
"type": "string",
"title": "Publisher",
"description": "Specifies the publisher of the platform image or marketplace image used to create the virtual machine",
"order": 3
},
"sku": {
"type": "string",
"title": "SKU",
"description": "Specifies the sku of the platform image or marketplace image used to create the virtual machine",
"order": 4
},
"version": {
"type": "string",
"title": "Version",
"description": "Specifies the version of the platform image or marketplace image used to create the virtual machine",
"order": 5
}
}
},
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"osDisk": {
"type": "object",
"title": "osDisk",
"properties": {
"caching": {
"type": "string",
"title": "Caching",
"description": "Specifies the caching requirements",
"order": 1
},
"createOption": {
"type": "string",
"title": "Create Option",
"description": "Specifies how the virtual machine should be created",
"order": 2
},
"managedDisk": {
"$ref": "#/definitions/managedDisk",
"title": "Managed Disk",
"description": "Specified the identifier and optional storage account type for the disk",
"order": 3
},
"name": {
"type": "string",
"title": "Name",
"description": "Specifies the disk name",
"order": 4
},
"osType": {
"type": "string",
"title": "OS Type",
"description": "This property allows you to specify the type of the os that is included in the disk if creating a vm from user-image or a specialized vhd",
"order": 5
},
"vhd": {
"$ref": "#/definitions/vhd",
"title": "VHD",
"description": "Specifies the uri of the location in storage where the vhd for the virtual machine should be placed",
"order": 6
}
},
"definitions": {
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
},
"windowsConfiguration": {
"type": "object",
"title": "windowsConfiguration",
"properties": {
"additionalUnattendContent": {
"$ref": "#/definitions/additionalUnattendContent",
"title": "Additional Unattend Content",
"description": "Specifies additional xml formatted information that can be included in the unattend.xml file, which is used by windows setup",
"order": 1
},
"enableAutomaticUpdates": {
"type": "boolean",
"title": "Enable Automatic Updates",
"description": "Indicates whether virtual machine is enabled for automatic updates",
"order": 2
},
"provisionVMAgent": {
"type": "boolean",
"title": "Provision VM Agent",
"description": "Indicates whether virtual machine agent should be provisioned on the virtual machine",
"order": 3
},
"winRM": {
"$ref": "#/definitions/winRM",
"title": "Win RM",
"description": "Specifies the windows remote management listeners, this enables remote windows powershell",
"order": 4
},
"winrRMListener": {
"$ref": "#/definitions/listeners",
"title": "WinrRM Listener",
"description": "Contains configuration settings for the windows remote management service on the virtual machine",
"order": 5
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
}
}
}
}
},
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
},
"storageProfile": {
"type": "object",
"title": "storageProfile",
"properties": {
"dataDisks": {
"type": "array",
"title": "Data Disks",
"description": "Specifies the parameters that are used to add a data disk to a virtual machine",
"items": {
"type": "object"
},
"order": 1
},
"imageReference": {
"$ref": "#/definitions/imageReference",
"title": "Image Reference",
"description": "Specifies information about the image to use",
"order": 2
},
"osDisk": {
"$ref": "#/definitions/osDisk",
"title": "OS Disk",
"description": "Specifies information about the operating system disk used by the virtual machine",
"order": 3
}
},
"definitions": {
"imageReference": {
"type": "object",
"title": "imageReference",
"properties": {
"id": {
"type": "string",
"title": "Image Reference",
"description": "Specifies the resource identifier of a virtual machine image in your subscription",
"order": 1
},
"offer": {
"type": "string",
"title": "Offer",
"description": "Specifies the offer of the platform image or marketplace image used to create the virtual machine",
"order": 2
},
"publisher": {
"type": "string",
"title": "Publisher",
"description": "Specifies the publisher of the platform image or marketplace image used to create the virtual machine",
"order": 3
},
"sku": {
"type": "string",
"title": "SKU",
"description": "Specifies the sku of the platform image or marketplace image used to create the virtual machine",
"order": 4
},
"version": {
"type": "string",
"title": "Version",
"description": "Specifies the version of the platform image or marketplace image used to create the virtual machine",
"order": 5
}
}
},
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"osDisk": {
"type": "object",
"title": "osDisk",
"properties": {
"caching": {
"type": "string",
"title": "Caching",
"description": "Specifies the caching requirements",
"order": 1
},
"createOption": {
"type": "string",
"title": "Create Option",
"description": "Specifies how the virtual machine should be created",
"order": 2
},
"managedDisk": {
"$ref": "#/definitions/managedDisk",
"title": "Managed Disk",
"description": "Specified the identifier and optional storage account type for the disk",
"order": 3
},
"name": {
"type": "string",
"title": "Name",
"description": "Specifies the disk name",
"order": 4
},
"osType": {
"type": "string",
"title": "OS Type",
"description": "This property allows you to specify the type of the os that is included in the disk if creating a vm from user-image or a specialized vhd",
"order": 5
},
"vhd": {
"$ref": "#/definitions/vhd",
"title": "VHD",
"description": "Specifies the uri of the location in storage where the vhd for the virtual machine should be placed",
"order": 6
}
},
"definitions": {
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
}
}
},
"tags": {
"type": "object",
"title": "tags",
"properties": {
"tags": {
"type": "object",
"title": "Tags",
"description": "Tags",
"order": 1
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
},
"windowsConfiguration": {
"type": "object",
"title": "windowsConfiguration",
"properties": {
"additionalUnattendContent": {
"$ref": "#/definitions/additionalUnattendContent",
"title": "Additional Unattend Content",
"description": "Specifies additional xml formatted information that can be included in the unattend.xml file, which is used by windows setup",
"order": 1
},
"enableAutomaticUpdates": {
"type": "boolean",
"title": "Enable Automatic Updates",
"description": "Indicates whether virtual machine is enabled for automatic updates",
"order": 2
},
"provisionVMAgent": {
"type": "boolean",
"title": "Provision VM Agent",
"description": "Indicates whether virtual machine agent should be provisioned on the virtual machine",
"order": 3
},
"winRM": {
"$ref": "#/definitions/winRM",
"title": "Win RM",
"description": "Specifies the windows remote management listeners, this enables remote windows powershell",
"order": 4
},
"winrRMListener": {
"$ref": "#/definitions/listeners",
"title": "WinrRM Listener",
"description": "Contains configuration settings for the windows remote management service on the virtual machine",
"order": 5
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
}
}
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
| 34.754717
| 173
| 0.420821
|
60ebc05e5609ce8525399cd37f1b06db93d31368
| 9,237
|
py
|
Python
|
train_tacotron.py
|
atoms18/WaveRNN
|
f38f33e7273aaa95566cbd4533ed4a737831592e
|
[
"MIT"
] | null | null | null |
train_tacotron.py
|
atoms18/WaveRNN
|
f38f33e7273aaa95566cbd4533ed4a737831592e
|
[
"MIT"
] | null | null | null |
train_tacotron.py
|
atoms18/WaveRNN
|
f38f33e7273aaa95566cbd4533ed4a737831592e
|
[
"MIT"
] | null | null | null |
import torch
from torch import optim
import torch.nn.functional as F
from utils import hparams as hp
from utils.display import *
from utils.dataset import get_tts_datasets
from utils.text.symbols import symbols
from utils.paths import Paths
from models.tacotron import Tacotron
import argparse
from utils import data_parallel_workaround
import os
from pathlib import Path
import time
import numpy as np
import sys
from utils.checkpoints import save_checkpoint, restore_checkpoint
from logger import Tacotron2Logger
def np_now(x: torch.Tensor): return x.detach().cpu().numpy()
def prepare_directories_and_logger(output_directory, log_directory):
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
os.chmod(output_directory, 0o775)
logger = Tacotron2Logger(os.path.join(output_directory, log_directory))
return logger
def main():
# Parse Arguments
parser = argparse.ArgumentParser(description='Train Tacotron TTS')
parser.add_argument('--force_train', '-f', action='store_true', help='Forces the model to train past total steps')
parser.add_argument('--force_gta', '-g', action='store_true', help='Force the model to create GTA features')
parser.add_argument('--force_cpu', '-c', action='store_true', help='Forces CPU-only training, even when in CUDA capable environment')
parser.add_argument('--hp_file', metavar='FILE', default='hparams.py', help='The file to use for the hyperparameters')
args = parser.parse_args()
hp.configure(args.hp_file) # Load hparams from file
paths = Paths(hp.data_path, hp.voc_model_id, hp.tts_model_id)
force_train = args.force_train
force_gta = args.force_gta
if not args.force_cpu and torch.cuda.is_available():
device = torch.device('cuda')
for session in hp.tts_schedule:
_, _, batch_size = session
if batch_size % torch.cuda.device_count() != 0:
raise ValueError('`batch_size` must be evenly divisible by n_gpus!')
else:
device = torch.device('cpu')
print('Using device:', device)
# Instantiate Tacotron Model
print('\nInitialising Tacotron Model...\n')
model = Tacotron(embed_dims=hp.tts_embed_dims,
num_chars=len(symbols),
encoder_dims=hp.tts_encoder_dims,
decoder_dims=hp.tts_decoder_dims,
decoder_R=hp.tts_R_train,
fft_bins=None,
postnet_dims=None,
encoder_K=hp.tts_encoder_K,
lstm_dims=hp.tts_lstm_dims,
postnet_K=None,
num_highways=hp.tts_num_highways,
dropout=hp.tts_dropout,
stop_threshold=hp.tts_stop_threshold).to(device)
optimizer = optim.Adam(model.parameters())
restore_checkpoint('tts', paths, model, optimizer, create_if_missing=True)
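# Gradient scaler for mixed-precision (AMP) training; it is passed to the training loop,
# which wraps the forward pass in torch.cuda.amp.autocast().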
scaler = torch.cuda.amp.GradScaler()
logger = prepare_directories_and_logger("/content/drive/MyDrive/Colab Notebooks/voiceclone/model_outputs/ljspeech_lsa_smooth_attention", "logdir")
if not force_gta:
for i, session in enumerate(hp.tts_schedule):
current_step = model.get_step()
lr, max_step, batch_size = session
training_steps = max_step - current_step
# Do we need to change to the next session?
if current_step >= max_step:
# Are there no further sessions than the current one?
if i == len(hp.tts_schedule)-1:
# There are no more sessions. Check if we force training.
if force_train:
# Don't finish the loop - train forever
training_steps = 999_999_999
else:
# We have completed training. Breaking here is the same as continuing.
break
else:
# There is a following session, go to it
continue
model.r = hp.tts_R_train
simple_table([(f'Steps with r={hp.tts_R_train}', str(training_steps//1000) + 'k Steps'),
('Batch Size', batch_size),
('Learning Rate', lr),
('Outputs/Step (r)', model.r)])
train_set, attn_example = get_tts_datasets(paths.data, batch_size, hp.tts_R_train)
tts_train_loop(paths, model, scaler, logger, optimizer, train_set, lr, training_steps, attn_example)
print('Training Complete.')
print('To continue training increase tts_total_steps in hparams.py or use --force_train\n')
print('Creating Ground Truth Aligned Dataset...\n')
train_set, attn_example = get_tts_datasets(paths.data, 8, model.r)
create_gta_features(model, train_set, paths.gta)
print('\n\nYou can now train WaveRNN on GTA features - use python train_wavernn.py --gta\n')
def tts_train_loop(paths: Paths, model: Tacotron, scaler, logger, optimizer, train_set, lr, train_steps, attn_example):
device = next(model.parameters()).device # use same device as model parameters
for g in optimizer.param_groups: g['lr'] = lr
duration = 0
total_iters = len(train_set)
epochs = train_steps // total_iters + 1
for e in range(1, epochs+1):
start = time.time()
running_loss = 0
# Perform 1 epoch
for i, (x, wav, ids, _, stop_targets) in enumerate(train_set, 1):
x, wav = x.to(device), wav.to(device)
stop_targets = stop_targets.to(device)
# print(f"This Iteration\'s Total Steps: {wav.size(2)//model.r}\n")
optimizer.zero_grad()
with torch.cuda.amp.autocast():
# Parallelize the model across GPUs using a workaround for a Python bug
if device.type == 'cuda' and torch.cuda.device_count() > 1:
logplists, logdetlosts, attention, stop_outputs = data_parallel_workaround(model, x, wav)
else:
logplists, logdetlosts, attention, stop_outputs = model(x, wav)
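# Loss of the flow-based decoder: negative log-likelihood (log-probability plus
# log-determinant terms), divided by model.decoder_K and then averaged.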
nll = -logplists - logdetlosts
nll = nll / model.decoder_K
nll = nll.mean()
stop_loss = F.binary_cross_entropy_with_logits(stop_outputs, stop_targets)
loss = nll + stop_loss
scaler.scale(loss).backward()
if hp.tts_clip_grad_norm is not None:
grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), hp.tts_clip_grad_norm)
if np.isnan(grad_norm.cpu()):
print('grad_norm was NaN!')
scaler.step(optimizer)
scaler.update()
running_loss += loss.item()
avg_loss = running_loss / i
prev_duration = duration
duration = (time.time() - start)
speed = i / duration
step = model.get_step()
# k = step // 1000
if step % hp.tts_checkpoint_every == 0 or step == 1:
ckpt_name = f'taco_step{step}'
save_checkpoint('tts', paths, model, optimizer,
name=ckpt_name, is_silent=True)
logger.log_training(loss.item(), grad_norm, lr, duration - prev_duration, step, None, None)
logger.log_validation(None, None, stop_targets, [stop_outputs, attention], step)
with torch.no_grad():
# y_test = torch.rand(1, 5, 96*2).to(device)
zlast, _, _, zlist = model.decoder.flows(wav[0, :, 0].view(1, 10//2, 96*2))
abc = model.decoder.flows.reverse([zlist[-1]], reconstruct=True)
print("Reverse-Groundtruth diff: ", (wav[0, :, 0] - abc[0]).mean())
if attn_example in ids:
idx = ids.index(attn_example)
save_attention(np_now(attention[idx][:, :160]), paths.tts_attention/f'{step}')
# save_spectrogram(np_now(m2_hat[idx]), paths.tts_mel_plot/f'{step}', 600)
msg = f'|Epoch: {e}/{epochs} ({i}/{total_iters}) | Avg Loss: {avg_loss:#.4} | NLL: {loss.item():#.4} | {speed:#.2} iteration/s | Step: {step} | '
print(msg)
# Must save latest optimizer state to ensure that resuming training
# doesn't produce artifacts
save_checkpoint('tts', paths, model, optimizer, is_silent=True)
model.log(paths.tts_log, msg)
print(' ')
def create_gta_features(model: Tacotron, train_set, save_path: Path):
device = next(model.parameters()).device # use same device as model parameters
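# Ground-truth-aligned (GTA) features: the trained Tacotron is run over the training set
# conditioned on the real mels, and its outputs are saved for vocoder (WaveRNN) training.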
iters = len(train_set)
for i, (x, mels, ids, mel_lens) in enumerate(train_set, 1):
x, mels = x.to(device), mels.to(device)
with torch.no_grad(): _, gta, _ = model(x, mels)
gta = gta.cpu().numpy()
for j, item_id in enumerate(ids):
mel = gta[j][:, :mel_lens[j]]
mel = (mel + 4) / 8
np.save(save_path/f'{item_id}.npy', mel, allow_pickle=False)
bar = progbar(i, iters)
msg = f'{bar} {i}/{iters} Batches '
stream(msg)
if __name__ == "__main__":
main()
| 40.16087
| 157
| 0.60799
|
6706643443cdbf2a0884eedda5a2d823ecd23b82
| 18,881
|
py
|
Python
|
code/YOLO101_1Resolusi_80x60_TrainCDV2_OK1.py
|
mlcv-lab/mr3dcapsnet
|
d0b37ca085073257b0c485210ec92a5c6e7d9bb6
|
[
"Apache-2.0"
] | null | null | null |
code/YOLO101_1Resolusi_80x60_TrainCDV2_OK1.py
|
mlcv-lab/mr3dcapsnet
|
d0b37ca085073257b0c485210ec92a5c6e7d9bb6
|
[
"Apache-2.0"
] | null | null | null |
code/YOLO101_1Resolusi_80x60_TrainCDV2_OK1.py
|
mlcv-lab/mr3dcapsnet
|
d0b37ca085073257b0c485210ec92a5c6e7d9bb6
|
[
"Apache-2.0"
] | 1
|
2021-02-21T16:07:39.000Z
|
2021-02-21T16:07:39.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 14 13:24:34 2017
@author: user
"""
#from keras.preprocessing.image import ImageDataGenerator
#from keras.models import Sequential
#from keras.layers.core import Dense, Dropout, Activation, Flatten, SpatialDropout3D, Merge
#from keras.layers.convolutional import Convolution3D, MaxPooling3D
from keras.optimizers import SGD, RMSprop, Adam
from keras.utils import np_utils, generic_utils
import os
import pandas as pd
import matplotlib
from keras.callbacks import ModelCheckpoint
import keras.callbacks
import matplotlib.pyplot as plt
import numpy as np
import cv2
from sklearn.cross_validation import train_test_split
from sklearn import cross_validation
import csv
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten, SpatialDropout3D
from keras.layers.convolutional import Convolution3D, MaxPooling3D
from sklearn.metrics import classification_report,confusion_matrix,cohen_kappa_score,roc_auc_score
#from keras.regularizers import l2, l1, WeightRegularizer
from keras.layers.normalization import BatchNormalization
import gc
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0, 1"
def getLabelFromIdx(x):
return {
1 : 'ApplyEyeMakeup',
2 : 'ApplyLipstick',
3 : 'Archery',
4 : 'BabyCrawling',
5 : 'BalanceBeam',
6 : 'BandMarching',
7 : 'BaseballPitch',
8 : 'Basketball',
9 : 'BasketballDunk',
10 : 'BenchPress',
11 : 'Biking',
12 : 'Billiards',
13 : 'BlowDryHair',
14 : 'BlowingCandles',
15 : 'BodyWeightSquats',
16 : 'Bowling',
17 : 'BoxingPunchingBag',
18 : 'BoxingSpeedBag',
19 : 'BreastStroke',
20 : 'BrushingTeeth',
21 : 'CleanAndJerk',
22 : 'CliffDiving',
23 : 'CricketBowling',
24 : 'CricketShot',
25 : 'CuttingInKitchen',
26 : 'Diving',
27 : 'Drumming',
28 : 'Fencing',
29 : 'FieldHockeyPenalty',
30 : 'FloorGymnastics',
31 : 'FrisbeeCatch',
32 : 'FrontCrawl',
33 : 'GolfSwing',
34 : 'Haircut',
35 : 'Hammering',
36 : 'HammerThrow',
37 : 'HandstandPushups',
38 : 'HandstandWalking',
39 : 'HeadMassage',
40 : 'HighJump',
41 : 'HorseRace',
42 : 'HorseRiding',
43 : 'HulaHoop',
44 : 'IceDancing',
45 : 'JavelinThrow',
46 : 'JugglingBalls',
47 : 'JumpingJack',
48 : 'JumpRope',
49 : 'Kayaking',
50 : 'Knitting',
51 : 'LongJump',
52 : 'Lunges',
53 : 'MilitaryParade',
54 : 'Mixing',
55 : 'MoppingFloor',
56 : 'Nunchucks',
57 : 'ParallelBars',
58 : 'PizzaTossing',
59 : 'PlayingCello',
60 : 'PlayingDaf',
61 : 'PlayingDhol',
62 : 'PlayingFlute',
63 : 'PlayingGuitar',
64 : 'PlayingPiano',
65 : 'PlayingSitar',
66 : 'PlayingTabla',
67 : 'PlayingViolin',
68 : 'PoleVault',
69 : 'PommelHorse',
70 : 'PullUps',
71 : 'Punch',
72 : 'PushUps',
73 : 'Rafting',
74 : 'RockClimbingIndoor',
75 : 'RopeClimbing',
76 : 'Rowing',
77 : 'SalsaSpin',
78 : 'ShavingBeard',
79 : 'Shotput',
80 : 'SkateBoarding',
81 : 'Skiing',
82 : 'Skijet',
83 : 'SkyDiving',
84 : 'SoccerJuggling',
85 : 'SoccerPenalty',
86 : 'StillRings',
87 : 'SumoWrestling',
88 : 'Surfing',
89 : 'Swing',
90 : 'TableTennisShot',
91 : 'TaiChi',
92 : 'TennisSwing',
93 : 'ThrowDiscus',
94 : 'TrampolineJumping',
95 : 'Typing',
96 : 'UnevenBars',
97 : 'VolleyballSpiking',
98 : 'WalkingWithDog',
99 : 'WallPushups',
100 : 'WritingOnBoard',
101 : 'YoYo'
}.get(x, "----")
R1x = 60
R1y = 90
R2x = 2
R2y = 3
R3x = 2
R3y = 3
RDepth = 13
kcv = 1
vartuning = '1Resolusi_90x60'
filenya = 'YOLO_U_V1_' + vartuning + '.csv'
with open(filenya, 'w') as out_file:
writer = csv.writer(out_file, lineterminator = '\n')
grup = []
grup.append('Blok ke-')
grup.append('Skor Akurasi')
grup.append('Skor Kappa')
writer.writerows([grup])
grup = []
X_train_R1 = []
X_train_R2 = []
X_train_R3 = []
labels_train = []
count_train = 0
X_test_R1 = []
X_test_R2 = []
X_test_R3 = []
labels_test = []
count_test = 0
# read the test and training clips for every action class
for labelIdx in range(1, 101):
print labelIdx
listing = os.listdir('TestData/' + getLabelFromIdx(labelIdx) + '/')
count_pretesting = 0
for vid in listing:
count_pretesting += 1
#
# if count_pretesting > 5:
# break
vid = 'TestData/' + getLabelFromIdx(labelIdx) + '/' +vid
framesR1 = []
framesR2 = []
framesR3 = []
cap = cv2.VideoCapture(vid)
fps = cap.get(5)
#print "Frames per second using video.get(cv2.cv.CV_CAP_PROP_FPS): {0}".format(fps)
#test
frame = []
ret, frame = cap.read()
#print frame.shape
if frame is None:
print "image not readable"
break
count = 0
kondisi = True
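# Keep every 4th frame until 52 frames have been read, giving RDepth (13) frames per clip,
# each resized to the three configured resolutions.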
while kondisi == True:
ret, frame = cap.read()
if frame is None:
print "skipping vid"
break
count += 1
if not((count)%4 == 0):
continue
frameR1 = cv2.resize(frame, (R1x, R1y), interpolation=cv2.INTER_AREA)
framesR1.append(frameR1)
frameR2 = cv2.resize(frame, (R2x, R2y), interpolation=cv2.INTER_AREA)
framesR2.append(frameR2)
frameR3 = cv2.resize(frame, (R3x, R3y), interpolation=cv2.INTER_AREA)
framesR3.append(frameR3)
#plt.imshow(gray, cmap = plt.get_cmap('gray'))
#plt.xticks([]), plt.yticks([]) # to hide tick values on X and Y axis
#plt.show()
#cv2.imshow('frame',gray)
if count == 52:
kondisi = False
if cv2.waitKey(1) & 0xFF == ord('q'):
break
if not(count == 52):
print "vid not saved"
continue
count_test += 1
label = labelIdx-1
labels_test.append(label)
cap.release()
cv2.destroyAllWindows()
inputR1=np.array(framesR1)
inputR2=np.array(framesR2)
inputR3=np.array(framesR3)
#print input.shape
iptR1=inputR1
iptR2=inputR2
iptR3=inputR3
#print ipt.shape
X_test_R1.append(iptR1)
X_test_R2.append(iptR2)
X_test_R3.append(iptR3)
listing = os.listdir('TrainData/' + getLabelFromIdx(labelIdx) + '/')
count_pretesting = 0
for vid in listing:
count_pretesting += 1
# if count_pretesting > 5:
# break
vid = 'TrainData/' + getLabelFromIdx(labelIdx) + '/' +vid
framesR1 = []
framesR2 = []
framesR3 = []
cap = cv2.VideoCapture(vid)
fps = cap.get(5)
#print "Frames per second using video.get(cv2.cv.CV_CAP_PROP_FPS): {0}".format(fps)
#test
frame = []
ret, frame = cap.read()
#print frame.shape
if frame is None:
print "image not readable"
break
count = 0
kondisi = True
while kondisi == True:
ret, frame = cap.read()
if frame is None:
print "skipping vid"
break
count += 1
if not((count)%4 == 0):
continue
frameR1 = cv2.resize(frame, (R1x, R1y), interpolation=cv2.INTER_AREA)
framesR1.append(frameR1)
frameR2 = cv2.resize(frame, (R2x, R2y), interpolation=cv2.INTER_AREA)
framesR2.append(frameR2)
frameR3 = cv2.resize(frame, (R3x, R3y), interpolation=cv2.INTER_AREA)
framesR3.append(frameR3)
#plt.imshow(gray, cmap = plt.get_cmap('gray'))
#plt.xticks([]), plt.yticks([]) # to hide tick values on X and Y axis
#plt.show()
#cv2.imshow('frame',gray)
if count == 52:
kondisi = False
if cv2.waitKey(1) & 0xFF == ord('q'):
break
if not(count == 52):
print "vid not saved"
continue
count_train += 1
label = labelIdx-1
labels_train.append(label)
cap.release()
cv2.destroyAllWindows()
inputR1=np.array(framesR1)
inputR2=np.array(framesR2)
inputR3=np.array(framesR3)
#print input.shape
iptR1=inputR1
iptR2=inputR2
iptR3=inputR3
#print ipt.shape
X_train_R1.append(iptR1)
X_train_R2.append(iptR2)
X_train_R3.append(iptR3)
# formatting data
X_train_R1_array = (X_train_R1)
X_train_R2_array = (X_train_R2)
X_train_R3_array = (X_train_R3)
labels_train_array = np.array(labels_train)
Y_train = np_utils.to_categorical(labels_train_array, 101)
del X_train_R1
del X_train_R2
del X_train_R3
gc.collect()
X_test_R1_array = (X_test_R1)
X_test_R2_array = (X_test_R2)
X_test_R3_array = (X_test_R3)
labels_test_array = np.array(labels_test)
Y_test = np_utils.to_categorical(labels_test_array, 101)
del X_test_R1
del X_test_R2
del X_test_R3
gc.collect()
test_set_R1 = np.zeros((count_test, RDepth, R1y,R1x,3))
test_set_R2 = np.zeros((count_test, RDepth, R2y,R2x,3))
test_set_R3 = np.zeros((count_test, RDepth, R3y,R3x,3))
for h in xrange(count_test):
test_set_R1[h][:][:][:][:]=X_test_R1_array[h]
test_set_R2[h][:][:][:][:]=X_test_R2_array[h]
test_set_R3[h][:][:][:][:]=X_test_R3_array[h]
train_set_R1 = np.zeros((count_train, RDepth, R1y,R1x,3))
train_set_R2 = np.zeros((count_train, RDepth, R2y,R2x,3))
train_set_R3 = np.zeros((count_train, RDepth, R3y,R3x,3))
for h in xrange(count_train):
train_set_R1[h][:][:][:][:]=X_train_R1_array[h]
train_set_R2[h][:][:][:][:]=X_train_R2_array[h]
train_set_R3[h][:][:][:][:]=X_train_R3_array[h]
del X_test_R1_array
del X_test_R2_array
del X_test_R3_array
gc.collect()
del X_train_R1_array
del X_train_R2_array
del X_train_R3_array
gc.collect()
train_set_R1 = train_set_R1.astype('float32')
train_set_R1 -= 127.5
train_set_R1 /=127.5
train_set_R2 = train_set_R2.astype('float32')
train_set_R2 -= 127.5
train_set_R2 /=127.5
train_set_R3 = train_set_R3.astype('float32')
train_set_R3 -= 127.5
train_set_R3 /=127.5
test_set_R1 = test_set_R1.astype('float32')
test_set_R1 -= 127.5
test_set_R1 /=127.5
test_set_R2 = test_set_R2.astype('float32')
test_set_R2 -= 127.5
test_set_R2 /=127.5
test_set_R3 = test_set_R3.astype('float32')
test_set_R3 -= 127.5
test_set_R3 /=127.5
#%% define the model
# # Parameter tuning
# jumEpoch = 25
# nb_classes = 8
# # Branch A
# filterNumL1 = 16 # number of filters in layer 1
# filterSizeXYL1 = 5 # filter size in the spatial dimensions
# filterSizeTL1 = 3 # filter size in the temporal dimension
#
# poolingSizeXYL1 = 3
# poolingSizeTL1 = 1
# poolingStrideXYL1 = 1
# poolingStrideTL1 = 1 # layer 1 pooling parameters
# # Branch B
# filterNumL1B = 32 # number of filters in branch B, layer 1
# filterSizeXYL1B = 3 # filter size in the spatial dimensions
# filterSizeTL1B = 3 # filter size in the temporal dimension
#
# poolingSizeXYL1B = 3
# poolingSizeTL1B = 1
# poolingStrideXYL1B = 1
# poolingStrideTL1B = 1 # branch B, layer 1 pooling parameters
# Define model
# modelA = Sequential()
# modelA.add(Convolution3D(filterNumL1,kernel_dim1=filterSizeXYL1, kernel_dim2=filterSizeXYL1, kernel_dim3=filterSizeTL1, input_shape=(10, 20, 30, 3), activation='relu', dim_ordering='tf'))
# modelA.add(MaxPooling3D(pool_size=(poolingSizeXYL1, poolingSizeXYL1, poolingSizeTL1), dim_ordering='tf'))
# modelA.add(SpatialDropout3D(0.4))
# modelA.add(Flatten())
#
# modelB = Sequential()
# modelB.add(Convolution3D(filterNumL1B,kernel_dim1=filterSizeXYL1B, kernel_dim2=filterSizeXYL1B, kernel_dim3=filterSizeTL1B, input_shape=(10, 20, 30, 3), activation='relu', dim_ordering='tf'))
# modelB.add(MaxPooling3D(pool_size=(poolingSizeXYL1B, poolingSizeXYL1B, poolingSizeTL1B), dim_ordering='tf'))
# modelB.add(SpatialDropout3D(0.4))
# modelB.add(Flatten())
#
#
# model = Sequential()
# model.add(Merge([modelA, modelB], mode='concat'))
# model.add(Dense(paramuji, init='normal', activation='relu'))
#
# model.add(Dropout(0.4))
#
# model.add(Dense(nb_classes,init='normal'))
#
# model.add(Activation('softmax'))
# model.summary()
# model.compile(loss='categorical_crossentropy', optimizer='RMSprop', metrics = ["accuracy"])
#
#
# # Train the model
#
# hist = model.fit([train_set, train_set], Y_train, validation_data=([test_set, test_set], Y_test),
# batch_size=15, nb_epoch = jumEpoch, show_accuracy=True, shuffle=True, verbose = 0)
#
# # Evaluate the model
# score = model.evaluate([test_set, test_set], Y_test, batch_size=15, show_accuracy=True)
#
# Define model
# Parameter tuning
jumEpoch = 250
nb_classes = 101
filterNumL1 = 64  # number of filters in layer 1
filterSizeXYL1 = 3  # filter size in the spatial dimensions
filterSizeTL1 = 3  # filter size in the temporal dimension
poolingSizeXYL1 = 2
poolingSizeTL1 = 2
poolingStrideXYL1 = 1
poolingStrideTL1 = 1  # layer 1 pooling parameters
filterNumL2 = 64  # number of filters in layer 2
filterSizeXYL2 = 3  # filter size in the spatial dimensions
filterSizeTL2 = 3  # filter size in the temporal dimension
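# Build the 3D CNN: three Convolution3D + MaxPooling3D blocks with spatial dropout,
# flattened into a softmax classifier over the 101 UCF-101 action classes.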
model = Sequential()
model.add(Convolution3D(filterNumL1,kernel_dim1=filterSizeTL1, kernel_dim2=filterSizeXYL1, kernel_dim3=filterSizeXYL1, input_shape=(RDepth, R1y, R1x,3), activation='relu', dim_ordering='tf'))
#model.add(BatchNormalization(epsilon=0.001, axis=-1, momentum=0.99, weights=None, beta_init='zero', gamma_init='one', gamma_regularizer=None, beta_regularizer=None))
model.add(MaxPooling3D(pool_size=(1, poolingSizeXYL1, poolingSizeTL1), dim_ordering='tf'))
model.add(SpatialDropout3D(0.3))
model.add(Convolution3D(filterNumL2,kernel_dim1=filterSizeTL2, kernel_dim2=filterSizeXYL2, kernel_dim3=filterSizeXYL2, activation='relu', dim_ordering='tf'))
# model.add(Convolution3D(filterNumL2,kernel_dim1=1, kernel_dim2=3, kernel_dim3=3, activation='relu', dim_ordering='tf'))
#model.add(BatchNormalization(epsilon=0.001, axis=-1, momentum=0.99, weights=None, beta_init='zero', gamma_init='one', gamma_regularizer=None, beta_regularizer=None))
model.add(MaxPooling3D(pool_size=(1, poolingSizeXYL1, poolingSizeTL1), dim_ordering='tf'))
model.add(SpatialDropout3D(0.3))
model.add(Convolution3D(128,kernel_dim1=5, kernel_dim2=3, kernel_dim3=3, activation='relu', dim_ordering='tf'))
# model.add(Convolution3D(128,kernel_dim1=1, kernel_dim2=3, kernel_dim3=3, activation='relu', dim_ordering='tf'))
#model.add(BatchNormalization(epsilon=0.001, axis=-1, momentum=0.99, weights=None, beta_init='zero', gamma_init='one', gamma_regularizer=None, beta_regularizer=None))
model.add(MaxPooling3D(pool_size=(1, poolingSizeXYL1, poolingSizeTL1), dim_ordering='tf'))
model.add(SpatialDropout3D(0.5))
model.add(Flatten())
model.add(BatchNormalization(epsilon=0.001, axis=-1, momentum=0.99, weights=None, beta_init='zero', gamma_init='one', gamma_regularizer=None, beta_regularizer=None))
model.add(Dense(nb_classes,init='normal'))
model.add(Activation('softmax'))
model.summary()
model.compile(loss='categorical_crossentropy', optimizer='Adam', metrics = ['acc'])
# Train the model
nama_filenya = "weights_" + vartuning +"_.hdf5"
checkpointer = ModelCheckpoint(filepath=nama_filenya, monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=True)
hist = model.fit(train_set_R1, Y_train, validation_data=(test_set_R1, Y_test),
batch_size=8, nb_epoch = jumEpoch, shuffle=True, verbose = 1, callbacks = [checkpointer])
# Evaluate the model
# load best model
# Load the best checkpointed weights back into the trained network before evaluating
model2 = model
model2.load_weights(nama_filenya)
score = model2.evaluate(test_set_R1, Y_test, batch_size=8)
print "Skor Model:"
print score[1]
Y_pred = model2.predict_classes(test_set_R1, batch_size = 8)
grup.append(kcv)
grup.append(score[1])
cohennya = cohen_kappa_score(np.argmax(Y_test,axis=1), Y_pred)
print "kohen kappa:"
print cohennya
grup.append(cohennya)
writer.writerows([grup])
| 35.225746
| 213
| 0.559557
|
93f7abb6ce10700486b64f74ff2904abb218ef3e
| 976
|
py
|
Python
|
mnist-ml-beginners.py
|
arpitshah101/ImageClassifier
|
b1b75b81f5ca9b1c3373a75c70588d92aff67eef
|
[
"MIT"
] | 1
|
2021-01-02T20:22:36.000Z
|
2021-01-02T20:22:36.000Z
|
mnist-ml-beginners.py
|
arpitshah101/ImageClassifier
|
b1b75b81f5ca9b1c3373a75c70588d92aff67eef
|
[
"MIT"
] | null | null | null |
mnist-ml-beginners.py
|
arpitshah101/ImageClassifier
|
b1b75b81f5ca9b1c3373a75c70588d92aff67eef
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
# Training
y_ = tf.placeholder(tf.float32, [None, 10])
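# Cross-entropy loss between the predicted distribution y and the one-hot labels y_,
# minimised with plain gradient descent (learning rate 0.5).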
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ *
tf.log(y), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
for _ in range(1000):
batch_xs, batch_ys = mnist.train.next_batch(100)
sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
| 34.857143
| 82
| 0.719262
|
9ccdb4307a3f387e7f783de9333df594aa59313f
| 786
|
py
|
Python
|
Example 03 - ORM/address.py
|
tgroven/PythonSqlAlchemyLunchAndLearn
|
eebcb74cbf5db007ee9f2597303f4fe7ff1f5a44
|
[
"MIT"
] | null | null | null |
Example 03 - ORM/address.py
|
tgroven/PythonSqlAlchemyLunchAndLearn
|
eebcb74cbf5db007ee9f2597303f4fe7ff1f5a44
|
[
"MIT"
] | null | null | null |
Example 03 - ORM/address.py
|
tgroven/PythonSqlAlchemyLunchAndLearn
|
eebcb74cbf5db007ee9f2597303f4fe7ff1f5a44
|
[
"MIT"
] | null | null | null |
from sqlalchemy import Column, Integer, String, Boolean, DateTime, ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy.dialects.mssql import UNIQUEIDENTIFIER
#from customer import Customer
from base import Base
class Address(Base):
__tablename__ = 'address'
__table_args__ = {'schema': 'SalesLT'}
address_id = Column('AddressID', Integer, primary_key=True)
address_line_1 = Column('AddressLine1', String(60))
address_line_2 = Column('AddressLine2', String(60))
city = Column(String(30))
state_province = Column('StateProvince', String(50))
country_region = Column('CountryRegion', String(50))
postal_code = Column('PostalCode', String(15))
rowguid = Column(UNIQUEIDENTIFIER)
modified_date = Column('ModifiedDate', DateTime)
| 37.428571
| 77
| 0.744275
|
7fe71983df4dcc647134af527937152b68d8f23d
| 1,408
|
py
|
Python
|
tests/integration/link/test_absolute_path.py
|
Joeyt1008/dash-core-components
|
c806ea66eb5b674ef84fd9efae01cfa5292f143e
|
[
"MIT"
] | null | null | null |
tests/integration/link/test_absolute_path.py
|
Joeyt1008/dash-core-components
|
c806ea66eb5b674ef84fd9efae01cfa5292f143e
|
[
"MIT"
] | null | null | null |
tests/integration/link/test_absolute_path.py
|
Joeyt1008/dash-core-components
|
c806ea66eb5b674ef84fd9efae01cfa5292f143e
|
[
"MIT"
] | null | null | null |
import pytest
import dash
from dash.dependencies import Input, Output
import dash_core_components as dcc
import dash_html_components as html
@pytest.mark.DCC782
def test_lipa001_path(dash_dcc):
app = dash.Dash(__name__)
app.layout = html.Div(
[
dcc.Link("Relative Path", id="link1", href="google.com"),
dcc.Location(id="url", refresh=False),
html.Div(id="content"),
]
)
@app.callback(Output("content", "children"), [Input("url", "pathname")])
def display_children(children):
return children
dash_dcc.start_server(app)
dash_dcc.wait_for_element("#link1").click()
dash_dcc.wait_for_text_to_equal("#content", "/google.com")
assert dash_dcc.get_logs() == []
@pytest.mark.DCC782
def test_lipa002_path(dash_dcc):
app = dash.Dash(__name__)
app.layout = html.Div(
[
dcc.Link(
children="Absolute Path",
id="link1",
href="https://google.com",
refresh=True,
),
dcc.Location(id="url", refresh=False),
]
)
dash_dcc.start_server(app)
dash_dcc.wait_for_element("#link1").click()
location = dash_dcc.driver.execute_script(
"""
return window.location.href
"""
)
assert location == "https://www.google.com/"
assert dash_dcc.get_logs() == []
| 23.864407
| 76
| 0.59517
|
6131be5603488522710731fb2a10b9750d62a348
| 1,223
|
py
|
Python
|
src/Data/Database.py
|
andreisalvador/bills-management-telegram-bot
|
ac0ae11cd6196ab8940c3d87dc470018d648f757
|
[
"MIT"
] | null | null | null |
src/Data/Database.py
|
andreisalvador/bills-management-telegram-bot
|
ac0ae11cd6196ab8940c3d87dc470018d648f757
|
[
"MIT"
] | null | null | null |
src/Data/Database.py
|
andreisalvador/bills-management-telegram-bot
|
ac0ae11cd6196ab8940c3d87dc470018d648f757
|
[
"MIT"
] | null | null | null |
import datetime
from sqlalchemy import create_engine, Column, String, Enum, Numeric, SmallInteger, Boolean, ForeignKey, Date, \
BigInteger
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
import os
from src.Enums.PeriodEnum import PeriodEnum
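# The engine reads its connection string from the CONNECTION_STRING environment variable;
# echo=True makes SQLAlchemy log every SQL statement it emits.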
engine = create_engine(os.environ['CONNECTION_STRING'], echo=True)
Session = sessionmaker(bind=engine)
session = Session()
Base = declarative_base()
class Bill(Base):
__tablename__ = 'Bills'
id = Column(BigInteger, primary_key=True)
name = Column(String)
value = Column(Numeric)
expiration_day = Column(SmallInteger)
expiration_period = Column(Enum(PeriodEnum))
user_id = Column(BigInteger)
created_at = Column(Date, default=datetime.datetime.now)  # pass the callable so each insert gets a fresh timestamp
class BillHistory(Base):
__tablename__ = 'BillsHistory'
id = Column(BigInteger, primary_key=True)
expiration_date = Column(Date, nullable=False)
payment_date = Column(Date, nullable=True)
is_paid = Column(Boolean, default=False)
is_value_changed = Column(Boolean, default=False)
value_payed = Column(Numeric, default=0)
bill_id = Column(BigInteger, ForeignKey('Bills.id'))
Base.metadata.create_all(engine)
| 29.829268
| 111
| 0.751431
|
5baf196f756ee87bcc1203548c3295ce857d24d2
| 271
|
py
|
Python
|
setup.py
|
old-pinky/AioPaperScroll-SDK
|
f5dbd2edcbeadedfaa3f846db7610ab6cb018bef
|
[
"MIT"
] | null | null | null |
setup.py
|
old-pinky/AioPaperScroll-SDK
|
f5dbd2edcbeadedfaa3f846db7610ab6cb018bef
|
[
"MIT"
] | null | null | null |
setup.py
|
old-pinky/AioPaperScroll-SDK
|
f5dbd2edcbeadedfaa3f846db7610ab6cb018bef
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
setup(
name='aiopaperscroll',
version='1.0.0',
packages=find_packages(),
install_requires=[
'loguru',
# note: 'asyncio' is part of the standard library and is not listed as a dependency
'aiohttp'],
url='https://github.com/old-pinky/AioPaperScroll-SDK'
)
| 19.357143
| 57
| 0.627306
|
b33bc428ecb4566db7fba0bc10f9ebe1bbb9eb22
| 16,205
|
py
|
Python
|
tools/wptrunner/wptrunner/executors/base.py
|
servo-wpt-sync/web-platform-tests
|
56e2df852354bc2b89e6d17a9dbafd280d24203c
|
[
"BSD-3-Clause"
] | null | null | null |
tools/wptrunner/wptrunner/executors/base.py
|
servo-wpt-sync/web-platform-tests
|
56e2df852354bc2b89e6d17a9dbafd280d24203c
|
[
"BSD-3-Clause"
] | null | null | null |
tools/wptrunner/wptrunner/executors/base.py
|
servo-wpt-sync/web-platform-tests
|
56e2df852354bc2b89e6d17a9dbafd280d24203c
|
[
"BSD-3-Clause"
] | null | null | null |
import hashlib
import httplib
import os
import threading
import traceback
import socket
import urlparse
from abc import ABCMeta, abstractmethod
from ..testrunner import Stop
here = os.path.split(__file__)[0]
# Extra timeout to use after internal test timeout at which the harness
# should force a timeout
extra_timeout = 5 # seconds
def executor_kwargs(test_type, server_config, cache_manager, **kwargs):
timeout_multiplier = kwargs["timeout_multiplier"]
if timeout_multiplier is None:
timeout_multiplier = 1
executor_kwargs = {"server_config": server_config,
"timeout_multiplier": timeout_multiplier,
"debug_info": kwargs["debug_info"]}
if test_type == "reftest":
executor_kwargs["screenshot_cache"] = cache_manager.dict()
if test_type == "wdspec":
executor_kwargs["binary"] = kwargs.get("binary")
executor_kwargs["webdriver_binary"] = kwargs.get("webdriver_binary")
executor_kwargs["webdriver_args"] = kwargs.get("webdriver_args")
return executor_kwargs
def strip_server(url):
"""Remove the scheme and netloc from a url, leaving only the path and any query
or fragment.
url - the url to strip
e.g. http://example.org:8000/tests?id=1#2 becomes /tests?id=1#2"""
url_parts = list(urlparse.urlsplit(url))
url_parts[0] = ""
url_parts[1] = ""
return urlparse.urlunsplit(url_parts)
class TestharnessResultConverter(object):
harness_codes = {0: "OK",
1: "ERROR",
2: "TIMEOUT"}
test_codes = {0: "PASS",
1: "FAIL",
2: "TIMEOUT",
3: "NOTRUN"}
def __call__(self, test, result):
"""Convert a JSON result into a (TestResult, [SubtestResult]) tuple"""
result_url, status, message, stack, subtest_results = result
assert result_url == test.url, ("Got results from %s, expected %s" %
(result_url, test.url))
harness_result = test.result_cls(self.harness_codes[status], message)
return (harness_result,
[test.subtest_result_cls(name, self.test_codes[status], message, stack)
for name, status, message, stack in subtest_results])
testharness_result_converter = TestharnessResultConverter()
def reftest_result_converter(self, test, result):
return (test.result_cls(result["status"], result["message"],
extra=result.get("extra")), [])
def pytest_result_converter(self, test, data):
harness_data, subtest_data = data
if subtest_data is None:
subtest_data = []
harness_result = test.result_cls(*harness_data)
subtest_results = [test.subtest_result_cls(*item) for item in subtest_data]
return (harness_result, subtest_results)
class ExecutorException(Exception):
def __init__(self, status, message):
self.status = status
self.message = message
class TestExecutor(object):
__metaclass__ = ABCMeta
test_type = None
convert_result = None
supports_testdriver = False
def __init__(self, browser, server_config, timeout_multiplier=1,
debug_info=None, **kwargs):
"""Abstract Base class for object that actually executes the tests in a
specific browser. Typically there will be a different TestExecutor
subclass for each test type and method of executing tests.
:param browser: ExecutorBrowser instance providing properties of the
browser that will be tested.
:param server_config: Dictionary of wptserve server configuration of the
form stored in TestEnvironment.external_config
:param timeout_multiplier: Multiplier relative to base timeout to use
when setting test timeout.
"""
self.runner = None
self.browser = browser
self.server_config = server_config
self.timeout_multiplier = timeout_multiplier
self.debug_info = debug_info
self.last_environment = {"protocol": "http",
"prefs": {}}
self.protocol = None # This must be set in subclasses
@property
def logger(self):
"""StructuredLogger for this executor"""
if self.runner is not None:
return self.runner.logger
def setup(self, runner):
"""Run steps needed before tests can be started e.g. connecting to
browser instance
:param runner: TestRunner instance that is going to run the tests"""
self.runner = runner
if self.protocol is not None:
self.protocol.setup(runner)
def teardown(self):
"""Run cleanup steps after tests have finished"""
if self.protocol is not None:
self.protocol.teardown()
def run_test(self, test):
"""Run a particular test.
:param test: The test to run"""
if test.environment != self.last_environment:
self.on_environment_change(test.environment)
try:
result = self.do_test(test)
except Exception as e:
result = self.result_from_exception(test, e)
if result is Stop:
return result
# log result of parent test
if result[0].status == "ERROR":
self.logger.debug(result[0].message)
self.last_environment = test.environment
self.runner.send_message("test_ended", test, result)
def server_url(self, protocol):
return "%s://%s:%s" % (protocol,
self.server_config["host"],
self.server_config["ports"][protocol][0])
def test_url(self, test):
return urlparse.urljoin(self.server_url(test.environment["protocol"]), test.url)
@abstractmethod
def do_test(self, test):
"""Test-type and protocol specific implementation of running a
specific test.
:param test: The test to run."""
pass
def on_environment_change(self, new_environment):
pass
def result_from_exception(self, test, e):
if hasattr(e, "status") and e.status in test.result_cls.statuses:
status = e.status
else:
status = "ERROR"
message = unicode(getattr(e, "message", ""))
if message:
message += "\n"
message += traceback.format_exc(e)
return test.result_cls(status, message), []
class TestharnessExecutor(TestExecutor):
convert_result = testharness_result_converter
class RefTestExecutor(TestExecutor):
convert_result = reftest_result_converter
def __init__(self, browser, server_config, timeout_multiplier=1, screenshot_cache=None,
debug_info=None, **kwargs):
TestExecutor.__init__(self, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.screenshot_cache = screenshot_cache
class RefTestImplementation(object):
def __init__(self, executor):
self.timeout_multiplier = executor.timeout_multiplier
self.executor = executor
# Cache of url:(screenshot hash, screenshot). Typically the
# screenshot is None, but we set this value if a test fails
# and the screenshot was taken from the cache so that we may
# retrieve the screenshot from the cache directly in the future
self.screenshot_cache = self.executor.screenshot_cache
self.message = None
def setup(self):
pass
def teardown(self):
pass
@property
def logger(self):
return self.executor.logger
def get_hash(self, test, viewport_size, dpi):
timeout = test.timeout * self.timeout_multiplier
key = (test.url, viewport_size, dpi)
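# Screenshots are cached by (url, viewport size, dpi) so references shared between tests are only captured once.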
if key not in self.screenshot_cache:
success, data = self.executor.screenshot(test, viewport_size, dpi)
if not success:
return False, data
screenshot = data
hash_value = hashlib.sha1(screenshot).hexdigest()
self.screenshot_cache[key] = (hash_value, None)
rv = (hash_value, screenshot)
else:
rv = self.screenshot_cache[key]
self.message.append("%s %s" % (test.url, rv[0]))
return True, rv
def is_pass(self, lhs_hash, rhs_hash, relation):
assert relation in ("==", "!=")
self.message.append("Testing %s %s %s" % (lhs_hash, relation, rhs_hash))
return ((relation == "==" and lhs_hash == rhs_hash) or
(relation == "!=" and lhs_hash != rhs_hash))
def run_test(self, test):
viewport_size = test.viewport_size
dpi = test.dpi
self.message = []
# Depth-first search of reference tree, with the goal
        # of reaching a leaf node with only pass results
stack = list(((test, item[0]), item[1]) for item in reversed(test.references))
while stack:
hashes = [None, None]
screenshots = [None, None]
nodes, relation = stack.pop()
for i, node in enumerate(nodes):
success, data = self.get_hash(node, viewport_size, dpi)
if success is False:
return {"status": data[0], "message": data[1]}
hashes[i], screenshots[i] = data
if self.is_pass(hashes[0], hashes[1], relation):
if nodes[1].references:
stack.extend(list(((nodes[1], item[0]), item[1]) for item in reversed(nodes[1].references)))
else:
# We passed
return {"status":"PASS", "message": None}
# We failed, so construct a failure message
for i, (node, screenshot) in enumerate(zip(nodes, screenshots)):
if screenshot is None:
success, screenshot = self.retake_screenshot(node, viewport_size, dpi)
if success:
screenshots[i] = screenshot
log_data = [{"url": nodes[0].url, "screenshot": screenshots[0]}, relation,
{"url": nodes[1].url, "screenshot": screenshots[1]}]
return {"status": "FAIL",
"message": "\n".join(self.message),
"extra": {"reftest_screenshots": log_data}}
def retake_screenshot(self, node, viewport_size, dpi):
success, data = self.executor.screenshot(node, viewport_size, dpi)
if not success:
return False, data
key = (node.url, viewport_size, dpi)
hash_val, _ = self.screenshot_cache[key]
self.screenshot_cache[key] = hash_val, data
return True, data
class WdspecExecutor(TestExecutor):
convert_result = pytest_result_converter
protocol_cls = None
def __init__(self, browser, server_config, webdriver_binary,
webdriver_args, timeout_multiplier=1, capabilities=None,
debug_info=None, **kwargs):
self.do_delayed_imports()
TestExecutor.__init__(self, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.webdriver_binary = webdriver_binary
self.webdriver_args = webdriver_args
self.timeout_multiplier = timeout_multiplier
self.capabilities = capabilities
self.protocol = self.protocol_cls(self, browser)
def is_alive(self):
return self.protocol.is_alive
def on_environment_change(self, new_environment):
pass
def do_test(self, test):
timeout = test.timeout * self.timeout_multiplier + extra_timeout
success, data = WdspecRun(self.do_wdspec,
self.protocol.session_config,
test.abs_path,
timeout).run()
if success:
return self.convert_result(test, data)
return (test.result_cls(*data), [])
def do_wdspec(self, session_config, path, timeout):
harness_result = ("OK", None)
subtest_results = pytestrunner.run(path,
self.server_config,
session_config,
timeout=timeout)
return (harness_result, subtest_results)
def do_delayed_imports(self):
global pytestrunner
from . import pytestrunner
class Protocol(object):
def __init__(self, executor, browser):
self.executor = executor
self.browser = browser
@property
def logger(self):
return self.executor.logger
def setup(self, runner):
pass
def teardown(self):
pass
def wait(self):
pass
class WdspecRun(object):
def __init__(self, func, session, path, timeout):
self.func = func
self.result = (None, None)
self.session = session
self.path = path
self.timeout = timeout
self.result_flag = threading.Event()
def run(self):
"""Runs function in a thread and interrupts it if it exceeds the
given timeout. Returns (True, (Result, [SubtestResult ...])) in
case of success, or (False, (status, extra information)) in the
event of failure.
"""
executor = threading.Thread(target=self._run)
executor.start()
flag = self.result_flag.wait(self.timeout)
if self.result[1] is None:
self.result = False, ("EXTERNAL-TIMEOUT", None)
return self.result
def _run(self):
try:
self.result = True, self.func(self.session, self.path, self.timeout)
except (socket.timeout, IOError):
self.result = False, ("CRASH", None)
except Exception as e:
            message = getattr(e, "message", "")
            if message:
                message += "\n"
            message += traceback.format_exc()
self.result = False, ("ERROR", message)
finally:
self.result_flag.set()
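# Hedged usage sketch (added for illustration, not part of the original
# module): WdspecRun wraps a callable in a worker thread and reports
# ("EXTERNAL-TIMEOUT", None) if the callable does not finish before the
# timeout elapses. The callable and its arguments below are hypothetical
# placeholders, only meant to show the expected call shape.
def _demo_wdspec_run():
    def fake_wdspec(session_config, path, timeout):
        # Mimic the (harness_result, subtest_results) shape used by do_wdspec.
        return ("OK", None), []

    run = WdspecRun(fake_wdspec, {"host": "localhost"}, "/fake/path", 5)
    return run.run()  # expected -> (True, (("OK", None), []))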
class WebDriverProtocol(Protocol):
server_cls = None
def __init__(self, executor, browser):
Protocol.__init__(self, executor, browser)
self.webdriver_binary = executor.webdriver_binary
self.webdriver_args = executor.webdriver_args
self.capabilities = self.executor.capabilities
self.session_config = None
self.server = None
def setup(self, runner):
"""Connect to browser via the HTTP server."""
try:
self.server = self.server_cls(
self.logger,
binary=self.webdriver_binary,
args=self.webdriver_args)
self.server.start(block=False)
self.logger.info(
"WebDriver HTTP server listening at %s" % self.server.url)
self.session_config = {"host": self.server.host,
"port": self.server.port,
"capabilities": self.capabilities}
except Exception:
self.logger.error(traceback.format_exc())
self.executor.runner.send_message("init_failed")
else:
self.executor.runner.send_message("init_succeeded")
def teardown(self):
if self.server is not None and self.server.is_alive:
self.server.stop()
@property
def is_alive(self):
"""Test that the connection is still alive.
Because the remote communication happens over HTTP we need to
make an explicit request to the remote. It is allowed for
WebDriver spec tests to not have a WebDriver session, since this
may be what is tested.
An HTTP request to an invalid path that results in a 404 is
proof enough to us that the server is alive and kicking.
"""
conn = httplib.HTTPConnection(self.server.host, self.server.port)
conn.request("HEAD", self.server.base_path + "invalid")
res = conn.getresponse()
return res.status == 404
| 33.690229
| 112
| 0.606109
|
9fbfa6a5f29dd408e0c3343e3d3708c33da8d34b
| 1,295
|
py
|
Python
|
socialite/test/test_table_stmts.py
|
Wangqge/PowerLog_ae
|
8546afbcb9a77d516e8c3f0dfbaf2041a4b888f9
|
[
"Apache-2.0"
] | null | null | null |
socialite/test/test_table_stmts.py
|
Wangqge/PowerLog_ae
|
8546afbcb9a77d516e8c3f0dfbaf2041a4b888f9
|
[
"Apache-2.0"
] | null | null | null |
socialite/test/test_table_stmts.py
|
Wangqge/PowerLog_ae
|
8546afbcb9a77d516e8c3f0dfbaf2041a4b888f9
|
[
"Apache-2.0"
] | null | null | null |
"""
  Testing table statements (drop, clear) in a query.
"""
import unittest
class TestTableStmts(unittest.TestCase):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName)
    def test_drop_non_existing_table(self):
        try:
            `drop NonExistingTable42.`
            self.fail("Expected an exception when dropping a non-existing table")
        except:
            pass
def test_drop_table(self):
`TestX(int a, int b).
TestX(a,b) :- a=10, b=20.`
(a,b) = `TestX(a,b)`.next()
self.assertEqual(a, 10)
self.assertEqual(b, 20)
`drop TestX.`
`TestX(int a, int b).
TestX(a,b) :- a=11, b=20+1.`
(a,b) = `TestX(a,b)`.next()
self.assertEqual(a, 11)
self.assertEqual(b, 21)
`drop TestX.`
`TestX(int a, int b).
TestX(a,b) :- a=12, b=21+1.`
(a,b) = `TestX(a,b)`.next()
self.assertEqual(a, 12)
self.assertEqual(b, 22)
def test_clear_table(self):
`TestC1(int a, int b).`
`clear TestC1.`
`TestC1(a,b) :- a=1, b=42.`
`clear TestC1.`
try:
`TestC1(a,b)`.next()
self.fail("Expected exception(StopIteration) not raised")
except StopIteration:
pass
if __name__ == '__main__':
unittest.main()
| 22.719298
| 69
| 0.518147
|
6957f99872c70a6d867d974eacb396d071f96cac
| 8,707
|
py
|
Python
|
rasa_core/policies/two_stage_fallback.py
|
DavidSted/rasa_core
|
2b072e564373ad680600947521805911f44f3732
|
[
"Apache-2.0"
] | 1
|
2019-04-19T18:01:21.000Z
|
2019-04-19T18:01:21.000Z
|
rasa_core/policies/two_stage_fallback.py
|
DavidSted/rasa_core
|
2b072e564373ad680600947521805911f44f3732
|
[
"Apache-2.0"
] | 1
|
2019-04-02T17:52:01.000Z
|
2019-04-02T17:52:01.000Z
|
rasa_core/policies/two_stage_fallback.py
|
DavidSted/rasa_core
|
2b072e564373ad680600947521805911f44f3732
|
[
"Apache-2.0"
] | 1
|
2019-05-29T15:06:59.000Z
|
2019-05-29T15:06:59.000Z
|
import json
import logging
import os
from typing import List, Text
from rasa_core import utils
from rasa_core.actions.action import (ACTION_REVERT_FALLBACK_EVENTS_NAME,
ACTION_DEFAULT_FALLBACK_NAME,
ACTION_DEFAULT_ASK_REPHRASE_NAME,
ACTION_DEFAULT_ASK_AFFIRMATION_NAME,
ACTION_LISTEN_NAME)
from rasa_core.constants import FALLBACK_SCORE, USER_INTENT_OUT_OF_SCOPE
from rasa_core.domain import Domain, InvalidDomain
from rasa_core.policies.fallback import FallbackPolicy
from rasa_core.policies.policy import confidence_scores_for
from rasa_core.trackers import DialogueStateTracker
logger = logging.getLogger(__name__)
def has_user_rephrased(tracker: DialogueStateTracker) -> bool:
return tracker.last_executed_action_has(ACTION_DEFAULT_ASK_REPHRASE_NAME)
class TwoStageFallbackPolicy(FallbackPolicy):
""" This policy handles low NLU confidence in multiple stages.
If a NLU prediction has a low confidence score,
the user is asked to affirm whether they really had this intent.
If they affirm, the story continues as if the intent was classified
with high confidence from the beginning.
If they deny, the user is asked to rephrase his intent.
If the classification for the rephrased intent was confident, the story
continues as if the user had this intent from the beginning.
If the rephrased intent was not classified with high confidence,
the user is asked to affirm the classified intent.
If the user affirm the intent, the story continues as if the user had
this intent from the beginning.
If the user denies, an ultimate fallback action is triggered
(e.g. a hand-off to a human).
"""
def __init__(self,
nlu_threshold: float = 0.3,
core_threshold: float = 0.3,
fallback_core_action_name: Text = ACTION_DEFAULT_FALLBACK_NAME,
fallback_nlu_action_name: Text = ACTION_DEFAULT_FALLBACK_NAME,
deny_suggestion_intent_name: Text = USER_INTENT_OUT_OF_SCOPE,
) -> None:
"""Create a new Two-stage Fallback policy.
Args:
nlu_threshold: minimum threshold for NLU confidence.
If intent prediction confidence is lower than this,
predict fallback action with confidence 1.0.
core_threshold: if NLU confidence threshold is met,
predict fallback action with confidence
`core_threshold`. If this is the highest confidence in
the ensemble, the fallback action will be executed.
fallback_core_action_name: This action is executed if the Core
threshold is not met.
fallback_nlu_action_name: This action is executed if the user
denies the recognised intent for the second time.
deny_suggestion_intent_name: The name of the intent which is used to
detect that the user denies the suggested intents.
"""
super(TwoStageFallbackPolicy, self).__init__(
nlu_threshold,
core_threshold,
fallback_core_action_name)
self.fallback_nlu_action_name = fallback_nlu_action_name
self.deny_suggestion_intent_name = deny_suggestion_intent_name
def predict_action_probabilities(self,
tracker: DialogueStateTracker,
domain: Domain) -> List[float]:
"""Predicts the next action if NLU confidence is low.
"""
if self.deny_suggestion_intent_name not in domain.intents:
raise InvalidDomain('The intent {} must be present in the '
'domain file to use the '
'`TwoStageFallbackPolicy`.'
''.format(self.deny_suggestion_intent_name))
nlu_data = tracker.latest_message.parse_data
nlu_confidence = nlu_data["intent"].get("confidence", 1.0)
last_intent_name = nlu_data['intent'].get('name', None)
should_nlu_fallback = self.should_nlu_fallback(
nlu_confidence, tracker.latest_action_name)
user_rephrased = has_user_rephrased(tracker)
if self._is_user_input_expected(tracker):
result = confidence_scores_for(ACTION_LISTEN_NAME, FALLBACK_SCORE,
domain)
elif self._has_user_denied(last_intent_name, tracker):
logger.debug("User '{}' denied suggested intents.".format(
tracker.sender_id))
result = self._results_for_user_denied(tracker, domain)
elif user_rephrased and should_nlu_fallback:
logger.debug("Ambiguous rephrasing of user '{}' "
"for intent '{}'".format(tracker.sender_id,
last_intent_name))
result = confidence_scores_for(ACTION_DEFAULT_ASK_AFFIRMATION_NAME,
FALLBACK_SCORE,
domain)
elif user_rephrased:
logger.debug("User '{}' rephrased intent".format(tracker.sender_id))
result = confidence_scores_for(ACTION_REVERT_FALLBACK_EVENTS_NAME,
FALLBACK_SCORE, domain)
elif tracker.last_executed_action_has(
ACTION_DEFAULT_ASK_AFFIRMATION_NAME):
if not should_nlu_fallback:
logger.debug("User '{}' affirmed intent '{}'"
"".format(tracker.sender_id,
last_intent_name))
result = confidence_scores_for(
ACTION_REVERT_FALLBACK_EVENTS_NAME,
FALLBACK_SCORE, domain)
else:
result = confidence_scores_for(self.fallback_nlu_action_name,
FALLBACK_SCORE, domain)
elif should_nlu_fallback:
logger.debug("User '{}' has to affirm intent '{}'.".format(
tracker.sender_id, last_intent_name))
result = confidence_scores_for(ACTION_DEFAULT_ASK_AFFIRMATION_NAME,
FALLBACK_SCORE,
domain)
else:
result = self.fallback_scores(domain, self.core_threshold)
return result
def _is_user_input_expected(self, tracker: DialogueStateTracker) -> bool:
return tracker.latest_action_name in [
ACTION_DEFAULT_ASK_AFFIRMATION_NAME,
ACTION_DEFAULT_ASK_REPHRASE_NAME,
self.fallback_action_name]
def _has_user_denied(self,
last_intent: Text,
tracker: DialogueStateTracker) -> bool:
return (tracker.last_executed_action_has(
ACTION_DEFAULT_ASK_AFFIRMATION_NAME) and
last_intent == self.deny_suggestion_intent_name)
def _results_for_user_denied(self, tracker: DialogueStateTracker,
domain: Domain) -> List[float]:
has_denied_before = tracker.last_executed_action_has(
ACTION_DEFAULT_ASK_REPHRASE_NAME,
skip=1)
if has_denied_before:
return confidence_scores_for(self.fallback_nlu_action_name,
FALLBACK_SCORE, domain)
else:
return confidence_scores_for(ACTION_DEFAULT_ASK_REPHRASE_NAME,
FALLBACK_SCORE, domain)
def persist(self, path: Text) -> None:
"""Persists the policy to storage."""
config_file = os.path.join(path, 'two_stage_fallback_policy.json')
meta = {
"nlu_threshold": self.nlu_threshold,
"core_threshold": self.core_threshold,
"fallback_core_action_name": self.fallback_action_name,
"fallback_nlu_action_name": self.fallback_nlu_action_name,
"deny_suggestion_intent_name": self.deny_suggestion_intent_name,
}
utils.create_dir_for_file(config_file)
utils.dump_obj_as_json_to_file(config_file, meta)
@classmethod
def load(cls, path: Text) -> 'FallbackPolicy':
meta = {}
if os.path.exists(path):
meta_path = os.path.join(path, "two_stage_fallback_policy.json")
if os.path.isfile(meta_path):
meta = json.loads(utils.read_file(meta_path))
return cls(**meta)
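# Hedged usage sketch (added for illustration; not part of rasa_core itself).
# It shows the persist/load round trip defined above with custom thresholds.
# The directory name is a hypothetical placeholder.
def _demo_two_stage_fallback_policy(
        path: Text = "models/demo_fallback") -> 'TwoStageFallbackPolicy':
    policy = TwoStageFallbackPolicy(nlu_threshold=0.4,
                                    core_threshold=0.3,
                                    deny_suggestion_intent_name="out_of_scope")
    policy.persist(path)  # writes two_stage_fallback_policy.json under `path`
    return TwoStageFallbackPolicy.load(path)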
| 47.579235
| 80
| 0.617319
|
c2f8f16b7be2a26f8d0e98728347f1fb066201d9
| 4,546
|
py
|
Python
|
cohesity_management_sdk/models/environment_remote_protection_job_information_enum.py
|
chandrashekar-cohesity/management-sdk-python
|
9e6ec99e8a288005804b808c4e9b19fd204e3a8b
|
[
"Apache-2.0"
] | 1
|
2019-11-07T23:19:32.000Z
|
2019-11-07T23:19:32.000Z
|
cohesity_management_sdk/models/environment_remote_protection_job_information_enum.py
|
chandrashekar-cohesity/management-sdk-python
|
9e6ec99e8a288005804b808c4e9b19fd204e3a8b
|
[
"Apache-2.0"
] | null | null | null |
cohesity_management_sdk/models/environment_remote_protection_job_information_enum.py
|
chandrashekar-cohesity/management-sdk-python
|
9e6ec99e8a288005804b808c4e9b19fd204e3a8b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
class EnvironmentRemoteProtectionJobInformationEnum(object):
"""Implementation of the 'Environment_RemoteProtectionJobInformation' enum.
Specifies the environment type (such as kVMware or kSQL)
of the original archived Protection Job.
Supported environment types such as 'kView', 'kSQL', 'kVMware', etc.
NOTE: 'kPuppeteer' refers to Cohesity's Remote Adapter.
'kVMware' indicates the VMware Protection Source environment.
'kHyperV' indicates the HyperV Protection Source environment.
'kSQL' indicates the SQL Protection Source environment.
'kView' indicates the View Protection Source environment.
'kPuppeteer' indicates the Cohesity's Remote Adapter.
'kPhysical' indicates the physical Protection Source environment.
'kPure' indicates the Pure Storage Protection Source environment.
'kAzure' indicates the Microsoft's Azure Protection Source environment.
'kNetapp' indicates the Netapp Protection Source environment.
'kAgent' indicates the Agent Protection Source environment.
    'kGenericNas' indicates the Generic Network Attached Storage Protection
    Source environment.
    'kAcropolis' indicates the Acropolis Protection Source environment.
    'kPhysicalFiles' indicates the Physical Files Protection Source
    environment.
'kIsilon' indicates the Dell EMC's Isilon Protection Source environment.
'kKVM' indicates the KVM Protection Source environment.
'kAWS' indicates the AWS Protection Source environment.
'kExchange' indicates the Exchange Protection Source environment.
'kHyperVVSS' indicates the HyperV VSS Protection Source
environment.
'kOracle' indicates the Oracle Protection Source environment.
'kGCP' indicates the Google Cloud Platform Protection Source environment.
'kFlashBlade' indicates the Flash Blade Protection Source environment.
'kAWSNative' indicates the AWS Native Protection Source environment.
'kVCD' indicates the VMware's Virtual cloud Director Protection Source
environment.
'kO365' indicates the Office 365 Protection Source environment.
'kO365Outlook' indicates Office 365 outlook Protection Source
environment.
'kHyperFlex' indicates the Hyper Flex Protection Source environment.
'kGCPNative' indicates the GCP Native Protection Source environment.
'kAzureNative' indicates the Azure Native Protection Source environment.
Attributes:
KVMWARE: TODO: type description here.
KHYPERV: TODO: type description here.
KSQL: TODO: type description here.
KVIEW: TODO: type description here.
KPUPPETEER: TODO: type description here.
KPHYSICAL: TODO: type description here.
KPURE: TODO: type description here.
KAZURE: TODO: type description here.
KNETAPP: TODO: type description here.
KAGENT: TODO: type description here.
KGENERICNAS: TODO: type description here.
KACROPOLIS: TODO: type description here.
KPHYSICALFILES: TODO: type description here.
KISILON: TODO: type description here.
KKVM: TODO: type description here.
KAWS: TODO: type description here.
KEXCHANGE: TODO: type description here.
KHYPERVVSS: TODO: type description here.
KORACLE: TODO: type description here.
KGCP: TODO: type description here.
KFLASHBLADE: TODO: type description here.
KAWSNATIVE: TODO: type description here.
KVCD: TODO: type description here.
KO365: TODO: type description here.
KO365OUTLOOK: TODO: type description here.
KHYPERFLEX: TODO: type description here.
KGCPNATIVE: TODO: type description here.
KAZURENATIVE: TODO: type description here.
"""
K_VMWARE = 'kVMware'
K_HYPERV = 'kHyperV'
KSQL = 'kSQL'
KVIEW = 'kView'
KPUPPETEER = 'kPuppeteer'
KPHYSICAL = 'kPhysical'
KPURE = 'kPure'
KAZURE = 'kAzure'
KNETAPP = 'kNetapp'
KAGENT = 'kAgent'
KGENERICNAS = 'kGenericNas'
KACROPOLIS = 'kAcropolis'
KPHYSICALFILES = 'kPhysicalFiles'
KISILON = 'kIsilon'
KKVM = 'kKVM'
KAWS = 'kAWS'
KEXCHANGE = 'kExchange'
K_HYPERV_VSS = 'kHyperVVSS'
KORACLE = 'kOracle'
KGCP = 'kGCP'
KFLASHBLADE = 'kFlashBlade'
KAWSNATIVE = 'kAWSNative'
KVCD = 'kVCD'
KO365 = 'kO365'
KO365OUTLOOK = 'kO365Outlook'
KHYPERFLEX = 'kHyperFlex'
KGCPNATIVE = 'kGCPNative'
KAZURENATIVE = 'kAzureNative'
| 33.925373
| 79
| 0.710075
|
14f4f7854e24eef5e3416cab11f0faa0a458d026
| 8,766
|
py
|
Python
|
src/sas/sasgui/perspectives/calculator/calculator.py
|
andyfaff/sasview
|
c00a797ab9c4ddc60f0fa8a64ae8a2067c225921
|
[
"BSD-3-Clause"
] | null | null | null |
src/sas/sasgui/perspectives/calculator/calculator.py
|
andyfaff/sasview
|
c00a797ab9c4ddc60f0fa8a64ae8a2067c225921
|
[
"BSD-3-Clause"
] | null | null | null |
src/sas/sasgui/perspectives/calculator/calculator.py
|
andyfaff/sasview
|
c00a797ab9c4ddc60f0fa8a64ae8a2067c225921
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Calculator Module
"""
################################################################################
#This software was developed by the University of Tennessee as part of the
#Distributed Data Analysis of Neutron Scattering Experiments (DANSE)
#project funded by the US National Science Foundation.
#
#See the license text in license.txt
#
#copyright 2010, University of Tennessee
################################################################################
import wx
from sas.sasgui.guiframe.plugin_base import PluginBase
from sas.sasgui.perspectives.calculator.data_operator import DataOperatorWindow
from sas.sasgui.perspectives.calculator.data_editor import DataEditorWindow
from sas.sasgui.perspectives.calculator.kiessig_calculator_panel import KiessigWindow
from sas.sasgui.perspectives.calculator.sld_panel import SldWindow
from sas.sasgui.perspectives.calculator.density_panel import DensityWindow
from sas.sasgui.perspectives.calculator.slit_length_calculator_panel \
import SlitLengthCalculatorWindow
from sas.sasgui.perspectives.calculator.resolution_calculator_panel \
import ResolutionWindow
from sas.sasgui.perspectives.calculator.gen_scatter_panel import SasGenWindow
from sas.sasgui.perspectives.calculator.image_viewer import ImageView
from sas.sasgui.perspectives.calculator.pyconsole import PyConsole
import logging
logger = logging.getLogger(__name__)
class Plugin(PluginBase):
"""
This class defines the interface for a Plugin class
for calculator perspective
"""
def __init__(self):
PluginBase.__init__(self, name="Calculator")
# Log startup
logger.info("Calculator plug-in started")
self.sub_menu = "Tool"
self.data_edit_frame = None
# data operator use one frame all the time
self.data_operator_frame = None
self.kiessig_frame = None
self.sld_frame = None
self.cal_md_frame = None
self.cal_slit_frame = None
self.cal_res_frame = None
self.gen_frame = None
self.image_view = None
self.py_frame = None
def get_tools(self):
"""
Returns a set of menu entries for tools
"""
data_oper_help = "Perform arithmetic data operation (+...) "
data_oper_help += "and combination (|)"
kiessig_help = "Approximately computes the "
kiessig_help += "thickness of a shell or the size of "
kiessig_help += "particles \n from the width of a Kiessig fringe."
sld_help = "Computes the Scattering Length Density."
slit_length_help = "Computes the slit length from the beam profile."
resolution_help = "Approximately estimates the "
resolution_help += "resolution of Q in 2D based on the SAS "
resolution_help += "instrumental parameter values."
mass_volume_help = "Based on the chemical formula, "
mass_volume_help += "compute the mass density or the molar volume."
gensas_help = "Generic SAS"
pyconsole_help = "Python Console."
imageviewer_help = "Load an image file and display the image."
#data_editor_help = "Meta Data Editor"
return [("Data Operation",
data_oper_help, self.on_data_operation),
("SLD Calculator", sld_help, self.on_calculate_sld),
("Density/Volume Calculator", mass_volume_help,
self.on_calculate_dv),
("Slit Size Calculator", slit_length_help,
self.on_calculate_slit_size),
("Kiessig Thickness Calculator",
kiessig_help, self.on_calculate_kiessig),
("Q Resolution Estimator",
resolution_help, self.on_calculate_resoltuion),
("Generic Scattering Calculator",
gensas_help, self.on_gen_model),
("Python Shell/Editor", pyconsole_help, self.on_python_console),
("Image Viewer", imageviewer_help, self.on_image_viewer), ]
def on_edit_data(self, event):
"""
Edit meta data
"""
if self.data_edit_frame is None:
self.data_edit_frame = DataEditorWindow(parent=self.parent,
manager=self, data=[],
title="Data Editor")
self.put_icon(self.data_edit_frame)
else:
self.data_edit_frame.Show(False)
self.data_edit_frame.Show(True)
def on_data_operation(self, event):
"""
Data operation
"""
if self.data_operator_frame is None:
# Use one frame all the time
self.data_operator_frame = DataOperatorWindow(parent=self.parent,
manager=self,
title="Data Operation")
self.put_icon(self.data_operator_frame)
else:
self.data_operator_frame.Show(False)
self.data_operator_frame.panel.set_panel_on_focus(None)
self.data_operator_frame.Show(True)
def on_calculate_kiessig(self, event):
"""
Compute the Kiessig thickness
"""
if self.kiessig_frame is None:
frame = KiessigWindow(parent=self.parent, manager=self)
self.put_icon(frame)
self.kiessig_frame = frame
else:
self.kiessig_frame.Show(False)
self.kiessig_frame.Show(True)
def on_calculate_sld(self, event):
"""
        Compute the scattering length density of a molecule
"""
if self.sld_frame is None:
frame = SldWindow(parent=self.parent,
base=self.parent, manager=self)
self.put_icon(frame)
self.sld_frame = frame
else:
self.sld_frame.Show(False)
self.sld_frame.Show(True)
def on_calculate_dv(self, event):
"""
        Compute the mass density or molar volume
"""
if self.cal_md_frame is None:
frame = DensityWindow(parent=self.parent,
base=self.parent, manager=self)
self.put_icon(frame)
self.cal_md_frame = frame
else:
self.cal_md_frame.Show(False)
self.cal_md_frame.Show(True)
def on_calculate_slit_size(self, event):
"""
        Compute the slit size for the given data
"""
if self.cal_slit_frame is None:
frame = SlitLengthCalculatorWindow(parent=self.parent, manager=self)
self.put_icon(frame)
self.cal_slit_frame = frame
else:
self.cal_slit_frame.Show(False)
self.cal_slit_frame.Show(True)
def on_calculate_resoltuion(self, event):
"""
Estimate the instrumental resolution
"""
if self.cal_res_frame is None:
frame = ResolutionWindow(parent=self.parent, manager=self)
self.put_icon(frame)
self.cal_res_frame = frame
else:
self.cal_res_frame.Show(False)
self.cal_res_frame.Show(True)
def on_gen_model(self, event):
"""
On Generic model menu event
"""
if self.gen_frame is None:
frame = SasGenWindow(parent=self.parent, manager=self)
self.put_icon(frame)
self.gen_frame = frame
else:
self.gen_frame.Show(False)
self.gen_frame.Show(True)
def on_image_viewer(self, event):
"""
        Show a dialog to choose an image file
:param event: menu event
"""
self.image_view = ImageView(parent=self.parent)
self.image_view.load()
def on_python_console(self, event):
"""
Open Python Console
:param event: menu event
"""
self.get_python_panel(filename=None)
def get_python_panel(self, filename=None):
"""
Get the python shell panel
:param filename: file name to open in editor
"""
if self.py_frame is None:
frame = PyConsole(parent=self.parent, base=self,
filename=filename)
self.put_icon(frame)
self.py_frame = frame
else:
self.py_frame.Show(False)
self.py_frame.Show(True)
def put_icon(self, frame):
"""
Put icon in the frame title bar
"""
if hasattr(frame, "IsIconized"):
if not frame.IsIconized():
try:
icon = self.parent.GetIcon()
frame.SetIcon(icon)
except:
pass
| 37.144068
| 85
| 0.593543
|
da05d17476207d1ccd360752affd2d4340edab31
| 1,155
|
py
|
Python
|
tests/test_comment.py
|
abbyshabi/PerfectPitches
|
7299b67784021d74520c2bc6af931c57877d2024
|
[
"MIT"
] | null | null | null |
tests/test_comment.py
|
abbyshabi/PerfectPitches
|
7299b67784021d74520c2bc6af931c57877d2024
|
[
"MIT"
] | null | null | null |
tests/test_comment.py
|
abbyshabi/PerfectPitches
|
7299b67784021d74520c2bc6af931c57877d2024
|
[
"MIT"
] | 2
|
2019-02-12T22:54:31.000Z
|
2019-02-13T23:42:04.000Z
|
import unittest
from app.models import Comment, User, Post
from flask_login import current_user
from app import db
class TestComment(unittest.TestCase):
def setUp(self):
self.post_Fly = Post(title = 'James',body = 'potato', id = '1')
self.new_comment = Comment(body = 'good')
def tearDown(self):
Post.query.delete()
Comment.query.delete()
def test_check_instance_variables(self):
self.assertEquals(self.new_comment.post_id,12345)
self.assertEquals(self.new_comment.post_title,'Review for movies')
self.assertEquals(self.new_comment.post_comment,'This movie is the best thing since sliced bread')
self.assertEquals(self.new_comment.user,self.user_James)
def test_save_comment(self):
self.new_comment.save_comment()
self.assertTrue(len(Comment.query.all())>0)
def test_get_comment_by_id(self):
self.new_comment.save_comment()
got_comments = Comment.get_comment(12345)
self.assertTrue(len(got_comments) == 1)
| 36.09375
| 114
| 0.62684
|
51d92107809ab93fb9b53f6a97daa853015dd3d2
| 436
|
py
|
Python
|
blog/urls.py
|
robscodebase/django-blog
|
956e3751ffe352425fb00087a37cb4648f1a176e
|
[
"BSD-3-Clause"
] | null | null | null |
blog/urls.py
|
robscodebase/django-blog
|
956e3751ffe352425fb00087a37cb4648f1a176e
|
[
"BSD-3-Clause"
] | null | null | null |
blog/urls.py
|
robscodebase/django-blog
|
956e3751ffe352425fb00087a37cb4648f1a176e
|
[
"BSD-3-Clause"
] | null | null | null |
from django.conf.urls import url
from . import views
urlpatterns = [
# post views
url(r'^$', views.post_list, name='post_list'),
url(r'^tag/(?P<tag_slug>[-\w]+)/$', views.post_list, name='post_list_by_tag'),
url(r'^(?P<year>\d{4})/(?P<month>\d{2})/(?P<day>\d{2})/(?P<post>[-\w]+)/$',
views.post_detail,
name='post_detail'),
url(r'^(?P<post_id>\d+)/share/$', views.post_share, name='post_share'),
]
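# Illustrative URL examples (added for clarity; the concrete paths below are
# hypothetical and only show which view each pattern above resolves to):
#   /                           -> post_list
#   /tag/django/                -> post_list_by_tag (tag_slug='django')
#   /2017/05/03/my-first-post/  -> post_detail (year, month, day, post slug)
#   /7/share/                   -> post_share (post_id=7)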
| 31.142857
| 82
| 0.580275
|
eea861b38075d1343f7381a54f1f66aac90b57ba
| 53,423
|
py
|
Python
|
MLlib/models.py
|
Vinit-source/ML-DL-implementation
|
15960151a4d65c24695220ee68e0ffa5b1b40e19
|
[
"BSD-3-Clause"
] | null | null | null |
MLlib/models.py
|
Vinit-source/ML-DL-implementation
|
15960151a4d65c24695220ee68e0ffa5b1b40e19
|
[
"BSD-3-Clause"
] | null | null | null |
MLlib/models.py
|
Vinit-source/ML-DL-implementation
|
15960151a4d65c24695220ee68e0ffa5b1b40e19
|
[
"BSD-3-Clause"
] | null | null | null |
from MLlib.optimizers import GradientDescent
from MLlib.activations import Sigmoid
from MLlib.utils.misc_utils import generate_weights
from MLlib.utils.decision_tree_utils import partition, find_best_split
from MLlib.utils.decision_tree_utils import class_counts
from MLlib.utils.knn_utils import get_neighbours
from MLlib.utils.naive_bayes_utils import make_likelihood_table
from MLlib.utils.gaussian_naive_bayes_utils import get_mean_var, p_y_given_x
from MLlib.utils.k_means_clustering_utils import initi_centroid, cluster_allot
from MLlib.utils.k_means_clustering_utils import new_centroid, xy_calc
from MLlib.utils.divisive_clustering_utils import KMeans, sse, \
visualize_clusters
from MLlib.utils.pca_utils import PCA_utils, infer_dimension
import MLlib.nn as nn
from collections import Counter, OrderedDict
from MLlib.utils.agglomerative_clustering_utils import compute_distance
import numpy as np
from numpy.random import random
from scipy.stats import norm
from warnings import catch_warnings
from warnings import simplefilter
import pickle
import matplotlib.pyplot as plt
from datetime import datetime
import math
import scipy.cluster.hierarchy as shc
DATE_FORMAT = '%d-%m-%Y_%H-%M-%S'
class LinearRegression():
"""
Implement Linear Regression Model.
ATTRIBUTES
==========
None
METHODS
=======
fit(X,Y,optimizer=GradientDescent,epochs=25,
zeros=False,save_best=False):
Implement the Training of
Linear Regression Model with
suitable optimizer, inititalised
random weights and Dataset's
Input-Output, upto certain number
of epochs.
predict(X):
Return the Predicted Value of
Output associated with Input,
using the weights, which were
tuned by Training Linear Regression
Model.
save(name):
Save the Trained Linear Regression
Model in rob format , in Local
disk.
"""
def fit(
self,
X,
Y,
optimizer=GradientDescent,
epochs=25,
zeros=False,
save_best=False
):
"""
Train the Linear Regression Model
by fitting its associated weights,
according to Dataset's Inputs and
their corresponding Output Values.
PARAMETERS
==========
X: ndarray(dtype=float,ndim=1)
1-D Array of Dataset's Input.
Y: ndarray(dtype=float,ndim=1)
1-D Array of Dataset's Output.
optimizer: class
Class of one of the Optimizers like
AdamProp,SGD,MBGD,RMSprop,AdamDelta,
Gradient Descent,etc.
epochs: int
            Number of times the loop to calculate loss
            and optimize weights is going to take
            place.
zeros: boolean
Condition to initialize Weights as either
zeroes or some random decimal values.
save_best: boolean
Condition to enable or disable the option
of saving the suitable Weight values for the
model after reaching the region nearby the
minima of Loss-Function with respect to Weights.
epoch_loss: float
            The degree to which the predicted value
            deviates from the actual values, given by
            implementing one of the chosen loss functions
            from loss_func.py.
version: str
Descriptive update of Model's Version at each
step of Training Loop, along with Time description
            according to DATE_FORMAT.
RETURNS
=======
None
"""
self.weights = generate_weights(X.shape[1], 1, zeros=zeros)
self.best_weights = {"weights": None, "loss": float('inf')}
print("Starting training with loss:",
optimizer.loss_func.loss(X, Y, self.weights))
for epoch in range(1, epochs + 1):
print("======================================")
print("epoch:", epoch)
self.weights = optimizer.iterate(X, Y, self.weights)
epoch_loss = optimizer.loss_func.loss(X, Y, self.weights)
if save_best and epoch_loss < self.best_weights["loss"]:
print("updating best weights (loss: {})".format(epoch_loss))
self.best_weights['weights'] = self.weights
self.best_weights['loss'] = epoch_loss
version = "model_best_" + datetime.now().strftime(DATE_FORMAT)
print("Saving best model version: ", version)
self.save(version)
print("Loss in this step: ", epoch_loss)
version = "model_final_" + datetime.now().strftime(DATE_FORMAT)
print("Saving final model version: ", version)
self.save(version)
print("======================================\n")
print("Finished training with final loss:",
optimizer.loss_func.loss(X, Y, self.weights))
print("=====================================================\n")
def predict(self, X):
"""
Predict the Output Value of
Input, in accordance with
Linear Regression Model.
PARAMETERS
==========
X: ndarray(dtype=float,ndim=1)
1-D Array of Dataset's Input.
RETURNS
=======
ndarray(dtype=float,ndim=1)
Predicted Values corresponding to
each Input of Dataset.
"""
return np.dot(X, self.weights)
def save(self, name):
"""
Save the Model in rob
format for further usage.
PARAMETERS
==========
name: str
Title of the Model's file
to be saved in rob format.
RETURNS
=======
None
"""
with open(name + '.rob', 'wb') as robfile:
pickle.dump(self, robfile)
def plot(self, X, Y, optimizer=GradientDescent, epochs=25):
""""
Plot the graph of loss vs number of iterations
PARAMETERS
==========
X: ndarray(dtype=float, ndim=1)
1-D array of Dataset's input
Y: ndarray(dtype=float, ndim=1)
1-D array of Dataset's output
optimizer: class
Class of one of the Optimizers like
AdamProp,SGD,MBGD,GradientDescent etc
epochs: int
            Number of times the loop to calculate loss
            and optimize weights is going to take
            place.
error: float
            The degree to which the predicted value
            deviates from the actual values, given by implementing
            one of the chosen loss functions from loss_func.py.
RETURNS
=========
A 2-D graph with x-axis as Number of
iterations and y-axis as loss.
"""
l1 = []
l2 = []
        # Initialise the weights before running the training iterations.
        self.weights = generate_weights(X.shape[1], 1, zeros=False)
for epoch in range(1, epochs + 1):
l1.append(epoch)
self.weights = optimizer.iterate(X, Y, self.weights)
error = optimizer.loss_func.loss(X, Y, self.weights)
l2.append(error)
plt.plot(np.array(l1), np.array(l2))
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.scatter(X, Y)
plt.show()
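# Hedged usage sketch (added for illustration; the weights and data below are
# hypothetical). Once `fit` has tuned `self.weights`, `predict` is a plain
# X @ weights product, so this demo seeds the weights by hand instead of
# training.
def _demo_linear_regression_predict():
    model = LinearRegression()
    model.weights = np.array([[2.0], [1.0]])            # slope and intercept
    X = np.array([[1.0, 1.0], [2.0, 1.0], [3.0, 1.0]])  # last column is the bias term
    return model.predict(X)                             # expected -> [[3.], [5.], [7.]]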
class PolynomialRegression():
"""
Implement Polynomial Regression Model.
ATTRIBUTES
==========
None
METHODS
=======
fit(X,Y,optimizer=GradientDescent,epochs=60, \
zeros=False,save_best=False):
Implement the Training of
Polynomial Regression Model with
suitable optimizer, inititalised
random weights and Dataset's
Input-Output, upto certain number
of epochs.
predict(X):
Return the Predicted Value of
Output associated with Input,
using the weights, which were
tuned by Training Polynomial Regression
Model.
save(name):
Save the Trained Polynomial Regression
Model in rob format , in Local
disk.
"""
def __init__(self, degree):
self.degree = degree
self.weights = 0
self.best_weights = {}
def fit(
self,
X,
Y,
optimizer=GradientDescent,
epochs=200,
zeros=False,
save_best=True
):
"""
Train the Polynomial Regression Model
by fitting its associated weights,
according to Dataset's Inputs and
their corresponding Output Values.
PARAMETERS
==========
X: ndarray(dtype=float,ndim=1)
1-D Array of Dataset's Input.
Update X with X**2, X**3, X**4 terms
Y: ndarray(dtype=float,ndim=1)
1-D Array of Dataset's Output.
optimizer: class
Class of one of the Optimizers like
AdamProp,SGD,MBGD,RMSprop,AdamDelta,
Gradient Descent,etc.
epochs: int
Number of times, the loop to calculate loss
and optimize weights, is going to take
place.
zeros: boolean
Condition to initialize Weights as either
zeroes or some random decimal values.
save_best: boolean
Condition to enable or disable the option
of saving the suitable Weight values for the
model after reaching the region nearby the
minima of Loss-Function with respect to Weights.
epoch_loss: float
            The degree to which the predicted value
            deviates from the actual values, given by
            implementing one of the chosen loss functions
            from loss_func.py.
version: str
Descriptive update of Model's Version at each
step of Training Loop, along with Time description
            according to DATE_FORMAT.
RETURNS
=======
None
"""
M, N = X.shape
P = X[:, 0:1]
# Add polynomial terms to X
# upto degree 'self.degree'.
for i in range(2, self.degree + 1):
P = np.hstack((
P,
(np.power(X[:, 0:1], i)).reshape(M, 1)
))
P = np.hstack((
P,
X[:, 1:2]
))
X = P
self.weights = generate_weights(X.shape[1], 1, zeros=zeros)
self.best_weights = {"weights": self.weights, "loss":
optimizer.loss_func.loss(X, Y, self.weights)}
print("Starting training with loss:",
optimizer.loss_func.loss(X, Y, self.weights))
for epoch in range(1, epochs + 1):
print("======================================")
print("epoch:", epoch)
self.weights = optimizer.iterate(X, Y, self.weights)
epoch_loss = optimizer.loss_func.loss(X, Y, self.weights)
if save_best and epoch_loss < self.best_weights["loss"]:
self.best_weights['weights'] = self.weights
self.best_weights['loss'] = epoch_loss
version = "model_best_" + datetime.now().strftime(DATE_FORMAT)
print("Saving best model version: ", version)
self.save(version)
print("Loss in this step: ", epoch_loss)
version = "model_final_" + datetime.now().strftime(DATE_FORMAT)
print("Saving final model version: ", version)
self.save(version)
print("======================================\n")
print("Finished training with final loss:", self.best_weights['loss'])
print("=====================================================\n")
def predict(self, X):
"""
Predict the Output Value of
Input, in accordance with
Polynomial Regression Model.
PARAMETERS
==========
X: ndarray(dtype=float,ndim=1)
1-D Array of Dataset's Input.
RETURNS
=======
ndarray(dtype=float, ndim=1)
Predicted Values corresponding to
each Input of Dataset.
"""
M, N = X.shape
P = X[:, 0:1]
for i in range(2, self.degree + 1):
P = np.hstack((
P,
(np.power(X[:, 0:1], i)).reshape(M, 1)
))
P = np.hstack((
P,
X[:, 1:2]
))
X = P
return np.dot(X, self.best_weights['weights'])
def save(self, name):
"""
Save the Model in rob
format for further usage.
PARAMETERS
==========
name: str
Title of the Model's file
to be saved in rob format.
RETURNS
=======
None
"""
with open(name + '.rob', 'wb') as robfile:
pickle.dump(self, robfile)
def plot(
self,
X,
Y,
Z,
optimizer=GradientDescent,
epochs=60,
zeros=False,
save_best=False
):
"""
Plot the graph of Loss vs Epochs
Plot the graph of line Of Polynomial Regression
PARAMETERS
==========
X: ndarray(dtype=float, ndim=1)
1-D array of Dataset's input
Y: ndarray(dtype=float, ndim=1)
1-D array of Dataset's output
Z: ndarray(dtype=float, ndim=1)
1-D array of Predicted Values
optimizer: class
Class of one of the Optimizers like
AdamProp,SGD,MBGD,RMSprop,AdamDelta,
Gradient Descent,etc.
epochs: int
Number of times, the loop to calculate loss
and optimize weights, is going to take
place.
zeros: boolean
Condition to initialize Weights as either
zeroes or some random decimal values.
save_best: boolean
Condition to enable or disable the option
of saving the suitable Weight values for the
model after reaching the region nearby the
minima of Loss-Function with respect to Weights.
RETURNS
=======
None
"""
M, N = X.shape
P = X[:, 0:1]
for i in range(2, self.degree + 1):
P = np.hstack((
P,
(np.power(X[:, 0:1], i)).reshape(M, 1)
))
P = np.hstack((
P,
X[:, 1:2]
))
X = P
m = []
List = []
self.weights = generate_weights(X.shape[1], 1, zeros=zeros)
self.best_weights = {"weights": self.weights, "loss":
optimizer.loss_func.loss(X, Y, self.weights)}
print("Starting training with loss:",
optimizer.loss_func.loss(X, Y, self.weights))
for epoch in range(1, epochs + 1):
m.append(epoch)
self.weights = optimizer.iterate(X, Y, self.weights)
epoch_loss = optimizer.loss_func.loss(X, Y, self.weights)
if save_best and epoch_loss < self.best_weights["loss"]:
self.best_weights['weights'] = self.weights
self.best_weights['loss'] = epoch_loss
List.append(epoch_loss)
x = np.array(m)
y = np.array(List)
plt.figure(figsize=(10, 5))
plt.xlabel('EPOCHS', family='serif', fontsize=15)
plt.ylabel('LOSS', family='serif', fontsize=15)
plt.scatter(x, y, color='navy')
plt.show()
z = np.reshape(Z, (1, M))
pred_value = z[0]
true_value = Y[0]
A = []
for i in range(0, len(Y[0])):
A.append(i)
x_axis = np.array(A)
plt.xlabel('Number of Datasets', family='serif', fontsize=15)
plt.ylabel('Values', family='serif', fontsize=15)
plt.scatter(x_axis, true_value, label="True Values")
plt.plot(x_axis, pred_value, label="Predicted Values")
plt.legend(loc="upper right")
plt.show()
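# Hedged usage sketch (added for illustration; the degree, weights and data
# are hypothetical). `predict` expands the first column of X into powers up
# to `degree`, keeps the second column as the bias term, and applies
# `best_weights`, so this demo seeds `best_weights` by hand.
def _demo_polynomial_regression_predict():
    model = PolynomialRegression(degree=2)
    model.best_weights = {"weights": np.array([[1.0], [2.0], [3.0]]),
                          "loss": 0.0}
    X = np.array([[1.0, 1.0], [2.0, 1.0]])  # columns: feature value, bias term
    return model.predict(X)                 # 1*x + 2*x**2 + 3 -> [[6.], [13.]]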
class LogisticRegression(LinearRegression):
"""
Implements Logistic Regression Model.
ATTRIBUTES
==========
LinearRegression: Class
Parent Class from where Output Prediction
Value is expressed, after Linear Weighted
Combination of Input is calculated .
METHODS
=======
predict(X):
Return the probabilistic value
of an Input, belonging to either
class 0 or class 1, by using final
weights from Trained Logistic
Regression Model.
classify(X):
Return the Class corresponding to
each Input of Dataset, Predicted by
Trained Logistic Regression Model,
i.e in this scenario, either class 0
or class 1.
"""
def predict(self, X):
"""
Predict the Probabilistic Value of
Input, in accordance with
Logistic Regression Model.
PARAMETERS
==========
X: ndarray(dtype=float,ndim=1)
1-D Array of Dataset's Input.
prediction: ndarray(dtype=float,ndim=1)
1-D Array of Predicted Values
corresponding to each Input of
Dataset.
RETURNS
=======
ndarray(dtype=float,ndim=1)
1-D Array of Probabilistic Values
of whether the particular Input
belongs to class 0 or class 1.
"""
prediction = np.dot(X, self.weights).T
sigmoid = Sigmoid()
return sigmoid.activation(prediction)
def classify(self, X):
"""
Classify the Input, according to
Logistic Regression Model,i.e in this
case, either class 0 or class 1.
PARAMETERS
==========
X: ndarray(dtype=float,ndim=1)
1-D Array of Dataset's Input.
prediction: ndarray(dtype=float,ndim=1)
1-D Array of Predicted Values
corresponding to their Inputs.
actual_predictions: ndarray(dtype=int,ndim=1)
1-D Array of Output, associated
to each Input of Dataset,
Predicted by Trained Logistic
Regression Model.
RETURNS
=======
ndarray
1-D Array of Predicted classes
(either 0 or 1) corresponding
to their inputs.
"""
prediction = np.dot(X, self.weights).T
sigmoid = Sigmoid()
prediction = sigmoid.activation(prediction)
actual_predictions = np.zeros((1, X.shape[0]))
for i in range(prediction.shape[1]):
if prediction[0][i] > 0.5:
actual_predictions[0][i] = 1
return actual_predictions
def Plot(self,
X,
Y,
actual_predictions,
optimizer=GradientDescent,
epochs=25,
zeros=False
):
"""
Plots for Logistic Regression.
PARAMETERS
==========
X: ndarray(dtype=float,ndim=1)
1-D Array of Dataset's Input.
Y: ndarray(dtype=float,ndim=1)
1-D Array of Dataset's Output.
actual_predictions: ndarray(dtype=int,ndim=1)
1-D Array of Output, associated
to each Input of Dataset,
Predicted by Trained Logistic
Regression Model.
optimizer: class
Class of one of the Optimizers like
AdamProp,SGD,MBGD,GradientDescent etc
epochs: int
            Number of times the loop to calculate loss
            and optimize weights is going to take
            place.
error: float
            The degree to which the predicted value
            deviates from the actual values, given by implementing
            one of the chosen loss functions from loss_func.py.
zeros: boolean
Condition to initialize Weights as either
zeroes or some random decimal values.
RETURNS
=======
2-D graph of Sigmoid curve,
        Comparison plot of True output and Predicted output versus Feature.
2-D graph of Loss versus Number of iterations.
"""
Plot = plt.figure(figsize=(8, 8))
plot1 = Plot.add_subplot(2, 2, 1)
plot2 = Plot.add_subplot(2, 2, 2)
plot3 = Plot.add_subplot(2, 2, 3)
# 2-D graph of Sigmoid curve.
x = np.linspace(- max(X[:, 0]) - 2, max(X[:, 0]) + 2, 1000)
plot1.set_title('Sigmoid curve')
plot1.grid()
sigmoid = Sigmoid()
plot1.scatter(X.T[0], Y, color="red", marker="+", label="labels")
plot1.plot(x, 0 * x + 0.5, linestyle="--",
label="Decision bound, y=0.5")
plot1.plot(x, sigmoid.activation(x),
color="green", label='Sigmoid function: 1 / (1 + e^-x)'
)
plot1.legend()
        # Comparison plot of Actual output and Predicted output vs Feature.
        plot2.set_title('Actual output and Predicted output versus Feature')
plot2.set_xlabel("x")
plot2.set_ylabel("y")
plot2.scatter(X[:, 0], Y, color="orange", label='Actual output')
plot2.grid()
plot2.scatter(X[:, 0], actual_predictions,
color="blue", marker="+", label='Predicted output'
)
plot2.legend()
# 2-D graph of Loss versus Number of iterations.
plot3.set_title("Loss versus Number of iterations")
plot3.set_xlabel("iterations")
plot3.set_ylabel("Cost")
iterations = []
cost = []
self.weights = generate_weights(X.shape[1], 1, zeros=zeros)
for epoch in range(1, epochs + 1):
iterations.append(epoch)
self.weights = optimizer.iterate(X, Y, self.weights)
error = optimizer.loss_func.loss(X, Y, self.weights)
cost.append(error)
plot3.plot(np.array(iterations), np.array(cost))
plt.show()
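# Hedged usage sketch (added for illustration; the weights and inputs are
# hypothetical). `classify` thresholds the sigmoid of X @ weights at 0.5 and
# returns a (1, n) array of 0/1 labels.
def _demo_logistic_regression_classify():
    model = LogisticRegression()
    model.weights = np.array([[1.0], [-1.0]])  # hand-picked weights
    X = np.array([[3.0, 1.0], [0.5, 2.0]])
    return model.classify(X)                   # expected -> [[1., 0.]]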
class DecisionTreeClassifier():
"""
A class to implement the Decision Tree Algorithm.
ATTRIBUTES
==========
None
METHODS
=======
print_tree(rows, head, spacing = "")
To print the decision tree of the rows
in an organised manner.
classify(rows, head, prediction_val)
To determine and return the predictions
of the subsets of the dataset.
"""
def print_tree(self, rows, head, spacing=""):
"""
A tree printing function.
PARAMETERS
==========
rows: list
A list of lists to store the dataset.
head: list
A list to store the headings of the
columns of the dataset.
spacing: String
To store and update the spaces to
print the tree in an organised manner.
RETURNS
=======
None
"""
        # Try partitioning the dataset on each of the unique attributes,
# calculate the gini impurity,
# and return the question that produces the least gini impurity.
gain, question = find_best_split(rows, head)
# Base case: we've reached a leaf
if gain == 0:
print(spacing + "Predict", class_counts(rows, len(rows[0]) - 1))
return
# If we reach here, we have found a useful feature / value
# to partition on.
true_rows, false_rows = partition(rows, question)
# Print the question at this node
print(spacing + str(question))
# Call this function recursively on the true branch
print(spacing + '--> True:')
self.print_tree(true_rows, head, spacing + " ")
# Call this function recursively on the false branch
print(spacing + '--> False:')
self.print_tree(false_rows, head, spacing + " ")
def classify(self, rows, head, prediction_val):
"""
A function to make predictions of
the subsets of the dataset.
PARAMETERS
==========
rows: list
A list of lists to store the subsets
of the dataset.
head: list
A list to store the headings of the
columns of the subset of the dataset.
prediction_val: dictionary
A dictionary to update and return the
predictions of the subsets of the
dataset.
RETURNS
=======
prediction_val
Dictionary to return the predictions
corresponding to the subsets of the
dataset.
"""
N = len(rows[0])
# Finding random indexes for columns
# to collect random samples of the dataset.
indexcol = []
for j in range(0, 5):
r = np.random.randint(0, N - 2)
if r not in indexcol:
indexcol.append(r)
row = []
for j in rows:
L = []
for k in indexcol:
L.append(j[k])
row.append(L)
# add last column to the random sample so created.
for j in range(0, len(row)):
row[j].append(rows[j][N - 1])
rows = row
        # Try partitioning the dataset on each of the unique attributes,
# calculate the gini impurity,
# and return the question that produces the least gini impurity.
gain, question = find_best_split(rows, head)
# Base case: we've reached a leaf
if gain == 0:
# Get the predictions of the current set of rows.
p = class_counts(rows, len(rows[0]) - 1)
for d in prediction_val:
for j in p:
if d == j:
# update the predictions to be returned.
prediction_val[d] = prediction_val[d] + p[j]
return prediction_val
# If we reach here, we have found a useful feature / value
# to partition on.
true_rows, false_rows = partition(rows, question)
# Recursively build the true branch.
self.classify(true_rows, head, prediction_val)
# Recursively build the false branch.
self.classify(false_rows, head, prediction_val)
# Return the dictionary of the predictions
# at the end of the recursion.
return prediction_val
class RandomForestClassifier(DecisionTreeClassifier):
"""
A class to implement the Random Forest Classification Algorithm.
ATTRIBUTES
==========
DecisionTreeClassifier: Class
Parent Class from where the predictions
for the subsets of the dataset are made.
METHODS
=======
predict(A, n_estimators=100):
Print the value that appears the
highest in the list of predictions
of the subsets of the dataset.
"""
def predict(self, A, head, n_estimators=100):
"""
Determine the predictions of the
subsets of the dataset through the
DecisionTreeClassifier class and
print the mode of the predicted values.
PARAMETERS
==========
A: ndarray(dtype=int,ndim=2)
2-D Array of Dataset's Input
n_estimators: int
Number of Decision Trees to be
iterated over for the classification.
RETURNS
=======
None
"""
prediction = {}
print("Predictions of individual decision trees")
# Iterate to collect predictions of
# 100 Decision Trees after taking
# random samples from the dataset.
for i in range(n_estimators):
M = len(A)
# Finding random indexes for rows
# to collect the bootstrapped samples
# of the dataset.
indexrow = np.random.randint(0, M - 1, 6)
rows = []
for j in indexrow:
rows.append(A[j])
label = len(rows[0]) - 1
# Get prediction values for the rows
prediction_val = class_counts(rows, label)
for d in prediction_val:
prediction_val[d] = 0
# Create object of class DecisionTreeClassifier
RandomF = DecisionTreeClassifier()
# Store the returned dictionary of the
# predictions of the subsets of the dataset.
di = RandomF.classify(rows, head, prediction_val)
print(di)
# find maximum predicted value for the subsets
# of the dataset.
maximum = 0
for j in di:
if di[j] > maximum:
maximum = di[j]
maxk = j
# Update the dictionary prediction with the
# maximum predicted value in the
# predictions of the subsets of the dataset.
if maxk not in prediction:
prediction[maxk] = maximum
else:
prediction[maxk] = prediction[maxk] + maximum
# find maximum predicted value, hence the
# final prediction of the Random Forest Algorithm.
maximum = 0
for i in prediction:
if prediction[i] > maximum:
maximum = prediction[i]
maxk = i
# predicting the maximum occurence
print("\n Predict = {", maxk, "}")
class KNN():
"""
A single Class that can act as both KNN classifier or regressor
based on arguements given to the prediction function.
ATTRIBUTES
==========
None
METHODS
=======
predict(train, test_row, num_neighbours=7, classify=True):
K Nearest Neighbour Model, used as Classifier, to
predict the class of test point , with respect to
its n nearest neighbours.
"""
def predict(self, train, test_row, num_neighbours=7, classify=True):
"""
KNN Prediction Model, used for either Regression or
Classification , in respect to Test Point and
Dataset Type.
PARAMETERS
==========
train: ndarray
Array Representation of Collection
of Points, with their corresponding
x1,x2 and y features.
test_row: ndarray(dtype=int,ndim=1,axis=1)
Array representation of test point,
with its corresponding x1,x2 and y
features.
num_neighbours: int
Number of nearest neighbours, close
to the test point, with respect to
x1,x2 and y features.
classify: Boolean
Type of Mode, K Nearest Neighbour
Model wants to be applied, according
to Dataset and Application Field.
neighbours: list
List of n nearest neighbours, close
to the test point, with their
associated Point Array and distance
from the Test point.
        output: list
List of Distances of n nearest
neighbours, calculated with respect
to the test point, using either
Block or Euclidean Metric.
key: int
            Count of the number of terms inside the
            output list.
RETURNS
=======
prediction: float/int
If used as a Classifier, gives
Class number as prediction. Else,
it will give the mean of Cluster
made by test point and its n
nearest neighbours.
"""
        neighbours = get_neighbours(
            train, test_row, num_neighbours, distance_metrics="block")
        output = [row[-1] for row in neighbours]
        if classify:
            prediction = max(set(output), key=output.count)
        else:
            prediction = sum(output) / len(output)
        return prediction
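# Hedged usage sketch (added for illustration; the toy points are
# hypothetical). Each training row is [x1, x2, label]; with classify=True the
# majority label among the nearest neighbours is returned.
def _demo_knn_classify():
    train = np.array([[1.0, 1.0, 0.0],
                      [1.2, 0.9, 0.0],
                      [0.8, 1.1, 0.0],
                      [5.0, 5.0, 1.0],
                      [5.2, 4.9, 1.0],
                      [4.8, 5.1, 1.0],
                      [5.1, 5.2, 1.0]])
    test_row = np.array([1.1, 1.0, 0.0])
    return KNN().predict(train, test_row, num_neighbours=3)  # expected -> 0.0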
class Naive_Bayes():
"""
A class which classifies and predicts based on simple
Naive Bayes algorithm.
ATTRIBUTES
==========
None
METHODS
=======
predict(self, x_label, y_class):
Naive Bayes Model to predict the
class given the label.
"""
def predict(self, x_label, y_class):
"""
Naive Bayes Model to predict the
class given the label.
PARAMETERS
==========
x_label: ndarray(dtype=int,ndim=1,axis=1)
Array of labels.
y_class: ndarray(dtype=int,ndim=1,axis=1)
Array of classes.
RETURNS
=======
Most probable output or prediction, as list
of the label and class name.
"""
pyx = []
likelihood = make_likelihood_table(x_label, y_class)
Y = np.unique(y_class)
X = np.unique(x_label)
for j in range(len(Y)):
total = 0
for i in range(len(X)):
if(likelihood[i][j] == 0):
continue
total += math.log(likelihood[i][j])
y_sum = (y_class == Y[j]).sum()
if y_sum:
total += math.log(y_sum / len(y_class))
pyx.append([total, X[i], Y[j]])
prediction = max(pyx)
return [prediction[1], prediction[2]]
class Gaussian_Naive_Bayes():
"""
A class which classifies and predicts based on Gaussian
Naive Bayes algorithm.
ATTRIBUTES
==========
None
METHODS
=======
predict(self, x_label, y_class):
Gaussian Naive Bayes Model to predict the
label for given class values.
"""
# data is variable input given by user for which we predict the label.
# Here we predict the gender from given list of height, weight, foot_size
def predict(self, data, x_label, y_class):
"""
Gaussian Naive Bayes Model to predict the
label given the class values.
PARAMETERS
==========
x_label: ndarray(dtype=int,ndim=1,axis=1)
Array of labels.
y_class: ndarray(dtype=int,ndim=1,axis=1)
Array of classes.
RETURNS
=======
Predicts the label, for given class values
by user.
"""
mean, var = get_mean_var(x_label, y_class)
argmax = 0
for (k1, v1), (k2, v2) in zip(mean.items(), var.items()):
pre_prob = Counter(x_label)[k1] / len(x_label)
pro = 1
for i in range(len(v1)):
pro *= p_y_given_x(data[i], v1[i], v2[i])
pxy = pro * pre_prob
if(pxy > argmax):
prediction = k1
return prediction
class BernoulliNB(object):
def __init__(self, alpha=1):
self.alpha = alpha
def fit(self, x, y):
separate = [[i for i, t in zip(x, y) if t == c] for c in np.unique(y)]
count_for_sample = x.shape[0]
self.class_log = [np.log(len(i) / count_for_sample) for i in separate]
count = self.alpha + np.array([np.array(i).sum(axis=0) for i in
separate])
smoothing = 2 * self.alpha
doc = np.array([smoothing + len(i) for i in separate])
self.log_prob = count / doc[np.newaxis].T
return self
def predict_log(self, x):
return [(np.log(self.log_prob) * i + np.log(1 - self.log_prob) *
np.abs(i - 1)).sum(axis=1) + self.class_log for i in x]
def predict(self, x):
return np.argmax(self.predict_log(x), axis=1)
class MultinomialNB(object):
def __init__(self, alpha=1):
self.alpha = alpha
def fit(self, x, y):
separate = [[i for i, t in zip(x, y) if t == c] for c in np.unique(y)]
count_for_sample = x.shape[0]
self.class_log = [np.log(len(i) / count_for_sample) for i in separate]
count = self.alpha + np.array([np.array(i).sum(axis=0) for i in
separate])
self.log_prob = np.log(count / count.sum(axis=1)[np.newaxis].T)
return self
def predict_log(self, x):
return [(self.log_prob * i).sum(axis=1) + self.class_log for i in x]
def predict(self, x):
return np.argmax(self.predict_log(x), axis=1)
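# Hedged usage sketch (added for illustration; the count matrix and labels
# are hypothetical). Rows of X are per-sample feature counts, y holds the
# class labels; `predict` returns the class with the highest log posterior.
def _demo_multinomial_nb():
    X = np.array([[2, 1, 0],
                  [1, 2, 0],
                  [0, 1, 3],
                  [0, 2, 2]])
    y = np.array([0, 0, 1, 1])
    model = MultinomialNB(alpha=1).fit(X, y)
    return model.predict(np.array([[3, 0, 0], [0, 0, 4]]))  # expected -> [0, 1]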
class KMeansClustering():
"""
One of the models used for Unsupervised
learning, by making finite number of clusters
from Dataset points.
ATTRIBUTES
==========
None
METHODS
=======
work(M, num_cluster, epochs):
Give details about cluster arrangements
from Dataset's Points, after suitable
number of epoch steps.
"""
def work(self, M, num_cluster, epochs):
"""
        Show the arrangement of clusters after a
        certain number of epochs, given the number
        of clusters and the input dataset matrix.
PARAMETERS
==========
M: ndarray(dtype=int,ndim=2)
Dataset Matrix with finite number
of points, having their corresponding
x and y coordinates.
num_cluster: int
Number of Clusters to be made from
the provided Dataset's points.
epochs: int
            Number of times the centroids' coordinates
            are updated, in order to obtain suitable
            clusters with an appropriate number of points.
centroid_array: list
List of randomly initialised centroids,
out of Dataset points, which will be
going to update with every epoch, in
order to obtain suitable clusters.
interm: ndarray(dtype=int,ndim=2)
Intermediate Matrix, consisting of
clusterwise sum of each coordinate,
with number of points in each cluster.
new_array: list
Updated list of new centroids, due to
changes in cluster points, with each
epoch.
cluster_array: list
List of Resultant Clusters, made after
updating centroids with each epoch.
            It consists of the centroid and its
corresponding nearby points of each
Cluster.
cluster: list
List of Current cluster to be shown
on screen, with its corresponding
centroid and nearby points.
RETURNS
=======
None
"""
centroid_array = initi_centroid(M, num_cluster)
for i in range(1, epochs + 1):
interm = xy_calc(M, centroid_array)
new_array = new_centroid(interm)
centroid_array = new_array
cluster_array = cluster_allot(M, centroid_array)
for cluster in cluster_array:
print("==============================\n")
print(cluster)
print("\n==============================\n")
# ---------------------- Divisive Hierarchical Clustering ----------------
class DivisiveClustering():
def work(self, M, n_clusters, n_iterations=7,
enable_for_larger_clusters=False):
if n_clusters > len(M):
raise(ValueError(
f'Number of clusters {n_clusters} inputted is greater than \
dataset number of examples {len(M)}.'))
KMC = KMeans()
clusters, centroids = KMC.runKMeans(M, 2, n_iterations)
# global list of clusters and global np.array of centroids
global_clusters, global_centroids = clusters, centroids
# Visualize flag to toggle visualization of clusters while the
# algorithm runs
_visualize = False
# List to store sum of squared errors of each cluster
cluster_sse_list = [sse(clusters[0], centroids[0]),
sse(clusters[1], centroids[1])]
# List to store lengths of each cluster
cluster_len_list = [len(clusters[0]), len(clusters[1])]
if n_clusters > 20 and not enable_for_larger_clusters:
print('Visualization disabled for number of clusters > 20. To \
enable them for larger number of clusters, pass enable_for_\
larger_clusters = True argument for DC.work.')
else:
_visualize = True
i = 2
while len(global_clusters) < n_clusters:
            # index of the cluster to be split; selection criterion: cluster
            # having max sse
rem_index = cluster_sse_list.index(max(cluster_sse_list))
            # cluster to be split
parent = global_clusters[rem_index]
cl = cluster_len_list[rem_index]
if cl == 1:
# if single example remaining, directly add into global
# clusters
global_centroids[rem_index] = parent[0]
cluster_sse_list[rem_index] = 0.
                # check if all previous clusters are split completely
# #!FIXME: Necessary?
m = max(cluster_len_list)
# case where all sse errors are zero
if any(cluster_sse_list) and m == 1:
i += 1
continue
else:
                # index of cluster to be split
rem_index = cluster_len_list.index(max(cluster_len_list))
                # cluster to be split
parent = global_clusters[rem_index]
i += 1
            # delete all residues of the cluster to be split
del(global_clusters[rem_index])
del(cluster_sse_list[rem_index])
del(cluster_len_list[rem_index])
global_centroids = np.delete(global_centroids, rem_index, 0)
# run kmeans to split the cluster
clusters, centroids = KMC.runKMeans(parent, 2, 7)
            # update util arrays using data from the split clusters
global_clusters.extend([clusters[0], clusters[1]])
# print(f'global_clusters: {global_clusters}, len(global_clusters):
# {len(global_clusters)}, clusters: {clusters}, len(clusters):
# {len(clusters)}, parent:{parent}')
cluster_sse_list.extend([sse(clusters[0], centroids[0]), sse(
clusters[1], centroids[1])])
cluster_len_list.extend([len(clusters[0]), len(clusters[1])])
global_centroids = np.append(global_centroids, centroids, axis=0)
# visualize formation of clusters
if _visualize:
visualize_clusters(global_clusters, global_centroids, i)
return global_clusters, global_centroids
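# Illustrative usage sketch, assuming the module's KMeans, sse and
# visualize_clusters helpers referenced above; the values are hypothetical:
#
#   >>> M = np.array([[1, 2], [1, 4], [8, 8], [9, 9], [0, 1], [10, 8]])
#   >>> clusters, centroids = DivisiveClustering().work(M, n_clusters=3)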
class Bayes_Optimization():
# surrogate or approximation for the objective function
def surrogate(self, model, X):
# catch any warning generated when making a prediction
with catch_warnings():
# ignore generated warnings
simplefilter("ignore")
return model.predict(X, return_std=True)
def acquisition(self, X, Xsamples, model):
yhat, _ = self.surrogate(model, X)
best = max(yhat)
# calculate mean and stdev via surrogate function
mu, std = self.surrogate(model, Xsamples)
mu = mu[:, 0]
# calculate the probability of improvement
probs = norm.cdf((mu - best) / (std + 1E-9))
return probs
# optimize the acquisition function
def opt_acquisition(self, X, y, model):
# random search, generate random samples
Xsamples = random(100)
Xsamples = Xsamples.reshape(len(Xsamples), 1)
# calculate the acquisition function for each sample
scores = self.acquisition(X, Xsamples, model)
# locate the index of the largest scores
ix = np.argmax(scores)
return Xsamples[ix, 0]
# plot real observations vs surrogate function
def plot(self, X, y, model):
# scatter plot of inputs and real objective function
plt.scatter(X, y)
# line plot of surrogate function across domain
Xsamples = np.asarray(np.arange(0, 1, 0.001))
Xsamples = Xsamples.reshape(len(Xsamples), 1)
ysamples, _ = self.surrogate(model, Xsamples)
plt.plot(Xsamples, ysamples)
# show the plot
plt.show()
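# Illustrative optimization-loop sketch. It assumes a scikit-learn
# GaussianProcessRegressor as the surrogate model and a user-supplied
# objective(x) function; both are assumptions, not part of this module:
#
#   >>> from sklearn.gaussian_process import GaussianProcessRegressor
#   >>> bo = Bayes_Optimization()
#   >>> X = random(5).reshape(-1, 1)
#   >>> y = np.asarray([objective(x) for x in X]).reshape(-1, 1)
#   >>> model = GaussianProcessRegressor().fit(X, y)
#   >>> for _ in range(20):
#   ...     x_next = bo.opt_acquisition(X, y, model)
#   ...     X = np.vstack((X, [[x_next]]))
#   ...     y = np.vstack((y, [[objective(x_next)]]))
#   ...     model.fit(X, y)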
# ---------------------- Principal Component Analysis ------------------------
class PCA(PCA_utils):
"""
Principal component analysis (PCA):
Linear dimensionality reduction using Singular Value Decomposition of the
data to project it to a lower dimensional space. The input data is centered
but not scaled for each feature before applying the SVD.
"""
def __init__(self, n_components=None, whiten=False, svd_solver='auto'):
self.n_components = n_components
self.whiten = whiten
self.svd_solver = svd_solver
self.components = None
self.mean = None
self.explained_variances = None
self.noise_variance = None
self.fitted = False
def fit(self, X, y=None):
# fit the model with the data X
self._fit(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X."""
U, S, Vh = self._fit(X)
U = U[:, :self.n_components]
if self.whiten:
U *= math.sqrt(X.shape[0] - 1)
else:
U *= S[:self.n_components]
return U
def _fit(self, X):
'''Fitting function for the model'''
# count the sparsity of the ndarray
count = np.count_nonzero(X)
sparsity = 1.0 - (count / np.size(X))
if sparsity > 0.5:
raise TypeError('PCA does not support sparse input.')
if self.n_components is None:
n_components = min(X.shape)
else:
n_components = self.n_components
fit_svd_solver = self.svd_solver
if fit_svd_solver == 'auto':
# Small problem or n_components == 'mle', call full PCA
if max(X.shape) <= 500 or n_components == 'mle':
fit_svd_solver = 'full'
elif n_components >= 1 and n_components < .8 * min(X.shape):
fit_svd_solver = 'randomized'
# Case of n_components in (0,1)
else:
fit_svd_solver = 'full'
# Call different fits for either full or truncated SVD
if fit_svd_solver == 'full':
return self.fit_full(X, n_components)
else:
raise ValueError("Unrecognized svd_solver="
"'{0}'".format(fit_svd_solver))
def fit_full(self, X, n_components):
"""Fit the model by computing full SVD on X."""
n_samples, n_features = X.shape
if n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only "
"supported if n_samples >= n_features")
# mean of the dataset
self.mean = np.mean(X, axis=0)
std = np.std(X)
X = (X - self.mean) / std
U, S, Vh = np.linalg.svd(X, full_matrices=False)
# flip eigenvectors' sign to enforce deterministic output
# columns of U, rows of Vh
max_abs_cols = np.argmax(np.abs(U), axis=0)
signs = np.sign(U[max_abs_cols, range(U.shape[1])])
U *= signs
Vh *= signs[:, np.newaxis]
components = Vh
# explained variance by singular values
explained_variances = (S**2) / (n_samples - 1)
explained_variance_ratio = (explained_variances /
explained_variances.sum())
singular_value = S.copy()
if n_components == 'mle':
n_components = infer_dimension(explained_variances, n_samples)
elif 0 < n_components < 1.0:
ratio_cumsum = np.cumsum(explained_variance_ratio, axis=None,
dtype=np.float64)
n_components = np.searchsorted(ratio_cumsum, n_components,
side='right') + 1
# Computing noise covariance using Probabilistic PCA model
if n_components < min(n_features, n_samples):
self.noise_variance = explained_variances[n_components:].mean()
else:
self.noise_variance = 1.0
# storing the first n_component values
self.components = components[:n_components]
        self.n_components = n_components
self.explained_variances = explained_variances[:n_components]
self.explained_variance_ratio = explained_variance_ratio[:n_components]
self.singular_value = singular_value[:n_components]
self.fitted = True
return U, S, Vh
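# Illustrative usage sketch; with an integer n_components the fit only uses
# numpy and the full-SVD path defined above:
#
#   >>> X = np.random.rand(100, 5)
#   >>> pca = PCA(n_components=2)
#   >>> X_reduced = pca.fit_transform(X)      # shape (100, 2)
#   >>> pca.explained_variance_ratio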
# ------------------------------Numerical Outliers Method----------------------
class Numerical_outliers():
def get_percentile(c, percentile_rank):
"""
get_percentile Function
PARAMETER
=========
c:ndarray(dtype=float,ndim=1)
input dataset
        percentile_rank: float
            Percentile rank, in the range 0-100.
RETURNS
=======
Data corresponding to percentile rank
"""
d = np.sort(c)
index = int(((len(d) - 1) * percentile_rank) // 100)
return d[index]
def get_outliers(x):
""" get_outliers Function
PARAMETER
=========
x:ndarray(dtype=float,ndim=1)
input dataset
"""
Q1 = Numerical_outliers.get_percentile(x, 25)
Q3 = Numerical_outliers.get_percentile(x, 75)
iqr = Q3 - Q1
lowerbound = Q1 - 1.5 * iqr
upperbound = Q3 + 1.5 * iqr
for i in range(len(x)):
if x[i] > upperbound or x[i] < lowerbound:
print("outlier=", x[i])
# ---------------------- Sequential Neural Network ---------------------------
class Sequential(nn.Module):
"""
A class to construct Neural Networks with ease.
Usage:
>>> from MLlib.models import Sequential
>>> model = Sequential(
layer1,
layer2,
layer3,
layer4,
...
)
    The layers (layer1, layer2, etc.) can be custom layers but must inherit from
    the `MLlib.nn.Module` class.
"""
# TODO:
# - create a method .fit(train_data, epochs, loss_fn, optimizer)
def __init__(self, *layers):
"""
"""
super().__init__()
self._submodules = OrderedDict()
for i in range(len(layers)):
self.register_module(str(i), layers[i])
def forward(self, x):
for layer in self._submodules.values():
x = layer(x)
return x
class Agglomerative_clustering():
"""
    One of the models used for unsupervised
    learning; it groups the dataset's points into
    a finite number of clusters.
ATTRIBUTES
==========
None
METHODS
=======
    work(X, num_clusters):
        Gives details about the cluster arrangement
        of the dataset's points.
"""
def work(self, X, num_clusters):
"""
        Show the arrangement of clusters, given the
        number of clusters and the input dataset
        matrix.
PARAMETERS
==========
X: ndarray(dtype=int,ndim=2)
Dataset Matrix with finite number
of points, having their corresponding
x and y coordinates.
        num_clusters: int
            Number of clusters to be made from
            the provided dataset's points. num_clusters should be
            less than or equal to X.shape[0].
samples: list
List of lists of Dataset points, which will be
updated with every iteration of while loop due
to merging of data points, in
order to obtain suitable clusters.
Distance_mat: ndarray(dtype=int,ndim=2)
            Adjacency matrix consisting of the
            distance between every pair of points,
            clusters, or a point and a cluster.
RETURNS
=======
None
"""
samples = [[list(X[i])] for i in range(X.shape[0])]
m = len(samples)
# create adjacency matrix
Distance_mat = compute_distance(samples)
print("Samples before clustering : {}".format(samples))
print("=============================================")
while m > num_clusters:
Distance_mat = compute_distance(samples)
# find the index [i,j] of the minimum distance from the matrix
# samples[i], samples[j] are to be merged
sample_ind_needed = np.where(Distance_mat == Distance_mat.min())[0]
print("Sample size before clustering : ", m)
print("Samples indexes to be merged: {}".format(sample_ind_needed))
value_to_add = samples.pop(sample_ind_needed[1])
# print("Values :{}".format(value_to_add))
print("Samples before clustering: {}".format(samples))
samples[sample_ind_needed[0]].append(value_to_add)
print("Samples after clustering: {}".format(samples))
m = len(samples)
print("Sample size after clustering : ", m)
print("=============================================")
print("Number of clusters formed are : {}".format(m))
print("Clusters formed are : {}".format(samples))
# plotting the dendrograms
def plot(self, X):
plt.figure(figsize=(10, 7))
plt.title("Dendrograms")
shc.dendrogram(shc.linkage(X, method='single'))
plt.show()
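# Illustrative usage sketch, assuming the module's compute_distance helper
# plus the scipy/matplotlib imports used above; the values are hypothetical:
#
#   >>> X = np.array([[1, 2], [1, 4], [8, 8], [9, 9], [0, 1]])
#   >>> ac = Agglomerative_clustering()
#   >>> ac.work(X, num_clusters=2)   # prints the merge steps and final clusters
#   >>> ac.plot(X)                   # single-linkage dendrogram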
| 30.809112
| 79
| 0.557999
|
310ce5d9a9d12111712c1eab2707826ffcebfa99
| 1,343
|
py
|
Python
|
cogs/Triggers.py
|
jakebacker/Gompei-Bot
|
dba3b11874ca19c22f170b8a01928af2b3fb2bd8
|
[
"MIT"
] | 9
|
2020-02-14T02:07:39.000Z
|
2022-01-08T03:25:22.000Z
|
cogs/Triggers.py
|
jakebacker/Gompei-Bot
|
dba3b11874ca19c22f170b8a01928af2b3fb2bd8
|
[
"MIT"
] | 16
|
2020-02-14T02:10:29.000Z
|
2022-02-04T14:50:58.000Z
|
cogs/Triggers.py
|
jakebacker/Gompei-Bot
|
dba3b11874ca19c22f170b8a01928af2b3fb2bd8
|
[
"MIT"
] | 8
|
2020-02-21T14:28:07.000Z
|
2022-02-04T14:49:04.000Z
|
from discord.ext import commands
greetings = ["hello", "hi", "greetings", "howdy", "salutations", "hey", "oi", "dear", "yo ", "morning", "afternoon",
"evening", "sup", "G'day", "good day", "bonjour"]
gompei_references = ["gompei", "672453835863883787", "goat"]
love_references = ["gompeiHug", "love", "ily", "<3", "❤"]
hate_references = ["fuck you", "sucks", "fucker", "idiot", "shithead", "eat shit", "hate"]
violent_references = ["kill", "murder", "attack", "skin", "ambush", "stab"]
class Triggers(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_message(self, message):
if not message.author.bot:
if any(x in message.content.lower() for x in gompei_references):
if any(x in message.content.lower() for x in love_references):
await message.add_reaction("❤")
elif any(x in message.content.lower() for x in hate_references):
await message.add_reaction("😢")
elif any(x in message.content.lower() for x in greetings):
await message.add_reaction("👋")
elif any(x in message.content.lower() for x in violent_references):
await message.add_reaction("😨")
def setup(bot):
bot.add_cog(Triggers(bot))
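# Illustrative note: with a discord.py-style bot, this cog would typically be
# loaded from the main script (the bot object name is an assumption here):
#
#   bot.load_extension("cogs.Triggers")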
| 43.322581
| 116
| 0.595681
|
0e269c09850e065a8861143b9038cb665d0c8f1b
| 2,770
|
py
|
Python
|
src/inverse_text_normalization/gu/taggers/tokenize_and_classify.py
|
yashiagar1999/indict_punc
|
8697ac5a5245c7e0d35b0777b1dc6fb1b8d6d525
|
[
"MIT"
] | 15
|
2021-07-30T18:18:47.000Z
|
2022-02-14T09:04:19.000Z
|
src/inverse_text_normalization/gu/taggers/tokenize_and_classify.py
|
yashiagar1999/indict_punc
|
8697ac5a5245c7e0d35b0777b1dc6fb1b8d6d525
|
[
"MIT"
] | 1
|
2021-12-15T12:42:12.000Z
|
2022-02-15T05:33:00.000Z
|
src/inverse_text_normalization/gu/taggers/tokenize_and_classify.py
|
yashiagar1999/indict_punc
|
8697ac5a5245c7e0d35b0777b1dc6fb1b8d6d525
|
[
"MIT"
] | 4
|
2021-07-30T10:03:38.000Z
|
2021-12-01T14:46:54.000Z
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# from inverse_text_normalization.lang_params import LANG
# lang_taggers = f'inverse_text_normalization.taggers.{LANG}_taggers'
lang_taggers = 'inverse_text_normalization.gu.taggers'
from inverse_text_normalization.gu.graph_utils import GraphFst
exec(f"from {lang_taggers}.cardinal import CardinalFst")
exec(f"from {lang_taggers}.date import DateFst")
exec(f"from {lang_taggers}.decimal import DecimalFst")
exec(f"from {lang_taggers}.measure import MeasureFst")
exec(f"from {lang_taggers}.money import MoneyFst")
exec(f"from {lang_taggers}.ordinal import OrdinalFst")
exec(f"from {lang_taggers}.time import TimeFst")
exec(f"from {lang_taggers}.whitelist import WhiteListFst")
exec(f"from {lang_taggers}.word import WordFst")
try:
from pynini.lib import pynutil
PYNINI_AVAILABLE = True
except (ModuleNotFoundError, ImportError):
PYNINI_AVAILABLE = False
class ClassifyFst(GraphFst):
"""
    Composes other classifier grammars. This class will be compiled and exported to thrax FAR.
"""
def __init__(self):
super().__init__(name="tokenize_and_classify", kind="classify")
cardinal_graph_fst = CardinalFst()
cardinal = cardinal_graph_fst.fst
ordinal_graph_fst = OrdinalFst(cardinal_graph_fst)
ordinal = ordinal_graph_fst.fst
decimal_graph_fst = DecimalFst(cardinal_graph_fst)
decimal = decimal_graph_fst.fst
measure = MeasureFst(cardinal_graph_fst, decimal_graph_fst).fst
date = DateFst(ordinal_graph_fst).fst
word = WordFst().fst
time = TimeFst().fst
money = MoneyFst(cardinal_graph_fst, decimal_graph_fst).fst
whitelist = WhiteListFst().fst
graph = (
pynutil.add_weight(whitelist, 1.01)
| pynutil.add_weight(time, 1.1)
| pynutil.add_weight(date, 1.09)
| pynutil.add_weight(decimal, 1.1)
| pynutil.add_weight(measure, 1.1)
| pynutil.add_weight(cardinal, 1.1)
| pynutil.add_weight(ordinal, 1.1)
| pynutil.add_weight(money, 1.1)
| pynutil.add_weight(word, 100)
)
self.fst = graph.optimize()
| 36.447368
| 94
| 0.711552
|
6c30804c7a529ba095bc5e76563eb929729a0f94
| 10,189
|
py
|
Python
|
pomodorr/projects/serializers.py
|
kamil559/pomodorr
|
232e6e98ff3481561dd1235794b3960066713210
|
[
"MIT"
] | null | null | null |
pomodorr/projects/serializers.py
|
kamil559/pomodorr
|
232e6e98ff3481561dd1235794b3960066713210
|
[
"MIT"
] | 15
|
2020-04-11T18:30:57.000Z
|
2020-07-05T09:37:43.000Z
|
pomodorr/projects/serializers.py
|
kamil559/pomodorr
|
232e6e98ff3481561dd1235794b3960066713210
|
[
"MIT"
] | null | null | null |
from datetime import timedelta
from rest_framework import serializers
from rest_framework.serializers import ModelSerializer
from pomodorr.projects.exceptions import ProjectException, PriorityException, TaskException, SubTaskException
from pomodorr.projects.models import Project, Priority, Task, SubTask
from pomodorr.projects.selectors.priority_selector import get_priorities_for_user, get_all_priorities
from pomodorr.projects.selectors.project_selector import get_all_active_projects, get_active_projects_for_user
from pomodorr.projects.selectors.task_selector import get_all_non_removed_tasks, get_all_non_removed_tasks_for_user
from pomodorr.projects.services.project_service import is_project_name_available
from pomodorr.projects.services.sub_task_service import is_sub_task_name_available
from pomodorr.projects.services.task_service import (
complete_task, reactivate_task, pin_to_project, is_task_name_available
)
from pomodorr.tools.utils import has_changed
from pomodorr.tools.validators import duration_validator, today_validator
from pomodorr.users.selectors import get_active_standard_users
class PrioritySerializer(serializers.ModelSerializer):
priority_level = serializers.IntegerField(required=True, min_value=1)
user = serializers.PrimaryKeyRelatedField(write_only=True, default=serializers.CurrentUserDefault(),
queryset=get_active_standard_users())
class Meta:
model = Priority
fields = ('id', 'name', 'priority_level', 'color', 'user')
def validate(self, data):
self.check_priority_name_uniqueness(data=data)
return data
def check_priority_name_uniqueness(self, data):
user = self.context['request'].user
name = data.get('name') or None
if name is not None and get_priorities_for_user(user=user, name=name).exists():
raise serializers.ValidationError(
{'name': PriorityException.messages[PriorityException.priority_duplicated]},
code=PriorityException.priority_duplicated)
return data
class ProjectSerializer(ModelSerializer):
user = serializers.PrimaryKeyRelatedField(write_only=True, default=serializers.CurrentUserDefault(),
queryset=get_active_standard_users())
priority = serializers.PrimaryKeyRelatedField(required=False, allow_null=True,
queryset=get_all_priorities())
user_defined_ordering = serializers.IntegerField(min_value=1)
class Meta:
model = Project
fields = ('id', 'name', 'priority', 'user_defined_ordering', 'user')
def validate_priority(self, value):
user = self.context['request'].user
if not get_priorities_for_user(user=user).filter(id=value.id).exists():
raise serializers.ValidationError(ProjectException.messages[ProjectException.priority_does_not_exist],
code=ProjectException.priority_does_not_exist)
return value
def validate(self, data):
# Temporary solution for https://github.com/encode/django-rest-framework/issues/7100
self.check_project_name_uniqueness(data=data)
return data
def check_project_name_uniqueness(self, data):
user = self.context['request'].user
name = data.get('name') or None
if user is not None and name is not None and not is_project_name_available(
user=user, name=name, excluded=self.instance):
raise serializers.ValidationError(
{'name': ProjectException.messages[ProjectException.project_duplicated]},
code=ProjectException.project_duplicated)
def to_representation(self, instance):
data = super(ProjectSerializer, self).to_representation(instance=instance)
data['priority'] = PrioritySerializer(instance=instance.priority).data
return data
class SubTaskSerializer(serializers.ModelSerializer):
task = serializers.PrimaryKeyRelatedField(
required=True,
queryset=get_all_non_removed_tasks()
)
class Meta:
model = SubTask
fields = ('id', 'name', 'task', 'is_completed')
def validate_task(self, value):
user = self.context['request'].user
if value and not get_all_non_removed_tasks_for_user(user=user, id=value.id).exists():
raise serializers.ValidationError(SubTaskException.messages[SubTaskException.task_does_not_exist],
code=SubTaskException.task_does_not_exist)
if value and value.status == Task.status_completed:
raise serializers.ValidationError(SubTaskException.messages[SubTaskException.task_already_completed],
code=SubTaskException.task_already_completed)
if self.instance and has_changed(self.instance, 'task', value):
raise serializers.ValidationError(SubTaskException.messages[SubTaskException.cannot_change_task],
code=SubTaskException.cannot_change_task)
return value
def validate(self, data):
self.check_sub_task_name_uniqueness(data=data)
return data
def check_sub_task_name_uniqueness(self, data):
name = data.get('name') or None
task = data.get('task') or None
user = self.context['request'].user
if name is not None and task is not None and user is not None and \
not is_sub_task_name_available(task=task, name=name, excluded=self.instance):
raise serializers.ValidationError(
{'name': [SubTaskException.messages[SubTaskException.sub_task_duplicated]]},
code=SubTaskException.sub_task_duplicated)
class TaskSerializer(serializers.ModelSerializer):
project = serializers.PrimaryKeyRelatedField(
required=True,
queryset=get_all_active_projects()
)
priority = serializers.PrimaryKeyRelatedField(
required=False, allow_empty=True, allow_null=True,
queryset=get_all_priorities()
)
user_defined_ordering = serializers.IntegerField(min_value=1)
pomodoro_length = serializers.DurationField(required=False, allow_null=True, min_value=timedelta(minutes=5),
max_value=timedelta(hours=6))
break_length = serializers.DurationField(required=False, allow_null=True, min_value=timedelta(minutes=5),
max_value=timedelta(hours=6))
repeat_duration = serializers.DurationField(required=False, allow_null=True, min_value=timedelta(days=1),
validators=[duration_validator])
due_date = serializers.DateTimeField(required=False, allow_null=True, validators=[today_validator])
sub_tasks = SubTaskSerializer(many=True, read_only=True)
class Meta:
model = Task
fields = (
'id', 'name', 'status', 'project', 'priority', 'user_defined_ordering', 'pomodoro_number',
'pomodoro_length', 'break_length', 'due_date', 'reminder_date', 'repeat_duration', 'note', 'sub_tasks')
def validate_project(self, value):
user = self.context['request'].user
if not get_active_projects_for_user(user=user, id=value.id).exists():
raise serializers.ValidationError(TaskException.messages[TaskException.project_does_not_exist],
code=TaskException.project_does_not_exist)
return value
def validate_priority(self, value):
user = self.context['request'].user
if value and not get_priorities_for_user(user=user).filter(id=value.id).exists():
raise serializers.ValidationError(TaskException.messages[TaskException.priority_does_not_exist],
code=TaskException.priority_does_not_exist)
return value
def validate_status(self, value):
if not self.instance and value and value == self.Meta.model.status_completed:
raise serializers.ValidationError(TaskException.messages[TaskException.wrong_status],
code=TaskException.wrong_status)
return value
def update(self, instance, validated_data):
status = validated_data.pop('status') if 'status' in validated_data else None
project = validated_data.pop('project') if 'project' in validated_data else None
if status is not None:
if has_changed(instance, 'status', status, self.Meta.model.status_completed):
instance = complete_task(task=self.instance, db_save=False)
elif has_changed(instance, 'status', status, self.Meta.model.status_active):
instance = reactivate_task(task=self.instance, db_save=False)
if project is not None and has_changed(instance, 'project', project):
instance = pin_to_project(task=instance, project=project, db_save=False)
return super(TaskSerializer, self).update(instance, validated_data)
def validate(self, data):
# Temporary solution for https://github.com/encode/django-rest-framework/issues/7100
self.check_task_name_uniqueness(data=data)
return data
def check_task_name_uniqueness(self, data):
name = data.get('name') or None
project = data.get('project') or None
if name is not None and project is not None and not is_task_name_available(
project=project, name=name, excluded=self.instance):
raise serializers.ValidationError({'name': TaskException.messages[TaskException.task_duplicated]},
code=TaskException.task_duplicated)
def to_representation(self, instance):
data = super(TaskSerializer, self).to_representation(instance=instance)
data['status'] = instance.get_status_display()
data['priority'] = PrioritySerializer(instance=instance.priority).data
data['project'] = ProjectSerializer(instance=instance.project).data
return data
| 48.519048
| 115
| 0.688978
|
e3cf1eeedea8999277c2956bf64ddf377947ebdc
| 501
|
py
|
Python
|
find_lane_in_mv.py
|
ishota/CarND-LaneLines-P1
|
7780ec931d696efb9e5c41d5d91aa8723b7cb2a0
|
[
"MIT"
] | null | null | null |
find_lane_in_mv.py
|
ishota/CarND-LaneLines-P1
|
7780ec931d696efb9e5c41d5d91aa8723b7cb2a0
|
[
"MIT"
] | null | null | null |
find_lane_in_mv.py
|
ishota/CarND-LaneLines-P1
|
7780ec931d696efb9e5c41d5d91aa8723b7cb2a0
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import utl
from consts import *
def main():
# divide videos into img
image_list = utl.get_frame_list('test_videos/' + VID_NAME + '.mp4')
# detect lane line
line_color = [0, 0, 255]
for n, image in enumerate(image_list):
image_list[n] = utl.find_lane_line(image, line_color, is_improved=IS_IMPROVED)
# create video from img
utl.convert_frame_to_video(image_list, VID_NAME, 'test_videos_output/')
if __name__ == '__main__':
main()
| 21.782609
| 86
| 0.668663
|
7890429cfa3e7afb401842c3ff15265d5f03d1d4
| 10,222
|
py
|
Python
|
docs/source/examples/deepmind/dm_manipulation_stack_sac.py
|
Toni-SM/skrl
|
15b429d89e3b8a1828b207d88463bf7090288d18
|
[
"MIT"
] | 43
|
2021-12-19T07:47:43.000Z
|
2022-03-31T05:24:42.000Z
|
docs/source/examples/deepmind/dm_manipulation_stack_sac.py
|
Toni-SM/skrl
|
15b429d89e3b8a1828b207d88463bf7090288d18
|
[
"MIT"
] | 5
|
2022-01-05T07:54:13.000Z
|
2022-03-08T21:00:39.000Z
|
docs/source/examples/deepmind/dm_manipulation_stack_sac.py
|
Toni-SM/skrl
|
15b429d89e3b8a1828b207d88463bf7090288d18
|
[
"MIT"
] | 1
|
2022-01-31T17:53:52.000Z
|
2022-01-31T17:53:52.000Z
|
from dm_control import manipulation
import torch
import torch.nn as nn
# Import the skrl components to build the RL system
from skrl.models.torch import GaussianModel, DeterministicModel
from skrl.memories.torch import RandomMemory
from skrl.agents.torch.sac import SAC, SAC_DEFAULT_CONFIG
from skrl.trainers.torch import SequentialTrainer
from skrl.envs.torch import wrap_env
# Define the models (stochastic and deterministic models) for the SAC agent using the helper classes
# - StochasticActor (policy): takes as input the environment's observation/state and returns an action
# - Critic: takes the state and action as input and provides a value to guide the policy
class StochasticActor(GaussianModel):
def __init__(self, observation_space, action_space, device, clip_actions=False,
clip_log_std=True, min_log_std=-20, max_log_std=2):
super().__init__(observation_space, action_space, device, clip_actions,
clip_log_std, min_log_std, max_log_std)
self.features_extractor = nn.Sequential(nn.Conv2d(3, 32, kernel_size=8, stride=3),
nn.ReLU(),
nn.Conv2d(32, 64, kernel_size=4, stride=2),
nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=2, stride=1),
nn.ReLU(),
nn.Flatten(),
nn.Linear(7744, 512),
nn.ReLU(),
nn.Linear(512, 8),
nn.Tanh())
self.net = nn.Sequential(nn.Linear(26, 32),
nn.ReLU(),
nn.Linear(32, 32),
nn.ReLU(),
nn.Linear(32, self.num_actions))
self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))
def compute(self, states, taken_actions):
# The dm_control.manipulation tasks have as observation/state spec a `collections.OrderedDict` object as follows:
# OrderedDict([('front_close', BoundedArray(shape=(1, 84, 84, 3), dtype=dtype('uint8'), name='front_close', minimum=0, maximum=255)),
# ('jaco_arm/joints_pos', Array(shape=(1, 6, 2), dtype=dtype('float64'), name='jaco_arm/joints_pos')),
# ('jaco_arm/joints_torque', Array(shape=(1, 6), dtype=dtype('float64'), name='jaco_arm/joints_torque')),
# ('jaco_arm/joints_vel', Array(shape=(1, 6), dtype=dtype('float64'), name='jaco_arm/joints_vel')),
# ('jaco_arm/jaco_hand/joints_pos', Array(shape=(1, 3), dtype=dtype('float64'), name='jaco_arm/jaco_hand/joints_pos')),
# ('jaco_arm/jaco_hand/joints_vel', Array(shape=(1, 3), dtype=dtype('float64'), name='jaco_arm/jaco_hand/joints_vel')),
# ('jaco_arm/jaco_hand/pinch_site_pos', Array(shape=(1, 3), dtype=dtype('float64'), name='jaco_arm/jaco_hand/pinch_site_pos')),
# ('jaco_arm/jaco_hand/pinch_site_rmat', Array(shape=(1, 9), dtype=dtype('float64'), name='jaco_arm/jaco_hand/pinch_site_rmat'))])
# This spec is converted to a `gym.spaces.Dict` space by the `wrap_env` function as follows:
# Dict(front_close: Box(0, 255, (1, 84, 84, 3), uint8),
# jaco_arm/jaco_hand/joints_pos: Box(-inf, inf, (1, 3), float64),
# jaco_arm/jaco_hand/joints_vel: Box(-inf, inf, (1, 3), float64),
# jaco_arm/jaco_hand/pinch_site_pos: Box(-inf, inf, (1, 3), float64),
# jaco_arm/jaco_hand/pinch_site_rmat: Box(-inf, inf, (1, 9), float64),
# jaco_arm/joints_pos: Box(-inf, inf, (1, 6, 2), float64),
# jaco_arm/joints_torque: Box(-inf, inf, (1, 6), float64),
# jaco_arm/joints_vel: Box(-inf, inf, (1, 6), float64))
# The `spaces` parameter is a flat tensor of the flattened observation/state space with shape (batch_size, size_of_flat_space).
# Using the model's method `tensor_to_space` we can convert the flattened tensor to the original space.
# https://skrl.readthedocs.io/en/latest/modules/skrl.models.base_class.html#skrl.models.torch.base.Model.tensor_to_space
input = self.tensor_to_space(states, self.observation_space)
# For this case, the `input` variable is a Python dictionary with the following structure and shapes:
# {'front_close': torch.Tensor(shape=[batch_size, 1, 84, 84, 3], dtype=torch.float32),
# 'jaco_arm/jaco_hand/joints_pos': torch.Tensor(shape=[batch_size, 1, 3], dtype=torch.float32)
# 'jaco_arm/jaco_hand/joints_vel': torch.Tensor(shape=[batch_size, 1, 3], dtype=torch.float32)
# 'jaco_arm/jaco_hand/pinch_site_pos': torch.Tensor(shape=[batch_size, 1, 3], dtype=torch.float32)
# 'jaco_arm/jaco_hand/pinch_site_rmat': torch.Tensor(shape=[batch_size, 1, 9], dtype=torch.float32)
# 'jaco_arm/joints_pos': torch.Tensor(shape=[batch_size, 1, 6, 2], dtype=torch.float32)
# 'jaco_arm/joints_torque': torch.Tensor(shape=[batch_size, 1, 6], dtype=torch.float32)
# 'jaco_arm/joints_vel': torch.Tensor(shape=[batch_size, 1, 6], dtype=torch.float32)}
# permute and normalize the images (samples, width, height, channels) -> (samples, channels, width, height)
features = self.features_extractor(input['front_close'][:,0].permute(0, 3, 1, 2) / 255.0)
return torch.tanh(self.net(torch.cat([features,
input["jaco_arm/joints_pos"].view(states.shape[0], -1),
input["jaco_arm/joints_vel"].view(states.shape[0], -1)], dim=-1))), self.log_std_parameter
class Critic(DeterministicModel):
def __init__(self, observation_space, action_space, device, clip_actions = False):
super().__init__(observation_space, action_space, device, clip_actions)
self.features_extractor = nn.Sequential(nn.Conv2d(3, 32, kernel_size=8, stride=3),
nn.ReLU(),
nn.Conv2d(32, 64, kernel_size=4, stride=2),
nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=2, stride=1),
nn.ReLU(),
nn.Flatten(),
nn.Linear(7744, 512),
nn.ReLU(),
nn.Linear(512, 8),
nn.Tanh())
self.net = nn.Sequential(nn.Linear(26 + self.num_actions, 32),
nn.ReLU(),
nn.Linear(32, 32),
nn.ReLU(),
nn.Linear(32, 1))
def compute(self, states, taken_actions):
# map the observations/states to the original space.
# See the explanation above (StochasticActor.compute)
input = self.tensor_to_space(states, self.observation_space)
# permute and normalize the images (samples, width, height, channels) -> (samples, channels, width, height)
features = self.features_extractor(input['front_close'][:,0].permute(0, 3, 1, 2) / 255.0)
return self.net(torch.cat([features,
input["jaco_arm/joints_pos"].view(states.shape[0], -1),
input["jaco_arm/joints_vel"].view(states.shape[0], -1),
taken_actions], dim=-1))
# Load and wrap the DeepMind environment
env = manipulation.load("reach_site_vision")
env = wrap_env(env)
device = env.device
# Instantiate a RandomMemory (without replacement) as experience replay memory
memory = RandomMemory(memory_size=50000, num_envs=env.num_envs, device=device, replacement=False)
# Instantiate the agent's models (function approximators).
# SAC requires 5 models, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.sac.html#spaces-and-models
models_sac = {"policy": StochasticActor(env.observation_space, env.action_space, device, clip_actions=True),
"critic_1": Critic(env.observation_space, env.action_space, device),
"critic_2": Critic(env.observation_space, env.action_space, device),
"target_critic_1": Critic(env.observation_space, env.action_space, device),
"target_critic_2": Critic(env.observation_space, env.action_space, device)}
# Initialize the models' parameters (weights and biases) using a Gaussian distribution
for model in models_sac.values():
model.init_parameters(method_name="normal_", mean=0.0, std=0.1)
# Configure and instantiate the agent.
# Only modify some of the default configuration, visit its documentation to see all the options
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.sac.html#configuration-and-hyperparameters
cfg_sac = SAC_DEFAULT_CONFIG.copy()
cfg_sac["gradient_steps"] = 1
cfg_sac["batch_size"] = 256
cfg_sac["random_timesteps"] = 0
cfg_sac["learning_starts"] = 10000
cfg_sac["learn_entropy"] = True
# logging to TensorBoard and write checkpoints each 1000 and 5000 timesteps respectively
cfg_sac["experiment"]["write_interval"] = 1000
cfg_sac["experiment"]["checkpoint_interval"] = 5000
agent_sac = SAC(models=models_sac,
memory=memory,
cfg=cfg_sac,
observation_space=env.observation_space,
action_space=env.action_space,
device=device)
# Configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 100000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent_sac)
# start training
trainer.train()
| 58.411429
| 151
| 0.596165
|
b4057efcbb0da71a9b4115f58108b5a21c0cb694
| 12,282
|
py
|
Python
|
sympy/printing/mathml.py
|
matthew-brett/sympy
|
7b87b62144c28f2e734e9106897c72806b99d181
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/printing/mathml.py
|
matthew-brett/sympy
|
7b87b62144c28f2e734e9106897c72806b99d181
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/printing/mathml.py
|
matthew-brett/sympy
|
7b87b62144c28f2e734e9106897c72806b99d181
|
[
"BSD-3-Clause"
] | null | null | null |
"""
A MathML printer.
"""
from sympy import Basic, sympify, S
from sympy.simplify import fraction
from printer import Printer
from conventions import split_super_sub
class MathMLPrinter(Printer):
"""Prints an expression to the MathML markup language
Whenever possible tries to use Content markup and not Presentation markup.
References: http://www.w3.org/TR/MathML2/
"""
printmethod = "_mathml"
_default_settings = {
"order": None,
"encoding": "utf-8"
}
def __init__(self, settings=None):
Printer.__init__(self, settings)
from xml.dom.minidom import Document
self.dom = Document()
def doprint(self, expr):
mathML = Printer._print(self, expr)
return mathML.toxml(encoding=self._settings['encoding'])
def mathml_tag(self, e):
"""Returns the MathML tag for an expression."""
translate = {
'Add': 'plus',
'Mul': 'times',
'Derivative': 'diff',
'Number': 'cn',
'int': 'cn',
'Pow': 'power',
'Symbol': 'ci',
'Integral': 'int',
'sin': 'sin',
'cos': 'cos',
'tan': 'tan',
'cot': 'cot',
'asin': 'arcsin',
'asinh': 'arcsinh',
'acos': 'arccos',
'acosh': 'arccosh',
'atan': 'arctan',
'atanh': 'arctanh',
'acot': 'arccot',
'atan2': 'arctan',
'log': 'ln'
}
for cls in e.__class__.__mro__:
n = cls.__name__
if n in translate:
return translate[n]
# Not found in the MRO set
n = e.__class__.__name__
return n.lower()
def _print_Mul(self, expr):
coeff, terms = expr.as_coeff_terms()
if coeff.is_negative:
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement('minus'))
x.appendChild(self._print_Mul(-expr))
return x
numer, denom = fraction(expr)
if not denom is S.One:
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement('divide'))
x.appendChild(self._print(numer))
x.appendChild(self._print(denom))
return x
if coeff == 1 and len(terms) == 1:
return self._print(terms[0])
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement('times'))
if(coeff != 1):
x.appendChild(self._print(coeff))
for term in terms:
x.appendChild(self._print(term))
return x
    # This is complicated because we attempt to order the results in the order of Basic._compare_pretty
    # and use minus instead of negative
def _print_Add(self, e):
args = list(e.args)
args.sort(Basic._compare_pretty)
lastProcessed = self._print(args[0])
args.pop(0)
plusNodes = list()
for i in range(0,len(args)):
arg = args[i]
coeff, terms = arg.as_coeff_terms()
if(coeff.is_negative):
#use minus
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement('minus'))
x.appendChild(lastProcessed)
x.appendChild(self._print(-arg))
#invert expression since this is now minused
lastProcessed = x;
if(arg == args[-1]):
plusNodes.append(lastProcessed)
else:
plusNodes.append(lastProcessed)
lastProcessed = self._print(arg)
if(arg == args[-1]):
plusNodes.append(self._print(arg))
if len(plusNodes) == 1:
return lastProcessed
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement('plus'))
while len(plusNodes) > 0:
x.appendChild(plusNodes.pop(0))
return x
def _print_Matrix(self, m):
x = self.dom.createElement('matrix')
for i in range(m.lines):
x_r = self.dom.createElement('matrixrow')
for j in range(m.cols):
x_r.appendChild(self._print(m[i,j]))
x.appendChild(x_r)
return x
def _print_Rational(self, e):
if e.q == 1:
#don't divide
x = self.dom.createElement('cn')
x.appendChild(self.dom.createTextNode(str(e.p)))
return x
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement('divide'))
#numerator
xnum = self.dom.createElement('cn')
xnum.appendChild(self.dom.createTextNode(str(e.p)))
        #denominator
xdenom = self.dom.createElement('cn')
xdenom.appendChild(self.dom.createTextNode(str(e.q)))
x.appendChild(xnum)
x.appendChild(xdenom)
return x
def _print_Limit(self, e):
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement(self.mathml_tag(e)))
x_1 = self.dom.createElement('bvar')
x_2 = self.dom.createElement('lowlimit')
x_1.appendChild(self._print(e.args[1]))
x_2.appendChild(self._print(e.args[2]))
x.appendChild(x_1)
x.appendChild(x_2)
x.appendChild(self._print(e.args[0]))
return x
def _print_ImaginaryUnit(self,e):
return self.dom.createElement('imaginaryi')
def _print_EulerGamma(self,e):
return self.dom.createElement('eulergamma')
def _print_GoldenRatio(self,e):
"""We use unicode #x3c6 for Greek letter phi as defined here
http://www.w3.org/Math/characters/"""
x = self.dom.createElement('cn')
x.appendChild(self.dom.createTextNode(u"\u03c6"))
return x
def _print_Exp1(self,e):
return self.dom.createElement('exponentiale')
def _print_Pi(self, e):
return self.dom.createElement('pi')
def _print_Infinity(self, e):
return self.dom.createElement('infinity')
def _print_Negative_Infinity(self,e):
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement('minus'))
x.appendChild(self.dom.createElement('infinity'))
return x
def _print_Integral(self, e):
def lime_recur(limits):
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement(self.mathml_tag(e)))
bvar_elem = self.dom.createElement('bvar')
bvar_elem.appendChild(self._print(limits[0][0]))
x.appendChild(bvar_elem)
if limits[0][1]:
low_elem = self.dom.createElement('lowlimit')
low_elem.appendChild(self._print(limits[0][1][0]))
x.appendChild(low_elem)
up_elem = self.dom.createElement('uplimit')
up_elem.appendChild(self._print(limits[0][1][1]))
x.appendChild(up_elem)
if len(limits) == 1:
x.appendChild(self._print(e.function))
else:
x.appendChild(lime_recur(limits[1:]))
return x
limits = list(e.limits)
limits.reverse()
return lime_recur(limits)
def _print_Symbol(self, sym):
ci = self.dom.createElement(self.mathml_tag(sym))
def join(items):
if len(items) > 1:
mrow = self.dom.createElement('mml:mrow')
for i, item in enumerate(items):
if i>0:
mo = self.dom.createElement('mml:mo')
mo.appendChild(self.dom.createTextNode(" "))
mrow.appendChild(mo)
mi = self.dom.createElement('mml:mi')
mi.appendChild(self.dom.createTextNode(item))
mrow.appendChild(mi)
return mrow
else:
mi = self.dom.createElement('mml:mi')
mi.appendChild(self.dom.createTextNode(items[0]))
return mi
name, supers, subs = split_super_sub(sym.name)
mname = self.dom.createElement('mml:mi')
mname.appendChild(self.dom.createTextNode(name))
if len(supers) == 0:
if len(subs) == 0:
ci.appendChild(self.dom.createTextNode(name))
else:
msub = self.dom.createElement('mml:msub')
msub.appendChild(mname)
msub.appendChild(join(subs))
ci.appendChild(msub)
else:
if len(subs) == 0:
msup = self.dom.createElement('mml:msup')
msup.appendChild(mname)
msup.appendChild(join(supers))
ci.appendChild(msup)
else:
msubsup = self.dom.createElement('mml:msubsup')
msubsup.appendChild(mname)
msubsup.appendChild(join(subs))
msubsup.appendChild(join(supers))
ci.appendChild(msubsup)
return ci
def _print_Pow(self, e):
#Here we use root instead of power if the exponent is the reciprocal of an integer
if e.exp.is_Rational and e.exp.p == 1:
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement('root'))
if e.exp.q != 2:
xmldeg = self.dom.createElement('degree')
xmlci = self.dom.createElement('ci')
xmlci.appendChild(self.dom.createTextNode(str(e.exp.q)))
xmldeg.appendChild(xmlci)
x.appendChild(xmldeg)
x.appendChild(self._print(e.base))
return x
x = self.dom.createElement('apply')
x_1 = self.dom.createElement(self.mathml_tag(e))
x.appendChild(x_1)
x.appendChild(self._print(e.base))
x.appendChild(self._print(e.exp))
return x
def _print_Number(self, e):
x = self.dom.createElement(self.mathml_tag(e))
x.appendChild(self.dom.createTextNode(str(e)))
return x
def _print_Derivative(self, e):
x = self.dom.createElement('apply')
x.appendChild(self.dom.createElement(self.mathml_tag(e)))
x_1 = self.dom.createElement('bvar')
for sym in e.symbols:
x_1.appendChild(self._print(sym))
x.appendChild(x_1)
x.appendChild(self._print(e.expr))
return x
def _print_Function(self, e):
x = self.dom.createElement("apply")
x.appendChild(self.dom.createElement(self.mathml_tag(e)))
for arg in e.args:
x.appendChild(self._print(arg))
return x
def _print_Basic(self, e):
x = self.dom.createElement(self.mathml_tag(e))
for arg in e:
x.appendChild(self._print(arg))
return x
def _print_AssocOp(self, e):
x = self.dom.createElement('apply')
x_1 = self.dom.createElement(self.mathml_tag(e))
x.appendChild(x_1)
for arg in e.args:
x.appendChild(self._print(arg))
return x
def _print_list(self, seq):
"""MathML reference for the <list> element:
http://www.w3.org/TR/MathML2/chapter4.html#contm.list"""
dom_element = self.dom.createElement('list')
for item in seq:
dom_element.appendChild(self._print(item))
return dom_element
def _print_int(self, p):
dom_element = self.dom.createElement(self.mathml_tag(p))
dom_element.appendChild(self.dom.createTextNode(str(p)))
return dom_element
def mathml(expr, **settings):
"""Returns the MathML representation of expr"""
return MathMLPrinter(settings).doprint(expr)
def print_mathml(expr, **settings):
"""
Prints a pretty representation of the MathML code for expr
>>> ##
>>> from sympy.printing.mathml import print_mathml
>>> from sympy.abc import x
>>> print_mathml(x+1) #doctest: +NORMALIZE_WHITESPACE
<apply>
<plus/>
<cn>
1
</cn>
<ci>
x
</ci>
</apply>
"""
s = MathMLPrinter(settings)
print s._print(sympify(expr)).toprettyxml(encoding="utf-8")
| 33.649315
| 100
| 0.561146
|
70379b5a0b6c1727957fc99098c0dca8026184d9
| 162
|
py
|
Python
|
PyStacks/test/test_verification.py
|
0xack13/PyStacks
|
13136c43089c241680beb216a233d1846119dd7c
|
[
"MIT"
] | 11
|
2018-02-15T04:27:05.000Z
|
2020-10-02T11:20:08.000Z
|
PyStacks/test/test_verification.py
|
0xack13/PyStacks
|
13136c43089c241680beb216a233d1846119dd7c
|
[
"MIT"
] | 3
|
2018-02-15T05:46:54.000Z
|
2018-03-05T04:46:51.000Z
|
PyStacks/test/test_verification.py
|
0xack13/PyStacks
|
13136c43089c241680beb216a233d1846119dd7c
|
[
"MIT"
] | 8
|
2018-03-05T04:40:41.000Z
|
2021-02-22T08:07:58.000Z
|
from mock import MagicMock
import PyStacks.PyStacks.verification as verification
class TestVerification:
def test_ensure_http_success(self):
pass
| 16.2
| 53
| 0.783951
|
59cbd347b42bbfb41e77a8ee380f937cc09abc48
| 52,489
|
py
|
Python
|
pennylane/devices/tests/test_measurements.py
|
MoritzWillmann/pennylane
|
2b07d22cfcc6406ba28e5c647062340b240a4ee5
|
[
"Apache-2.0"
] | 539
|
2018-11-13T08:45:42.000Z
|
2020-07-27T18:17:16.000Z
|
pennylane/devices/tests/test_measurements.py
|
MoritzWillmann/pennylane
|
2b07d22cfcc6406ba28e5c647062340b240a4ee5
|
[
"Apache-2.0"
] | 588
|
2018-11-14T10:21:47.000Z
|
2020-07-28T06:27:14.000Z
|
pennylane/devices/tests/test_measurements.py
|
MoritzWillmann/pennylane
|
2b07d22cfcc6406ba28e5c647062340b240a4ee5
|
[
"Apache-2.0"
] | 165
|
2018-11-13T18:58:56.000Z
|
2020-07-27T17:18:17.000Z
|
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests that the different measurement types work correctly on a device."""
# pylint: disable=no-self-use,pointless-statement, no-member
import pytest
from flaky import flaky
from scipy.sparse import csr_matrix
import pennylane as qml
from pennylane import numpy as np
pytestmark = pytest.mark.skip_unsupported
# ==========================================================
# Some useful global variables
# observables for which device support is tested
obs = {
"Identity": qml.Identity(wires=[0]),
"Hadamard": qml.Hadamard(wires=[0]),
"Hermitian": qml.Hermitian(np.eye(2), wires=[0]),
"PauliX": qml.PauliX(wires=[0]),
"PauliY": qml.PauliY(wires=[0]),
"PauliZ": qml.PauliZ(wires=[0]),
"Projector": qml.Projector(np.array([1]), wires=[0]),
"SparseHamiltonian": qml.SparseHamiltonian(csr_matrix(np.eye(8)), wires=[0, 1, 2]),
"Hamiltonian": qml.Hamiltonian([1, 1], [qml.PauliZ(0), qml.PauliX(0)]),
}
all_obs = obs.keys()
# All qubit observables should be available to test in the device test suite
all_available_obs = qml.ops._qubit__obs__.copy() # pylint: disable=protected-access
# Note that the identity is not technically a qubit observable
all_available_obs |= {"Identity"}
if not set(all_obs) == all_available_obs:
raise ValueError(
"A qubit observable has been added that is not being tested in the "
"device test suite. Please add to the obs dictionary in "
"pennylane/devices/tests/test_measurements.py"
)
# single qubit Hermitian observable
A = np.array([[1.02789352, 1.61296440 - 0.3498192j], [1.61296440 + 0.3498192j, 1.23920938 + 0j]])
obs_lst = [
qml.PauliX(wires=0) @ qml.PauliY(wires=1),
qml.PauliX(wires=1) @ qml.PauliY(wires=0),
qml.PauliX(wires=1) @ qml.PauliZ(wires=2),
qml.PauliX(wires=2) @ qml.PauliZ(wires=1),
qml.Identity(wires=0) @ qml.Identity(wires=1) @ qml.PauliZ(wires=2),
qml.PauliZ(wires=0) @ qml.PauliX(wires=1) @ qml.PauliY(wires=2),
]
obs_permuted_lst = [
qml.PauliY(wires=1) @ qml.PauliX(wires=0),
qml.PauliY(wires=0) @ qml.PauliX(wires=1),
qml.PauliZ(wires=2) @ qml.PauliX(wires=1),
qml.PauliZ(wires=1) @ qml.PauliX(wires=2),
qml.PauliZ(wires=2) @ qml.Identity(wires=0) @ qml.Identity(wires=1),
qml.PauliX(wires=1) @ qml.PauliY(wires=2) @ qml.PauliZ(wires=0),
]
label_maps = [[0, 1, 2], ["a", "b", "c"], ["beta", "alpha", "gamma"], [3, "beta", "a"]]
def sub_routine(label_map):
"""Quantum function to initalize state in tests"""
qml.Hadamard(wires=label_map[0])
qml.RX(0.12, wires=label_map[1])
qml.RY(3.45, wires=label_map[2])
class TestSupportedObservables:
"""Test that the device can implement all observables that it supports."""
@pytest.mark.parametrize("observable", all_obs)
def test_supported_observables_can_be_implemented(self, device_kwargs, observable):
"""Test that the device can implement all its supported observables."""
device_kwargs["wires"] = 3
dev = qml.device(**device_kwargs)
if device_kwargs.get("shots", None) is not None and observable == "SparseHamiltonian":
pytest.skip("SparseHamiltonian only supported in analytic mode")
assert hasattr(dev, "observables")
if observable in dev.observables:
kwargs = {"diff_method": "parameter-shift"} if observable == "SparseHamiltonian" else {}
@qml.qnode(dev, **kwargs)
def circuit():
if dev.supports_operation(qml.PauliX): # ionq can't have empty circuits
qml.PauliX(0)
return qml.expval(obs[observable])
assert isinstance(circuit(), (float, np.ndarray))
def test_tensor_observables_can_be_implemented(self, device_kwargs):
"""Test that the device can implement a simple tensor observable.
This test is skipped for devices that do not support tensor observables."""
device_kwargs["wires"] = 2
dev = qml.device(**device_kwargs)
supports_tensor = (
"supports_tensor_observables" in dev.capabilities()
and dev.capabilities()["supports_tensor_observables"]
)
if not supports_tensor:
pytest.skip("Device does not support tensor observables.")
@qml.qnode(dev)
def circuit():
if dev.supports_operation(qml.PauliX): # ionq can't have empty circuits
qml.PauliX(0)
return qml.expval(qml.Identity(wires=0) @ qml.Identity(wires=1))
assert isinstance(circuit(), (float, np.ndarray))
# pylint: disable=too-few-public-methods
@flaky(max_runs=10)
class TestHamiltonianSupport:
"""Separate test to ensure that the device can differentiate Hamiltonian observables."""
def test_hamiltonian_diff(self, device_kwargs, tol):
"""Tests a simple VQE gradient using parameter-shift rules."""
device_kwargs["wires"] = 1
dev = qml.device(**device_kwargs)
coeffs = np.array([-0.05, 0.17])
param = np.array(1.7, requires_grad=True)
@qml.qnode(dev, diff_method="parameter-shift")
def circuit(coeffs, param):
qml.RX(param, wires=0)
qml.RY(param, wires=0)
return qml.expval(
qml.Hamiltonian(
coeffs,
[qml.PauliX(0), qml.PauliZ(0)],
)
)
grad_fn = qml.grad(circuit)
grad = grad_fn(coeffs, param)
def circuit1(param):
"""First Pauli subcircuit"""
qml.RX(param, wires=0)
qml.RY(param, wires=0)
return qml.expval(qml.PauliX(0))
def circuit2(param):
"""Second Pauli subcircuit"""
qml.RX(param, wires=0)
qml.RY(param, wires=0)
return qml.expval(qml.PauliZ(0))
half1 = qml.QNode(circuit1, dev, diff_method="parameter-shift")
half2 = qml.QNode(circuit2, dev, diff_method="parameter-shift")
def combine(coeffs, param):
return coeffs[0] * half1(param) + coeffs[1] * half2(param)
grad_fn_expected = qml.grad(combine)
grad_expected = grad_fn_expected(coeffs, param)
assert np.allclose(grad[0], grad_expected[0], atol=tol(dev.shots))
assert np.allclose(grad[1], grad_expected[1], atol=tol(dev.shots))
@flaky(max_runs=10)
class TestExpval:
"""Test expectation values"""
def test_identity_expectation(self, device, tol):
"""Test that identity expectation value (i.e. the trace) is 1."""
n_wires = 2
dev = device(n_wires)
theta = 0.432
phi = 0.123
@qml.qnode(dev)
def circuit():
qml.RX(theta, wires=[0])
qml.RX(phi, wires=[1])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.Identity(wires=0)), qml.expval(qml.Identity(wires=1))
res = circuit()
assert np.allclose(res, np.array([1, 1]), atol=tol(dev.shots))
def test_pauliz_expectation(self, device, tol):
"""Test that PauliZ expectation value is correct"""
n_wires = 2
dev = device(n_wires)
theta = 0.432
phi = 0.123
@qml.qnode(dev)
def circuit():
qml.RX(theta, wires=[0])
qml.RX(phi, wires=[1])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(wires=0)), qml.expval(qml.PauliZ(wires=1))
res = circuit()
assert np.allclose(
res, np.array([np.cos(theta), np.cos(theta) * np.cos(phi)]), atol=tol(dev.shots)
)
def test_paulix_expectation(self, device, tol):
"""Test that PauliX expectation value is correct"""
n_wires = 2
dev = device(n_wires)
theta = 0.432
phi = 0.123
@qml.qnode(dev)
def circuit():
qml.RY(theta, wires=[0])
qml.RY(phi, wires=[1])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliX(wires=0)), qml.expval(qml.PauliX(wires=1))
res = circuit()
expected = np.array([np.sin(theta) * np.sin(phi), np.sin(phi)])
assert np.allclose(res, expected, atol=tol(dev.shots))
def test_pauliy_expectation(self, device, tol):
"""Test that PauliY expectation value is correct"""
n_wires = 2
dev = device(n_wires)
theta = 0.432
phi = 0.123
@qml.qnode(dev)
def circuit():
qml.RX(theta, wires=[0])
qml.RX(phi, wires=[1])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliY(wires=0)), qml.expval(qml.PauliY(wires=1))
res = circuit()
expected = np.array([0.0, -np.cos(theta) * np.sin(phi)])
assert np.allclose(res, expected, atol=tol(dev.shots))
def test_hadamard_expectation(self, device, tol):
"""Test that Hadamard expectation value is correct"""
n_wires = 2
dev = device(n_wires)
theta = 0.432
phi = 0.123
@qml.qnode(dev)
def circuit():
qml.RY(theta, wires=[0])
qml.RY(phi, wires=[1])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.Hadamard(wires=0)), qml.expval(qml.Hadamard(wires=1))
res = circuit()
expected = np.array(
[np.sin(theta) * np.sin(phi) + np.cos(theta), np.cos(theta) * np.cos(phi) + np.sin(phi)]
) / np.sqrt(2)
assert np.allclose(res, expected, atol=tol(dev.shots))
def test_hermitian_expectation(self, device, tol):
"""Test that arbitrary Hermitian expectation values are correct"""
n_wires = 2
dev = device(n_wires)
if "Hermitian" not in dev.observables:
pytest.skip("Skipped because device does not support the Hermitian observable.")
theta = 0.432
phi = 0.123
@qml.qnode(dev)
def circuit():
qml.RY(theta, wires=[0])
qml.RY(phi, wires=[1])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.Hermitian(A, wires=0)), qml.expval(qml.Hermitian(A, wires=1))
res = circuit()
a = A[0, 0]
re_b = A[0, 1].real
d = A[1, 1]
ev1 = ((a - d) * np.cos(theta) + 2 * re_b * np.sin(theta) * np.sin(phi) + a + d) / 2
ev2 = ((a - d) * np.cos(theta) * np.cos(phi) + 2 * re_b * np.sin(phi) + a + d) / 2
expected = np.array([ev1, ev2])
assert np.allclose(res, expected, atol=tol(dev.shots))
def test_projector_expectation(self, device, tol):
"""Test that arbitrary Projector expectation values are correct"""
n_wires = 2
dev = device(n_wires)
if "Projector" not in dev.observables:
pytest.skip("Skipped because device does not support the Projector observable.")
theta = 0.732
phi = 0.523
@qml.qnode(dev)
def circuit(basis_state):
qml.RY(theta, wires=[0])
qml.RY(phi, wires=[1])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.Projector(basis_state, wires=[0, 1]))
res = circuit([0, 0])
expected = (np.cos(phi / 2) * np.cos(theta / 2)) ** 2
assert np.allclose(res, expected, atol=tol(dev.shots))
res = circuit([0, 1])
expected = (np.sin(phi / 2) * np.cos(theta / 2)) ** 2
assert np.allclose(res, expected, atol=tol(dev.shots))
res = circuit([1, 0])
expected = (np.sin(phi / 2) * np.sin(theta / 2)) ** 2
assert np.allclose(res, expected, atol=tol(dev.shots))
res = circuit([1, 1])
expected = (np.cos(phi / 2) * np.sin(theta / 2)) ** 2
assert np.allclose(res, expected, atol=tol(dev.shots))
def test_multi_mode_hermitian_expectation(self, device, tol):
"""Test that arbitrary multi-mode Hermitian expectation values are correct"""
n_wires = 2
dev = device(n_wires)
if "Hermitian" not in dev.observables:
pytest.skip("Skipped because device does not support the Hermitian observable.")
theta = 0.432
phi = 0.123
A_ = np.array(
[
[-6, 2 + 1j, -3, -5 + 2j],
[2 - 1j, 0, 2 - 1j, -5 + 4j],
[-3, 2 + 1j, 0, -4 + 3j],
[-5 - 2j, -5 - 4j, -4 - 3j, -6],
]
)
@qml.qnode(dev)
def circuit():
qml.RY(theta, wires=[0])
qml.RY(phi, wires=[1])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.Hermitian(A_, wires=[0, 1]))
res = circuit()
# below is the analytic expectation value for this circuit with arbitrary
# Hermitian observable A
expected = 0.5 * (
6 * np.cos(theta) * np.sin(phi)
- np.sin(theta) * (8 * np.sin(phi) + 7 * np.cos(phi) + 3)
- 2 * np.sin(phi)
- 6 * np.cos(phi)
- 6
)
assert np.allclose(res, expected, atol=tol(dev.shots))
@flaky(max_runs=10)
class TestTensorExpval:
"""Test tensor expectation values"""
def test_paulix_pauliy(self, device, tol, skip_if):
"""Test that a tensor product involving PauliX and PauliY works correctly"""
n_wires = 3
dev = device(n_wires)
skip_if(dev, {"supports_tensor_observables": False})
theta = 0.432
phi = 0.123
varphi = -0.543
@qml.qnode(dev)
def circuit():
qml.RX(theta, wires=[0])
qml.RX(phi, wires=[1])
qml.RX(varphi, wires=[2])
qml.CNOT(wires=[0, 1])
qml.CNOT(wires=[1, 2])
return qml.expval(qml.PauliX(wires=0) @ qml.PauliY(wires=2))
res = circuit()
expected = np.sin(theta) * np.sin(phi) * np.sin(varphi)
assert np.allclose(res, expected, atol=tol(dev.shots))
def test_pauliz_hadamard(self, device, tol, skip_if):
"""Test that a tensor product involving PauliZ and PauliY and hadamard works correctly"""
n_wires = 3
dev = device(n_wires)
skip_if(dev, {"supports_tensor_observables": False})
theta = 0.432
phi = 0.123
varphi = -0.543
@qml.qnode(dev)
def circuit():
qml.RX(theta, wires=[0])
qml.RX(phi, wires=[1])
qml.RX(varphi, wires=[2])
qml.CNOT(wires=[0, 1])
qml.CNOT(wires=[1, 2])
return qml.expval(qml.PauliZ(wires=0) @ qml.Hadamard(wires=1) @ qml.PauliY(wires=2))
res = circuit()
expected = -(np.cos(varphi) * np.sin(phi) + np.sin(varphi) * np.cos(theta)) / np.sqrt(2)
assert np.allclose(res, expected, atol=tol(dev.shots))
# pylint: disable=too-many-arguments
@pytest.mark.parametrize(
"base_obs, permuted_obs",
list(zip(obs_lst, obs_permuted_lst)),
)
def test_wire_order_in_tensor_prod_observables(
self, device, base_obs, permuted_obs, tol, skip_if
):
"""Test that when given a tensor observable the expectation value is the same regardless of the order of terms
in the tensor observable, provided the wires each term acts on remain constant.
eg:
ob1 = qml.PauliZ(wires=0) @ qml.PauliY(wires=1)
ob2 = qml.PauliY(wires=1) @ qml.PauliZ(wires=0)
@qml.qnode(dev)
def circ(obs):
return qml.expval(obs)
circ(ob1) == circ(ob2)
"""
n_wires = 3
dev = device(n_wires)
skip_if(dev, {"supports_tensor_observables": False})
@qml.qnode(dev)
def circ(ob):
sub_routine(label_map=range(3))
return qml.expval(ob)
assert np.allclose(circ(base_obs), circ(permuted_obs), atol=tol(dev.shots), rtol=0)
@pytest.mark.parametrize("label_map", label_maps)
def test_wire_label_in_tensor_prod_observables(self, device, label_map, tol, skip_if):
"""Test that when given a tensor observable the expectation value is the same regardless of how the
wires are labelled, as long as they match the device order.
For example:
dev1 = qml.device("default.qubit", wires=[0, 1, 2])
dev2 = qml.device("default.qubit", wires=['c', 'b', 'a']
def circ(wire_labels):
return qml.expval(qml.PauliZ(wires=wire_labels[0]) @ qml.PauliX(wires=wire_labels[2]))
c1, c2 = qml.QNode(circ, dev1), qml.QNode(circ, dev2)
c1([0, 1, 2]) == c2(['c', 'b', 'a'])
"""
dev = device(wires=3)
dev_custom_labels = device(wires=label_map)
skip_if(dev, {"supports_tensor_observables": False})
def circ(wire_labels):
sub_routine(wire_labels)
return qml.expval(
qml.PauliX(wire_labels[0]) @ qml.PauliY(wire_labels[1]) @ qml.PauliZ(wire_labels[2])
)
circ_base_label = qml.QNode(circ, device=dev)
circ_custom_label = qml.QNode(circ, device=dev_custom_labels)
assert np.allclose(
circ_base_label(wire_labels=range(3)),
circ_custom_label(wire_labels=label_map),
atol=tol(dev.shots),
rtol=0,
)
def test_hermitian(self, device, tol, skip_if):
"""Test that a tensor product involving qml.Hermitian works correctly"""
n_wires = 3
dev = device(n_wires)
if "Hermitian" not in dev.observables:
pytest.skip("Skipped because device does not support the Hermitian observable.")
skip_if(dev, {"supports_tensor_observables": False})
theta = 0.432
phi = 0.123
varphi = -0.543
A_ = np.array(
[
[-6, 2 + 1j, -3, -5 + 2j],
[2 - 1j, 0, 2 - 1j, -5 + 4j],
[-3, 2 + 1j, 0, -4 + 3j],
[-5 - 2j, -5 - 4j, -4 - 3j, -6],
]
)
@qml.qnode(dev)
def circuit():
qml.RX(theta, wires=[0])
qml.RX(phi, wires=[1])
qml.RX(varphi, wires=[2])
qml.CNOT(wires=[0, 1])
qml.CNOT(wires=[1, 2])
return qml.expval(qml.PauliZ(wires=0) @ qml.Hermitian(A_, wires=[1, 2]))
res = circuit()
expected = 0.5 * (
-6 * np.cos(theta) * (np.cos(varphi) + 1)
- 2 * np.sin(varphi) * (np.cos(theta) + np.sin(phi) - 2 * np.cos(phi))
+ 3 * np.cos(varphi) * np.sin(phi)
+ np.sin(phi)
)
assert np.allclose(res, expected, atol=tol(dev.shots))
def test_projector(self, device, tol, skip_if):
"""Test that a tensor product involving qml.Projector works correctly"""
n_wires = 3
dev = device(n_wires)
if "Projector" not in dev.observables:
pytest.skip("Skipped because device does not support the Projector observable.")
skip_if(dev, {"supports_tensor_observables": False})
theta = 0.732
phi = 0.523
varphi = -0.543
@qml.qnode(dev)
def circuit(basis_state):
qml.RX(theta, wires=[0])
qml.RX(phi, wires=[1])
qml.RX(varphi, wires=[2])
qml.CNOT(wires=[0, 1])
qml.CNOT(wires=[1, 2])
return qml.expval(qml.PauliZ(wires=[0]) @ qml.Projector(basis_state, wires=[1, 2]))
res = circuit([0, 0])
expected = (np.cos(varphi / 2) * np.cos(phi / 2) * np.cos(theta / 2)) ** 2 - (
np.cos(varphi / 2) * np.sin(phi / 2) * np.sin(theta / 2)
) ** 2
assert np.allclose(res, expected, atol=tol(dev.shots))
res = circuit([0, 1])
expected = (np.sin(varphi / 2) * np.cos(phi / 2) * np.cos(theta / 2)) ** 2 - (
np.sin(varphi / 2) * np.sin(phi / 2) * np.sin(theta / 2)
) ** 2
assert np.allclose(res, expected, atol=tol(dev.shots))
res = circuit([1, 0])
expected = (np.sin(varphi / 2) * np.sin(phi / 2) * np.cos(theta / 2)) ** 2 - (
np.sin(varphi / 2) * np.cos(phi / 2) * np.sin(theta / 2)
) ** 2
assert np.allclose(res, expected, atol=tol(dev.shots))
res = circuit([1, 1])
expected = (np.cos(varphi / 2) * np.sin(phi / 2) * np.cos(theta / 2)) ** 2 - (
np.cos(varphi / 2) * np.cos(phi / 2) * np.sin(theta / 2)
) ** 2
assert np.allclose(res, expected, atol=tol(dev.shots))
def test_sparse_hamiltonian_expval(self, device, tol):
"""Test that expectation values of sparse Hamiltonians are properly calculated."""
n_wires = 4
dev = device(n_wires)
if "SparseHamiltonian" not in dev.observables:
pytest.skip("Skipped because device does not support the SparseHamiltonian observable.")
if dev.shots is not None:
pytest.skip("SparseHamiltonian only supported in analytic mode")
h_row = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
h_col = np.array([15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0])
h_data = np.array(
[-1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1], dtype=np.complex128
)
h = csr_matrix((h_data, (h_row, h_col)), shape=(16, 16)) # XXYY
@qml.qnode(dev, diff_method="parameter-shift")
def result():
qml.PauliX(0)
qml.PauliX(2)
qml.SingleExcitation(0.1, wires=[0, 1])
qml.SingleExcitation(0.2, wires=[2, 3])
qml.SingleExcitation(0.3, wires=[1, 2])
return qml.expval(qml.SparseHamiltonian(h, wires=[0, 1, 2, 3]))
res = result()
exp_res = 0.019833838076209875
assert np.allclose(res, exp_res, atol=tol(False))
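# Illustrative note (not part of the original test): the CSR matrix built above
# is just the dense Pauli string X ⊗ X ⊗ Y ⊗ Y in sparse form, hence the "XXYY"
# tag on its construction. Assuming only numpy and scipy, the equivalence can be
# checked with:
#
#   X = np.array([[0, 1], [1, 0]])
#   Y = np.array([[0, -1j], [1j, 0]])
#   assert np.allclose(np.kron(np.kron(X, X), np.kron(Y, Y)), h.toarray())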
@flaky(max_runs=10)
class TestSample:
"""Tests for the sample return type."""
def test_sample_values(self, device, tol):
"""Tests if the samples returned by sample have
the correct values
"""
n_wires = 1
dev = device(n_wires)
if dev.shots is None:
pytest.skip("Device is in analytic mode, cannot test sampling.")
@qml.qnode(dev)
def circuit():
qml.RX(1.5708, wires=[0])
return qml.sample(qml.PauliZ(wires=0))
res = circuit()
# res should only contain 1 and -1
assert np.allclose(res**2, 1, atol=tol(False))
def test_sample_values_hermitian(self, device, tol):
"""Tests if the samples of a Hermitian observable returned by sample have
the correct values
"""
n_wires = 1
dev = device(n_wires)
if dev.shots is None:
pytest.skip("Device is in analytic mode, cannot test sampling.")
if "Hermitian" not in dev.observables:
pytest.skip("Skipped because device does not support the Hermitian observable.")
A_ = np.array([[1, 2j], [-2j, 0]])
theta = 0.543
@qml.qnode(dev)
def circuit():
qml.RX(theta, wires=[0])
return qml.sample(qml.Hermitian(A_, wires=0))
res = circuit().flatten()
# res should only contain the eigenvalues of
# the hermitian matrix
eigvals = np.linalg.eigvalsh(A_)
assert np.allclose(sorted(list(set(res.tolist()))), sorted(eigvals), atol=tol(dev.shots))
# the analytic mean is 2*sin(theta)+0.5*cos(theta)+0.5
assert np.allclose(
np.mean(res), 2 * np.sin(theta) + 0.5 * np.cos(theta) + 0.5, atol=tol(False)
)
# the analytic variance is 0.25*(sin(theta)-4*cos(theta))^2
assert np.allclose(
np.var(res), 0.25 * (np.sin(theta) - 4 * np.cos(theta)) ** 2, atol=tol(False)
)
def test_sample_values_projector(self, device, tol):
"""Tests if the samples of a Projector observable returned by sample have
the correct values
"""
n_wires = 1
dev = device(n_wires)
if dev.shots is None:
pytest.skip("Device is in analytic mode, cannot test sampling.")
if "Projector" not in dev.observables:
pytest.skip("Skipped because device does not support the Projector observable.")
theta = 0.543
@qml.qnode(dev)
def circuit(basis_state):
qml.RX(theta, wires=[0])
return qml.sample(qml.Projector(basis_state, wires=0))
res = circuit([0]).flatten()
# res should only contain 0 or 1, the eigenvalues of the projector
assert np.allclose(sorted(list(set(res.tolist()))), [0, 1], atol=tol(dev.shots))
assert np.allclose(np.mean(res), np.cos(theta / 2) ** 2, atol=tol(False))
assert np.allclose(
np.var(res), np.cos(theta / 2) ** 2 - (np.cos(theta / 2) ** 2) ** 2, atol=tol(False)
)
res = circuit([1]).flatten()
# res should only contain 0 or 1, the eigenvalues of the projector
assert np.allclose(sorted(list(set(res.tolist()))), [0, 1], atol=tol(dev.shots))
assert np.allclose(np.mean(res), np.sin(theta / 2) ** 2, atol=tol(False))
assert np.allclose(
np.var(res), np.sin(theta / 2) ** 2 - (np.sin(theta / 2) ** 2) ** 2, atol=tol(False)
)
def test_sample_values_hermitian_multi_qubit(self, device, tol):
"""Tests if the samples of a multi-qubit Hermitian observable returned by sample have
the correct values
"""
n_wires = 2
dev = device(n_wires)
if dev.shots is None:
pytest.skip("Device is in analytic mode, cannot test sampling.")
if "Hermitian" not in dev.observables:
pytest.skip("Skipped because device does not support the Hermitian observable.")
theta = 0.543
A_ = np.array(
[
[1, 2j, 1 - 2j, 0.5j],
[-2j, 0, 3 + 4j, 1],
[1 + 2j, 3 - 4j, 0.75, 1.5 - 2j],
[-0.5j, 1, 1.5 + 2j, -1],
]
)
@qml.qnode(dev)
def circuit():
qml.RX(theta, wires=[0])
qml.RY(2 * theta, wires=[1])
qml.CNOT(wires=[0, 1])
return qml.sample(qml.Hermitian(A_, wires=[0, 1]))
res = circuit().flatten()
# res should only contain the eigenvalues of
# the hermitian matrix
eigvals = np.linalg.eigvalsh(A_)
assert np.allclose(sorted(list(set(res.tolist()))), sorted(eigvals), atol=tol(dev.shots))
# make sure the mean matches the analytic mean
expected = (
88 * np.sin(theta)
+ 24 * np.sin(2 * theta)
- 40 * np.sin(3 * theta)
+ 5 * np.cos(theta)
- 6 * np.cos(2 * theta)
+ 27 * np.cos(3 * theta)
+ 6
) / 32
assert np.allclose(np.mean(res), expected, atol=tol(dev.shots))
def test_sample_values_projector_multi_qubit(self, device, tol):
"""Tests if the samples of a multi-qubit Projector observable returned by sample have
the correct values
"""
n_wires = 2
dev = device(n_wires)
if dev.shots is None:
pytest.skip("Device is in analytic mode, cannot test sampling.")
if "Projector" not in dev.observables:
pytest.skip("Skipped because device does not support the Projector observable.")
theta = 0.543
@qml.qnode(dev)
def circuit(basis_state):
qml.RX(theta, wires=[0])
qml.RY(2 * theta, wires=[1])
qml.CNOT(wires=[0, 1])
return qml.sample(qml.Projector(basis_state, wires=[0, 1]))
res = circuit([0, 0]).flatten()
# res should only contain 0 or 1, the eigenvalues of the projector
assert np.allclose(sorted(list(set(res.tolist()))), [0, 1], atol=tol(dev.shots))
expected = (np.cos(theta / 2) * np.cos(theta)) ** 2
assert np.allclose(np.mean(res), expected, atol=tol(dev.shots))
res = circuit([0, 1]).flatten()
assert np.allclose(sorted(list(set(res.tolist()))), [0, 1], atol=tol(dev.shots))
expected = (np.cos(theta / 2) * np.sin(theta)) ** 2
assert np.allclose(np.mean(res), expected, atol=tol(dev.shots))
res = circuit([1, 0]).flatten()
assert np.allclose(sorted(list(set(res.tolist()))), [0, 1], atol=tol(dev.shots))
expected = (np.sin(theta / 2) * np.sin(theta)) ** 2
assert np.allclose(np.mean(res), expected, atol=tol(dev.shots))
res = circuit([1, 1]).flatten()
assert np.allclose(sorted(list(set(res.tolist()))), [0, 1], atol=tol(dev.shots))
expected = (np.sin(theta / 2) * np.cos(theta)) ** 2
assert np.allclose(np.mean(res), expected, atol=tol(dev.shots))
@flaky(max_runs=10)
class TestTensorSample:
"""Test tensor sample values."""
def test_paulix_pauliy(self, device, tol, skip_if):
"""Test that a tensor product involving PauliX and PauliY works correctly"""
n_wires = 3
dev = device(n_wires)
if dev.shots is None:
pytest.skip("Device is in analytic mode, cannot test sampling.")
skip_if(dev, {"supports_tensor_observables": False})
theta = 0.432
phi = 0.123
varphi = -0.543
@qml.qnode(dev)
def circuit():
qml.RX(theta, wires=[0])
qml.RX(phi, wires=[1])
qml.RX(varphi, wires=[2])
qml.CNOT(wires=[0, 1])
qml.CNOT(wires=[1, 2])
return qml.sample(qml.PauliX(wires=[0]) @ qml.PauliY(wires=[2]))
res = circuit()
# res should only contain 1 and -1
assert np.allclose(res**2, 1, atol=tol(False))
mean = np.mean(res)
expected = np.sin(theta) * np.sin(phi) * np.sin(varphi)
assert np.allclose(mean, expected, atol=tol(False))
var = np.var(res)
expected = (
8 * np.sin(theta) ** 2 * np.cos(2 * varphi) * np.sin(phi) ** 2
- np.cos(2 * (theta - phi))
- np.cos(2 * (theta + phi))
+ 2 * np.cos(2 * theta)
+ 2 * np.cos(2 * phi)
+ 14
) / 16
assert np.allclose(var, expected, atol=tol(False))
def test_pauliz_hadamard(self, device, tol, skip_if):
"""Test that a tensor product involving PauliZ and PauliY and hadamard works correctly"""
n_wires = 3
dev = device(n_wires)
if dev.shots is None:
pytest.skip("Device is in analytic mode, cannot test sampling.")
skip_if(dev, {"supports_tensor_observables": False})
theta = 0.432
phi = 0.123
varphi = -0.543
@qml.qnode(dev)
def circuit():
qml.RX(theta, wires=[0])
qml.RX(phi, wires=[1])
qml.RX(varphi, wires=[2])
qml.CNOT(wires=[0, 1])
qml.CNOT(wires=[1, 2])
return qml.sample(
qml.PauliZ(wires=[0]) @ qml.Hadamard(wires=[1]) @ qml.PauliY(wires=[2])
)
res = circuit()
# res should only contain 1 and -1
assert np.allclose(res**2, 1, atol=tol(False))
mean = np.mean(res)
expected = -(np.cos(varphi) * np.sin(phi) + np.sin(varphi) * np.cos(theta)) / np.sqrt(2)
assert np.allclose(mean, expected, atol=tol(False))
var = np.var(res)
expected = (
3
+ np.cos(2 * phi) * np.cos(varphi) ** 2
- np.cos(2 * theta) * np.sin(varphi) ** 2
- 2 * np.cos(theta) * np.sin(phi) * np.sin(2 * varphi)
) / 4
assert np.allclose(var, expected, atol=tol(False))
def test_hermitian(self, device, tol, skip_if):
"""Test that a tensor product involving qml.Hermitian works correctly"""
n_wires = 3
dev = device(n_wires)
if dev.shots is None:
pytest.skip("Device is in analytic mode, cannot test sampling.")
if "Hermitian" not in dev.observables:
pytest.skip("Skipped because device does not support the Hermitian observable.")
skip_if(dev, {"supports_tensor_observables": False})
theta = 0.432
phi = 0.123
varphi = -0.543
A_ = 0.1 * np.array(
[
[-6, 2 + 1j, -3, -5 + 2j],
[2 - 1j, 0, 2 - 1j, -5 + 4j],
[-3, 2 + 1j, 0, -4 + 3j],
[-5 - 2j, -5 - 4j, -4 - 3j, -6],
]
)
@qml.qnode(dev)
def circuit():
qml.RX(theta, wires=[0])
qml.RX(phi, wires=[1])
qml.RX(varphi, wires=[2])
qml.CNOT(wires=[0, 1])
qml.CNOT(wires=[1, 2])
return qml.sample(qml.PauliZ(wires=[0]) @ qml.Hermitian(A_, wires=[1, 2]))
res = circuit()
# res should only contain the eigenvalues of
# the hermitian matrix tensor product Z
Z = np.diag([1, -1])
eigvals = np.linalg.eigvalsh(np.kron(Z, A_))
assert np.allclose(sorted(np.unique(res)), sorted(eigvals), atol=tol(False))
mean = np.mean(res)
expected = (
0.1
* 0.5
* (
-6 * np.cos(theta) * (np.cos(varphi) + 1)
- 2 * np.sin(varphi) * (np.cos(theta) + np.sin(phi) - 2 * np.cos(phi))
+ 3 * np.cos(varphi) * np.sin(phi)
+ np.sin(phi)
)
)
assert np.allclose(mean, expected, atol=tol(False))
var = np.var(res)
expected = (
0.01
* (
1057
- np.cos(2 * phi)
+ 12 * (27 + np.cos(2 * phi)) * np.cos(varphi)
- 2 * np.cos(2 * varphi) * np.sin(phi) * (16 * np.cos(phi) + 21 * np.sin(phi))
+ 16 * np.sin(2 * phi)
- 8 * (-17 + np.cos(2 * phi) + 2 * np.sin(2 * phi)) * np.sin(varphi)
- 8 * np.cos(2 * theta) * (3 + 3 * np.cos(varphi) + np.sin(varphi)) ** 2
- 24 * np.cos(phi) * (np.cos(phi) + 2 * np.sin(phi)) * np.sin(2 * varphi)
- 8
* np.cos(theta)
* (
4
* np.cos(phi)
* (
4
+ 8 * np.cos(varphi)
+ np.cos(2 * varphi)
- (1 + 6 * np.cos(varphi)) * np.sin(varphi)
)
+ np.sin(phi)
* (
15
+ 8 * np.cos(varphi)
- 11 * np.cos(2 * varphi)
+ 42 * np.sin(varphi)
+ 3 * np.sin(2 * varphi)
)
)
)
/ 16
)
assert np.allclose(var, expected, atol=tol(False))
def test_projector(self, device, tol, skip_if):
"""Test that a tensor product involving qml.Projector works correctly"""
n_wires = 3
dev = device(n_wires)
if dev.shots is None:
pytest.skip("Device is in analytic mode, cannot test sampling.")
if "Projector" not in dev.observables:
pytest.skip("Skipped because device does not support the Projector observable.")
skip_if(dev, {"supports_tensor_observables": False})
theta = 1.432
phi = 1.123
varphi = -0.543
@qml.qnode(dev)
def circuit(basis_state):
qml.RX(theta, wires=[0])
qml.RX(phi, wires=[1])
qml.RX(varphi, wires=[2])
qml.CNOT(wires=[0, 1])
qml.CNOT(wires=[1, 2])
return qml.sample(qml.PauliZ(wires=[0]) @ qml.Projector(basis_state, wires=[1, 2]))
res = circuit([0, 0])
# res should only contain the eigenvalues of the projector matrix tensor product Z, i.e. {-1, 0, 1}
assert np.allclose(sorted(np.unique(res)), [-1, 0, 1], atol=tol(False))
mean = np.mean(res)
expected = (np.cos(varphi / 2) * np.cos(phi / 2) * np.cos(theta / 2)) ** 2 - (
np.cos(varphi / 2) * np.sin(phi / 2) * np.sin(theta / 2)
) ** 2
assert np.allclose(mean, expected, atol=tol(False))
var = np.var(res)
expected = (
(np.cos(varphi / 2) * np.cos(phi / 2) * np.cos(theta / 2)) ** 2
+ (np.cos(varphi / 2) * np.sin(phi / 2) * np.sin(theta / 2)) ** 2
- (
(np.cos(varphi / 2) * np.cos(phi / 2) * np.cos(theta / 2)) ** 2
- (np.cos(varphi / 2) * np.sin(phi / 2) * np.sin(theta / 2)) ** 2
)
** 2
)
assert np.allclose(var, expected, atol=tol(False))
res = circuit([0, 1])
assert np.allclose(sorted(np.unique(res)), [-1, 0, 1], atol=tol(False))
mean = np.mean(res)
expected = (np.sin(varphi / 2) * np.cos(phi / 2) * np.cos(theta / 2)) ** 2 - (
np.sin(varphi / 2) * np.sin(phi / 2) * np.sin(theta / 2)
) ** 2
assert np.allclose(mean, expected, atol=tol(False))
var = np.var(res)
expected = (
(np.sin(varphi / 2) * np.cos(phi / 2) * np.cos(theta / 2)) ** 2
+ (np.sin(varphi / 2) * np.sin(phi / 2) * np.sin(theta / 2)) ** 2
- (
(np.sin(varphi / 2) * np.cos(phi / 2) * np.cos(theta / 2)) ** 2
- (np.sin(varphi / 2) * np.sin(phi / 2) * np.sin(theta / 2)) ** 2
)
** 2
)
assert np.allclose(var, expected, atol=tol(False))
res = circuit([1, 0])
assert np.allclose(sorted(np.unique(res)), [-1, 0, 1], atol=tol(False))
mean = np.mean(res)
expected = (np.sin(varphi / 2) * np.sin(phi / 2) * np.cos(theta / 2)) ** 2 - (
np.sin(varphi / 2) * np.cos(phi / 2) * np.sin(theta / 2)
) ** 2
assert np.allclose(mean, expected, atol=tol(False))
var = np.var(res)
expected = (
(np.sin(varphi / 2) * np.sin(phi / 2) * np.cos(theta / 2)) ** 2
+ (np.sin(varphi / 2) * np.cos(phi / 2) * np.sin(theta / 2)) ** 2
- (
(np.sin(varphi / 2) * np.sin(phi / 2) * np.cos(theta / 2)) ** 2
- (np.sin(varphi / 2) * np.cos(phi / 2) * np.sin(theta / 2)) ** 2
)
** 2
)
assert np.allclose(var, expected, atol=tol(False))
res = circuit([1, 1])
assert np.allclose(sorted(np.unique(res)), [-1, 0, 1], atol=tol(False))
mean = np.mean(res)
expected = (np.cos(varphi / 2) * np.sin(phi / 2) * np.cos(theta / 2)) ** 2 - (
np.cos(varphi / 2) * np.cos(phi / 2) * np.sin(theta / 2)
) ** 2
assert np.allclose(mean, expected, atol=tol(False))
var = np.var(res)
expected = (
(np.cos(varphi / 2) * np.sin(phi / 2) * np.cos(theta / 2)) ** 2
+ (np.cos(varphi / 2) * np.cos(phi / 2) * np.sin(theta / 2)) ** 2
- (
(np.cos(varphi / 2) * np.sin(phi / 2) * np.cos(theta / 2)) ** 2
- (np.cos(varphi / 2) * np.cos(phi / 2) * np.sin(theta / 2)) ** 2
)
** 2
)
assert np.allclose(var, expected, atol=tol(False))
@flaky(max_runs=10)
class TestVar:
"""Tests for the variance return type"""
def test_var(self, device, tol):
"""Tests if the samples returned by sample have
the correct values
"""
n_wires = 2
dev = device(n_wires)
phi = 0.543
theta = 0.6543
@qml.qnode(dev)
def circuit():
qml.RX(phi, wires=[0])
qml.RY(theta, wires=[0])
return qml.var(qml.PauliZ(wires=0))
res = circuit()
expected = 0.25 * (3 - np.cos(2 * theta) - 2 * np.cos(theta) ** 2 * np.cos(2 * phi))
assert np.allclose(res, expected, atol=tol(dev.shots))
def test_var_hermitian(self, device, tol):
"""Tests if the samples of a Hermitian observable returned by sample have
the correct values
"""
n_wires = 2
dev = device(n_wires)
if "Hermitian" not in dev.observables:
pytest.skip("Skipped because device does not support the Hermitian observable.")
phi = 0.543
theta = 0.6543
# test correct variance for <H> of a rotated state
H = 0.1 * np.array([[4, -1 + 6j], [-1 - 6j, 2]])
@qml.qnode(dev)
def circuit():
qml.RX(phi, wires=[0])
qml.RY(theta, wires=[0])
return qml.var(qml.Hermitian(H, wires=0))
res = circuit()
expected = (
0.01
* 0.5
* (
2 * np.sin(2 * theta) * np.cos(phi) ** 2
+ 24 * np.sin(phi) * np.cos(phi) * (np.sin(theta) - np.cos(theta))
+ 35 * np.cos(2 * phi)
+ 39
)
)
assert np.allclose(res, expected, atol=tol(dev.shots))
def test_var_projector(self, device, tol):
"""Tests if the samples of a Projector observable returned by sample have
the correct values
"""
n_wires = 2
dev = device(n_wires)
if "Projector" not in dev.observables:
pytest.skip("Skipped because device does not support the Projector observable.")
phi = 0.543
theta = 0.654
@qml.qnode(dev)
def circuit(basis_state):
qml.RX(phi, wires=[0])
qml.RY(theta, wires=[1])
qml.CNOT(wires=[0, 1])
return qml.var(qml.Projector(basis_state, wires=[0, 1]))
res = circuit([0, 0])
expected = (np.cos(phi / 2) * np.cos(theta / 2)) ** 2 - (
(np.cos(phi / 2) * np.cos(theta / 2)) ** 2
) ** 2
assert np.allclose(res, expected, atol=tol(dev.shots))
res = circuit([0, 1])
expected = (np.cos(phi / 2) * np.sin(theta / 2)) ** 2 - (
(np.cos(phi / 2) * np.sin(theta / 2)) ** 2
) ** 2
assert np.allclose(res, expected, atol=tol(dev.shots))
res = circuit([1, 0])
expected = (np.sin(phi / 2) * np.sin(theta / 2)) ** 2 - (
(np.sin(phi / 2) * np.sin(theta / 2)) ** 2
) ** 2
assert np.allclose(res, expected, atol=tol(dev.shots))
res = circuit([1, 1])
expected = (np.sin(phi / 2) * np.cos(theta / 2)) ** 2 - (
(np.sin(phi / 2) * np.cos(theta / 2)) ** 2
) ** 2
assert np.allclose(res, expected, atol=tol(dev.shots))
@flaky(max_runs=10)
class TestTensorVar:
"""Test tensor variance measurements."""
def test_paulix_pauliy(self, device, tol, skip_if):
"""Test that a tensor product involving PauliX and PauliY works correctly"""
n_wires = 3
dev = device(n_wires)
skip_if(dev, {"supports_tensor_observables": False})
theta = 0.432
phi = 0.123
varphi = -0.543
@qml.qnode(dev)
def circuit():
qml.RX(theta, wires=[0])
qml.RX(phi, wires=[1])
qml.RX(varphi, wires=[2])
qml.CNOT(wires=[0, 1])
qml.CNOT(wires=[1, 2])
return qml.var(qml.PauliX(wires=[0]) @ qml.PauliY(wires=[2]))
res = circuit()
expected = (
8 * np.sin(theta) ** 2 * np.cos(2 * varphi) * np.sin(phi) ** 2
- np.cos(2 * (theta - phi))
- np.cos(2 * (theta + phi))
+ 2 * np.cos(2 * theta)
+ 2 * np.cos(2 * phi)
+ 14
) / 16
assert np.allclose(res, expected, atol=tol(dev.shots))
def test_pauliz_hadamard(self, device, tol, skip_if):
"""Test that a tensor product involving PauliZ and PauliY and hadamard works correctly"""
n_wires = 3
dev = device(n_wires)
skip_if(dev, {"supports_tensor_observables": False})
theta = 0.432
phi = 0.123
varphi = -0.543
@qml.qnode(dev)
def circuit():
qml.RX(theta, wires=[0])
qml.RX(phi, wires=[1])
qml.RX(varphi, wires=[2])
qml.CNOT(wires=[0, 1])
qml.CNOT(wires=[1, 2])
return qml.var(qml.PauliZ(wires=[0]) @ qml.Hadamard(wires=[1]) @ qml.PauliY(wires=[2]))
res = circuit()
expected = (
3
+ np.cos(2 * phi) * np.cos(varphi) ** 2
- np.cos(2 * theta) * np.sin(varphi) ** 2
- 2 * np.cos(theta) * np.sin(phi) * np.sin(2 * varphi)
) / 4
assert np.allclose(res, expected, atol=tol(dev.shots))
# pylint: disable=too-many-arguments
@pytest.mark.parametrize(
"base_obs, permuted_obs",
list(zip(obs_lst, obs_permuted_lst)),
)
def test_wire_order_in_tensor_prod_observables(
self, device, base_obs, permuted_obs, tol, skip_if
):
"""Test that when given a tensor observable the variance is the same regardless of the order of terms
in the tensor observable, provided the wires each term acts on remain constant.
eg:
ob1 = qml.PauliZ(wires=0) @ qml.PauliY(wires=1)
ob2 = qml.PauliY(wires=1) @ qml.PauliZ(wires=0)
@qml.qnode(dev)
def circ(obs):
return qml.var(obs)
circ(ob1) == circ(ob2)
"""
n_wires = 3
dev = device(n_wires)
skip_if(dev, {"supports_tensor_observables": False})
@qml.qnode(dev)
def circ(ob):
sub_routine(label_map=range(3))
return qml.var(ob)
assert np.allclose(circ(base_obs), circ(permuted_obs), atol=tol(dev.shots), rtol=0)
@pytest.mark.parametrize("label_map", label_maps)
def test_wire_label_in_tensor_prod_observables(self, device, label_map, tol, skip_if):
"""Test that when given a tensor observable the variance is the same regardless of how the
wires are labelled, as long as they match the device order.
eg:
dev1 = qml.device("default.qubit", wires=[0, 1, 2])
dev2 = qml.device("default.qubit", wires=['c', 'b', 'a']
def circ(wire_labels):
return qml.var(qml.PauliZ(wires=wire_labels[0]) @ qml.PauliX(wires=wire_labels[2]))
c1, c2 = qml.QNode(circ, dev1), qml.QNode(circ, dev2)
c1([0, 1, 2]) == c2(['c', 'b', 'a'])
"""
dev = device(wires=3)
dev_custom_labels = device(wires=label_map)
skip_if(dev, {"supports_tensor_observables": False})
def circ(wire_labels):
sub_routine(wire_labels)
return qml.var(
qml.PauliX(wire_labels[0]) @ qml.PauliY(wire_labels[1]) @ qml.PauliZ(wire_labels[2])
)
circ_base_label = qml.QNode(circ, device=dev)
circ_custom_label = qml.QNode(circ, device=dev_custom_labels)
assert np.allclose(
circ_base_label(wire_labels=range(3)),
circ_custom_label(wire_labels=label_map),
atol=tol(dev.shots),
rtol=0,
)
def test_hermitian(self, device, tol, skip_if):
"""Test that a tensor product involving qml.Hermitian works correctly"""
n_wires = 3
dev = device(n_wires)
if "Hermitian" not in dev.observables:
pytest.skip("Skipped because device does not support the Hermitian observable.")
skip_if(dev, {"supports_tensor_observables": False})
theta = 0.432
phi = 0.123
varphi = -0.543
A_ = 0.1 * np.array(
[
[-6, 2 + 1j, -3, -5 + 2j],
[2 - 1j, 0, 2 - 1j, -5 + 4j],
[-3, 2 + 1j, 0, -4 + 3j],
[-5 - 2j, -5 - 4j, -4 - 3j, -6],
]
)
@qml.qnode(dev)
def circuit():
qml.RX(theta, wires=[0])
qml.RX(phi, wires=[1])
qml.RX(varphi, wires=[2])
qml.CNOT(wires=[0, 1])
qml.CNOT(wires=[1, 2])
return qml.var(qml.PauliZ(wires=[0]) @ qml.Hermitian(A_, wires=[1, 2]))
res = circuit()
expected = (
0.01
* (
1057
- np.cos(2 * phi)
+ 12 * (27 + np.cos(2 * phi)) * np.cos(varphi)
- 2 * np.cos(2 * varphi) * np.sin(phi) * (16 * np.cos(phi) + 21 * np.sin(phi))
+ 16 * np.sin(2 * phi)
- 8 * (-17 + np.cos(2 * phi) + 2 * np.sin(2 * phi)) * np.sin(varphi)
- 8 * np.cos(2 * theta) * (3 + 3 * np.cos(varphi) + np.sin(varphi)) ** 2
- 24 * np.cos(phi) * (np.cos(phi) + 2 * np.sin(phi)) * np.sin(2 * varphi)
- 8
* np.cos(theta)
* (
4
* np.cos(phi)
* (
4
+ 8 * np.cos(varphi)
+ np.cos(2 * varphi)
- (1 + 6 * np.cos(varphi)) * np.sin(varphi)
)
+ np.sin(phi)
* (
15
+ 8 * np.cos(varphi)
- 11 * np.cos(2 * varphi)
+ 42 * np.sin(varphi)
+ 3 * np.sin(2 * varphi)
)
)
)
/ 16
)
assert np.allclose(res, expected, atol=tol(dev.shots))
def test_projector(self, device, tol, skip_if):
"""Test that a tensor product involving qml.Projector works correctly"""
n_wires = 3
dev = device(n_wires)
if "Projector" not in dev.observables:
pytest.skip("Skipped because device does not support the Projector observable.")
skip_if(dev, {"supports_tensor_observables": False})
theta = 0.432
phi = 0.123
varphi = -0.543
@qml.qnode(dev)
def circuit(basis_state):
qml.RX(theta, wires=[0])
qml.RX(phi, wires=[1])
qml.RX(varphi, wires=[2])
qml.CNOT(wires=[0, 1])
qml.CNOT(wires=[1, 2])
return qml.var(qml.PauliZ(wires=[0]) @ qml.Projector(basis_state, wires=[1, 2]))
res = circuit([0, 0])
expected = (
(np.cos(varphi / 2) * np.cos(phi / 2) * np.cos(theta / 2)) ** 2
+ (np.cos(varphi / 2) * np.sin(phi / 2) * np.sin(theta / 2)) ** 2
) - (
(np.cos(varphi / 2) * np.cos(phi / 2) * np.cos(theta / 2)) ** 2
- (np.cos(varphi / 2) * np.sin(phi / 2) * np.sin(theta / 2)) ** 2
) ** 2
assert np.allclose(res, expected, atol=tol(dev.shots))
res = circuit([0, 1])
expected = (
(np.sin(varphi / 2) * np.cos(phi / 2) * np.cos(theta / 2)) ** 2
+ (np.sin(varphi / 2) * np.sin(phi / 2) * np.sin(theta / 2)) ** 2
) - (
(np.sin(varphi / 2) * np.cos(phi / 2) * np.cos(theta / 2)) ** 2
- (np.sin(varphi / 2) * np.sin(phi / 2) * np.sin(theta / 2)) ** 2
) ** 2
assert np.allclose(res, expected, atol=tol(dev.shots))
res = circuit([1, 0])
expected = (
(np.sin(varphi / 2) * np.sin(phi / 2) * np.cos(theta / 2)) ** 2
+ (np.sin(varphi / 2) * np.cos(phi / 2) * np.sin(theta / 2)) ** 2
) - (
(np.sin(varphi / 2) * np.sin(phi / 2) * np.cos(theta / 2)) ** 2
- (np.sin(varphi / 2) * np.cos(phi / 2) * np.sin(theta / 2)) ** 2
) ** 2
assert np.allclose(res, expected, atol=tol(dev.shots))
res = circuit([1, 1])
expected = (
(np.cos(varphi / 2) * np.sin(phi / 2) * np.cos(theta / 2)) ** 2
+ (np.cos(varphi / 2) * np.cos(phi / 2) * np.sin(theta / 2)) ** 2
) - (
(np.cos(varphi / 2) * np.sin(phi / 2) * np.cos(theta / 2)) ** 2
- (np.cos(varphi / 2) * np.cos(phi / 2) * np.sin(theta / 2)) ** 2
) ** 2
assert np.allclose(res, expected, atol=tol(dev.shots))
| 35.48952
| 118
| 0.530816
|
ba812ec545c7f6343b5ecb7defc751652411e09c
| 30,642
|
py
|
Python
|
raiden/raiden/token_swap.py
|
yy18/raidenenv
|
f732de6870de01840a1ad7bb25cc9d31b1781600
|
[
"MIT"
] | null | null | null |
raiden/raiden/token_swap.py
|
yy18/raidenenv
|
f732de6870de01840a1ad7bb25cc9d31b1781600
|
[
"MIT"
] | null | null | null |
raiden/raiden/token_swap.py
|
yy18/raidenenv
|
f732de6870de01840a1ad7bb25cc9d31b1781600
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import logging
import random
import time
from collections import namedtuple, defaultdict
import gevent
from gevent.queue import Empty
from ethereum import slogging
from raiden.network.channelgraph import (
get_best_routes,
)
from raiden.tasks import Task
from raiden.messages import (
MediatedTransfer,
RefundTransfer,
RevealSecret,
Secret,
SecretRequest,
)
from raiden.settings import (
DEFAULT_EVENTS_POLL_TIMEOUT,
)
from raiden.utils import lpex, pex, sha3
log = slogging.get_logger(__name__) # pylint: disable=invalid-name
TIMEOUT = object()
TokenSwap = namedtuple('TokenSwap', (
'identifier',
'from_token',
'from_amount',
'from_nodeaddress', # the node address of the owner of the `from_token`
'to_token',
'to_amount',
'to_nodeaddress', # the node address of the owner of the `to_token`
))
SwapKey = namedtuple('SwapKey', (
'identifier',
'from_token',
'from_amount',
))
class GreenletTasksDispatcher:
def __init__(self):
self.hashlocks_greenlets = defaultdict(list)
def register_task(self, task, hashlock):
""" Register the task to receive messages based on `hashlock`.
Registration is required otherwise the task won't receive any messages
from the protocol, un-registering is done by the `unregister_task`
function.
Note:
Messages are dispatched solely on the hashlock value (either part of
the message, e.g. SecretRequest, or calculated from the message
content, e.g. RevealSecret), so the sender of each received
message still needs to be checked.
"""
if not isinstance(task, Task):
raise ValueError('task must be an instance of Task')
self.hashlocks_greenlets[hashlock].append(task)
def unregister_task(self, task, hashlock, success): # pylint: disable=unused-argument
""" Clear the task when it's finished. """
self.hashlocks_greenlets[hashlock].remove(task)
if not self.hashlocks_greenlets[hashlock]:
del self.hashlocks_greenlets[hashlock]
def dispatch_message(self, message, hashlock):
for task in self.hashlocks_greenlets[hashlock]:
task.response_queue.put(message)
def stop(self):
wait_for = list()
for greenlets in self.hashlocks_greenlets.values():
for task in greenlets:
task.kill()
wait_for.extend(greenlets)
return wait_for
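# Usage sketch (hypothetical names, not part of the original module): tasks
# register themselves under a hashlock, every message carrying that hashlock is
# then fanned out to their response queues, and each task unregisters itself
# once it finishes.
#
#   dispatcher = GreenletTasksDispatcher()
#   dispatcher.register_task(task, hashlock)              # task must be a Task instance
#   dispatcher.dispatch_message(reveal_secret, hashlock)  # lands on task.response_queue
#   dispatcher.unregister_task(task, hashlock, success=True)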
class BaseMediatedTransferTask(Task):
def _send_and_wait_time(self, raiden, recipient, transfer, timeout):
""" Utility to handle multiple messages for the same hashlock while
properly handling expiration timeouts.
"""
current_time = time.time()
limit_time = current_time + timeout
raiden.send_async(recipient, transfer)
while current_time <= limit_time:
# wait for a response message (not the Ack for the transfer)
try:
response = self.response_queue.get(
timeout=limit_time - current_time,
)
except Empty:
yield TIMEOUT
return
yield response
current_time = time.time()
if log.isEnabledFor(logging.DEBUG):
log.debug(
'TIMED OUT %s %s',
self.__class__,
pex(transfer),
)
def _send_and_wait_block(self, raiden, recipient, transfer, expiration_block):
""" Utility to handle multiple messages and timeout on a block number. """
raiden.send_async(recipient, transfer)
current_block = raiden.get_block_number()
while current_block < expiration_block:
try:
response = self.response_queue.get(
timeout=DEFAULT_EVENTS_POLL_TIMEOUT,
)
except Empty:
pass
else:
if response:
yield response
current_block = raiden.get_block_number()
if log.isEnabledFor(logging.DEBUG):
log.debug(
'TIMED OUT ON BLOCK %s %s %s',
current_block,
self.__class__,
pex(transfer),
block_number=current_block,
)
yield TIMEOUT
def _messages_until_block(self, raiden, expiration_block):
""" Returns the received messages up to the block `expiration_block`.
"""
current_block = raiden.get_block_number()
while current_block < expiration_block:
try:
response = self.response_queue.get(
timeout=DEFAULT_EVENTS_POLL_TIMEOUT,
)
except Empty:
pass
else:
if response:
yield response
current_block = raiden.get_block_number()
def _wait_for_unlock_or_close(self, raiden, graph, channel, mediated_transfer): # noqa
""" Wait for a Secret message from our partner to update the local
state, if the Secret message is not sent within time the channel will
be closed.
Note:
Must be called only once the secret is known.
Must call `unregister_task` after this function returns.
"""
assert graph.token_address == mediated_transfer.token
if not isinstance(mediated_transfer, MediatedTransfer):
raise ValueError('MediatedTransfer expected.')
block_to_close = mediated_transfer.lock.expiration - raiden.config['reveal_timeout']
hashlock = mediated_transfer.lock.hashlock
identifier = mediated_transfer.identifier
token = mediated_transfer.token
while channel.our_state.is_locked(hashlock):
current_block = raiden.get_block_number()
if current_block > block_to_close:
if log.isEnabledFor(logging.WARN):
log.warn(
'Closing channel (%s, %s) to prevent expiration of lock %s %s',
pex(channel.our_state.address),
pex(channel.partner_state.address),
pex(hashlock),
repr(self),
)
channel.external_state.close(
channel.our_state.balance_proof,
)
return
try:
response = self.response_queue.get(
timeout=DEFAULT_EVENTS_POLL_TIMEOUT
)
except Empty:
pass
else:
if isinstance(response, Secret):
secret = response.secret
hashlock = sha3(secret)
is_valid_identifier = response.identifier == identifier
is_valid_channel = response.channel == channel.channel_address
if is_valid_identifier and is_valid_channel:
raiden.handle_secret(
identifier,
graph.token_address,
secret,
response,
hashlock,
)
else:
# cannot use the message but the secret is okay
raiden.handle_secret(
identifier,
graph.token_address,
secret,
None,
hashlock,
)
if log.isEnabledFor(logging.ERROR):
log.error(
'Invalid Secret message received, expected message'
' for token=%s identifier=%s received=%s',
token,
identifier,
response,
)
elif isinstance(response, RevealSecret):
secret = response.secret
hashlock = sha3(secret)
raiden.handle_secret(
identifier,
graph.token_address,
secret,
None,
hashlock,
)
elif log.isEnabledFor(logging.ERROR):
log.error(
'Invalid message ignoring. %s %s',
repr(response),
repr(self),
)
def _wait_expiration(self, raiden, transfer, sleep=DEFAULT_EVENTS_POLL_TIMEOUT):
""" Utility to wait until the expiration block.
For a chain A-B-C, if an attacker controls both A and C, a mediated
transfer can be routed through B while C simply waits for (or sends) a
timeout. For that reason B must not unregister the hashlock until the
lock has expired, otherwise a late reveal of the secret wouldn't be caught.
"""
# pylint: disable=no-self-use
expiration = transfer.lock.expiration + 1
while True:
current_block = raiden.get_block_number()
if current_block > expiration:
return
gevent.sleep(sleep)
# Note: send_and_wait_valid methods are used to check the message type and
# sender only; this could be improved by using an encrypted connection between
# the nodes, making the signature validation unnecessary.
# TODO: Implement the swaps as a restartable task (issue #303)
class MakerTokenSwapTask(BaseMediatedTransferTask):
""" Initiator task, responsible to choose a random secret, initiate the
token swap by sending a mediated transfer to the counterparty and
revealing the secret once the swap is complete.
"""
def __init__(self, raiden, tokenswap, async_result):
super(MakerTokenSwapTask, self).__init__()
self.raiden = raiden
self.tokenswap = tokenswap
self.async_result = async_result
def __repr__(self):
tokenswap = self.tokenswap
return '<{} {} from_token:{} to_token:{}>'.format(
self.__class__.__name__,
pex(self.raiden.address),
pex(tokenswap.from_token),
pex(tokenswap.to_token),
)
def _run(self): # pylint: disable=method-hidden,too-many-locals
tokenswap = self.tokenswap
raiden = self.raiden
identifier = tokenswap.identifier
from_token = tokenswap.from_token
from_amount = tokenswap.from_amount
to_token = tokenswap.to_token
to_amount = tokenswap.to_amount
to_nodeaddress = tokenswap.to_nodeaddress
from_graph = raiden.token_to_channelgraph[from_token]
to_graph = raiden.token_to_channelgraph[to_token]
from_routes = get_best_routes(
from_graph,
raiden.protocol.nodeaddresses_networkstatuses,
raiden.address,
to_nodeaddress,
from_amount,
previous_address=None,
)
fee = 0
for route in from_routes:
# for each new path a new secret must be used
secret = sha3(hex(random.getrandbits(256)).encode())
hashlock = sha3(secret)
from_channel = from_graph.get_channel_by_contract_address(route.channel_address)
raiden.greenlet_task_dispatcher.register_task(self, hashlock)
raiden.register_channel_for_hashlock(from_token, from_channel, hashlock)
block_number = raiden.get_block_number()
lock_expiration = block_number + from_channel.settle_timeout
from_mediated_transfer = from_channel.create_mediatedtransfer(
raiden.address,
to_nodeaddress,
fee,
from_amount,
identifier,
lock_expiration,
hashlock,
)
raiden.sign(from_mediated_transfer)
from_channel.register_transfer(
# must be the same block number used to compute lock_expiration
block_number,
from_mediated_transfer,
)
# wait for the SecretRequest and MediatedTransfer
to_mediated_transfer = self.send_and_wait_valid_state(
raiden,
route.node_address,
to_nodeaddress,
from_mediated_transfer,
to_token,
to_amount,
)
if to_mediated_transfer is None:
# the initiator can unregister right away since it knows the
# secret won't be revealed
raiden.greenlet_task_dispatcher.unregister_task(self, hashlock, False)
elif isinstance(to_mediated_transfer, MediatedTransfer):
to_hop = to_mediated_transfer.sender
to_channel = to_graph.partneraddress_to_channel[to_hop]
to_channel.register_transfer(
raiden.get_block_number(),
to_mediated_transfer,
)
raiden.register_channel_for_hashlock(to_token, to_channel, hashlock)
# A swap is composed of two mediated transfers, we need to
# reveal the secret to both, since the maker is one of the ends
# we just need to send the reveal secret directly to the taker.
reveal_secret = RevealSecret(secret)
raiden.sign(reveal_secret)
raiden.send_async(to_nodeaddress, reveal_secret)
from_channel.register_secret(secret)
# Register the secret with the to_channel and send the
# RevealSecret message to the node that is paying the to_token
# (this node might, or might not be the same as the taker),
# then wait for the withdraw.
raiden.handle_secret(
identifier,
to_token,
secret,
None,
hashlock,
)
to_channel = to_graph.partneraddress_to_channel[to_mediated_transfer.sender]
self._wait_for_unlock_or_close(
raiden,
to_graph,
to_channel,
to_mediated_transfer,
)
# unlock the from_token and optimistically reveal the secret
# forward
raiden.handle_secret(
identifier,
from_token,
secret,
None,
hashlock,
)
raiden.greenlet_task_dispatcher.unregister_task(self, hashlock, True)
self.async_result.set(True)
return
if log.isEnabledFor(logging.DEBUG):
node_address = raiden.address
log.debug(
'MAKER TOKEN SWAP FAILED',
node=pex(node_address),
to=pex(to_nodeaddress),
)
# all routes failed
self.async_result.set(False)
def send_and_wait_valid_state( # noqa
self,
raiden,
next_hop,
target_address,
from_token_transfer,
to_token,
to_amount):
""" Start the swap by sending the first mediated transfer to the
taker and wait for mediated transfer for the exchanged token.
This method will validate the messages received, discard the invalid
ones, and wait until a valid state is reached. The valid state is
reached when a mediated transfer for `to_token` with `to_amount` tokens
and a SecretRequest from the taker are received.
Returns:
None: when the timeout was reached.
MediatedTransfer: when a valid state is reached.
RefundTransfer: when an invalid state is reached by
our partner.
"""
# pylint: disable=too-many-arguments
# a valid state must have a secret request from the maker and a valid
# mediated transfer for the new token
received_secretrequest = False
mediated_transfer = None
response_iterator = self._send_and_wait_time(
raiden,
from_token_transfer.recipient,
from_token_transfer,
raiden.config['msg_timeout'],
)
for response in response_iterator:
transfer_is_valid_mediated_transfer = (
isinstance(response, MediatedTransfer) and
response.token == to_token and
# we need a lower expiration because:
# - otherwise the previous node is not operating correctly
# - we assume that the received mediated transfer has a smaller
# expiration so that close can be called correctly in edge cases
response.lock.expiration <= from_token_transfer.lock.expiration
)
if response is None:
if log.isEnabledFor(logging.DEBUG):
log.debug(
'MAKER SWAP TIMED OUT',
hashlock=pex(from_token_transfer.lock.hashlock),
)
return None
# The MediatedTransfer might be from `next_hop` or most likely from
# a different node.
if transfer_is_valid_mediated_transfer:
if response.lock.amount == to_amount:
mediated_transfer = response
elif isinstance(response, SecretRequest) and response.sender == target_address:
received_secretrequest = True
elif isinstance(response, RefundTransfer) and response.sender == next_hop:
return response
# The other participant must not use a direct transfer to finish
# the token swap, ignore it
elif log.isEnabledFor(logging.ERROR):
log.error(
'Invalid message ignoring. %s',
repr(response),
)
if mediated_transfer and received_secretrequest:
return mediated_transfer
return None
class TakerTokenSwapTask(BaseMediatedTransferTask):
""" Taker task, responsible to receive a MediatedTransfer for the
from_transfer and forward a to_transfer with the same hashlock.
"""
def __init__(
self,
raiden,
tokenswap,
from_mediated_transfer):
super(TakerTokenSwapTask, self).__init__()
self.raiden = raiden
self.from_mediated_transfer = from_mediated_transfer
self.tokenswap = tokenswap
def __repr__(self):
return '<{} {} from_token:{} to_token:{}>'.format(
self.__class__.__name__,
pex(self.raiden.address),
pex(self.from_mediated_transfer.token),
pex(self.tokenswap.to_token),
)
def _run(self): # pylint: disable=method-hidden,too-many-locals
fee = 0
raiden = self.raiden
tokenswap = self.tokenswap
# this is the MediatedTransfer that will pay the maker's half of the
# swap, not necessarily sent by the maker himself
maker_paying_transfer = self.from_mediated_transfer
# this is the address of the node that the taker actually has a channel
# with (might or might not be the maker)
maker_payer_hop = maker_paying_transfer.sender
assert tokenswap.identifier == maker_paying_transfer.identifier
assert tokenswap.from_token == maker_paying_transfer.token
assert tokenswap.from_amount == maker_paying_transfer.lock.amount
assert tokenswap.from_nodeaddress == maker_paying_transfer.initiator
maker_receiving_token = tokenswap.to_token
to_amount = tokenswap.to_amount
identifier = maker_paying_transfer.identifier
hashlock = maker_paying_transfer.lock.hashlock
maker_address = maker_paying_transfer.initiator
taker_receiving_token = maker_paying_transfer.token
taker_paying_token = maker_receiving_token
from_graph = raiden.token_to_channelgraph[taker_receiving_token]
from_channel = from_graph.partneraddress_to_channel[maker_payer_hop]
to_graph = raiden.token_to_channelgraph[maker_receiving_token]
# update the channel's distributable and merkle tree
from_channel.register_transfer(
raiden.get_block_number(),
maker_paying_transfer,
)
# register the task to receive Refund/Secret/RevealSecret messages
raiden.greenlet_task_dispatcher.register_task(self, hashlock)
raiden.register_channel_for_hashlock(taker_receiving_token, from_channel, hashlock)
# send to the maker a secret request informing how much the taker will
# be _paid_; this is used to inform the maker that his part of the
# mediated transfer is okay
secret_request = SecretRequest(
identifier,
maker_paying_transfer.lock.hashlock,
maker_paying_transfer.lock.amount,
)
raiden.sign(secret_request)
raiden.send_async(maker_address, secret_request)
lock_expiration = maker_paying_transfer.lock.expiration - raiden.config['reveal_timeout']
# Note: taker may only try different routes if a RefundTransfer is
# received, because the maker is the node controlling the secret
available_routes = get_best_routes(
to_graph,
raiden.protocol.nodeaddresses_networkstatuses,
raiden.address,
maker_address,
maker_paying_transfer.lock.amount,
previous_address=None,
)
if not available_routes:
if log.isEnabledFor(logging.DEBUG):
node_address = raiden.address
log.debug(
'TAKER TOKEN SWAP FAILED, NO ROUTES',
from_=pex(node_address),
to=pex(maker_address),
)
return
first_transfer = None
for route in available_routes:
taker_paying_channel = to_graph.get_channel_by_contract_address(
route.channel_address,
)
taker_paying_hop = route.node_address
if log.isEnabledFor(logging.DEBUG):
log.debug(
'TAKER TOKEN SWAP',
from_=pex(maker_paying_transfer.target),
to=pex(maker_address),
msghash=pex(maker_paying_transfer.hash),
hashlock=pex(hashlock),
)
# make a paying MediatedTransfer with same hashlock/identifier and the
# taker's paying token/amount
taker_paying_transfer = taker_paying_channel.create_mediatedtransfer(
raiden.address,
maker_address,
fee,
to_amount,
identifier,
lock_expiration,
hashlock,
)
raiden.sign(taker_paying_transfer)
taker_paying_channel.register_transfer(
raiden.get_block_number(),
taker_paying_transfer,
)
if not first_transfer:
first_transfer = taker_paying_transfer
if log.isEnabledFor(logging.DEBUG):
log.debug(
'EXCHANGE TRANSFER NEW PATH',
path=lpex(str(t).encode() for t in taker_paying_hop),
hashlock=pex(hashlock),
)
# register the task to receive Refund/Secret/RevealSecret messages
raiden.register_channel_for_hashlock(
maker_receiving_token,
taker_paying_channel,
hashlock,
)
response, secret = self.send_and_wait_valid(
raiden,
taker_paying_transfer,
maker_payer_hop,
)
# only refunds for `maker_receiving_token` must be considered
# (check send_and_wait_valid)
if isinstance(response, RefundTransfer):
if response.lock.amount != taker_paying_transfer.amount:
log.info(
'Partner %s sent an invalid refund message with an invalid amount',
pex(taker_paying_hop),
)
raiden.greenlet_task_dispatcher.unregister_task(self, hashlock, False)
return
else:
taker_paying_channel.register_transfer(
raiden.get_block_number(),
response,
)
elif isinstance(response, RevealSecret):
# the secret was registered by the message handler
# wait for the taker_paying_hop to reveal the secret prior to
# unlocking locally
if response.sender != taker_paying_hop:
response = self.wait_reveal_secret(
raiden,
taker_paying_hop,
taker_paying_transfer.lock.expiration,
)
# unlock and send the Secret message
raiden.handle_secret(
identifier,
taker_paying_token,
response.secret,
None,
hashlock,
)
# if the secret arrived early, withdraw it, otherwise send the
# RevealSecret forward in the maker-path
if secret:
raiden.handle_secret(
identifier,
taker_receiving_token,
response.secret,
secret,
hashlock,
)
# wait for the withdraw in case it did not happen yet
self._wait_for_unlock_or_close(
raiden,
from_graph,
from_channel,
maker_paying_transfer,
)
return
# the lock expired
else:
if log.isEnabledFor(logging.DEBUG):
node_address = raiden.address
log.debug(
'TAKER TOKEN SWAP FAILED',
from_=pex(node_address),
to=pex(maker_address),
)
self.async_result.set(False)
return
# no route is available, wait for the sent mediated transfer to expire
self._wait_expiration(raiden, first_transfer)
if log.isEnabledFor(logging.DEBUG):
node_address = raiden.address
log.debug(
'TAKER TOKEN SWAP FAILED',
from_=pex(node_address),
to=pex(maker_address),
)
self.async_result.set(False)
def send_and_wait_valid(self, raiden, mediated_transfer, maker_payer_hop):
""" Start the second half of the exchange and wait for the SecretReveal
for it.
This will send the taker mediated transfer with the maker as a target,
once the maker receives the transfer he is expected to send a
RevealSecret backwards.
"""
# the taker cannot discard the transfer since the secret is controlled
# by another node (the maker), so we have no option but to wait for a
# valid response until the lock expires
response_iterator = self._send_and_wait_block(
raiden,
mediated_transfer.recipient,
mediated_transfer,
mediated_transfer.lock.expiration,
)
# Usually the RevealSecret for the MediatedTransfer from this node to
# the maker should arrive first, but depending on the number of hops
# and if the maker-path is optimistically revealing the Secret, then
# the Secret message might arrive first.
secret = None
for response in response_iterator:
valid_reveal = (
isinstance(response, RevealSecret) and
response.hashlock == mediated_transfer.lock.hashlock and
response.sender == maker_payer_hop
)
valid_refund = (
isinstance(response, RefundTransfer) and
response.sender == maker_payer_hop and
response.lock.amount == mediated_transfer.lock.amount and
response.lock.expiration <= mediated_transfer.lock.expiration and
response.token == mediated_transfer.token
)
if response is None:
log.error(
'TAKER SWAP TIMED OUT',
node=pex(raiden.address),
hashlock=pex(mediated_transfer.lock.hashlock),
)
return (response, secret)
elif isinstance(response, Secret):
if sha3(response.secret) != mediated_transfer.lock.hashlock:
log.error("Secret doesn't match the hashlock, ignoring.")
continue
secret = response
elif valid_reveal:
return (response, secret)
elif valid_refund:
return (response, secret)
elif log.isEnabledFor(logging.ERROR):
log.error(
'Invalid message [%s] supplied to the task, ignoring.',
repr(response),
)
return (None, secret)
def wait_reveal_secret(self, raiden, taker_paying_hop, expiration_block):
for response in self._messages_until_block(raiden, expiration_block):
if isinstance(response, RevealSecret) and response.sender == taker_paying_hop:
return response
elif log.isEnabledFor(logging.ERROR):
log.error(
'Invalid message [%s] supplied to the task, ignoring.',
repr(response),
)
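# Summary sketch (added for clarity, not part of the original module) of the
# message flow implemented by MakerTokenSwapTask and TakerTokenSwapTask above:
#
#   maker                                    taker
#   -----                                    -----
#   MediatedTransfer(from_token)      ---->  registers the transfer
#                                     <----  SecretRequest
#                                     <----  MediatedTransfer(to_token, same hashlock)
#   RevealSecret(secret)              ---->  waits for RevealSecret / Secret
#   handle_secret() on both tokens           handle_secret() and withdraw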
| 36.007051
| 97
| 0.56713
|
d9d801cbe3e0d0308657f8028fb0b1b31d7ebb8b
| 2,096
|
py
|
Python
|
languages/python/topics/regex/P012_ReModule_RegexObject.py
|
lakshmikanth-tesla/DeveloperNotes2Myself
|
9a5dad930ddbb99ace46d2d672109e8553aecbc2
|
[
"MIT"
] | 2
|
2019-05-25T10:09:00.000Z
|
2022-03-11T09:06:23.000Z
|
languages/python/topics/regex/P012_ReModule_RegexObject.py
|
lakshmikanth-tesla/DeveloperNotes2Myself
|
9a5dad930ddbb99ace46d2d672109e8553aecbc2
|
[
"MIT"
] | 2
|
2020-03-31T04:30:17.000Z
|
2020-10-30T07:54:28.000Z
|
languages/python/topics/regex/P012_ReModule_RegexObject.py
|
lakshmikanth-tesla/DeveloperNotes2Myself
|
9a5dad930ddbb99ace46d2d672109e8553aecbc2
|
[
"MIT"
] | 4
|
2019-07-12T13:18:56.000Z
|
2021-11-17T08:04:55.000Z
|
# Description: The re.RegexObject
import re
"""
Note
1. The re.RegexObject supports following methods
- search(string[, pos[, endpos]])
- match(string[, pos[, endpos]])
- split(string, maxsplit=0)
- findall(string[, pos[, endpos]])
- finditer(string[, pos[, endpos]])
- sub(repl, string, count=0)
- subn(repl, string, count=0)
2. The re.RegexObject supports following attributes
- flags: The regex matching flags. This is a combination of the flags given to compile() and any (?...) inline flags
in the pattern.
- groups: The number of capturing groups in the pattern.
- groupindex: A dictionary mapping any symbolic group names defined by (?P<id>) to group numbers. The dictionary is
empty if no symbolic groups were used in the pattern.
- pattern: The pattern string from which the RE object was compiled.
3. The position parameter
- The optional second parameter position gives an index in the string where the search is to start.
- It defaults to 0.
- This is not completely equivalent to slicing the string. The '^' pattern character matches at the real beginning
of the string and at positions just after a newline, but not necessarily at the index where the search is to
start.
4. The endposition parameter
- The optional parameter endposition limits how far the string will be searched.
- It will be as if the string is endpos characters long, so only the characters from position to endposition - 1
will be searched for a match.
- If endposition is less than position, no match will be found; otherwise, if regexObject is a compiled regular
expression object, regexObject.search(string, 0, 50) is equivalent to regexObject.search(string[:50], 0).
"""
text = 'A bone to the dog is not charity. Charity is the bone shared with the dog, when you are just as hungry as the dog.'
pattern = 'dog'
regexObject = re.compile(pattern)
matchObject = regexObject.findall(text, pos=0, endpos=len(text)-3)
if matchObject:
print('Total Matches:', len(matchObject))
else:
print('No match found!')
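# Additional illustration (not in the original file; names below are chosen for
# the example only): the notes above about `position` vs. slicing and about the
# `endposition` equivalence can be checked directly.
anchored = re.compile('^dog')
print(anchored.search(text, 14)) # None: '^' does not match at pos=14, only at the real start
print(anchored.search(text[14:])) # matches, because slicing creates a new beginning of string
print(regexObject.search(text, 0, 50).span()) # (14, 17)
print(regexObject.search(text[:50], 0).span()) # (14, 17) -- the same result, as stated above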
| 46.577778
| 123
| 0.716603
|
aba85a8982f79dd5cfad67d945512f5c715817c8
| 371
|
py
|
Python
|
office_tracker/leave_tracker/apps.py
|
tanvir002700/tracker
|
567c3be2f36ac120fb412c06126cbd8fa72be4b9
|
[
"MIT"
] | null | null | null |
office_tracker/leave_tracker/apps.py
|
tanvir002700/tracker
|
567c3be2f36ac120fb412c06126cbd8fa72be4b9
|
[
"MIT"
] | 11
|
2020-06-05T18:04:42.000Z
|
2022-03-11T23:19:32.000Z
|
office_tracker/leave_tracker/apps.py
|
tanvir002700/tracker
|
567c3be2f36ac120fb412c06126cbd8fa72be4b9
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
from django.db.models.signals import post_save
from django.apps import apps
from .signals import assign_season_to_all_user
class LeaveTrackerConfig(AppConfig):
name = 'leave_tracker'
def ready(self):
Season = apps.get_model('leave_tracker', 'Season')
post_save.connect(assign_season_to_all_user, sender=Season)
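# Illustrative note (an assumption, since leave_tracker/signals.py is not shown here):
# a post_save receiver wired up this way conventionally has the signature
#
#     def assign_season_to_all_user(sender, instance, created, **kwargs):
#         ...
#
# where `instance` is the saved Season and `created` tells whether it was newly inserted.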
| 30.916667
| 67
| 0.773585
|
ec158b25a1e594847af37d1ac161f674369ae88f
| 3,166
|
py
|
Python
|
setup/settings.py
|
hcs42/ExponWords
|
6cbb7ccea5076a33aeff23c503d168a9f4ade8a2
|
[
"Apache-2.0"
] | null | null | null |
setup/settings.py
|
hcs42/ExponWords
|
6cbb7ccea5076a33aeff23c503d168a9f4ade8a2
|
[
"Apache-2.0"
] | null | null | null |
setup/settings.py
|
hcs42/ExponWords
|
6cbb7ccea5076a33aeff23c503d168a9f4ade8a2
|
[
"Apache-2.0"
] | null | null | null |
# Django settings for ExponWords project.
import os
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
PROJECT_DIR = os.path.normpath(os.path.abspath(os.path.dirname(__file__)))
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(PROJECT_DIR, 'production.db'),
'OPTIONS': {
# Helps avoid "DatabaseError: database is locked" errors;
# see https://docs.djangoproject.com/en/dev/ref/databases/#database-is-locked-errors
'timeout': 20
},
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = None
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
LANGUAGES = (
('en', 'English'),
('hu', 'Magyar')
)
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(PROJECT_DIR, 'ew/media/')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = 'http://myexponwordssite.org/site_media/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/admin/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '...'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'ExponWords.urls'
TEMPLATE_DIRS = ()
INSTALLED_APPS = (
'ew',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.admin',
)
LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = '/'
DEFAULT_FROM_EMAIL = 'myexponwordssite.com admin <admin@myexponwordssite.com>'
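# Illustrative sketch (the helper below is new, unused and never called by Django): the
# 'timeout' entry under DATABASES['default']['OPTIONS'] above is forwarded by the sqlite3
# backend to sqlite3.connect(), so it is the same knob as the standard-library parameter
# shown here, i.e. how many seconds a connection waits on a locked database before raising.
def _sqlite_timeout_demo():
    import sqlite3
    return sqlite3.connect(os.path.join(PROJECT_DIR, 'production.db'), timeout=20)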
| 30.442308
| 97
| 0.716361
|
fd63219fc7f5eacebd684e1839b1bebfe6099865
| 93
|
py
|
Python
|
midburn/config/prod.py
|
mtr574/projectMidbrunFirstReg
|
2569c3f07e1af746bfc1f213632708c76d8fc829
|
[
"Apache-2.0"
] | null | null | null |
midburn/config/prod.py
|
mtr574/projectMidbrunFirstReg
|
2569c3f07e1af746bfc1f213632708c76d8fc829
|
[
"Apache-2.0"
] | 1
|
2016-01-22T09:32:04.000Z
|
2016-01-22T12:14:12.000Z
|
midburn/config/prod.py
|
mtr574/projectMidbrunFirstReg
|
2569c3f07e1af746bfc1f213632708c76d8fc829
|
[
"Apache-2.0"
] | 3
|
2016-11-04T12:10:03.000Z
|
2017-02-23T08:52:53.000Z
|
from midburn.settings import *
import dj_database_url  # imported explicitly so this module does not rely on the star import above
DEBUG = True
DATABASES['default'] = dj_database_url.config()
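# Illustrative note (an assumption about deployment, not part of the original file):
# dj_database_url.config() reads the DATABASE_URL environment variable and returns a
# Django DATABASES entry, so something like
#   DATABASE_URL=postgres://user:password@localhost:5432/midburn
# is turned into a dict with ENGINE set to Django's PostgreSQL backend plus NAME, USER,
# PASSWORD, HOST and PORT filled in from the URL.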
| 18.6
| 47
| 0.763441
|
80cc85554034143d155874a883242839f3e7618e
| 24,597
|
py
|
Python
|
src/ezdxf/entities/dxfgfx.py
|
jkjt/ezdxf
|
2acc5611b81476ea16b98063b9f55446a9182b81
|
[
"MIT"
] | null | null | null |
src/ezdxf/entities/dxfgfx.py
|
jkjt/ezdxf
|
2acc5611b81476ea16b98063b9f55446a9182b81
|
[
"MIT"
] | null | null | null |
src/ezdxf/entities/dxfgfx.py
|
jkjt/ezdxf
|
2acc5611b81476ea16b98063b9f55446a9182b81
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2019-2021 Manfred Moitzi
# License: MIT License
from typing import TYPE_CHECKING, Optional, Tuple, Iterable, Dict, Any
from ezdxf.entities import factory
from ezdxf import options
from ezdxf.lldxf import validator
from ezdxf.lldxf.attributes import (
DXFAttr,
DXFAttributes,
DefSubclass,
RETURN_DEFAULT,
group_code_mapping,
)
from ezdxf import colors as clr
from ezdxf.lldxf.const import (
DXF12,
DXF2000,
DXF2004,
DXF2007,
DXF2013,
DXFValueError,
DXFKeyError,
DXFTableEntryError,
SUBCLASS_MARKER,
DXFInvalidLineType,
DXFStructureError,
TRANSPARENCY_BYBLOCK,
)
from ezdxf.math import OCS, Matrix44
from ezdxf.proxygraphic import load_proxy_graphic, export_proxy_graphic
from .dxfentity import DXFEntity, base_class, SubclassProcessor, DXFTagStorage
if TYPE_CHECKING:
from ezdxf.eztypes import (
Auditor,
TagWriter,
BaseLayout,
DXFNamespace,
Vertex,
Drawing,
)
__all__ = [
"DXFGraphic",
"acdb_entity",
"SeqEnd",
"add_entity",
"replace_entity",
"elevation_to_z_axis",
"is_graphic_entity",
"get_font_name",
]
GRAPHIC_PROPERTIES = {
"layer",
"linetype",
"color",
"lineweight",
"ltscale",
"true_color",
"color_name",
"transparency",
}
acdb_entity: DefSubclass = DefSubclass(
"AcDbEntity",
{
# Layer name as string, no auto fix for invalid names!
"layer": DXFAttr(
8, default="0", validator=validator.is_valid_layer_name
),
# Linetype name as string, no auto fix for invalid names!
"linetype": DXFAttr(
6,
default="BYLAYER",
optional=True,
validator=validator.is_valid_table_name,
),
# ACI color index, BYBLOCK=0, BYLAYER=256, BYOBJECT=257:
"color": DXFAttr(
62,
default=256,
optional=True,
validator=validator.is_valid_aci_color,
fixer=RETURN_DEFAULT,
),
# modelspace=0, paperspace=1
"paperspace": DXFAttr(
67,
default=0,
optional=True,
validator=validator.is_integer_bool,
fixer=RETURN_DEFAULT,
),
# Lineweight in mm times 100 (e.g. 0.13mm = 13). Smallest line weight is 13
# and biggest line weight is 200, values outside this range prevents AutoCAD
# from loading the file.
# Special values: BYLAYER=-1, BYBLOCK=-2, DEFAULT=-3
"lineweight": DXFAttr(
370,
default=-1,
dxfversion=DXF2000,
optional=True,
validator=validator.is_valid_lineweight,
fixer=validator.fix_lineweight,
),
"ltscale": DXFAttr(
48,
default=1.0,
dxfversion=DXF2000,
optional=True,
validator=validator.is_positive,
fixer=RETURN_DEFAULT,
),
# visible=0, invisible=1
"invisible": DXFAttr(60, default=0, dxfversion=DXF2000, optional=True),
# True color as 0x00RRGGBB 24-bit value
# True color always overrides ACI "color"!
"true_color": DXFAttr(420, dxfversion=DXF2004, optional=True),
# Color name as string. Color books are stored in .stb config files?
"color_name": DXFAttr(430, dxfversion=DXF2004, optional=True),
# Transparency value 0x020000TT 0 = fully transparent / 255 = opaque
# Special value 0x01000000 == ByBlock
# unset value means ByLayer
"transparency": DXFAttr(
440,
dxfversion=DXF2004,
optional=True,
validator=validator.is_transparency,
),
# Shadow mode:
# 0 = Casts and receives shadows
# 1 = Casts shadows
# 2 = Receives shadows
# 3 = Ignores shadows
"shadow_mode": DXFAttr(284, dxfversion=DXF2007, optional=True),
"material_handle": DXFAttr(347, dxfversion=DXF2007, optional=True),
"visualstyle_handle": DXFAttr(348, dxfversion=DXF2007, optional=True),
# PlotStyleName type enum (AcDb::PlotStyleNameType). Stored and moved around
# as a 16-bit integer. Custom non-entity
"plotstyle_enum": DXFAttr(
380, dxfversion=DXF2007, default=1, optional=True
),
# Handle value of the PlotStyleName object, basically a hard pointer, but
# has a different range to make backward compatibility easier to deal with.
"plotstyle_handle": DXFAttr(390, dxfversion=DXF2007, optional=True),
# 92 or 160?: Number of bytes in the proxy entity graphics represented in
# the subsequent 310 groups, which are binary chunk records (optional)
# 310: Proxy entity graphics data (multiple lines; 256 characters max. per
# line) (optional), compiled by TagCompiler() to a DXFBinaryTag() objects
},
)
acdb_entity_group_codes = group_code_mapping(acdb_entity)
def elevation_to_z_axis(dxf: "DXFNamespace", names: Iterable[str]):
# The elevation group code (38) is only used for DXF R11 and prior and
# ignored for DXF R2000 and later.
# DXF R12 and later store the entity elevation in the z-axis of the
# vertices, but AutoCAD supports elevation for R12 if no z-axis is present.
# DXF types with legacy elevation support:
# SOLID, TRACE, TEXT, CIRCLE, ARC, TEXT, ATTRIB, ATTDEF, INSERT, SHAPE
# The elevation is only used for DXF R12 if no z-axis is stored in the DXF
# file. This is a problem because ezdxf loads the vertices always as 3D
# vertex including a z-axis even if no z-axis is present in DXF file.
if dxf.hasattr("elevation"):
elevation = dxf.elevation
# ezdxf does not export the elevation attribute for any DXF version
dxf.discard("elevation")
if elevation == 0:
return
for name in names:
v = dxf.get(name)
# Only use elevation value if z-axis is 0, this will not work for
# situations where an elevation and a z-axis=0 is present, but let's
# assume if the elevation group code is used the z-axis is not
# present if z-axis is 0.
if v is not None and v.z == 0:
dxf.set(name, v.replace(z=elevation))
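# Concrete example of the rule above (illustrative): for a DXF R12 entity loaded with
# dxf.elevation == 5 and a vertex attribute of (2, 3, 0), the vertex becomes (2, 3, 5);
# a vertex that already carries a non-zero z-axis, e.g. (2, 3, 7), is left unchanged.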
class DXFGraphic(DXFEntity):
"""Common base class for all graphic entities, a subclass of
:class:`~ezdxf.entities.dxfentity.DXFEntity`. These entities reside in
entity spaces like modelspace, paperspace or block.
"""
DXFTYPE = "DXFGFX"
DEFAULT_ATTRIBS: Dict[str, Any] = {"layer": "0"}
DXFATTRIBS = DXFAttributes(base_class, acdb_entity)
def load_dxf_attribs(
self, processor: SubclassProcessor = None
) -> "DXFNamespace":
"""Adds subclass processing for 'AcDbEntity', requires previous base
class processing by parent class.
(internal API)
"""
dxf = super().load_dxf_attribs(processor)
if processor is None:
return dxf
r12 = processor.r12
# It is valid to mix up the base class with AcDbEntity class.
processor.append_base_class_to_acdb_entity()
# Load proxy graphic data if requested
if options.load_proxy_graphics:
# length tag has group code 92 until DXF R2010
if processor.dxfversion and processor.dxfversion < DXF2013:
code = 92
else:
code = 160
self.proxy_graphic = load_proxy_graphic(
processor.subclasses[0 if r12 else 1],
length_code=code,
)
processor.fast_load_dxfattribs(dxf, acdb_entity_group_codes, 1)
return dxf
def post_new_hook(self):
"""Post processing and integrity validation after entity creation
(internal API)
"""
if self.doc:
if self.dxf.linetype not in self.doc.linetypes:
raise DXFInvalidLineType(
f'Linetype "{self.dxf.linetype}" not defined.'
)
@property
def rgb(self) -> Optional[clr.RGB]:
"""Returns RGB true color as (r, g, b) tuple or None if true_color is
not set.
"""
if self.dxf.hasattr("true_color"):
return clr.int2rgb(self.dxf.get("true_color"))
else:
return None
@rgb.setter
def rgb(self, rgb: clr.RGB) -> None:
"""Set RGB true color as (r, g , b) tuple e.g. (12, 34, 56)."""
self.dxf.set("true_color", clr.rgb2int(rgb))
@property
def transparency(self) -> float:
"""Get transparency as float value between 0 and 1, 0 is opaque and 1
is 100% transparent (invisible). Transparency by block always returns 0.
"""
if self.dxf.hasattr("transparency"):
value = self.dxf.get("transparency")
if validator.is_transparency(value):
if value & TRANSPARENCY_BYBLOCK: # just check flag state
return 0.0
return clr.transparency2float(value)
return 0.0
@transparency.setter
def transparency(self, transparency: float) -> None:
"""Set transparency as float value between 0 and 1, 0 is opaque and 1
is 100% transparent (invisible).
"""
self.dxf.set("transparency", clr.float2transparency(transparency))
@property
def is_transparency_by_layer(self) -> bool:
"""Returns ``True`` if entity inherits transparency from layer."""
return not self.dxf.hasattr("transparency")
@property
def is_transparency_by_block(self) -> bool:
"""Returns ``True`` if entity inherits transparency from block."""
return self.dxf.get("transparency", 0) == TRANSPARENCY_BYBLOCK
def graphic_properties(self) -> Dict:
"""Returns the important common properties layer, color, linetype,
lineweight, ltscale, true_color and color_name as `dxfattribs` dict.
"""
attribs = dict()
for key in GRAPHIC_PROPERTIES:
if self.dxf.hasattr(key):
attribs[key] = self.dxf.get(key)
return attribs
def ocs(self) -> OCS:
"""Returns object coordinate system (:ref:`ocs`) for 2D entities like
:class:`Text` or :class:`Circle`, returns a pass-through OCS for
entities without OCS support.
"""
# extrusion is only defined for 2D entities like Text, Circle, ...
if self.dxf.is_supported("extrusion"):
extrusion = self.dxf.get("extrusion", default=(0, 0, 1))
return OCS(extrusion)
else:
return OCS()
def set_owner(self, owner: Optional[str], paperspace: int = 0) -> None:
"""Set owner attribute and paperspace flag. (internal API)"""
self.dxf.owner = owner
if paperspace:
self.dxf.paperspace = paperspace
else:
self.dxf.discard("paperspace")
def link_entity(self, entity: "DXFEntity") -> None:
"""Store linked or attached entities. Same API for both types of
appended data, because entities with linked entities (POLYLINE, INSERT)
have no attached entities and vice versa.
(internal API)
"""
pass
def export_entity(self, tagwriter: "TagWriter") -> None:
"""Export entity specific data as DXF tags. (internal API)"""
# Base class export is done by parent class.
self.export_acdb_entity(tagwriter)
# XDATA and embedded objects export is also done by the parent class.
def export_acdb_entity(self, tagwriter: "TagWriter"):
"""Export subclass 'AcDbEntity' as DXF tags. (internal API)"""
# Full control over tag order and YES, sometimes order matters
not_r12 = tagwriter.dxfversion > DXF12
if not_r12:
tagwriter.write_tag2(SUBCLASS_MARKER, acdb_entity.name)
self.dxf.export_dxf_attribs(
tagwriter,
[
"paperspace",
"layer",
"linetype",
"material_handle",
"color",
"lineweight",
"ltscale",
"true_color",
"color_name",
"transparency",
"plotstyle_enum",
"plotstyle_handle",
"shadow_mode",
"visualstyle_handle",
],
)
if self.proxy_graphic and not_r12 and options.store_proxy_graphics:
# length tag has group code 92 until DXF R2010
export_proxy_graphic(
self.proxy_graphic,
tagwriter=tagwriter,
length_code=(92 if tagwriter.dxfversion < DXF2013 else 160),
)
def get_layout(self) -> Optional["BaseLayout"]:
"""Returns the owner layout or returns ``None`` if entity is not
assigned to any layout.
"""
if self.dxf.owner is None or self.doc is None: # unlinked entity
return None
try:
return self.doc.layouts.get_layout_by_key(self.dxf.owner)
except DXFKeyError:
pass
try:
return self.doc.blocks.get_block_layout_by_handle(self.dxf.owner)
except DXFTableEntryError:
return None
def unlink_from_layout(self) -> None:
"""
Unlink entity from associated layout. Does nothing if entity is already
unlinked.
It is more efficient to call the
:meth:`~ezdxf.layouts.BaseLayout.unlink_entity` method of the associated
layout, especially if you have to unlink more than one entity.
"""
if not self.is_alive:
raise TypeError("Can not unlink destroyed entity.")
if self.doc is None:
# no doc -> no layout
self.dxf.owner = None
return
layout = self.get_layout()
if layout:
layout.unlink_entity(self)
def move_to_layout(
self, layout: "BaseLayout", source: "BaseLayout" = None
) -> None:
"""
Move entity from model space or a paper space layout to another layout.
For block layout as source, the block layout has to be specified. Moving
between different DXF drawings is not supported.
Args:
layout: any layout (model space, paper space, block)
source: provide source layout, faster for DXF R12, if entity is
in a block layout
Raises:
DXFStructureError: for moving between different DXF drawings
"""
if source is None:
source = self.get_layout()
if source is None:
raise DXFValueError("Source layout for entity not found.")
source.move_to_layout(self, layout)
def copy_to_layout(self, layout: "BaseLayout") -> "DXFEntity":
"""
Copy entity to another `layout`, returns new created entity as
:class:`DXFEntity` object. Copying between different DXF drawings is
not supported.
Args:
layout: any layout (model space, paper space, block)
Raises:
DXFStructureError: for copying between different DXF drawings
"""
if self.doc != layout.doc:
raise DXFStructureError(
"Copying between different DXF drawings is not supported."
)
new_entity = self.copy()
layout.add_entity(new_entity)
return new_entity
def audit(self, auditor: "Auditor") -> None:
"""Audit and repair graphical DXF entities.
.. important::
Do not delete entities during the auditing process, because this
would alter the entity database while iterating; instead use::
auditor.trash(entity)
to delete invalid entities after auditing automatically.
"""
assert self.doc is auditor.doc, "Auditor for different DXF document."
if not self.is_alive:
return
super().audit(auditor)
auditor.check_owner_exist(self)
dxf = self.dxf
if dxf.hasattr("layer"):
auditor.check_for_valid_layer_name(self)
if dxf.hasattr("linetype"):
auditor.check_entity_linetype(self)
if dxf.hasattr("color"):
auditor.check_entity_color_index(self)
if dxf.hasattr("lineweight"):
auditor.check_entity_lineweight(self)
if dxf.hasattr("extrusion"):
auditor.check_extrusion_vector(self)
if dxf.hasattr("transparency"):
auditor.check_transparency(self)
def transform(self, m: "Matrix44") -> "DXFGraphic":
"""Inplace transformation interface, returns `self`
(floating interface).
Args:
m: 4x4 transformation matrix (:class:`ezdxf.math.Matrix44`)
"""
raise NotImplementedError()
def post_transform(self, m: "Matrix44") -> None:
"""Should be called if the main entity transformation was successful."""
if self.xdata is not None:
self.xdata.transform(m)
@property
def is_post_transform_required(self) -> bool:
"""Check if post transform call is required."""
return self.xdata is not None
def translate(self, dx: float, dy: float, dz: float) -> "DXFGraphic":
"""Translate entity inplace about `dx` in x-axis, `dy` in y-axis and
`dz` in z-axis, returns `self` (floating interface).
Basic implementation uses the :meth:`transform` interface, subclasses
may have faster implementations.
"""
return self.transform(Matrix44.translate(dx, dy, dz))
def scale(self, sx: float, sy: float, sz: float) -> "DXFGraphic":
"""Scale entity inplace about `dx` in x-axis, `dy` in y-axis and `dz`
in z-axis, returns `self` (floating interface).
"""
return self.transform(Matrix44.scale(sx, sy, sz))
def scale_uniform(self, s: float) -> "DXFGraphic":
"""Scale entity inplace uniform about `s` in x-axis, y-axis and z-axis,
returns `self` (floating interface).
"""
return self.transform(Matrix44.scale(s))
def rotate_axis(self, axis: "Vertex", angle: float) -> "DXFGraphic":
"""Rotate entity inplace about vector `axis`, returns `self`
(floating interface).
Args:
axis: rotation axis as tuple or :class:`Vec3`
angle: rotation angle in radians
"""
return self.transform(Matrix44.axis_rotate(axis, angle))
def rotate_x(self, angle: float) -> "DXFGraphic":
"""Rotate entity inplace about x-axis, returns `self`
(floating interface).
Args:
angle: rotation angle in radians
"""
return self.transform(Matrix44.x_rotate(angle))
def rotate_y(self, angle: float) -> "DXFGraphic":
"""Rotate entity inplace about y-axis, returns `self`
(floating interface).
Args:
angle: rotation angle in radians
"""
return self.transform(Matrix44.y_rotate(angle))
def rotate_z(self, angle: float) -> "DXFGraphic":
"""Rotate entity inplace about z-axis, returns `self`
(floating interface).
Args:
angle: rotation angle in radians
"""
return self.transform(Matrix44.z_rotate(angle))
def has_hyperlink(self) -> bool:
"""Returns ``True`` if entity has an attached hyperlink."""
return bool(self.xdata) and ("PE_URL" in self.xdata) # type: ignore
def set_hyperlink(
self, link: str, description: str = None, location: str = None
):
"""Set hyperlink of an entity."""
xdata = [(1001, "PE_URL"), (1000, str(link))]
if description:
xdata.append((1002, "{"))
xdata.append((1000, str(description)))
if location:
xdata.append((1000, str(location)))
xdata.append((1002, "}"))
self.discard_xdata("PE_URL")
self.set_xdata("PE_URL", xdata)
if self.doc and "PE_URL" not in self.doc.appids:
self.doc.appids.new("PE_URL")
return self
def get_hyperlink(self) -> Tuple[str, str, str]:
"""Returns hyperlink, description and location."""
link = ""
description = ""
location = ""
if self.xdata and "PE_URL" in self.xdata:
xdata = [
tag.value
for tag in self.get_xdata("PE_URL")
if tag.code == 1000
]
if len(xdata):
link = xdata[0]
if len(xdata) > 1:
description = xdata[1]
if len(xdata) > 2:
location = xdata[2]
return link, description, location
def remove_dependencies(self, other: "Drawing" = None) -> None:
"""Remove all dependencies from current document.
(internal API)
"""
if not self.is_alive:
return
super().remove_dependencies(other)
# The layer attribute is preserved because a layer doesn't need a layer
# table entry; the layer attributes are reset to default attributes
# like color 7 and linetype CONTINUOUS
has_linetype = other is not None and (
self.dxf.linetype in other.linetypes
)
if not has_linetype:
self.dxf.linetype = "BYLAYER"
self.dxf.discard("material_handle")
self.dxf.discard("visualstyle_handle")
self.dxf.discard("plotstyle_enum")
self.dxf.discard("plotstyle_handle")
def _new_compound_entity(
self, type_: str, dxfattribs: dict
) -> "DXFGraphic":
"""Create and bind new entity with same layout settings as `self`.
Used by INSERT & POLYLINE to create appended DXF entities, don't use it
to create new standalone entities.
(internal API)
"""
dxfattribs = dxfattribs or {}
# if layer is not deliberately set, set same layer as creator entity,
# at least VERTEX should have the same layer as the POLYGON entity.
# Don't know if that is also important for the ATTRIB & INSERT entity.
if "layer" not in dxfattribs:
dxfattribs["layer"] = self.dxf.layer
if self.doc:
entity = factory.create_db_entry(type_, dxfattribs, self.doc)
else:
entity = factory.new(type_, dxfattribs)
entity.dxf.owner = self.dxf.owner
entity.dxf.paperspace = self.dxf.paperspace
return entity # type: ignore
@factory.register_entity
class SeqEnd(DXFGraphic):
DXFTYPE = "SEQEND"
def add_entity(entity: DXFGraphic, layout: "BaseLayout") -> None:
"""Add `entity` entity to the entity database and to the given `layout`."""
assert entity.dxf.handle is None
assert layout is not None
if layout.doc:
factory.bind(entity, layout.doc)
layout.add_entity(entity)
def replace_entity(
source: DXFGraphic, target: DXFGraphic, layout: "BaseLayout"
) -> None:
"""Add `target` entity to the entity database and to the given `layout`
and replace the `source` entity by the `target` entity.
"""
assert target.dxf.handle is None
assert layout is not None
target.dxf.handle = source.dxf.handle
if source in layout:
layout.delete_entity(source)
if layout.doc:
factory.bind(target, layout.doc)
layout.add_entity(target)
else:
source.destroy()
def is_graphic_entity(entity: DXFEntity) -> bool:
"""Returns ``True`` if the `entity` has a graphical representations and
can reside in the model space, a paper space or a block layout,
otherwise the entity is a table or class entry or a DXF object from the
OBJECTS section.
"""
if isinstance(entity, DXFGraphic):
return True
if isinstance(entity, DXFTagStorage) and entity.is_graphic_entity:
return True
return False
def get_font_name(entity: DXFEntity) -> str:
"""Returns the name of the font use by an entity.
This function always returns a font name even if the entity does not have
any font usage. The default font name is "txt".
"""
font_name = "txt"
if entity.doc and entity.dxf.hasattr("style"):
style_name = entity.dxf.style
style = entity.doc.styles.get(style_name)
if style:
font_name = style.dxf.font
return font_name
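if __name__ == "__main__":
    # Minimal usage sketch (illustrative only, not part of the module): typical user code
    # exercising a few of the DXFGraphic conveniences documented above on a new document.
    # It assumes an installed ezdxf package; shown here for reference.
    import ezdxf

    doc = ezdxf.new()
    msp = doc.modelspace()
    line = msp.add_line((0, 0), (10, 0))
    line.rgb = (200, 20, 20)      # true color overrides the ACI color
    line.transparency = 0.5       # 50% transparent
    line.set_hyperlink("https://ezdxf.mozman.at", description="ezdxf docs")
    print(line.graphic_properties())
    print(line.get_hyperlink())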
| 34.692525
| 84
| 0.608245
|
a2c0763378c0cd4c270fb23b9d13ecb02f0ef5c9
| 3,807
|
py
|
Python
|
couler/tests/mpi_step_test.py
|
javoweb/couler
|
1531f31816a1505401c5326dc5fec5a8bb7bf7cd
|
[
"Apache-2.0"
] | 700
|
2020-08-19T16:50:32.000Z
|
2022-03-31T11:05:22.000Z
|
couler/tests/mpi_step_test.py
|
javoweb/couler
|
1531f31816a1505401c5326dc5fec5a8bb7bf7cd
|
[
"Apache-2.0"
] | 136
|
2020-08-19T16:54:20.000Z
|
2022-03-08T15:23:53.000Z
|
couler/tests/mpi_step_test.py
|
javoweb/couler
|
1531f31816a1505401c5326dc5fec5a8bb7bf7cd
|
[
"Apache-2.0"
] | 74
|
2020-08-19T19:48:56.000Z
|
2022-03-04T17:39:42.000Z
|
# Copyright 2021 The Couler Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from io import StringIO
import yaml
import couler.argo as couler
import couler.steps.mpi as mpi
from couler.core import utils
from couler.tests.argo_yaml_test import ArgoYamlTest
class MPITestCase(ArgoYamlTest):
def test_mpi_train(self):
access_key_secret = {"access_key": "key1234"}
secret = couler.create_secret(secret_data=access_key_secret)
mpi.train(
num_workers=3,
image="mpi:1.13",
command="python mpi.py",
worker_resources="cpu=0.5,memory=1024",
clean_pod_policy="Running",
secret=secret,
)
secret_yaml = list(couler.states._secrets.values())[0].to_yaml()
self.assertEqual(
secret_yaml["data"]["access_key"], utils.encode_base64("key1234")
)
wf = couler.workflow_yaml()
self.assertEqual(len(wf["spec"]["templates"]), 2)
# Check steps template
template0 = wf["spec"]["templates"][0]
self.assertEqual(len(template0["steps"]), 1)
self.assertEqual(len(template0["steps"][0]), 1)
# Check train template
template1 = wf["spec"]["templates"][1]
self.assertEqual(template1["name"], "test-mpi-train")
resource = template1["resource"]
self.assertEqual(resource["action"], "create")
self.assertEqual(resource["setOwnerReference"], "true")
self.assertEqual(
resource["successCondition"],
"status.replicaStatuses.Worker.succeeded == 3",
)
self.assertEqual(
resource["failureCondition"],
"status.replicaStatuses.Worker.failed > 0",
)
# Check the MPIJob spec
mpi_job = yaml.load(
StringIO(resource["manifest"]), Loader=yaml.FullLoader
)
self.assertEqual(mpi_job["kind"], "MPIJob")
self.assertEqual(mpi_job["spec"]["cleanPodPolicy"], "Running")
master = mpi_job["spec"]["mpiReplicaSpecs"]["Launcher"]
self.assertEqual(master["replicas"], 1)
chief_container = master["template"]["spec"]["containers"][0]
self.assertEqual(chief_container["env"][0]["name"], "access_key")
self.assertEqual(
chief_container["env"][0]["valueFrom"]["secretKeyRef"]["name"],
secret_yaml["metadata"]["name"],
)
worker = mpi_job["spec"]["mpiReplicaSpecs"]["Worker"]
self.assertEqual(worker["replicas"], 3)
self.assertEqual(len(worker["template"]["spec"]["containers"]), 1)
worker_container = worker["template"]["spec"]["containers"][0]
self.assertEqual(worker_container["image"], "mpi:1.13")
self.assertEqual(worker_container["command"], "python mpi.py")
worker_container = worker["template"]["spec"]["containers"][0]
self.assertEqual(worker_container["env"][0]["name"], "access_key")
self.assertEqual(
worker_container["env"][0]["valueFrom"]["secretKeyRef"]["name"],
secret_yaml["metadata"]["name"],
)
self.assertEqual(worker_container["resources"]["limits"]["cpu"], 0.5)
self.assertEqual(
worker_container["resources"]["limits"]["memory"], 1024
)
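# For reference (reconstructed from the assertions above, not emitted by couler itself):
# the MPIJob manifest checked in test_mpi_train has roughly this shape.
EXPECTED_MPIJOB_SHAPE = {
    "kind": "MPIJob",
    "spec": {
        "cleanPodPolicy": "Running",
        "mpiReplicaSpecs": {
            "Launcher": {"replicas": 1},   # launcher container gets the secret as env vars
            "Worker": {"replicas": 3},     # workers use image mpi:1.13 with cpu=0.5, memory=1024
        },
    },
}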
| 39.65625
| 77
| 0.634095
|
f4bb6f5bada4498926d734b6d9dd9b69b6d4fca1
| 1,972
|
py
|
Python
|
xgboost/xgb.py
|
alvinx31/AI-Pool
|
9f01ddff1b3a6cbfb1648d75f4c3706c424c9930
|
[
"MIT"
] | null | null | null |
xgboost/xgb.py
|
alvinx31/AI-Pool
|
9f01ddff1b3a6cbfb1648d75f4c3706c424c9930
|
[
"MIT"
] | null | null | null |
xgboost/xgb.py
|
alvinx31/AI-Pool
|
9f01ddff1b3a6cbfb1648d75f4c3706c424c9930
|
[
"MIT"
] | null | null | null |
#coding=utf-8
"""
Created on 2016/09/17
By 我曾经被山河大海跨过
"""
import numpy as np
import pandas as pd
import xgboost as xgb
from sklearn.cross_validation import train_test_split
# Record the program start time
import time
start_time = time.time()
# Load the data
train = pd.read_csv("train.csv")
tests = pd.read_csv("test.csv")
params={
'booster':'gbtree',
'objective': 'multi:softmax', # multi-class classification
'num_class':10, # number of classes, used together with multi:softmax
'gamma':0.1, # controls post-pruning; larger is more conservative, typically around 0.1-0.2
'max_depth':12, # tree depth; deeper trees overfit more easily
'lambda':2, # L2 regularization term on weights; larger values make the model less prone to overfitting
'subsample':0.7, # row subsampling of the training data
'colsample_bytree':0.7, # column subsampling when constructing each tree
'min_child_weight':3,
# Defaults to 1: the minimum sum of instance weights (hessian, h) required in a leaf.
# For an imbalanced 0-1 classification task with h around 0.01, a value of 1 means a
# leaf must contain roughly 100 samples. This parameter strongly affects the result: it
# controls the minimum sum of second derivatives in a leaf, and smaller values overfit more easily.
'silent':0 , # 1 suppresses run-time messages; 0 is usually the better choice
'eta': 0.007, # learning rate
'seed':1000,
'nthread':7, # number of CPU threads
#'eval_metric': 'auc'
}
plst = list(params.items())
num_rounds = 10 #5000 # number of boosting iterations
train_xy,val = train_test_split(train, test_size = 0.3,random_state=1)
#random_state is of big influence for val-auc
y = train_xy.label
X = train_xy.drop(['label'],axis=1)
val_y = val.label
val_X = val.drop(['label'],axis=1)
xgb_val = xgb.DMatrix(val_X,label=val_y)
xgb_train = xgb.DMatrix(X, label=y)
xgb_test = xgb.DMatrix(tests)
watchlist = [(xgb_train, 'train'),(xgb_val, 'val')]
# training model
# early_stopping_rounds: when the number of boosting rounds is large, training stops early
# if the evaluation metric has not improved within the given number of rounds
model = xgb.train(plst, xgb_train, num_rounds, watchlist,early_stopping_rounds=100)
model.save_model('./model/xgb.model') # save the trained model
print("best_ntree_limit:", model.best_ntree_limit)
print("Running model.predict ...")
preds = model.predict(xgb_test,ntree_limit=model.best_ntree_limit)
np.savetxt('xgb_submission.csv',np.c_[range(1,len(tests)+1),preds],delimiter=',',header='ImageId,Label',comments='',fmt='%d')
# Print the total runtime
cost_time = time.time()-start_time
print("xgboost success!",'\n',"cost time:",cost_time,"(s)")
| 27.774648
| 125
| 0.742394
|
32473c66349d7b0b578232016d303fe2db36652f
| 1,158
|
py
|
Python
|
robot_util.py
|
bmorrison4/LED-bot-RMR
|
d8e5abed39f55682b0107ffafcbced5c78be1820
|
[
"Apache-2.0"
] | null | null | null |
robot_util.py
|
bmorrison4/LED-bot-RMR
|
d8e5abed39f55682b0107ffafcbced5c78be1820
|
[
"Apache-2.0"
] | null | null | null |
robot_util.py
|
bmorrison4/LED-bot-RMR
|
d8e5abed39f55682b0107ffafcbced5c78be1820
|
[
"Apache-2.0"
] | null | null | null |
import os
import time
import traceback
import ssl
import urllib2
import getpass
import json
ConfigFilename = "/home/pi/config_" + getpass.getuser() + ".json"
def getWithRetry(url, secure=True):
for retryNumber in range(2000):
try:
print "GET", url
if secure:
response = urllib2.urlopen(url).read()
else:
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
response = urllib2.urlopen(url, context=ctx).read()
break
except:
print "could not open url", url
traceback.print_exc()
time.sleep(2)
return response
def sendSerialCommand(ser, command):
print(ser.name) # check which port was really used
ser.nonblocking()
# loop to collect input
#s = "f"
#print "string:", s
print str(command.lower())
ser.write(command.lower().encode("utf8") + "\r\n") # write a string
#ser.write(s)
ser.flush()
#while ser.in_waiting > 0:
# print "read:", ser.read()
#ser.close()
| 21.444444
| 71
| 0.570812
|
431c9a6ede9bfb15de308f98109b2a0d42a56ef8
| 5,481
|
py
|
Python
|
virtual/Lib/site-packages/dash_html_components/Output.py
|
LeonZly90/LeonZly90
|
935a658814632beca84cab0af6c048dd762f8c56
|
[
"MIT"
] | 2
|
2021-07-18T11:39:56.000Z
|
2021-11-06T17:13:05.000Z
|
venv/Lib/site-packages/dash_html_components/Output.py
|
wakisalvador/constructed-misdirection
|
74779e9ec640a11bc08d5d1967c85ac4fa44ea5e
|
[
"Unlicense"
] | null | null | null |
venv/Lib/site-packages/dash_html_components/Output.py
|
wakisalvador/constructed-misdirection
|
74779e9ec640a11bc08d5d1967c85ac4fa44ea5e
|
[
"Unlicense"
] | null | null | null |
# AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
class Output(Component):
"""An Output component.
Output is a wrapper for the <output> HTML5 element.
CAUTION: <output> is included for completeness, but its typical usage
requires the oninput attribute of the enclosing <form> element, which
is not accessible to Dash.
For detailed attribute info see:
https://developer.mozilla.org/en-US/docs/Web/HTML/Element/output
Keyword arguments:
- children (a list of or a singular dash component, string or number; optional):
The children of this component.
- id (string; optional):
The ID of this component, used to identify dash components in
callbacks. The ID needs to be unique across all of the components
in an app.
- accessKey (string; optional):
Keyboard shortcut to activate or add focus to the element.
- aria-* (string; optional):
A wildcard aria attribute.
- className (string; optional):
Often used with CSS to style elements with common properties.
- contentEditable (string; optional):
Indicates whether the element's content is editable.
- contextMenu (string; optional):
Defines the ID of a <menu> element which will serve as the
element's context menu.
- data-* (string; optional):
A wildcard data attribute.
- dir (string; optional):
Defines the text direction. Allowed values are ltr (Left-To-Right)
or rtl (Right-To-Left).
- draggable (string; optional):
Defines whether the element can be dragged.
- form (string; optional):
Indicates the form that is the owner of the element.
- hidden (a value equal to: 'hidden', 'HIDDEN' | boolean; optional):
Prevents rendering of given element, while keeping child elements,
e.g. script elements, active.
- htmlFor (string; optional):
Describes elements which belongs to this one.
- key (string; optional):
A unique identifier for the component, used to improve performance
by React.js while rendering components See
https://reactjs.org/docs/lists-and-keys.html for more info.
- lang (string; optional):
Defines the language used in the element.
- loading_state (dict; optional):
Object that holds the loading state object coming from
dash-renderer.
`loading_state` is a dict with keys:
- component_name (string; optional):
Holds the name of the component that is loading.
- is_loading (boolean; optional):
Determines if the component is loading or not.
- prop_name (string; optional):
Holds which property is loading.
- n_clicks (number; default 0):
An integer that represents the number of times that this element
has been clicked on.
- n_clicks_timestamp (number; default -1):
An integer that represents the time (in ms since 1970) at which
n_clicks changed. This can be used to tell which button was
changed most recently.
- name (string; optional):
Name of the element. For example used by the server to identify
the fields in form submits.
- role (string; optional):
The ARIA role attribute.
- spellCheck (string; optional):
Indicates whether spell checking is allowed for the element.
- style (dict; optional):
Defines CSS styles which will override styles previously set.
- tabIndex (string; optional):
Overrides the browser's default tab order and follows the one
specified instead.
- title (string; optional):
Text to be displayed in a tooltip when hovering over the element."""
@_explicitize_args
def __init__(self, children=None, id=Component.UNDEFINED, n_clicks=Component.UNDEFINED, n_clicks_timestamp=Component.UNDEFINED, key=Component.UNDEFINED, role=Component.UNDEFINED, htmlFor=Component.UNDEFINED, form=Component.UNDEFINED, name=Component.UNDEFINED, accessKey=Component.UNDEFINED, className=Component.UNDEFINED, contentEditable=Component.UNDEFINED, contextMenu=Component.UNDEFINED, dir=Component.UNDEFINED, draggable=Component.UNDEFINED, hidden=Component.UNDEFINED, lang=Component.UNDEFINED, spellCheck=Component.UNDEFINED, style=Component.UNDEFINED, tabIndex=Component.UNDEFINED, title=Component.UNDEFINED, loading_state=Component.UNDEFINED, **kwargs):
self._prop_names = ['children', 'id', 'accessKey', 'aria-*', 'className', 'contentEditable', 'contextMenu', 'data-*', 'dir', 'draggable', 'form', 'hidden', 'htmlFor', 'key', 'lang', 'loading_state', 'n_clicks', 'n_clicks_timestamp', 'name', 'role', 'spellCheck', 'style', 'tabIndex', 'title']
self._type = 'Output'
self._namespace = 'dash_html_components'
self._valid_wildcard_attributes = ['data-', 'aria-']
self.available_properties = ['children', 'id', 'accessKey', 'aria-*', 'className', 'contentEditable', 'contextMenu', 'data-*', 'dir', 'draggable', 'form', 'hidden', 'htmlFor', 'key', 'lang', 'loading_state', 'n_clicks', 'n_clicks_timestamp', 'name', 'role', 'spellCheck', 'style', 'tabIndex', 'title']
self.available_wildcard_properties = ['data-', 'aria-']
_explicit_args = kwargs.pop('_explicit_args')
_locals = locals()
_locals.update(kwargs) # For wildcard attrs
args = {k: _locals[k] for k in _explicit_args if k != 'children'}
for k in []:
if k not in args:
raise TypeError(
'Required argument `' + k + '` was not specified.')
super(Output, self).__init__(children=children, **args)
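if __name__ == '__main__':
    # Minimal usage sketch (illustrative only, not part of the generated file): places an
    # Output element in a Dash layout. The app wiring around it is an assumption.
    import dash
    import dash_html_components as html

    app = dash.Dash(__name__)
    app.layout = html.Div([
        html.Output(id='result', htmlFor='a b', children='0'),
    ])
    # app.run_server(debug=True)  # uncomment to serve locally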
| 41.839695
| 667
| 0.710089
|
2dcc34751c72483e3921d35c0a6b137fdb07f0c8
| 6,760
|
py
|
Python
|
sdk/python/pulumi_oci/identity/get_identity_providers.py
|
EladGabay/pulumi-oci
|
6841e27d4a1a7e15c672306b769912efbfd3ba99
|
[
"ECL-2.0",
"Apache-2.0"
] | 5
|
2021-08-17T11:14:46.000Z
|
2021-12-31T02:07:03.000Z
|
sdk/python/pulumi_oci/identity/get_identity_providers.py
|
pulumi-oci/pulumi-oci
|
6841e27d4a1a7e15c672306b769912efbfd3ba99
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-09-06T11:21:29.000Z
|
2021-09-06T11:21:29.000Z
|
sdk/python/pulumi_oci/identity/get_identity_providers.py
|
pulumi-oci/pulumi-oci
|
6841e27d4a1a7e15c672306b769912efbfd3ba99
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2021-08-24T23:31:30.000Z
|
2022-01-02T19:26:54.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = [
'GetIdentityProvidersResult',
'AwaitableGetIdentityProvidersResult',
'get_identity_providers',
]
@pulumi.output_type
class GetIdentityProvidersResult:
"""
A collection of values returned by getIdentityProviders.
"""
def __init__(__self__, compartment_id=None, filters=None, id=None, identity_providers=None, name=None, protocol=None, state=None):
if compartment_id and not isinstance(compartment_id, str):
raise TypeError("Expected argument 'compartment_id' to be a str")
pulumi.set(__self__, "compartment_id", compartment_id)
if filters and not isinstance(filters, list):
raise TypeError("Expected argument 'filters' to be a list")
pulumi.set(__self__, "filters", filters)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if identity_providers and not isinstance(identity_providers, list):
raise TypeError("Expected argument 'identity_providers' to be a list")
pulumi.set(__self__, "identity_providers", identity_providers)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if protocol and not isinstance(protocol, str):
raise TypeError("Expected argument 'protocol' to be a str")
pulumi.set(__self__, "protocol", protocol)
if state and not isinstance(state, str):
raise TypeError("Expected argument 'state' to be a str")
pulumi.set(__self__, "state", state)
@property
@pulumi.getter(name="compartmentId")
def compartment_id(self) -> str:
"""
The OCID of the tenancy containing the `IdentityProvider`.
"""
return pulumi.get(self, "compartment_id")
@property
@pulumi.getter
def filters(self) -> Optional[Sequence['outputs.GetIdentityProvidersFilterResult']]:
return pulumi.get(self, "filters")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="identityProviders")
def identity_providers(self) -> Sequence['outputs.GetIdentityProvidersIdentityProviderResult']:
"""
The list of identity_providers.
"""
return pulumi.get(self, "identity_providers")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
The name you assign to the `IdentityProvider` during creation. The name must be unique across all `IdentityProvider` objects in the tenancy and cannot be changed. This is the name federated users see when choosing which identity provider to use when signing in to the Oracle Cloud Infrastructure Console.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def protocol(self) -> str:
"""
The protocol used for federation. Allowed value: `SAML2`. Example: `SAML2`
"""
return pulumi.get(self, "protocol")
@property
@pulumi.getter
def state(self) -> Optional[str]:
"""
The current state.
"""
return pulumi.get(self, "state")
class AwaitableGetIdentityProvidersResult(GetIdentityProvidersResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetIdentityProvidersResult(
compartment_id=self.compartment_id,
filters=self.filters,
id=self.id,
identity_providers=self.identity_providers,
name=self.name,
protocol=self.protocol,
state=self.state)
def get_identity_providers(compartment_id: Optional[str] = None,
filters: Optional[Sequence[pulumi.InputType['GetIdentityProvidersFilterArgs']]] = None,
name: Optional[str] = None,
protocol: Optional[str] = None,
state: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetIdentityProvidersResult:
"""
This data source provides the list of Identity Providers in Oracle Cloud Infrastructure Identity service.
Lists all the identity providers in your tenancy. You must specify the identity provider type (e.g., `SAML2` for
identity providers using the SAML2.0 protocol). You must specify your tenancy's OCID as the value for the
compartment ID (remember that the tenancy is simply the root compartment).
See [Where to Get the Tenancy's OCID and User's OCID](https://docs.cloud.oracle.com/iaas/Content/API/Concepts/apisigningkey.htm#five).
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_identity_providers = oci.identity.get_identity_providers(compartment_id=var["tenancy_ocid"],
protocol=var["identity_provider_protocol"],
name=var["identity_provider_name"],
state=var["identity_provider_state"])
```
:param str compartment_id: The OCID of the compartment (remember that the tenancy is simply the root compartment).
:param str name: A filter to only return resources that match the given name exactly.
:param str protocol: The protocol used for federation.
:param str state: A filter to only return resources that match the given lifecycle state. The state value is case-insensitive.
"""
__args__ = dict()
__args__['compartmentId'] = compartment_id
__args__['filters'] = filters
__args__['name'] = name
__args__['protocol'] = protocol
__args__['state'] = state
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('oci:identity/getIdentityProviders:getIdentityProviders', __args__, opts=opts, typ=GetIdentityProvidersResult).value
return AwaitableGetIdentityProvidersResult(
compartment_id=__ret__.compartment_id,
filters=__ret__.filters,
id=__ret__.id,
identity_providers=__ret__.identity_providers,
name=__ret__.name,
protocol=__ret__.protocol,
state=__ret__.state)
| 40.238095
| 312
| 0.671746
|
0fbced832a946f061c872027eaf01a91f3566dfb
| 7,748
|
py
|
Python
|
mitmproxy/tools/console/defaultkeys.py
|
developeric/mitmproxy
|
c996f1ee74a367424b956a77c3e2c7bfc9c86da5
|
[
"MIT"
] | 9
|
2021-12-19T13:47:10.000Z
|
2022-03-26T06:34:02.000Z
|
mitmproxy/tools/console/defaultkeys.py
|
PeterDaveHello/mitmproxy
|
4bd7b6c4eadeaca712f63e0e73f20bcf6aadbffb
|
[
"MIT"
] | null | null | null |
mitmproxy/tools/console/defaultkeys.py
|
PeterDaveHello/mitmproxy
|
4bd7b6c4eadeaca712f63e0e73f20bcf6aadbffb
|
[
"MIT"
] | 3
|
2021-12-20T08:21:47.000Z
|
2022-03-29T17:55:12.000Z
|
def map(km):
km.add(":", "console.command ", ["commonkey", "global"], "Command prompt")
km.add("?", "console.view.help", ["global"], "View help")
km.add("B", "browser.start", ["global"], "Start an attached browser")
km.add("C", "console.view.commands", ["global"], "View commands")
km.add("K", "console.view.keybindings", ["global"], "View key bindings")
km.add("O", "console.view.options", ["commonkey", "global"], "View options")
km.add("E", "console.view.eventlog", ["commonkey", "global"], "View event log")
km.add("Q", "console.exit", ["global"], "Exit immediately")
km.add("q", "console.view.pop", ["commonkey", "global"], "Exit the current view")
km.add("-", "console.layout.cycle", ["global"], "Cycle to next layout")
km.add("shift tab", "console.panes.next", ["global"], "Focus next layout pane")
km.add("ctrl right", "console.panes.next", ["global"], "Focus next layout pane")
km.add("P", "console.view.flow @focus", ["global"], "View flow details")
km.add("?", "console.view.pop", ["help"], "Exit help")
km.add("g", "console.nav.start", ["global"], "Go to start")
km.add("G", "console.nav.end", ["global"], "Go to end")
km.add("k", "console.nav.up", ["global"], "Up")
km.add("j", "console.nav.down", ["global"], "Down")
km.add("l", "console.nav.right", ["global"], "Right")
km.add("h", "console.nav.left", ["global"], "Left")
km.add("tab", "console.nav.next", ["commonkey", "global"], "Next")
km.add("enter", "console.nav.select", ["commonkey", "global"], "Select")
km.add("space", "console.nav.pagedown", ["global"], "Page down")
km.add("ctrl f", "console.nav.pagedown", ["global"], "Page down")
km.add("ctrl b", "console.nav.pageup", ["global"], "Page up")
km.add("I", "set intercept_active toggle", ["global"], "Toggle whether the filtering via the intercept option is enabled")
km.add("i", "console.command.set intercept", ["global"], "Set intercept")
km.add("W", "console.command.set save_stream_file", ["global"], "Stream to file")
km.add("A", "flow.resume @all", ["flowlist", "flowview"], "Resume all intercepted flows")
km.add("a", "flow.resume @focus", ["flowlist", "flowview"], "Resume this intercepted flow")
km.add(
"b", "console.command cut.save @focus response.content ",
["flowlist", "flowview"],
"Save response body to file"
)
km.add("d", "view.flows.remove @focus", ["flowlist", "flowview"], "Delete flow from view")
km.add("D", "view.flows.duplicate @focus", ["flowlist", "flowview"], "Duplicate flow")
km.add(
"e",
"""
console.choose.cmd Format export.formats
console.command export.file {choice} @focus
""",
["flowlist", "flowview"],
"Export this flow to file"
)
km.add("f", "console.command.set view_filter", ["flowlist"], "Set view filter")
km.add("F", "set console_focus_follow toggle", ["flowlist"], "Set focus follow")
km.add(
"ctrl l",
"console.command cut.clip ",
["flowlist", "flowview"],
"Send cuts to clipboard"
)
km.add("L", "console.command view.flows.load ", ["flowlist"], "Load flows from file")
km.add("m", "flow.mark.toggle @focus", ["flowlist"], "Toggle mark on this flow")
km.add("M", "view.properties.marked.toggle", ["flowlist"], "Toggle viewing marked flows")
km.add(
"n",
"console.command view.flows.create get https://example.com/",
["flowlist"],
"Create a new flow"
)
km.add(
"o",
"""
console.choose.cmd Order view.order.options
set view_order {choice}
""",
["flowlist"],
"Set flow list order"
)
km.add("r", "replay.client @focus", ["flowlist", "flowview"], "Replay this flow")
km.add("S", "console.command replay.server ", ["flowlist"], "Start server replay")
km.add("v", "set view_order_reversed toggle", ["flowlist"], "Reverse flow list order")
km.add("U", "flow.mark @all false", ["flowlist"], "Un-set all marks")
km.add("w", "console.command save.file @shown ", ["flowlist"], "Save listed flows to file")
km.add("V", "flow.revert @focus", ["flowlist", "flowview"], "Revert changes to this flow")
km.add("X", "flow.kill @focus", ["flowlist"], "Kill this flow")
km.add("z", "view.flows.remove @all", ["flowlist"], "Clear flow list")
km.add("Z", "view.flows.remove @hidden", ["flowlist"], "Purge all flows not showing")
km.add(
"|",
"console.command script.run @focus ",
["flowlist", "flowview"],
"Run a script on this flow"
)
km.add(
"e",
"""
console.choose.cmd Part console.edit.focus.options
console.edit.focus {choice}
""",
["flowview"],
"Edit a flow component"
)
km.add(
"f",
"view.settings.setval.toggle @focus fullcontents",
["flowview"],
"Toggle viewing full contents on this flow",
)
km.add("w", "console.command save.file @focus ", ["flowview"], "Save flow to file")
km.add("space", "view.focus.next", ["flowview"], "Go to next flow")
km.add(
"v",
"""
console.choose "View Part" request,response
console.bodyview @focus {choice}
""",
["flowview"],
"View flow body in an external viewer"
)
km.add("p", "view.focus.prev", ["flowview"], "Go to previous flow")
km.add(
"m",
"""
console.choose.cmd Mode console.flowview.mode.options
console.flowview.mode.set {choice}
""",
["flowview"],
"Set flow view mode"
)
km.add(
"z",
"""
console.choose "Part" request,response
flow.encode.toggle @focus {choice}
""",
["flowview"],
"Encode/decode flow body"
)
km.add("L", "console.command options.load ", ["options"], "Load from file")
km.add("S", "console.command options.save ", ["options"], "Save to file")
km.add("D", "options.reset", ["options"], "Reset all options")
km.add("d", "console.options.reset.focus", ["options"], "Reset this option")
km.add("a", "console.grideditor.add", ["grideditor"], "Add a row after cursor")
km.add("A", "console.grideditor.insert", ["grideditor"], "Insert a row before cursor")
km.add("d", "console.grideditor.delete", ["grideditor"], "Delete this row")
km.add(
"r",
"console.command console.grideditor.load",
["grideditor"],
"Read unescaped data into the current cell from file"
)
km.add(
"R",
"console.command console.grideditor.load_escaped",
["grideditor"],
"Load a Python-style escaped string into the current cell from file"
)
km.add("e", "console.grideditor.editor", ["grideditor"], "Edit in external editor")
km.add(
"w",
"console.command console.grideditor.save ",
["grideditor"],
"Save data to file as CSV"
)
km.add("z", "eventstore.clear", ["eventlog"], "Clear")
km.add(
"a",
"""
console.choose.cmd "Context" console.key.contexts
console.command console.key.bind {choice}
""",
["keybindings"],
"Add a key binding"
)
km.add(
"d",
"console.key.unbind.focus",
["keybindings"],
"Unbind the currently focused key binding"
)
km.add(
"x",
"console.key.execute.focus",
["keybindings"],
"Execute the currently focused key binding"
)
km.add(
"enter",
"console.key.edit.focus",
["keybindings"],
"Edit the currently focused key binding"
)
| 39.329949
| 126
| 0.572019
|
232b9be87de6124f571d4e0a6d346eae0524f5e3
| 12,989
|
py
|
Python
|
lib/saq/constants.py
|
ace-ecosystem/faqueue
|
a53b5577892ace4ca918f76ef9e676e85e30c93f
|
[
"Apache-2.0"
] | null | null | null |
lib/saq/constants.py
|
ace-ecosystem/faqueue
|
a53b5577892ace4ca918f76ef9e676e85e30c93f
|
[
"Apache-2.0"
] | null | null | null |
lib/saq/constants.py
|
ace-ecosystem/faqueue
|
a53b5577892ace4ca918f76ef9e676e85e30c93f
|
[
"Apache-2.0"
] | null | null | null |
# vim: sw=4:ts=4:et
__all__ = [
'INSTANCE_TYPE_PRODUCTION',
'INSTANCE_TYPE_QA',
'INSTANCE_TYPE_DEV',
'F_UUID',
'F_ID',
'F_TOOL',
'F_TOOL_INSTANCE',
'F_TYPE',
'F_DESCRIPTION',
'F_EVENT_TIME',
'F_DETAILS',
'F_CIDR',
'F_IPV4',
'F_IPV4_CONVERSATION',
'F_FQDN',
'F_HTTP_REQUEST',
'F_HOSTNAME',
'F_ASSET',
'F_USER',
'F_URL',
'F_PCAP',
'F_FILE',
'F_SUSPECT_FILE', # DEPRECATED
'F_FILE_PATH',
'F_FILE_NAME',
'F_FILE_LOCATION',
'F_EMAIL_ADDRESS',
'F_EMAIL_CONVERSATION',
'F_YARA',
'F_YARA_RULE',
'F_INDICATOR',
'F_MD5',
'F_SHA1',
'F_SHA256',
'F_SNORT_SIGNATURE',
'F_MESSAGE_ID',
'F_DISPOSITION',
'F_PROCESS_GUID',
'event_time_format',
'OBSERVABLE_DESCRIPTIONS',
'OBSERVABLE_NODE_COLORS',
'VALID_OBSERVABLE_TYPES',
'VALID_ALERT_DISPOSITIONS',
'IGNORE_ALERT_DISPOSITIONS',
'BENIGN_ALERT_DISPOSITIONS',
'MAL_ALERT_DISPOSITIONS',
'parse_ipv4_conversation',
'create_ipv4_conversation',
'parse_email_conversation',
'create_email_conversation',
'parse_file_location',
'create_file_location',
'DISPOSITION_FALSE_POSITIVE',
'DISPOSITION_IGNORE',
'DISPOSITION_UNKNOWN',
'DISPOSITION_REVIEWED',
'DISPOSITION_GRAYWARE',
'DISPOSITION_POLICY_VIOLATION',
'DISPOSITION_RECONNAISSANCE',
'DISPOSITION_WEAPONIZATION',
'DISPOSITION_DELIVERY',
'DISPOSITION_EXPLOITATION',
'DISPOSITION_INSTALLATION',
'DISPOSITION_COMMAND_AND_CONTROL',
'DISPOSITION_EXFIL',
'DISPOSITION_DAMAGE',
'DISPOSITION_CSS_MAPPING',
'DIRECTIVE_ARCHIVE',
'DIRECTIVE_COLLECT_FILE',
'DIRECTIVE_CRAWL',
'DIRECTIVE_FORCE_DOWNLOAD',
'DIRECTIVE_EXTRACT_URLS',
'DIRECTIVE_SANDBOX',
'DIRECTIVE_ORIGINAL_EMAIL',
'DIRECTIVE_NO_SCAN',
'VALID_DIRECTIVES',
'is_valid_directive',
'TAG_LEVEL_FALSE_POSITIVE',
'TAG_LEVEL_INFO',
'TAG_LEVEL_WARNING',
'TAG_LEVEL_ALERT',
'TAG_LEVEL_CRITICAL',
'EVENT_TAG_ADDED',
'EVENT_OBSERVABLE_ADDED',
'EVENT_DETAILS_UPDATED',
'EVENT_DIRECTIVE_ADDED',
'EVENT_ANALYSIS_ADDED',
'EVENT_DETECTION_ADDED',
'EVENT_ANALYSIS_MARKED_COMPLETED',
'EVENT_GLOBAL_TAG_ADDED',
'EVENT_GLOBAL_OBSERVABLE_ADDED',
'EVENT_GLOBAL_ANALYSIS_ADDED',
'VALID_EVENTS',
'ACTION_TAG_OBSERVABLE',
'ACTION_UPLOAD_TO_CRITS',
'ACTION_FILE_DOWNLOAD',
'ACTION_FILE_DOWNLOAD_AS_ZIP',
'ACTION_FILE_VIEW_AS_HEX',
'ACTION_FILE_VIEW_AS_TEXT',
'ACTION_FILE_UPLOAD_VT',
'ACTION_FILE_UPLOAD_VX',
'ACTION_FILE_VIEW_VT',
'ACTION_FILE_VIEW_VX',
'ACTION_COLLECT_FILE',
'METRIC_THREAD_COUNT',
]
#
# instance types
#
INSTANCE_TYPE_PRODUCTION = 'PRODUCTION'
INSTANCE_TYPE_QA = 'QA'
INSTANCE_TYPE_DEV = 'DEV'
#
# required fields for every alert
#
F_UUID = 'uuid'
F_ID = 'id'
F_TOOL = 'tool'
F_TOOL_INSTANCE = 'tool_instance'
F_TYPE = 'type'
F_DESCRIPTION = 'description'
F_EVENT_TIME = 'event_time'
F_DETAILS = 'details'
F_DISPOSITION = 'disposition'
#F_COMMENTS = 'comments'
#
# observable types
#
#
# WARNING
# XXX NOTE
# when you add a new observable type you ALSO need to edit lib/saq/analysis.py
# and add a matching entry to the _OBSERVABLE_TYPE_MAPPING dictionary
F_CIDR = 'cidr'
F_IPV4 = 'ipv4'
F_IPV4_CONVERSATION = 'ipv4_conversation'
F_FQDN = 'fqdn'
F_HOSTNAME = 'hostname'
F_HTTP_REQUEST = 'http_request'
F_ASSET = 'asset'
F_USER = 'user'
F_URL = 'url'
F_PCAP = 'pcap'
F_FILE = 'file'
F_SUSPECT_FILE = 'suspect_file' # DEPRECATED
F_FILE_PATH = 'file_path'
F_FILE_NAME = 'file_name'
F_FILE_LOCATION = 'file_location'
F_EMAIL_ADDRESS = 'email_address'
F_EMAIL_CONVERSATION = 'email_conversation'
F_YARA = 'yara'
F_YARA_RULE = 'yara_rule'
F_INDICATOR = 'indicator'
F_MD5 = 'md5'
F_SHA1 = 'sha1'
F_SHA256 = 'sha256'
F_SNORT_SIGNATURE = 'snort_sig'
F_MESSAGE_ID = 'message_id'
F_PROCESS_GUID = 'process_guid'
OBSERVABLE_DESCRIPTIONS = {
F_CIDR: 'IPv4 range in CIDR notation',
F_IPV4: 'IP address (version 4)',
F_IPV4_CONVERSATION: 'two F_IPV4 that were communicating formatted as aaa.bbb.ccc.ddd_aaa.bbb.ccc.ddd',
F_FQDN: 'fully qualified domain name',
F_HOSTNAME: 'host or workstation name',
F_HTTP_REQUEST: 'a single HTTP request',
F_ASSET: 'a F_IPV4 identified to be a managed asset',
F_USER: 'an NT user ID identified to have used a given asset in the given period of time',
F_URL: 'a URL',
F_PCAP: 'path to a pcap formatted file *** DEPRECATED (use F_FILE instead)',
F_FILE: 'path to an attached file',
F_SUSPECT_FILE: 'path to an attached file that might be malicious *** DEPRECATED (use directives instead)',
F_FILE_PATH: 'a file path',
F_FILE_NAME: 'a file name (no directory path)',
F_FILE_LOCATION: 'the location of file with format hostname@full_path',
F_EMAIL_ADDRESS: 'email address',
F_EMAIL_CONVERSATION: 'a conversation between a source email address (MAIL FROM) and a destination email address (RCPT TO)',
F_YARA: 'yara scan result *** DEPRECATED (use F_YARA_RULE instead)',
F_YARA_RULE: 'yara rule name',
F_INDICATOR: 'crits indicator object id',
F_MD5: 'MD5 hash',
F_SHA1: 'SHA1 hash',
F_SHA256: 'SHA256 hash',
F_SNORT_SIGNATURE: 'snort signature ID',
F_MESSAGE_ID: 'email Message-ID',
F_PROCESS_GUID: 'CarbonBlack global process identifier'
}
# this is used in vis.js in the GUI
# see http://www.rapidtables.com/web/color/RGB_Color.htm
OBSERVABLE_NODE_COLORS = {
F_CIDR: "#0000FF", # blue
F_IPV4 : "#0000FF", # blue
F_IPV4_CONVERSATION : "#0000FF", # blue
F_FQDN : "#D2691E", # chocolate
F_HOSTNAME : "#87CEFA", # light sky blue
F_HTTP_REQUEST : "#87CEFA", # light sky blue
F_ASSET : "#FDF5E6", # old lace
F_USER : "#DDA0DD", # plum
F_URL : "#F5F5DC", # beige
F_PCAP : "#B0C4DE", # light steel blue
F_FILE : "#9ACD32", # yellow green
F_SUSPECT_FILE : "#9ACD32", # yellow green
F_FILE_PATH : "#A9DC23", # ???
F_FILE_NAME : "#A9DC23", # ???
F_FILE_LOCATION : "#A9DC23", # ???
F_EMAIL_ADDRESS : "#00CED1", # dark turquoise
F_EMAIL_CONVERSATION : "#00CED1", # dark turquoise
F_YARA : '#B22222', # firebrick
F_YARA_RULE : '#B22222', # firebrick
F_INDICATOR : "#F5F5F5", # white smoke
F_MD5 : "#E6E6FA", # lavender
F_SHA1 : "#E6E6FA", # lavender
F_SHA256 : "#E6E6FA", # lavender
F_MESSAGE_ID : "#E6E6FA", # lavender
F_PROCESS_GUID : "#E6E6FA", # lavender
}
VALID_OBSERVABLE_TYPES = sorted([
F_CIDR,
F_IPV4,
F_IPV4_CONVERSATION,
F_FQDN,
F_HOSTNAME,
F_HTTP_REQUEST,
F_ASSET,
F_USER,
F_URL,
F_PCAP,
F_FILE,
F_SUSPECT_FILE,
F_FILE_PATH,
F_FILE_NAME,
F_FILE_LOCATION,
F_EMAIL_ADDRESS,
F_EMAIL_CONVERSATION,
F_YARA,
F_YARA_RULE,
F_INDICATOR,
F_MD5,
F_SHA1,
F_SHA256,
F_SNORT_SIGNATURE,
F_MESSAGE_ID,
F_PROCESS_GUID,
])
# utility functions to work with F_IPV4_CONVERSATION types
def parse_ipv4_conversation(f_ipv4_c):
return f_ipv4_c.split('_', 2)
def create_ipv4_conversation(src, dst):
return '{0}_{1}'.format(src, dst)
# utility functions to work with F_EMAIL_CONVERSATION types
def parse_email_conversation(f_email_c):
    result = f_email_c.split('|', 2)
    # did parsing fail?
    if len(result) != 2:
        return f_email_c, ''
return result
def create_email_conversation(mail_from, rcpt_to):
return '{0}|{1}'.format(mail_from, rcpt_to)
def parse_file_location(file_location):
return file_location.split('@', 1)
def create_file_location(hostname, full_path):
return '{}@{}'.format(hostname, full_path)
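# Illustrative round-trips for the helpers above (examples only, not part of the
# original module):
#   create_ipv4_conversation('1.2.3.4', '5.6.7.8')    -> '1.2.3.4_5.6.7.8'
#   parse_ipv4_conversation('1.2.3.4_5.6.7.8')        -> ['1.2.3.4', '5.6.7.8']
#   create_email_conversation('a@example.com', 'b@example.com') -> 'a@example.com|b@example.com'
#   create_file_location('host1', '/tmp/sample.exe')  -> 'host1@/tmp/sample.exe'
#   parse_file_location('host1@/tmp/sample.exe')      -> ['host1', '/tmp/sample.exe']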
# the expected format of the event_time of an alert
event_time_format = '%Y-%m-%d %H:%M:%S'
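# Illustrative parse/format with the stdlib (an assumption: `datetime` is
# imported by the calling code, it is not imported in this module):
#   datetime.strptime('2018-01-02 13:45:00', event_time_format)
#   datetime.now().strftime(event_time_format)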
# alert dispositions
DISPOSITION_FALSE_POSITIVE = 'FALSE_POSITIVE'
DISPOSITION_IGNORE = 'IGNORE'
DISPOSITION_UNKNOWN = 'UNKNOWN'
DISPOSITION_REVIEWED = 'REVIEWED'
DISPOSITION_GRAYWARE = 'GRAYWARE'
DISPOSITION_POLICY_VIOLATION = 'POLICY_VIOLATION'
DISPOSITION_RECONNAISSANCE = 'RECONNAISSANCE'
DISPOSITION_WEAPONIZATION = 'WEAPONIZATION'
DISPOSITION_DELIVERY = 'DELIVERY'
DISPOSITION_EXPLOITATION = 'EXPLOITATION'
DISPOSITION_INSTALLATION = 'INSTALLATION'
DISPOSITION_COMMAND_AND_CONTROL = 'COMMAND_AND_CONTROL'
DISPOSITION_EXFIL = 'EXFIL'
DISPOSITION_DAMAGE = 'DAMAGE'
# disposition to label mapping
# each disposition has a specific CSS class assigned to it
DISPOSITION_CSS_MAPPING = {
None: 'default', # when no disposition has been set yet
DISPOSITION_FALSE_POSITIVE: 'success',
DISPOSITION_IGNORE: 'default',
DISPOSITION_UNKNOWN: 'info',
DISPOSITION_REVIEWED: 'info',
DISPOSITION_GRAYWARE: 'info',
DISPOSITION_POLICY_VIOLATION: 'warning',
DISPOSITION_RECONNAISSANCE: 'warning',
DISPOSITION_WEAPONIZATION: 'danger',
DISPOSITION_DELIVERY: 'danger',
DISPOSITION_EXPLOITATION: 'danger',
DISPOSITION_INSTALLATION: 'danger',
DISPOSITION_COMMAND_AND_CONTROL: 'danger',
DISPOSITION_EXFIL: 'danger',
DISPOSITION_DAMAGE: 'danger',
}
VALID_ALERT_DISPOSITIONS = [
DISPOSITION_FALSE_POSITIVE,
DISPOSITION_IGNORE,
DISPOSITION_UNKNOWN,
DISPOSITION_REVIEWED,
DISPOSITION_GRAYWARE,
DISPOSITION_POLICY_VIOLATION,
DISPOSITION_RECONNAISSANCE,
DISPOSITION_WEAPONIZATION,
DISPOSITION_DELIVERY,
DISPOSITION_EXPLOITATION,
DISPOSITION_INSTALLATION,
DISPOSITION_COMMAND_AND_CONTROL,
DISPOSITION_EXFIL,
DISPOSITION_DAMAGE
]
IGNORE_ALERT_DISPOSITIONS = [
DISPOSITION_IGNORE,
DISPOSITION_UNKNOWN,
DISPOSITION_REVIEWED
]
BENIGN_ALERT_DISPOSITIONS = [
DISPOSITION_FALSE_POSITIVE,
DISPOSITION_GRAYWARE,
DISPOSITION_POLICY_VIOLATION,
DISPOSITION_RECONNAISSANCE
]
MAL_ALERT_DISPOSITIONS = [
DISPOSITION_WEAPONIZATION,
DISPOSITION_DELIVERY,
DISPOSITION_EXPLOITATION,
DISPOSITION_INSTALLATION,
DISPOSITION_COMMAND_AND_CONTROL,
DISPOSITION_EXFIL,
DISPOSITION_DAMAGE
]
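# Illustrative helper (an assumption, not part of the original module): map a
# disposition onto the coarse groupings defined above.
def _classify_disposition(disposition):
    """Return 'malicious', 'benign', 'ignore' or 'unknown' for a disposition."""
    if disposition in MAL_ALERT_DISPOSITIONS:
        return 'malicious'
    if disposition in BENIGN_ALERT_DISPOSITIONS:
        return 'benign'
    if disposition in IGNORE_ALERT_DISPOSITIONS:
        return 'ignore'
    return 'unknown'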
# --- DIRECTIVES
# archive the file
DIRECTIVE_ARCHIVE = 'archive'
# collect the file from the remote endpoint
DIRECTIVE_COLLECT_FILE = 'collect_file'
# crawl the url
DIRECTIVE_CRAWL = 'crawl'
# download the content of the URL no matter what
DIRECTIVE_FORCE_DOWNLOAD = 'force_download'
# extract URLs from the given file
DIRECTIVE_EXTRACT_URLS = 'extract_urls'
# run the observable through a sandbox
DIRECTIVE_SANDBOX = 'sandbox'
# treat this file as the original email file
DIRECTIVE_ORIGINAL_EMAIL = 'original_email'
# do not scan this file with yara
DIRECTIVE_NO_SCAN = 'no_scan'
VALID_DIRECTIVES = [
DIRECTIVE_COLLECT_FILE,
DIRECTIVE_CRAWL,
DIRECTIVE_EXTRACT_URLS,
DIRECTIVE_ORIGINAL_EMAIL,
DIRECTIVE_SANDBOX,
DIRECTIVE_FORCE_DOWNLOAD,
]
def is_valid_directive(directive):
return directive in VALID_DIRECTIVES
# --- TAGS
TAG_LEVEL_FALSE_POSITIVE = 'fp'
TAG_LEVEL_INFO = 'info'
TAG_LEVEL_WARNING = 'warning'
TAG_LEVEL_ALERT = 'alert'
TAG_LEVEL_CRITICAL = 'critical'
# --- EVENTS
# fired when we add a tag to something
EVENT_TAG_ADDED = 'tag_added'
# called when an Observable is added to the Analysis
EVENT_OBSERVABLE_ADDED = 'observable_added'
# called when the details of an Analysis have been updated
EVENT_DETAILS_UPDATED = 'details_updated'
# fired when we add a directive to an Observable
EVENT_DIRECTIVE_ADDED = 'directive_added'
# fired when we add an Analysis to an Observable
EVENT_ANALYSIS_ADDED = 'analysis_added'
# fired when we add a DetectionPoint to an Analysis or Observable
EVENT_DETECTION_ADDED = 'detection_added'
# fired when an analysis is marked as completed manually
EVENT_ANALYSIS_MARKED_COMPLETED = 'analysis_marked_completed'
# the following global events are intended to be used with the RootAnalysis object
# fired when we add a tag to any taggable object
EVENT_GLOBAL_TAG_ADDED = 'global_tag_added'
# fired when we add an observable to any analysis object
EVENT_GLOBAL_OBSERVABLE_ADDED = 'global_observable_added'
# fired when we add an analysis to any observable object
EVENT_GLOBAL_ANALYSIS_ADDED = 'global_analysis_added'
# list of all valid events
VALID_EVENTS = [
EVENT_ANALYSIS_MARKED_COMPLETED,
EVENT_TAG_ADDED,
EVENT_OBSERVABLE_ADDED,
EVENT_ANALYSIS_ADDED,
EVENT_DETECTION_ADDED,
EVENT_DIRECTIVE_ADDED,
EVENT_DETAILS_UPDATED,
EVENT_GLOBAL_TAG_ADDED,
EVENT_GLOBAL_OBSERVABLE_ADDED,
EVENT_GLOBAL_ANALYSIS_ADDED ]
# available actions for observables
ACTION_TAG_OBSERVABLE = 'tag_observable'
ACTION_UPLOAD_TO_CRITS = 'upload_crits'
ACTION_FILE_DOWNLOAD = 'file_download'
ACTION_FILE_DOWNLOAD_AS_ZIP = 'file_download_as_zip'
ACTION_FILE_VIEW_AS_HEX = 'file_view_as_hex'
ACTION_FILE_VIEW_AS_TEXT = 'file_view_as_text'
ACTION_FILE_UPLOAD_VT = 'file_upload_vt'
ACTION_FILE_UPLOAD_VX = 'file_upload_vx'
ACTION_FILE_VIEW_VT = 'file_view_vt'
ACTION_FILE_VIEW_VX = 'file_view_vx'
ACTION_COLLECT_FILE = 'collect_file'
# recorded metrics
METRIC_THREAD_COUNT = 'thread_count'
| 28.610132
| 128
| 0.732928
|
d63cd250429cfb78cd33fd33c12b653a66499eec
| 18,356
|
py
|
Python
|
src/alsc/ar-tnet/layer_no_split_lstm_after_cpt_first_and_last_hidden_reshape_concat_GraphSAGE.py
|
mainuliitkgp/AR-BERT
|
d6d5e8542a3a1c76edac49cec9e99ebda6395725
|
[
"MIT"
] | 4
|
2022-03-06T17:41:57.000Z
|
2022-03-22T08:42:58.000Z
|
src/alsc/ar-tnet/layer_no_split_lstm_after_cpt_first_and_last_hidden_reshape_concat_GraphSAGE.py
|
mainuliitkgp/AR-BERT
|
d6d5e8542a3a1c76edac49cec9e99ebda6395725
|
[
"MIT"
] | null | null | null |
src/alsc/ar-tnet/layer_no_split_lstm_after_cpt_first_and_last_hidden_reshape_concat_GraphSAGE.py
|
mainuliitkgp/AR-BERT
|
d6d5e8542a3a1c76edac49cec9e99ebda6395725
|
[
"MIT"
] | 1
|
2022-03-19T14:04:42.000Z
|
2022-03-19T14:04:42.000Z
|
# -*- coding: utf-8 -*-
import os
import theano
import theano.tensor as T
from theano.tensor.nnet import conv
from theano.tensor.signal import pool
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from nn_utils_sentence import *
import pickle
class LSTM:
def __init__(self, bs, n_in, n_out, name):
"""
:param bs: batch size
:param n_in: input size
:param n_out: hidden size
:param name: alias of layer
"""
self.bs = bs
self.n_in = n_in
self.n_out = n_out
self.name = name
# W shape: (n_in, 4*n_out)
# U shape: (n_out, 4*n_out)
        # b shape: (4*n_out,)
self.W, self.U, self.b = lstm_init(n_in=self.n_in, n_out=self.n_out, component=name)
self.h0 = theano.shared(value=zeros(size=(self.bs, n_out)), name='h0')
self.c0 = theano.shared(value=zeros(size=(self.bs, n_out)), name='c0')
self.params = [self.W, self.U, self.b]
def __str__(self):
return "%s: LSTM(%s, %s)" % (self.name, self.n_in, self.n_out)
__repr__ = __str__
def __call__(self, x):
"""
:param x: input tensor, shape: (bs, seq_len, n_in)
:return: generated hidden states
"""
h0 = T.zeros_like(self.h0)
c0 = T.zeros_like(self.c0)
rnn_input = x.dimshuffle(1, 0, 2)
[H, _], _ = theano.scan(fn=self.recurrence, sequences=rnn_input, outputs_info=[h0, c0])
return H.dimshuffle(1, 0, 2)
def recurrence(self, xt, htm1, ctm1):
"""
:param xt: x[t] \in (bs, n_in)
:param htm1: h[t-1] \in (bs, n_out)
:param ctm1: c[t-1] \in (bs, n_out)
:return:
"""
Wx = T.dot(xt, self.W)
Uh = T.dot(htm1, self.U)
Sum_item = Wx + Uh + self.b
it = T.nnet.hard_sigmoid(Sum_item[:, :self.n_out])
ft = T.nnet.hard_sigmoid(Sum_item[:, self.n_out:2*self.n_out])
ct_tilde = T.tanh(Sum_item[:, 2*self.n_out:3*self.n_out])
ot = T.nnet.hard_sigmoid(Sum_item[:, 3*self.n_out:])
ct = ft * ctm1 + it * ct_tilde
ht = ot * T.tanh(ct)
return ht, ct
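    # Usage sketch (an assumption, not taken from the original file): with
    # bs=32, n_in=300, n_out=50, calling the layer on a (32, seq_len, 300)
    # input returns hidden states of shape (32, seq_len, 50):
    #   ctx_lstm = LSTM(bs=32, n_in=300, n_out=50, name="CTX_LSTM")
    #   H = ctx_lstm(x)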
class Linear:
"""
fully connected layer
"""
def __init__(self, n_in, n_out, name, use_bias=True):
"""
:param n_in: input size
:param n_out: output size
:param name: layer name
:param use_bias: use bias or not
"""
self.n_in = n_in
self.n_out = n_out
self.name = name
self.use_bias = use_bias
# sample weight from uniform distribution [-INIT_RANGE, INIT_RANGE]
# initialize bias as zero vector
self.W = theano.shared(value=uniform(lb=-INIT_RANGE, ub=INIT_RANGE, size=(n_in, n_out)), name="%s_W" % name)
self.b = theano.shared(value=zeros(size=n_out), name="%s_b" % name)
self.params = [self.W]
if self.use_bias:
self.params.append(self.b)
def __str__(self):
return "%s: Linear(%s, %s)" % (self.name, self.n_in, self.n_out)
__repr__ = __str__
def __call__(self, x, bs=None):
"""
:param x: input tensor, shape: (bs, *, n_in)
:return:
"""
if bs is None:
output = T.dot(x, self.W)
else:
# current shape: (bs, n_in, n_out)
padded_W = T.tile(self.W, (bs, 1, 1))
# output shape: (bs, seq_len, n_out)
output = T.batched_dot(x, padded_W)
if self.use_bias:
output = output + self.b
return output
class CNN:
def __init__(self, bs, n_in, sent_len, kernel_size, n_filters, name):
"""
:param bs: batch size
:param n_in: input size
:param sent_len: sentence length
:param kernel_size: size of convolutional kernel
:param n_filters: number of filters
:param name: layer alias
"""
self.bs = bs
self.n_in = n_in
self.sent_len = sent_len
self.kernel_size = kernel_size
self.n_filters = n_filters
self.filter_shape = (self.n_filters, 1, self.kernel_size, self.n_in)
self.image_shape = (self.bs, 1, self.sent_len, self.n_in)
self.name = name
self.pool_size = (self.sent_len - self.kernel_size + 1, 1)
self.W = theano.shared(
value=uniform(lb=-INIT_RANGE, ub=INIT_RANGE, size=(self.n_filters, 1, self.kernel_size, self.n_in)),
name='%s_W' % self.name
)
self.b = theano.shared(value=zeros(size=self.n_filters), name='%s_b' % self.name)
self.params = [self.W, self.b]
def __str__(self):
return "%s: CNN(%s, %s, kernel_size=%s)" % (self.name, self.n_in, self.n_filters, self.kernel_size)
__repr__ = __str__
def __call__(self, x):
"""
:param x: input tensor, shape: (bs, seq_len, n_in)
:return: (1) features after pooling; (2) generated feature maps
"""
x = x.dimshuffle(0, 'x', 1, 2)
conv_out = T.nnet.conv2d(input=x, filters=self.W, filter_shape=self.filter_shape, input_shape=self.image_shape)
conv_out = T.nnet.relu(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
# shape: (bs, n_filter, sent_len - kernel_size + 1)
feature_maps = conv_out.flatten(3)
# max pooling
conv_out_pool = pool.pool_2d(input=conv_out, ws=self.pool_size, mode='max', ignore_border=True).flatten(2)
return conv_out_pool, feature_maps
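    # Shape sketch for __call__ (derived from the code above, shown for
    # illustration): x of shape (bs, sent_len, n_in) yields
    #   conv_out_pool: (bs, n_filters)
    #   feature_maps:  (bs, n_filters, sent_len - kernel_size + 1)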
class Dropout:
def __init__(self, p):
self.p = p
self.retain_prob = 1 - p
def __str__(self):
return "Dropout(%s)" % (1.0 - self.retain_prob)
__repr__ = __str__
def __call__(self, x):
"""
:param x: input tensor
:return:
"""
rng = np.random.RandomState(1344)
srng = RandomStreams(rng.randint(999999))
mask = srng.binomial(size=x.shape, n=1, p=self.retain_prob, dtype='float32')
        # non-inverted dropout: the mask is applied without rescaling here;
        # build_model() multiplies activations by (1 - dropout_rate) at test time
        return x * mask
class CPT_AS:
# Context-Preserving Transformation with Adaptive-Scaling
def __init__(self, bs, sent_len, n_in, n_out, name):
self.bs = bs
self.sent_len = sent_len
self.n_in = n_in
self.n_out = n_out
self.name = name
self.fc_gate = Linear(n_in=self.n_in, n_out=self.n_out, name="Gate")
self.fc_trans = Linear(n_in=2*self.n_in, n_out=self.n_out, name="Trans")
# for model with highway transformation
self.layers = [self.fc_gate, self.fc_trans]
# for model without highway transformation
#self.layers = [self.fc_trans]
self.params = []
for layer in self.layers:
self.params.extend(layer.params)
def __str__(self):
des_str = 'CPT(%s, %s)' % (self.n_in, self.n_out)
for layer in self.layers:
des_str += ', %s' % layer
return des_str
__repr__ = __str__
def __call__(self, x, xt):
"""
:param x: input sentence, shape: (bs, sent_len, n_in)
:param xt: input target, shape: (bs, target_len, n_in)
:return:
"""
trans_gate = T.nnet.hard_sigmoid(self.fc_gate(x, bs=self.bs))
# (max_len, bs, n_in)
x_ = x.dimshuffle(1, 0, 2)
# (bs, n_in, target_len)
xt_ = xt.dimshuffle(0, 2, 1)
x_new = []
for i in range(self.sent_len):
# (bs, n_in)
xi = x_[i]
            # attention over the target words, shape: (bs, target_len)
alphai = T.nnet.softmax(T.batched_dot(xt, xi.dimshuffle(0, 1, 'x')).flatten(2))
ti = T.batched_dot(xt_, alphai.dimshuffle(0, 1, 'x')).flatten(2)
xi_new = T.tanh(self.fc_trans(x=T.concatenate([xi, ti], axis=1)))
x_new.append(xi_new)
x_new = T.stack(x_new, axis=0).dimshuffle(1, 0, 2)
return trans_gate * x_new + (1.0 - trans_gate) * x
class CPT_LF:
# Context-Preserving Transformation with Lossless-Forwarding
def __init__(self, bs, sent_len, n_in, n_out, name):
self.bs = bs
self.sent_len = sent_len
self.n_in = n_in
self.n_out = n_out
self.name = name
self.fc_trans = Linear(n_in=2*self.n_in, n_out=self.n_out, name="Trans")
self.layers = [self.fc_trans]
self.params = []
for layer in self.layers:
self.params.extend(layer.params)
def __str__(self):
des_str = 'CPT(%s, %s)' % (self.n_in, self.n_out)
for layer in self.layers:
des_str += ', %s' % layer
return des_str
__repr__ = __str__
def __call__(self, x, xt):
"""
:param x: input sentence, shape: (bs, sent_len, n_in)
:param xt: input target, shape: (bs, target_len, n_in)
:return:
"""
# (max_len, bs, n_in)
x_ = x.dimshuffle(1, 0, 2)
# (bs, n_in, target_len)
xt_ = xt.dimshuffle(0, 2, 1)
x_new = []
for i in range(self.sent_len):
# (bs, n_in)
xi = x_[i]
            # attention over the target words, shape: (bs, target_len)
alphai = T.nnet.softmax(T.batched_dot(xt, xi.dimshuffle(0, 1, 'x')).flatten(2))
ti = T.batched_dot(xt_, alphai.dimshuffle(0, 1, 'x')).flatten(2)
xi_new = T.nnet.relu(self.fc_trans(x=T.concatenate([xi, ti], axis=1)))
x_new.append(xi_new)
x_new = T.stack(x_new, axis=0).dimshuffle(1, 0, 2)
return x_new + x
class TNet:
"""
Transformation Networks for Target-Oriented Sentiment Analysis
"""
def __init__(self, args):
self.ds_name = args.ds_name # dataset name
self.connection_type = args.connection_type # connection type AS/LF
self.dropout_rate = args.dropout_rate # dropout rate 0.3
self.lr = args.lr
self.model_name = 'TNet'
self.model_dir = './models_no_split_lstm_after_cpt_first_and_last_hidden_reshape_concat_GraphSAGE/'+self.ds_name+'/'
self.saved_model_name = self.connection_type+'_'+str(args.n_epoch)+'_'+str(self.lr)
if args.ds_name != '14semeval_rest':
seed = 14890
else:
seed = 11456
print("Use seed %s..." % seed)
np.random.seed(seed)
self.bs = args.bs
self.n_in = args.dim_w
self.graphsage_dim = 50
self.n_rnn_out = args.dim_h
self.n_rnn_out_after_cpt = 25
self.kernels = args.kernels
self.embedding_weights = args.embeddings
self.n_filters = args.n_filter
self.n_y = args.dim_y
self.sent_len = args.sent_len
self.target_len = args.target_len
#assert len(self.kernels) == 1
# model component for ASTN
self.Words = theano.shared(value=np.array(self.embedding_weights, 'float32'), name="embedding")
self.Dropout_ctx = Dropout(p=0.3)
self.Dropout_tgt = Dropout(p=0.3)
self.Dropout = Dropout(p=self.dropout_rate)
self.LSTM_ctx = LSTM(bs=self.bs, n_in=self.n_in, n_out=self.n_rnn_out, name="CTX_LSTM")
self.LSTM_tgt = LSTM(bs=self.bs, n_in=self.n_in, n_out=self.n_rnn_out, name="TGT_LSTM")
self.LSTM_cpt = LSTM(bs=self.bs, n_in=((2*self.n_rnn_out)+self.graphsage_dim), n_out=self.n_rnn_out_after_cpt, name="CPT_LSTM")
if self.connection_type == 'AS':
self.CPT = CPT_AS(bs=self.bs, sent_len=self.sent_len, n_in=2*self.n_rnn_out, n_out=2*self.n_rnn_out, name="CPT")
else:
self.CPT = CPT_LF(bs=self.bs, sent_len=self.sent_len, n_in=2 * self.n_rnn_out, n_out=2 * self.n_rnn_out, name="CPT")
# convolutional layers; actually, we just use one kernel size in our model
self.Conv_layers = []
for i in range(len(self.kernels)):
self.Conv_layers.append(CNN(bs=self.bs, n_in=((2*self.n_rnn_out)+self.graphsage_dim), sent_len=self.sent_len,
kernel_size=self.kernels[i], n_filters=self.n_filters, name='Conv2D_%s' % i))
self.FC = Linear(n_in=((self.n_filters*len(self.kernels)) + (4*self.n_rnn_out_after_cpt)), n_out=self.n_y, name="LAST_FC")
# parameters for full model
self.layers = [self.LSTM_ctx, self.LSTM_tgt, self.CPT, self.FC]
self.layers.extend(self.Conv_layers)
self.params = []
for layer in self.layers:
self.params.extend(layer.params)
print(self.params)
self.build_model()
self.make_function()
def __str__(self):
strs = []
for layer in self.layers:
strs.append(str(layer))
return ', '.join(strs)
__repr__ = __str__
def build_model(self):
"""
build the computational graph of ASTN
:return:
"""
self.x = T.imatrix('wids')
self.xt = T.imatrix('wids_target')
self.y = T.ivector('label')
self.pw = T.fmatrix("position_weight")
self.graphsage_embedding = T.ftensor3("graphsage_embedding")
self.is_train = T.iscalar("is_training")
input = self.Words[T.cast(self.x.flatten(), 'int32')].reshape((self.bs, self.sent_len, self.n_in))
input_target = self.Words[T.cast(self.xt.flatten(), 'int32')].reshape((self.bs, self.target_len, self.n_in))
input = T.switch(T.eq(self.is_train, np.int32(1)), self.Dropout_ctx(input), input * (1 - self.dropout_rate))
input_target = T.switch(T.eq(self.is_train, np.int32(1)), self.Dropout_tgt(input_target), input_target * (1 - self.dropout_rate))
# model component for TNet
rnn_input = input
rnn_input_reverse = reverse_tensor(tensor=rnn_input)
rnn_input_target = input_target
rnn_input_target_reverse = reverse_tensor(tensor=rnn_input_target)
H0_forward = self.LSTM_ctx(x=rnn_input)
Ht_forward = self.LSTM_tgt(x=rnn_input_target)
H0_backward = reverse_tensor(tensor=self.LSTM_ctx(x=rnn_input_reverse))
Ht_backward = reverse_tensor(tensor=self.LSTM_tgt(x=rnn_input_target_reverse))
H0 = T.concatenate([H0_forward, H0_backward], axis=2)
Ht = T.concatenate([Ht_forward, Ht_backward], axis=2)
H1 = self.CPT(H0, Ht)
if self.pw is not None:
H1 = H1 * self.pw.dimshuffle(0, 1, 'x')
H2 = self.CPT(H1, Ht)
if self.pw is not None:
H2 = H2 * self.pw.dimshuffle(0, 1, 'x')
"""
H3 = self.CPT(H2, Ht)
if self.pw is not None:
H3 = H3 * self.pw.dimshuffle(0, 1, 'x')
H4 = self.CPT(H3, Ht)
if self.pw is not None:
H4 = H4 * self.pw.dimshuffle(0, 1, 'x')
H5 = self.CPT(H4, Ht)
if self.pw is not None:
H5 = H5 * self.pw.dimshuffle(0, 1, 'x')
"""
# concat CPT o/p and GraphSAGE target embedding
H2_concat = T.concatenate([H2, self.graphsage_embedding], axis = 2)
# lstm layer after CPT
# batch_size*max_sequence_length*2*hidden_size_after_cpt
H2_after_lstm_forward = self.LSTM_cpt(x=H2_concat)
H2_after_lstm_backward = reverse_tensor(tensor=self.LSTM_cpt(x=reverse_tensor(tensor=H2_concat)))
H2_after_lstm = T.concatenate([H2_after_lstm_forward, H2_after_lstm_backward], axis=2)
#concat first bi-lstm hidden and last bi-lstm hidden
H2_after_lstm_shape = T.shape(H2_after_lstm)
H2_after_lstm_first_hidden = H2_after_lstm[:, 0:1, :]
H2_after_lstm_first_hidden_reshape = T.reshape(H2_after_lstm_first_hidden, [T.shape(H2_after_lstm_first_hidden)[0], T.shape(H2_after_lstm_first_hidden)[1]*T.shape(H2_after_lstm_first_hidden)[2]])
H2_after_lstm_last_hidden = H2_after_lstm[:, H2_after_lstm_shape[1]-1:H2_after_lstm_shape[1], :]
H2_after_lstm_last_hidden_reshape = T.reshape(H2_after_lstm_last_hidden, [T.shape(H2_after_lstm_last_hidden)[0], T.shape(H2_after_lstm_last_hidden)[1]*T.shape(H2_after_lstm_last_hidden)[2]])
H2_after_lstm_concat = T.concatenate([H2_after_lstm_first_hidden_reshape, H2_after_lstm_last_hidden_reshape], axis = -1)
feat_and_feat_maps = [conv(H2_concat) for conv in self.Conv_layers]
feat = [ele[0] for ele in feat_and_feat_maps]
self.feature_maps = T.concatenate([ele[1] for ele in feat_and_feat_maps], axis=2)
self.feature_maps_argmax = T.squeeze(T.argmax(self.feature_maps.dimshuffle(0, 2, 1), axis = 1))
feat = T.concatenate(feat, axis=1)
# concat cnn o/p and lstm after cpt o/p
concat_hidden = T.concatenate([H2_after_lstm_concat, feat], axis = 1)
        # apply the Dropout layer only at training time; at test time activations are rescaled by (1 - dropout_rate)
feat_dropout = T.switch(T.eq(self.is_train, np.int32(1)), self.Dropout(concat_hidden), concat_hidden * (1 - self.dropout_rate))
# shape: (bs, n_y)
self.p_y_x = T.nnet.softmax(self.FC(feat_dropout))
# self.p_y_x = self.FC(feat_dropout)
self.loss = T.nnet.categorical_crossentropy(coding_dist=self.p_y_x, true_dist=self.y).mean()
self.pred_y = T.argmax(self.p_y_x, axis=1)
def save_model(self):
if not os.path.exists(self.model_dir):
os.makedirs(self.model_dir)
params_list = [self.Words.get_value()]
for param in self.params:
params_list.append(param.get_value())
model_file = self.model_dir+self.model_name+'_'+self.saved_model_name
f = open(model_file, 'wb')
pickle.dump(params_list, f, protocol=pickle.HIGHEST_PROTOCOL)
return model_file
    def load_model(self, model_file):
        # model_file is the path returned by save_model()
        with open(model_file, 'rb') as f:
            params_list = pickle.load(f)
self.Words.set_value(params_list[0])
for param, param_value in zip(self.params, params_list[1:]):
param.set_value(param_value)
def make_function(self):
"""
compile theano function
:return:
"""
print("Use adam...")
self.updates = adam(cost=self.loss, params=self.params, lr=self.lr)
model_inputs = [self.x, self.xt, self.y, self.pw, self.graphsage_embedding, self.is_train]
model_outputs = [self.pred_y, self.y, self.loss, self.feature_maps_argmax]
self.train = theano.function(
inputs=model_inputs,
outputs=model_outputs,
updates=self.updates,
#mode='DebugMode'
)
self.test = theano.function(
inputs=model_inputs,
outputs=model_outputs
)
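# Minimal usage sketch (an assumption, not part of the original file): the
# constructor reads hyper-parameters from an argparse-style `args` object, and
# train()/test() take the inputs declared in build_model(): wids (bs, sent_len),
# wids_target (bs, target_len), label (bs,), position_weight (bs, sent_len),
# graphsage_embedding (bs, sent_len, 50) and an is_training flag.
#
#   import argparse
#   import numpy as np
#   args = argparse.Namespace(
#       ds_name='14semeval_laptop', connection_type='AS', dropout_rate=0.3,
#       lr=0.001, n_epoch=20, bs=64, dim_w=300, dim_h=50, kernels=[3],
#       n_filter=50, dim_y=3, sent_len=80, target_len=20,
#       embeddings=np.random.uniform(-0.25, 0.25, (5000, 300)))
#   model = TNet(args)
#   x = np.zeros((args.bs, args.sent_len), dtype='int32')
#   xt = np.zeros((args.bs, args.target_len), dtype='int32')
#   y = np.zeros((args.bs,), dtype='int32')
#   pw = np.ones((args.bs, args.sent_len), dtype='float32')
#   gse = np.zeros((args.bs, args.sent_len, 50), dtype='float32')
#   pred_y, gold_y, loss, fmap_argmax = model.train(x, xt, y, pw, gse, 1)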
| 38.644211
| 203
| 0.603563
|
39565d0a96f1c69760860e88f46e2883c3b20a9e
| 143
|
py
|
Python
|
platform_control/platforms/apps.py
|
dmontoya1/platform_control
|
4be797674dd14dc5fe0a7e8ab9e0ab1ce5c139e2
|
[
"MIT"
] | null | null | null |
platform_control/platforms/apps.py
|
dmontoya1/platform_control
|
4be797674dd14dc5fe0a7e8ab9e0ab1ce5c139e2
|
[
"MIT"
] | 2
|
2022-03-01T03:13:40.000Z
|
2022-03-02T03:09:57.000Z
|
platform_control/platforms/apps.py
|
dmontoya1/platform_control
|
4be797674dd14dc5fe0a7e8ab9e0ab1ce5c139e2
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class PlatformsConfig(AppConfig):
name = 'platform_control.platforms'
verbose_name = 'Plataformas'
| 20.428571
| 39
| 0.769231
|
6b8c3dd8e7e725e1082fcb46658641b64e308468
| 546
|
py
|
Python
|
final_project/main/decorators.py
|
W7SP/project_defense
|
550152bd82998333444ace099c47feffffb6c3ab
|
[
"MIT"
] | null | null | null |
final_project/main/decorators.py
|
W7SP/project_defense
|
550152bd82998333444ace099c47feffffb6c3ab
|
[
"MIT"
] | null | null | null |
final_project/main/decorators.py
|
W7SP/project_defense
|
550152bd82998333444ace099c47feffffb6c3ab
|
[
"MIT"
] | null | null | null |
from django.http import HttpResponse
# def allowed_groups(allowed_roles):
# def decorator(view_func):
# def wrapper(request, *args, **kwargs):
# group = None
#             if request.user.groups.exists():
# group = request.user.groups.all()[0].name
# if group in allowed_roles:
# return view_func(request, *args, **kwargs)
# else:
# return HttpResponse('You have to be a Trainer to enter this page')
# return wrapper
# return decorator
| 34.125
| 84
| 0.571429
|
8dec106f9d0da892472157dfdb2230138a52dbd5
| 723
|
py
|
Python
|
tests/snmp/conftest.py
|
slutati1536/sonic-mgmt
|
741c735b8bf2385e389b252a9c7a0816641cdf90
|
[
"Apache-2.0"
] | null | null | null |
tests/snmp/conftest.py
|
slutati1536/sonic-mgmt
|
741c735b8bf2385e389b252a9c7a0816641cdf90
|
[
"Apache-2.0"
] | null | null | null |
tests/snmp/conftest.py
|
slutati1536/sonic-mgmt
|
741c735b8bf2385e389b252a9c7a0816641cdf90
|
[
"Apache-2.0"
] | 1
|
2021-06-13T07:38:59.000Z
|
2021-06-13T07:38:59.000Z
|
import pytest
from tests.common.utilities import wait_until
@pytest.fixture(scope="module", autouse=True)
def setup_check_snmp_ready(duthosts):
for duthost in duthosts:
assert wait_until(300, 20, duthost.is_service_fully_started, "snmp"), "SNMP service is not running"
@pytest.fixture(scope="function", autouse=True)
def snmp_wait_for_counters(wait_for_counters):
return
def pytest_addoption(parser):
"""
Adds options to pytest that are used by the snmp tests.
"""
parser.addoption(
"--percentage",
action="store",
default=False,
help="Set percentage difference for snmp test",
type=int)
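# Illustrative fixture (an assumption, not part of the original file): expose the
# "--percentage" option registered above to tests through pytest's config API.
@pytest.fixture(scope="function")
def percentage_threshold(request):
    return request.config.getoption("--percentage")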
| 31.434783
| 107
| 0.644537
|
1494dc3c21d9e489b9c19148e263039d5435041a
| 116
|
py
|
Python
|
PythonBlog/configs/constant/userConst.py
|
Nohysiwe/FastAPIBlogBackend
|
2052c630a1a6e9bb9e6555f734c60020b107afc8
|
[
"MIT"
] | 1
|
2022-02-09T06:58:44.000Z
|
2022-02-09T06:58:44.000Z
|
PythonBlog/configs/constant/userConst.py
|
Nohysiwe/FastAPIBlogBackend
|
2052c630a1a6e9bb9e6555f734c60020b107afc8
|
[
"MIT"
] | null | null | null |
PythonBlog/configs/constant/userConst.py
|
Nohysiwe/FastAPIBlogBackend
|
2052c630a1a6e9bb9e6555f734c60020b107afc8
|
[
"MIT"
] | null | null | null |
"""
User constants
"""
class UserConst:
    BLOGGER_ID: int = 1  # blogger id
    SILENCE: int = 1  # muted (silenced) status
| 7.733333
| 34
| 0.439655
|
0541c92f1a1bfff5f2948431b3a7e55cb90861de
| 12,049
|
py
|
Python
|
ckan/tests/logic/auth/test_create.py
|
ziveo/ckan
|
f4cfe5e28789df58b2bf7e73e5989ffda00e5c5c
|
[
"Apache-2.0"
] | 1
|
2020-01-16T10:46:18.000Z
|
2020-01-16T10:46:18.000Z
|
ckan/tests/logic/auth/test_create.py
|
ziveo/ckan
|
f4cfe5e28789df58b2bf7e73e5989ffda00e5c5c
|
[
"Apache-2.0"
] | null | null | null |
ckan/tests/logic/auth/test_create.py
|
ziveo/ckan
|
f4cfe5e28789df58b2bf7e73e5989ffda00e5c5c
|
[
"Apache-2.0"
] | null | null | null |
# encoding: utf-8
"""Unit tests for ckan/logic/auth/create.py.
"""
import mock
import pytest
import ckan.logic.auth.create as auth_create
import ckan.model as core_model
import ckan.tests.factories as factories
import ckan.tests.helpers as helpers
logic = helpers.logic
def test_anon_cant_create():
response = auth_create.package_create({"user": None}, None)
assert not response["success"]
@pytest.mark.ckan_config("ckan.auth.anon_create_dataset", True)
def test_anon_can_create():
response = auth_create.package_create({"user": None}, None)
assert response["success"]
@pytest.mark.ckan_config("ckan.auth.anon_create_dataset", True)
@pytest.mark.ckan_config(
"ckan.auth.create_dataset_if_not_in_organization", False
)
def test_cdnio_overrides_acd():
response = auth_create.package_create({"user": None}, None)
assert not response["success"]
@pytest.mark.ckan_config("ckan.auth.anon_create_dataset", True)
@pytest.mark.ckan_config("ckan.auth.create_unowned_dataset", False)
def test_cud_overrides_acd():
response = auth_create.package_create({"user": None}, None)
assert not response["success"]
@pytest.mark.usefixtures("clean_db")
class TestRealUsersAuth(object):
def test_no_org_user_can_create(self):
user = factories.User()
response = auth_create.package_create({"user": user["name"]}, None)
assert response["success"]
@pytest.mark.ckan_config("ckan.auth.anon_create_dataset", True)
@pytest.mark.ckan_config(
"ckan.auth.create_dataset_if_not_in_organization", False
)
def test_no_org_user_cant_create_if_cdnio_false(self):
user = factories.User()
response = auth_create.package_create({"user": user["name"]}, None)
assert not response["success"]
@pytest.mark.ckan_config("ckan.auth.anon_create_dataset", True)
@pytest.mark.ckan_config("ckan.auth.create_unowned_dataset", False)
def test_no_org_user_cant_create_if_cud_false(self):
user = factories.User()
response = auth_create.package_create({"user": user["name"]}, None)
assert not response["success"]
def test_same_org_user_can_create(self):
user = factories.User()
org_users = [{"name": user["name"], "capacity": "editor"}]
org = factories.Organization(users=org_users)
dataset = {"name": "same-org-user-can-create", "owner_org": org["id"]}
context = {"user": user["name"], "model": core_model}
response = auth_create.package_create(context, dataset)
assert response["success"]
    def test_different_org_user_cant_create(self):
user = factories.User()
org_users = [{"name": user["name"], "capacity": "editor"}]
org1 = factories.Organization(users=org_users)
org2 = factories.Organization()
dataset = {
"name": "different-org-user-cant-create",
"owner_org": org2["id"],
}
context = {"user": user["name"], "model": core_model}
response = auth_create.package_create(context, dataset)
assert not response["success"]
@mock.patch("ckan.logic.auth.create.group_member_create")
def test_user_invite_delegates_correctly_to_group_member_create(self, gmc):
user = factories.User()
context = {"user": user["name"], "model": None, "auth_user_obj": user}
data_dict = {"group_id": 42}
gmc.return_value = {"success": False}
with pytest.raises(logic.NotAuthorized):
helpers.call_auth("user_invite", context=context, **data_dict)
gmc.return_value = {"success": True}
result = helpers.call_auth("user_invite", context=context, **data_dict)
assert result
@pytest.mark.ckan_config("ckan.plugins", "image_view")
@pytest.mark.usefixtures("with_plugins")
def test_authorized_if_user_has_permissions_on_dataset(self):
user = factories.User()
dataset = factories.Dataset(user=user)
resource = factories.Resource(user=user, package_id=dataset["id"])
resource_view = {
"resource_id": resource["id"],
"title": u"Resource View",
"view_type": u"image_view",
"image_url": "url",
}
context = {"user": user["name"], "model": core_model}
response = helpers.call_auth(
"resource_view_create", context=context, **resource_view
)
assert response
@pytest.mark.ckan_config("ckan.plugins", "image_view")
@pytest.mark.usefixtures("with_plugins")
def test_not_authorized_if_user_has_no_permissions_on_dataset(self):
org = factories.Organization()
user = factories.User()
member = {"username": user["name"], "role": "admin", "id": org["id"]}
helpers.call_action("organization_member_create", **member)
user_2 = factories.User()
dataset = factories.Dataset(owner_org=org["id"])
resource = factories.Resource(package_id=dataset["id"])
resource_view = {
"resource_id": resource["id"],
"title": u"Resource View",
"view_type": u"image_view",
"image_url": "url",
}
context = {"user": user_2["name"], "model": core_model}
with pytest.raises(logic.NotAuthorized):
helpers.call_auth(
"resource_view_create", context=context, **resource_view
)
@pytest.mark.ckan_config("ckan.plugins", "image_view")
@pytest.mark.usefixtures("with_plugins")
def test_not_authorized_if_not_logged_in_3(self):
resource_view = {
"title": u"Resource View",
"view_type": u"image_view",
"image_url": "url",
}
context = {"user": None, "model": core_model}
with pytest.raises(logic.NotAuthorized):
helpers.call_auth(
"resource_view_create", context=context, **resource_view
)
def test_authorized_if_user_has_permissions_on_dataset_3(self):
user = factories.User()
dataset = factories.Dataset(user=user)
resource = factories.Resource(user=user, package_id=dataset["id"])
context = {"user": user["name"], "model": core_model}
response = helpers.call_auth(
"resource_create_default_resource_views",
context=context,
resource=resource,
)
assert response
def test_not_authorized_if_user_has_no_permissions_on_dataset_2(self):
org = factories.Organization()
user = factories.User()
member = {"username": user["name"], "role": "admin", "id": org["id"]}
helpers.call_action("organization_member_create", **member)
user_2 = factories.User()
dataset = factories.Dataset(owner_org=org["id"])
resource = factories.Resource(package_id=dataset["id"])
context = {"user": user_2["name"], "model": core_model}
with pytest.raises(logic.NotAuthorized):
helpers.call_auth(
"resource_create_default_resource_views",
context=context,
resource=resource,
)
def test_not_authorized_if_not_logged_in_2(self):
dataset = factories.Dataset()
resource = factories.Resource(package_id=dataset["id"])
context = {"user": None, "model": core_model}
with pytest.raises(logic.NotAuthorized):
helpers.call_auth(
"resource_create_default_resource_views",
context=context,
resource=resource,
)
def test_authorized_if_user_has_permissions_on_dataset_2(self):
user = factories.User()
dataset = factories.Dataset(user=user)
context = {"user": user["name"], "model": core_model}
response = helpers.call_auth(
"package_create_default_resource_views",
context=context,
package=dataset,
)
assert response
def test_not_authorized_if_user_has_no_permissions_on_dataset_3(self):
org = factories.Organization()
user = factories.User()
member = {"username": user["name"], "role": "admin", "id": org["id"]}
helpers.call_action("organization_member_create", **member)
user_2 = factories.User()
dataset = factories.Dataset(owner_org=org["id"])
context = {"user": user_2["name"], "model": core_model}
with pytest.raises(logic.NotAuthorized):
helpers.call_auth(
"package_create_default_resource_views",
context=context,
package=dataset,
)
def test_not_authorized_if_not_logged_in(self):
dataset = factories.Dataset()
context = {"user": None, "model": core_model}
with pytest.raises(logic.NotAuthorized):
helpers.call_auth(
"package_create_default_resource_views",
context=context,
package=dataset,
)
def test_authorized_if_user_has_permissions_on_dataset_4(self):
user = factories.User()
dataset = factories.Dataset(user=user)
resource = {
"package_id": dataset["id"],
"title": "Resource",
"url": "http://test",
"format": "csv",
}
context = {"user": user["name"], "model": core_model}
response = helpers.call_auth(
"resource_create", context=context, **resource
)
assert response
def test_not_authorized_if_user_has_no_permissions_on_dataset_4(self):
org = factories.Organization()
user = factories.User()
member = {"username": user["name"], "role": "admin", "id": org["id"]}
helpers.call_action("organization_member_create", **member)
user_2 = factories.User()
dataset = factories.Dataset(user=user, owner_org=org["id"])
resource = {
"package_id": dataset["id"],
"title": "Resource",
"url": "http://test",
"format": "csv",
}
context = {"user": user_2["name"], "model": core_model}
with pytest.raises(logic.NotAuthorized):
helpers.call_auth("resource_create", context=context, **resource)
def test_not_authorized_if_not_logged_in_4(self):
resource = {"title": "Resource", "url": "http://test", "format": "csv"}
context = {"user": None, "model": core_model}
with pytest.raises(logic.NotAuthorized):
helpers.call_auth("resource_create", context=context, **resource)
def test_sysadmin_is_authorized(self):
sysadmin = factories.Sysadmin()
resource = {"title": "Resource", "url": "http://test", "format": "csv"}
context = {"user": sysadmin["name"], "model": core_model}
response = helpers.call_auth(
"resource_create", context=context, **resource
)
assert response
def test_raises_not_found_if_no_package_id_provided(self):
user = factories.User()
resource = {"title": "Resource", "url": "http://test", "format": "csv"}
context = {"user": user["name"], "model": core_model}
with pytest.raises(logic.NotFound):
helpers.call_auth("resource_create", context=context, **resource)
def test_raises_not_found_if_dataset_was_not_found(self):
user = factories.User()
resource = {
"package_id": "does_not_exist",
"title": "Resource",
"url": "http://test",
"format": "csv",
}
context = {"user": user["name"], "model": core_model}
with pytest.raises(logic.NotFound):
helpers.call_auth("resource_create", context=context, **resource)
def test_normal_user_cant_use_it(self):
normal_user = factories.User()
context = {"user": normal_user["name"], "model": core_model}
with pytest.raises(logic.NotAuthorized):
helpers.call_auth("activity_create", context=context)
| 33.010959
| 79
| 0.625031
|
edf9325ac1dfa8bfde3dcf2b2c48e46d7e7efedc
| 26,863
|
py
|
Python
|
nipyapi/registry/apis/buckets_api.py
|
esecules/nipyapi
|
e8a53b79a5e1a6b29446f43d2b23b6a3e60873f1
|
[
"Apache-2.0"
] | null | null | null |
nipyapi/registry/apis/buckets_api.py
|
esecules/nipyapi
|
e8a53b79a5e1a6b29446f43d2b23b6a3e60873f1
|
[
"Apache-2.0"
] | null | null | null |
nipyapi/registry/apis/buckets_api.py
|
esecules/nipyapi
|
e8a53b79a5e1a6b29446f43d2b23b6a3e60873f1
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Apache NiFi Registry REST API
The REST API provides an interface to a registry with operations for saving, versioning, reading NiFi flows and components.
OpenAPI spec version: 0.5.0
Contact: dev@nifi.apache.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class BucketsApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def create_bucket(self, body, **kwargs):
"""
Create bucket
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_bucket(body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param Bucket body: The bucket to create (required)
:return: Bucket
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_bucket_with_http_info(body, **kwargs)
else:
(data) = self.create_bucket_with_http_info(body, **kwargs)
return data
def create_bucket_with_http_info(self, body, **kwargs):
"""
Create bucket
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_bucket_with_http_info(body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param Bucket body: The bucket to create (required)
:return: Bucket
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_bucket" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_bucket`")
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth', 'Authorization']
return self.api_client.call_api('/buckets', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Bucket',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_bucket(self, bucket_id, **kwargs):
"""
Delete bucket
Deletes the bucket with the given id, along with all objects stored in the bucket
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_bucket(bucket_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str bucket_id: The bucket identifier (required)
:return: Bucket
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_bucket_with_http_info(bucket_id, **kwargs)
else:
(data) = self.delete_bucket_with_http_info(bucket_id, **kwargs)
return data
def delete_bucket_with_http_info(self, bucket_id, **kwargs):
"""
Delete bucket
Deletes the bucket with the given id, along with all objects stored in the bucket
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_bucket_with_http_info(bucket_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str bucket_id: The bucket identifier (required)
:return: Bucket
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['bucket_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_bucket" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'bucket_id' is set
if ('bucket_id' not in params) or (params['bucket_id'] is None):
raise ValueError("Missing the required parameter `bucket_id` when calling `delete_bucket`")
collection_formats = {}
path_params = {}
if 'bucket_id' in params:
path_params['bucketId'] = params['bucket_id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth', 'Authorization']
return self.api_client.call_api('/buckets/{bucketId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Bucket',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_available_bucket_fields(self, **kwargs):
"""
Get bucket fields
Retrieves bucket field names for searching or sorting on buckets.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_available_bucket_fields(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: Fields
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_available_bucket_fields_with_http_info(**kwargs)
else:
(data) = self.get_available_bucket_fields_with_http_info(**kwargs)
return data
def get_available_bucket_fields_with_http_info(self, **kwargs):
"""
Get bucket fields
Retrieves bucket field names for searching or sorting on buckets.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_available_bucket_fields_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: Fields
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_available_bucket_fields" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth', 'Authorization']
return self.api_client.call_api('/buckets/fields', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Fields',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_bucket(self, bucket_id, **kwargs):
"""
Get bucket
Gets the bucket with the given id.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_bucket(bucket_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str bucket_id: The bucket identifier (required)
:return: Bucket
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_bucket_with_http_info(bucket_id, **kwargs)
else:
(data) = self.get_bucket_with_http_info(bucket_id, **kwargs)
return data
def get_bucket_with_http_info(self, bucket_id, **kwargs):
"""
Get bucket
Gets the bucket with the given id.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_bucket_with_http_info(bucket_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str bucket_id: The bucket identifier (required)
:return: Bucket
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['bucket_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_bucket" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'bucket_id' is set
if ('bucket_id' not in params) or (params['bucket_id'] is None):
raise ValueError("Missing the required parameter `bucket_id` when calling `get_bucket`")
collection_formats = {}
path_params = {}
if 'bucket_id' in params:
path_params['bucketId'] = params['bucket_id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth', 'Authorization']
return self.api_client.call_api('/buckets/{bucketId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Bucket',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_buckets(self, **kwargs):
"""
Get all buckets
        The returned list will include only buckets for which the user is authorized. If the user is not authorized for any buckets, this returns an empty list.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_buckets(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: list[Bucket]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_buckets_with_http_info(**kwargs)
else:
(data) = self.get_buckets_with_http_info(**kwargs)
return data
def get_buckets_with_http_info(self, **kwargs):
"""
Get all buckets
        The returned list will include only buckets for which the user is authorized. If the user is not authorized for any buckets, this returns an empty list.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_buckets_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: list[Bucket]
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_buckets" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth', 'Authorization']
return self.api_client.call_api('/buckets', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Bucket]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_bucket(self, bucket_id, body, **kwargs):
"""
Update bucket
Updates the bucket with the given id.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_bucket(bucket_id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str bucket_id: The bucket identifier (required)
:param Bucket body: The updated bucket (required)
:return: Bucket
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_bucket_with_http_info(bucket_id, body, **kwargs)
else:
(data) = self.update_bucket_with_http_info(bucket_id, body, **kwargs)
return data
def update_bucket_with_http_info(self, bucket_id, body, **kwargs):
"""
Update bucket
Updates the bucket with the given id.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_bucket_with_http_info(bucket_id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str bucket_id: The bucket identifier (required)
:param Bucket body: The updated bucket (required)
:return: Bucket
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['bucket_id', 'body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_bucket" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'bucket_id' is set
if ('bucket_id' not in params) or (params['bucket_id'] is None):
raise ValueError("Missing the required parameter `bucket_id` when calling `update_bucket`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `update_bucket`")
collection_formats = {}
path_params = {}
if 'bucket_id' in params:
path_params['bucketId'] = params['bucket_id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth', 'Authorization']
return self.api_client.call_api('/buckets/{bucketId}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Bucket',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 40.153961
| 159
| 0.560101
|
1a626b1d80a6b8ac2b8cf5cb607b7740c35e1b27
| 395
|
py
|
Python
|
yossarian/wsgi.py
|
avinassh/yossarian
|
b485da0669d87ad29f57ba2a4a446131aaf820a6
|
[
"MIT"
] | null | null | null |
yossarian/wsgi.py
|
avinassh/yossarian
|
b485da0669d87ad29f57ba2a4a446131aaf820a6
|
[
"MIT"
] | null | null | null |
yossarian/wsgi.py
|
avinassh/yossarian
|
b485da0669d87ad29f57ba2a4a446131aaf820a6
|
[
"MIT"
] | null | null | null |
"""
WSGI config for yossarian project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "yossarian.settings")
application = get_wsgi_application()
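# --- Hedged usage note (not part of the original file) ---
# `application` is the module-level WSGI callable that a WSGI server imports.
# With gunicorn, for example, the project would typically be served as:
#
#     gunicorn yossarian.wsgi:application
#
# For a quick local check the standard library server works as well
# (assumes Django and the project's settings are importable):
#
# from wsgiref.simple_server import make_server
# from yossarian.wsgi import application
#
# with make_server('127.0.0.1', 8000, application) as httpd:
#     httpd.serve_forever()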
| 23.235294
| 78
| 0.787342
|
3e94d55aea2cad48dcf85e0a037a5db51e09e88a
| 30
|
py
|
Python
|
pcdet/version.py
|
Number1JT/PVRCNN-Waymo
|
60faa8ff6fe4532845719c144f0f9a252e85ddd2
|
[
"Apache-2.0"
] | null | null | null |
pcdet/version.py
|
Number1JT/PVRCNN-Waymo
|
60faa8ff6fe4532845719c144f0f9a252e85ddd2
|
[
"Apache-2.0"
] | null | null | null |
pcdet/version.py
|
Number1JT/PVRCNN-Waymo
|
60faa8ff6fe4532845719c144f0f9a252e85ddd2
|
[
"Apache-2.0"
] | null | null | null |
__version__ = "0.2.0+fec61a4"
| 15
| 29
| 0.7
|
c286721a4d4fa0e489a7b57b8d834973606d72f7
| 765
|
py
|
Python
|
cms/migrations/0002_templateactivity.py
|
siruku6/life_recorder
|
1cda45144a2cef832e8b1ffb44894810cc096164
|
[
"MIT"
] | 1
|
2021-05-08T10:04:20.000Z
|
2021-05-08T10:04:20.000Z
|
cms/migrations/0002_templateactivity.py
|
siruku6/life_recorder
|
1cda45144a2cef832e8b1ffb44894810cc096164
|
[
"MIT"
] | 34
|
2021-02-23T09:00:24.000Z
|
2021-11-28T02:02:15.000Z
|
cms/migrations/0002_templateactivity.py
|
siruku6/life_recorder
|
1cda45144a2cef832e8b1ffb44894810cc096164
|
[
"MIT"
] | 2
|
2021-05-03T10:16:48.000Z
|
2021-05-08T10:04:23.000Z
|
# Generated by Django 3.2.2 on 2021-05-09 06:27
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('cms', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='TemplateActivity',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, unique=True, verbose_name='内容')),
('activity_type', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='template_activities', to='cms.activitytype', verbose_name='種別')),
],
),
]
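# --- Hedged sketch (not part of the original file) ---
# The migration above creates a `TemplateActivity` model with a unique name
# and a nullable foreign key to `cms.ActivityType`. A models.py definition
# consistent with it would look roughly like the sketch below; the actual
# source may differ. The Japanese verbose_names translate to roughly
# "content" (内容) and "type/category" (種別).
#
# from django.db import models
#
# class TemplateActivity(models.Model):
#     name = models.CharField('内容', max_length=255, unique=True)
#     activity_type = models.ForeignKey(
#         'cms.ActivityType',
#         on_delete=models.CASCADE,
#         null=True,
#         related_name='template_activities',
#         verbose_name='種別',
#     )
#
# The migration would normally be applied with `python manage.py migrate cms`.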
| 33.26087
| 187
| 0.633987
|
9998ddd9d03f7458e2af8c8e35093c180f1d24ee
| 268
|
py
|
Python
|
app/models/User.py
|
thekiharani/FlaskPlayground
|
9287431f431cc00a150b0759448b9e4ad64ad139
|
[
"MIT"
] | null | null | null |
app/models/User.py
|
thekiharani/FlaskPlayground
|
9287431f431cc00a150b0759448b9e4ad64ad139
|
[
"MIT"
] | null | null | null |
app/models/User.py
|
thekiharani/FlaskPlayground
|
9287431f431cc00a150b0759448b9e4ad64ad139
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from app.app import db
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255))
location = db.Column(db.String(255))
date_created = db.Column(db.DateTime, default=datetime.now)
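# --- Hedged usage sketch (not part of the original file) ---
# Assuming `app.app` also exposes the Flask application object alongside
# `db` (the name `app` below is an assumption), typical usage looks like:
#
# from app.app import app, db
#
# with app.app_context():
#     db.create_all()
#     db.session.add(User(name='Jane Doe', location='Nairobi'))
#     db.session.commit()
#     print(User.query.count())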
| 26.8
| 63
| 0.716418
|
1a6dea0381ab8d43d2fdfaf641afbf77a20da64c
| 3,067
|
py
|
Python
|
byceps/blueprints/site/user/avatar/views.py
|
homeworkprod/byceps
|
cd0f9f37f7b5eb517106ec761acc7e0bdf75e22e
|
[
"BSD-3-Clause"
] | 23
|
2015-08-03T23:28:54.000Z
|
2018-12-12T20:11:45.000Z
|
byceps/blueprints/site/user/avatar/views.py
|
homeworkprod/byceps
|
cd0f9f37f7b5eb517106ec761acc7e0bdf75e22e
|
[
"BSD-3-Clause"
] | 1
|
2018-09-30T18:18:24.000Z
|
2018-09-30T18:18:24.000Z
|
byceps/blueprints/site/user/avatar/views.py
|
homeworkprod/byceps
|
cd0f9f37f7b5eb517106ec761acc7e0bdf75e22e
|
[
"BSD-3-Clause"
] | 9
|
2015-08-06T16:41:36.000Z
|
2018-09-25T11:17:31.000Z
|
"""
byceps.blueprints.site.user.avatar.views
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2014-2022 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from flask import abort, g, request
from flask_babel import gettext
from .....services.image import service as image_service
from .....services.user_avatar import service as avatar_service
from .....signals import user_avatar as user_avatar_signals
from .....util.framework.blueprint import create_blueprint
from .....util.framework.flash import flash_notice, flash_success
from .....util.image.models import ImageType
from .....util.framework.templating import templated
from .....util.views import redirect_to, respond_no_content
from .forms import UpdateForm
blueprint = create_blueprint('user_avatar', __name__)
ALLOWED_IMAGE_TYPES = frozenset(
[
ImageType.jpeg,
ImageType.png,
ImageType.webp,
]
)
@blueprint.get('/me/avatar/update')
@templated
def update_form(erroneous_form=None):
"""Show a form to update the current user's avatar image."""
_get_current_user_or_404()
form = erroneous_form if erroneous_form else UpdateForm()
image_type_names = image_service.get_image_type_names(ALLOWED_IMAGE_TYPES)
return {
'form': form,
'allowed_types': image_type_names,
'maximum_dimensions': avatar_service.MAXIMUM_DIMENSIONS,
}
@blueprint.post('/me/avatar')
def update():
"""Update the current user's avatar image."""
user = _get_current_user_or_404()
# Make `InputRequired` work on `FileField`.
form_fields = request.form.copy()
if request.files:
form_fields.update(request.files)
form = UpdateForm(form_fields)
if not form.validate():
return update_form(form)
image = request.files.get('image')
_update(user.id, image)
flash_success(gettext('Avatar image has been updated.'), icon='upload')
user_avatar_signals.avatar_updated.send(None, user_id=user.id)
return redirect_to('user_settings.view')
def _update(user_id, image):
if not image or not image.filename:
abort(400, 'No file to upload has been specified.')
try:
avatar_service.update_avatar_image(
user_id, image.stream, ALLOWED_IMAGE_TYPES
)
except avatar_service.ImageTypeProhibited as e:
abort(400, str(e))
except FileExistsError:
abort(409, 'File already exists, not overwriting.')
@blueprint.delete('/me/avatar')
@respond_no_content
def delete():
"""Remove the current user's avatar image."""
user = _get_current_user_or_404()
try:
avatar_service.remove_avatar_image(user.id)
except ValueError:
# No avatar selected.
# But that's ok, deletions should be idempotent.
flash_notice(gettext('No avatar image is set that could be removed.'))
else:
flash_success(gettext('Avatar image has been removed.'))
def _get_current_user_or_404():
user = g.user
if not user.authenticated:
abort(404)
return user
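# --- Hedged usage note (not part of the original file) ---
# The update flow above expects a multipart POST with an `image` file field.
# With Flask's test client the endpoint could be exercised roughly like this;
# the URL prefix the blueprint is mounted under, the authenticated-user
# fixture, and the exact redirect status are assumptions:
#
# from io import BytesIO
#
# response = client.post(
#     '/me/avatar',
#     data={'image': (BytesIO(png_bytes), 'avatar.png')},
#     content_type='multipart/form-data',
# )
# assert response.status_code in (302, 303)  # redirect_to('user_settings.view')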
| 26.669565
| 78
| 0.695468
|
2d00f4ac5da22f8c6bb46f76e5e845de9da3e5d0
| 29
|
py
|
Python
|
6-del.py
|
YpchenLove/py-example
|
88bef3ebe3e4045b09c9d2efcd6164c9cfb4c87f
|
[
"MIT"
] | null | null | null |
6-del.py
|
YpchenLove/py-example
|
88bef3ebe3e4045b09c9d2efcd6164c9cfb4c87f
|
[
"MIT"
] | null | null | null |
6-del.py
|
YpchenLove/py-example
|
88bef3ebe3e4045b09c9d2efcd6164c9cfb4c87f
|
[
"MIT"
] | null | null | null |
obj = 5
print(obj)
del obj
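# Not part of the original file: after `del obj` the name is unbound, so a
# further reference raises NameError.
try:
    print(obj)
except NameError as err:
    print('obj is no longer defined:', err)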
| 4.833333
| 10
| 0.62069
|
56ae84b9a24134a4d8397330f173a384b63e0802
| 3,180
|
py
|
Python
|
qa/rpc-tests/invalidateblock.py
|
L00119483/TechSquad.io
|
3ebafca95c5b125f3dbe52d9d4cde29c61a48975
|
[
"MIT"
] | 4
|
2018-06-16T20:08:19.000Z
|
2018-08-22T15:44:58.000Z
|
qa/rpc-tests/invalidateblock.py
|
L00119483/TechSquad.io
|
3ebafca95c5b125f3dbe52d9d4cde29c61a48975
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/invalidateblock.py
|
L00119483/TechSquad.io
|
3ebafca95c5b125f3dbe52d9d4cde29c61a48975
|
[
"MIT"
] | 7
|
2018-06-06T18:51:07.000Z
|
2018-09-08T15:17:04.000Z
|
#!/usr/bin/env python2
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test InvalidateBlock code
#
from test_framework import BitcoinTestFramework
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
class InvalidateTest(BitcoinTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 3)
def setup_network(self):
self.nodes = []
self.is_network_split = False
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-debug"]))
def run_test(self):
print "Make sure we repopulate setBlockIndexCandidates after InvalidateBlock:"
print "Mine 4 blocks on Node 0"
self.nodes[0].setgenerate(True, 4)
assert(self.nodes[0].getblockcount() == 4)
besthash = self.nodes[0].getbestblockhash()
print "Mine competing 6 blocks on Node 1"
self.nodes[1].setgenerate(True, 6)
assert(self.nodes[1].getblockcount() == 6)
print "Connect nodes to force a reorg"
connect_nodes_bi(self.nodes,0,1)
sync_blocks(self.nodes[0:2])
assert(self.nodes[0].getblockcount() == 6)
badhash = self.nodes[1].getblockhash(2)
print "Invalidate block 2 on node 0 and verify we reorg to node 0's original chain"
self.nodes[0].invalidateblock(badhash)
newheight = self.nodes[0].getblockcount()
newhash = self.nodes[0].getbestblockhash()
if (newheight != 4 or newhash != besthash):
raise AssertionError("Wrong tip for node0, hash %s, height %d"%(newhash,newheight))
print "\nMake sure we won't reorg to a lower work chain:"
connect_nodes_bi(self.nodes,1,2)
print "Sync node 2 to node 1 so both have 6 blocks"
sync_blocks(self.nodes[1:3])
assert(self.nodes[2].getblockcount() == 6)
print "Invalidate block 5 on node 1 so its tip is now at 4"
self.nodes[1].invalidateblock(self.nodes[1].getblockhash(5))
assert(self.nodes[1].getblockcount() == 4)
print "Invalidate block 3 on node 2, so its tip is now 2"
self.nodes[2].invalidateblock(self.nodes[2].getblockhash(3))
assert(self.nodes[2].getblockcount() == 2)
print "..and then mine a block"
self.nodes[2].setgenerate(True, 1)
print "Verify all nodes are at the right height"
time.sleep(5)
for i in xrange(3):
print i,self.nodes[i].getblockcount()
assert(self.nodes[2].getblockcount() == 3)
assert(self.nodes[0].getblockcount() == 4)
node1height = self.nodes[1].getblockcount()
if node1height < 4:
raise AssertionError("Node 1 reorged to a lower height: %d"%node1height)
if __name__ == '__main__':
InvalidateTest().main()
| 41.298701
| 95
| 0.650314
|
b1b38a000e56a0d7333742ec7d111f17b908efe9
| 2,482
|
py
|
Python
|
kerlas/brain.py
|
imandr/KeRLas
|
8c347cbfea982f470372fb7cf8943f4d6bda8a8a
|
[
"BSD-3-Clause"
] | null | null | null |
kerlas/brain.py
|
imandr/KeRLas
|
8c347cbfea982f470372fb7cf8943f4d6bda8a8a
|
[
"BSD-3-Clause"
] | null | null | null |
kerlas/brain.py
|
imandr/KeRLas
|
8c347cbfea982f470372fb7cf8943f4d6bda8a8a
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
np.set_printoptions(precision=4, suppress=True)
from keras.models import Model
from keras.layers import Dense, Activation, Flatten, Input, Lambda
from keras.optimizers import Adam
class TrainingContext(object):
def __init__(self, brain, in_traning):
self.Brain = brain
self.Flag = in_traning
self.SavedTraining = None
def __enter__(self):
self.SavedTraining = self.Brain.Training
self.Brain.setTraining(self.Flag)
def __exit__(self, *params):
self.Brain.setTraining(self.SavedTraining)
class Brain(object):
def __init__(self, rlmodel, run_policy, training_policies = None):
self.RLModel = rlmodel
self.RunPolicy = run_policy
self.Training = False
self.TrainingPolicies = training_policies or [run_policy]
self.TrainingPolicyIndex = 0
self.Policy = self.RunPolicy
def training(self, in_traning):
return TrainingContext(self, in_traning)
def setTraining(self, training):
self.Training = training
def episodeBegin(self):
if self.Training:
self.Policy = self.TrainingPolicies[self.TrainingPolicyIndex]
#print "Brain.episodeBegin: index, tau=", self.TrainingPolicyIndex, self.Policy.tau
else:
self.Policy = self.RunPolicy
def episodeEnd(self):
pass
def nextTrainingPolicy(self):
self.TrainingPolicyIndex = (self.TrainingPolicyIndex + 1) % len(self.TrainingPolicies)
#print "Brain.nextTrainingPolicy:", self.TrainingPolicyIndex
def q(self, observation):
return self.RLModel.predict_on_batch([np.array([observation])])[0]
def action(self, observation):
q = self.q(observation)
#print "Brain: q=", q
a = self.Policy(q)
return a, q
def training_model(self):
return self.RLModel.training_model()
def train_on_sample(self, sample):
return self.RLModel.train_on_sample(sample)
def training_data(self, sample):
return self.RLModel.training_data(sample)
def formatTrajectory(self, trajectory):
""" Format trajectory into a list of tuples before they are stored in memory.
Trajectory is list of (s,a,r,s,d) tuples
"""
return self.RLModel.formatTrajectory(trajectory)
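# --- Hedged usage sketch (not part of the original file) ---
# `Brain.training(...)` returns a context manager that temporarily flips the
# Training flag, which controls whether `episodeBegin()` selects a training
# policy or the run policy. A minimal sketch, assuming `rlmodel` exposes
# `predict_on_batch` and the policies are callables over q-values:
#
# brain = Brain(rlmodel, run_policy, training_policies=[explore_policy])
#
# with brain.training(True):
#     brain.episodeBegin()              # picks the current training policy
#     action, q = brain.action(observation)
#     brain.episodeEnd()
#     brain.nextTrainingPolicy()        # rotate to the next training policy
# # the previous Training flag is restored here; the run policy takes effect
# # again from the next episodeBegin()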
| 31.417722
| 95
| 0.633763
|
d99483f759b8272439d79d0fe8391f44024ba461
| 27,361
|
py
|
Python
|
insights/parsers/lvm.py
|
akshay196/insights-core
|
598865e6563119089c77152599300de38a77c72c
|
[
"Apache-2.0"
] | null | null | null |
insights/parsers/lvm.py
|
akshay196/insights-core
|
598865e6563119089c77152599300de38a77c72c
|
[
"Apache-2.0"
] | 10
|
2018-04-16T15:38:04.000Z
|
2018-05-15T18:43:02.000Z
|
insights/parsers/lvm.py
|
akshay196/insights-core
|
598865e6563119089c77152599300de38a77c72c
|
[
"Apache-2.0"
] | null | null | null |
"""
Logical Volume Management configuration and status
==================================================
Parsers for lvm data based on output of various commands and file contents.
This module contains the classes that parse the output of the commands `lvs`,
`pvs`, and `vgs`, and the contents of the file `/etc/lvm/lvm.conf`.
Pvs - command ``/sbin/pvs --nameprefixes --noheadings --separator='|' -a -o pv_all``
------------------------------------------------------------------------------------
PvsHeadings - command ``pvs -a -v -o +pv_mda_free,pv_mda_size,pv_mda_count,pv_mda_used_count,pe_count --config="global{locking_type=0}"``
-----------------------------------------------------------------------------------------------------------------------------------------
Vgs - command ``/sbin/vgs --nameprefixes --noheadings --separator='|' -a -o vg_all``
------------------------------------------------------------------------------------
VgsHeadings - command ``vgs -v -o +vg_mda_count,vg_mda_free,vg_mda_size,vg_mda_used_count,vg_tags --config="global{locking_type=0}"``
-------------------------------------------------------------------------------------------------------------------------------------
Lvs - command ``/sbin/lvs --nameprefixes --noheadings --separator='|' -a -o lv_all``
------------------------------------------------------------------------------------
LvsHeadings - command ``/sbin/lvs -a -o +lv_tags,devices --config="global{locking_type=0}"``
--------------------------------------------------------------------------------------------
LvmConf - file ``/etc/lvm/lvm.conf``
------------------------------------
"""
from __future__ import print_function
import json
from ..util import parse_keypair_lines
from .. import add_filter
from .. import Parser, parser, get_active_lines, LegacyItemAccess, CommandParser
from . import parse_fixed_table
from insights.parsers import ParseException
from insights.specs import Specs
def map_keys(pvs, keys):
"""
Add human-readable key names to each dictionary while keeping the existing key names.
"""
rs = []
for pv in pvs:
r = dict((v, None) for k, v in keys.items())
for k, v in pv.items():
if k in keys:
r[keys[k]] = v
r[k] = v
rs.append(r)
return rs
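# --- Illustration (not part of the original file) ---
# map_keys keeps the original LVM2_* keys and adds the human-readable
# aliases, with unmapped aliases defaulting to None. For example:
#
#     map_keys([{"LVM2_PV_NAME": "/dev/sda2"}],
#              {"LVM2_PV_NAME": "PV", "LVM2_VG_NAME": "VG"})
#
# returns [{"PV": "/dev/sda2", "LVM2_PV_NAME": "/dev/sda2", "VG": None}].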
def find_warnings(content):
"""Look for lines containing warning/error/info strings instead of data."""
keywords = [k.lower() for k in [
"WARNING", "Couldn't find device", "Configuration setting",
"read failed", "Was device resized?", "Invalid argument",
"leaked on lvs", "Checksum error", "is exported", "failed.",
"Invalid metadata", "response failed", "unknown device",
"duplicate", "not found", "Missing device", "Internal error",
"Input/output error", "Incorrect metadata", "Cannot process volume",
"No such file or directory", "Logging initialised", "changed sizes",
"vsnprintf failed", "write failed", "correction failed",
"Failed to write", "Couldn't read", "marked missing",
"Attempt to close device", "Ignoring supplied major",
"not match metadata"
]]
for l in content:
lower = l.strip().lower()
# Avoid hitting keywords inside the data
if not lower.startswith('lvm2'):
if any(k in lower for k in keywords):
yield l
class LvmHeadings(CommandParser):
"""Base class for parsing LVM data in table format."""
def __iter__(self):
return iter(self.data)
def __len__(self):
return len(self.data)
def __getitem__(self, item):
return self.data[item]
class Lvm(CommandParser):
"""Base class for parsing LVM data in key=value format."""
def parse_content(self, content):
if "Unrecognised field:" in content[-1]:
raise ParseException(content[-1])
d = {"warnings": set(find_warnings(content))}
content = [l for l in content if l not in d["warnings"]]
d["content"] = list(map_keys(parse_keypair_lines(content), self.KEYS))
self.data = d if d else None
def __iter__(self):
return iter(self.data["content"])
def __len__(self):
return len(self.data["content"])
def __getitem__(self, key):
if isinstance(key, int):
return self.data['content'][key]
for i in self.data['content']:
if i[self.PRIMARY_KEY] == key:
return i
return None
@property
def locking_disabled(self):
"""bool: Returns True if any lines in input data indicate locking is disabled."""
return any(l for l in self.data["warnings"] if "Locking disabled" in l)
@property
def warnings(self):
"""list: Returns a list of lines from input data containing
warning/error/info strings.
"""
return self.data["warnings"]
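# --- Hedged usage note (not part of the original file) ---
# Subclasses of Lvm support positional and primary-key lookup; for an Lvs
# instance (names below are illustrative):
#
#     lvs[0]             # first record as a dict
#     lvs["lv_root"]     # record whose PRIMARY_KEY field ("LV") equals "lv_root"
#     lvs.warnings       # warning/error lines separated from the data
#
# Lvs and Pvs additionally provide .vg(name) to filter records by volume group.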
@parser(Specs.pvs_noheadings)
class Pvs(Lvm):
"""
Parse the output of the `/sbin/pvs --nameprefixes --noheadings --separator='|' -a -o pv_all` command.
Parse each line in the output of pvs based on the pvs datasource in
`insights/specs/`. Output sample of pvs::
LVM2_PV_FMT=''|LVM2_PV_UUID=''|LVM2_DEV_SIZE='500.00m'|...
LVM2_PV_FMT='lvm2'|LVM2_PV_UUID='JvSULk-ileq-JbuS-GGgg-jkif-thuW-zvFBEl'|LVM2_DEV_SIZE='476.45g'|...
Returns a list like::
[
{
'LVM2_PV_FMT' : '',
'LVM2_PV_UUID' : '',
'LVM2_DEV_SIZE' : '500.00m',
...
},
{
'LVM2_PV_FMT' : 'lvm2',
'LVM2_PV_UUID' : 'JvSULk-ileq-JbuS-GGgg-jkif-thuW-zvFBEl',
'LVM2_DEV_SIZE' : '476.45g',
...
}
]
Since it is possible to have two PVs with the same name (for example *unknown device*) a
unique key for each PV is created by joining the `PV_NAME` and `PV_UUID` fields with a `+`
character. This key is added to the dictionary as the `PV_KEY` field.
"""
KEYS = {
"LVM2_PV_MDA_USED_COUNT": "#PMdaUse",
"LVM2_PV_UUID": "PV_UUID",
"LVM2_DEV_SIZE": "DevSize",
"LVM2_PV_FMT": "Fmt",
"LVM2_PV_MDA_FREE": "PMdaFree",
"LVM2_PV_EXPORTED": "Exported",
"LVM2_PV_SIZE": "PSize",
"LVM2_PV_BA_START": "BA_start",
"LVM2_PV_PE_ALLOC_COUNT": "Alloc",
"LVM2_VG_NAME": "VG",
"LVM2_PV_TAGS": "PV_Tags",
"LVM2_PV_PE_COUNT": "PE",
"LVM2_PV_BA_SIZE": "BA_size",
"LVM2_PV_ATTR": "Attr",
"LVM2_PE_START": "1st_PE",
"LVM2_PV_USED": "Used",
"LVM2_PV_NAME": "PV",
"LVM2_PV_MDA_COUNT": "#PMda",
"LVM2_PV_FREE": "PFree",
"LVM2_PV_ALLOCATABLE": "Allocatable",
"LVM2_PV_MDA_SIZE": "PMdaSize",
"LVM2_PV_MISSING": "Missing"
}
PRIMARY_KEY = "PV"
def parse_content(self, content):
super(Pvs, self).parse_content(content)
for pv in self.data['content']:
pv_name = pv.get('PV') if pv.get('PV') is not None else 'no_name'
pv_uuid = pv.get('PV_UUID') if pv.get('PV_UUID') is not None else 'no_uuid'
pv.update({'PV_KEY': '+'.join([pv_name, pv_uuid])})
def vg(self, name):
"""Return all physical volumes assigned to the given volume group"""
return [i for i in self.data["content"] if i["VG"] == name]
@parser(Specs.pvs_noheadings_all)
class PvsAll(Pvs):
"""
Parse the output of the `/sbin/pvs --nameprefixes --noheadings --separator='|' -a -o pv_all,vg_name --config='global{locking_type=0} devices{filter=["a|.*|"]}'` command.
Uses the ``Pvs`` class defined in this module.
"""
pass
@parser(Specs.pvs)
class PvsHeadings(LvmHeadings):
"""
Parses the output of the
`pvs -a -v -o +pv_mda_free,pv_mda_size,pv_mda_count,pv_mda_used_count,pe_count --config="global{locking_type=0}"`
command.
Since it is possible to have two PVs with the same name (for example *unknown device*) a
unique key for each PV is created by joining the `PV_NAME` and `PV_UUID` fields with a `+`
character. This key is added to the resulting dictionary as the `PV_KEY` field.
Sample input::
WARNING: Locking disabled. Be careful! This could corrupt your metadata.
Scanning all devices to update lvmetad.
No PV label found on /dev/loop0.
No PV label found on /dev/loop1.
No PV label found on /dev/sda1.
No PV label found on /dev/fedora/root.
No PV label found on /dev/sda2.
No PV label found on /dev/fedora/swap.
No PV label found on /dev/fedora/home.
No PV label found on /dev/mapper/docker-253:1-2361272-pool.
Wiping internal VG cache
Wiping cache of LVM-capable devices
PV VG Fmt Attr PSize PFree DevSize PV UUID PMdaFree PMdaSize #PMda #PMdaUse PE
/dev/fedora/home --- 0 0 418.75g 0 0 0 0 0
/dev/fedora/root --- 0 0 50.00g 0 0 0 0 0
/dev/fedora/swap --- 0 0 7.69g 0 0 0 0 0
/dev/loop0 --- 0 0 100.00g 0 0 0 0 0
/dev/loop1 --- 0 0 2.00g 0 0 0 0 0
/dev/mapper/docker-253:1-2361272-pool --- 0 0 100.00g 0 0 0 0 0
/dev/mapper/luks-7430952e-7101-4716-9b46-786ce4684f8d fedora lvm2 a-- 476.45g 4.00m 476.45g FPLCRf-d918-LVL7-6e3d-n3ED-aiZv-EesuzY 0 1020.00k 1 1 121970
/dev/sda1 --- 0 0 500.00m 0 0 0 0 0
/dev/sda2 --- 0 0 476.45g 0 0 0 0 0
Reloading config files
Wiping internal VG cache
Attributes:
data (list): List of dicts, each dict containing one row of the table
with column headings as keys.
Examples:
>>> pvs_data = shared[PvsHeadings]
>>> pvs_data[0]
{'PV': '/dev/fedora/home', 'VG': '', 'Fmt': '', 'Attr': '---', 'PSize': '0',
'PFree': '0', 'DevSize': '418.75g', 'PV_UUID': '', 'PMdaFree': '0',
'PMdaSize': '0', '#PMda': '0', '#PMdaUse': '0', 'PE': '0', 'PV_KEY': '/dev/fedora/home+no_uuid'}
>>> pvs_data[0]['PV']
'/dev/fedora/home'
"""
PRIMARY_KEY = Pvs.PRIMARY_KEY
def parse_content(self, content):
self.data = parse_fixed_table(content,
heading_ignore=['PV '],
header_substitute=[('PV UUID', 'PV_UUID'),
('1st PE', '1st_PE')],
trailing_ignore=['Reloading', 'Wiping'])
self.data = map_keys(self.data, Pvs.KEYS)
for pv in self.data:
pv_name = pv.get('PV') if pv.get('PV') is not None else 'no_name'
pv_uuid = pv.get('PV_UUID') if pv.get('PV_UUID') is not None else 'no_uuid'
pv.update({'PV_KEY': '+'.join([pv_name, pv_uuid])})
def vg(self, name):
"""Return all physical volumes assigned to the given volume group"""
return [i for i in self.data if i["VG"] == name]
@parser(Specs.vgs_noheadings)
class Vgs(Lvm):
"""
Parse the output of the `/sbin/vgs --nameprefixes --noheadings --separator='|' -a -o vg_all` command.
Parse each line in the output of vgs based on the vgs datasource in
`insights/specs/`. Output sample of vgs::
LVM2_VG_FMT='lvm2'|LVM2_VG_UUID='YCpusB-LEly-THGL-YXhC-t3q6-mUQV-wyFZrx'|LVM2_VG_NAME='rhel'|LVM2_VG_ATTR='wz--n-'|...
LVM2_VG_FMT='lvm2'|LVM2_VG_UUID='123456-LEly-THGL-YXhC-t3q6-mUQV-123456'|LVM2_VG_NAME='fedora'|LVM2_VG_ATTR='wz--n-'|...
Returns a list like::
[
{
'LVM2_VG_FMT' : 'lvm2',
'LVM2_VG_UUID' : 'YCpusB-LEly-THGL-YXhC-t3q6-mUQV-wyFZrx',
'LVM2_VG_NAME' : 'rhel',
...
},
{
'LVM2_VG_FMT' : 'lvm2',
'LVM2_VG_UUID' : '123456-LEly-THGL-YXhC-t3q6-mUQV-123456',
'LVM2_VG_NAME' : 'fedora',
...
}
]
"""
KEYS = {
"LVM2_VG_EXTENDABLE": "Extendable",
"LVM2_VG_EXTENT_SIZE": "Ext",
"LVM2_VG_MDA_COUNT": "#VMda",
"LVM2_VG_PROFILE": "VProfile",
"LVM2_VG_ALLOCATION_POLICY": "AllocPol",
"LVM2_MAX_PV": "MaxPV",
"LVM2_VG_UUID": "VG_UUID",
"LVM2_VG_ATTR": "Attr",
"LVM2_VG_SYSID": "SYS_ID",
"LVM2_VG_MDA_USED_COUNT": "#VMdaUse",
"LVM2_VG_MDA_FREE": "VMdaFree",
"LVM2_VG_LOCKTYPE": "Lock_Type",
"LVM2_VG_TAGS": "VG_Tags",
"LVM2_VG_FMT": "Fmt",
"LVM2_PV_COUNT": "#PV",
"LVM2_VG_EXTENT_COUNT": "#Ext",
"LVM2_VG_MDA_SIZE": "VMdaSize",
"LVM2_SNAP_COUNT": "#SN",
"LVM2_VG_EXPORTED": "Exported",
"LVM2_LV_COUNT": "#LV",
"LVM2_VG_NAME": "VG",
"LVM2_VG_MDA_COPIES": "#VMdaCps",
"LVM2_VG_SYSTEMID": "System_ID",
"LVM2_VG_FREE": "VFree",
"LVM2_VG_SEQNO": "Seq",
"LVM2_VG_FREE_COUNT": "Free",
"LVM2_VG_PARTIAL": "Partial",
"LVM2_VG_PERMISSIONS": "VPerms",
"LVM2_VG_CLUSTERED": "Clustered",
"LVM2_VG_LOCKARGS": "Lock Args",
"LVM2_MAX_LV": "MaxLV",
"LVM2_VG_SIZE": "VSize"
}
PRIMARY_KEY = "VG"
@parser(Specs.vgs_noheadings_all)
class VgsAll(Vgs):
"""
Parse the output of the `/sbin/vgs --nameprefixes --noheadings --separator='|' -a -o vg_all --config='global{locking_type=0} devices{filter=[\"a|.*|\"]}'` command.
Uses the ``Vgs`` class defined in this module.
"""
pass
@parser(Specs.vgs)
class VgsHeadings(LvmHeadings):
"""
Parses output of the
`vgs -v -o +vg_mda_count,vg_mda_free,vg_mda_size,vg_mda_used_count,vg_tags --config="global{locking_type=0}"` command.
Sample input::
WARNING: Locking disabled. Be careful! This could corrupt your metadata.
Using volume group(s) on command line.
VG Attr Ext #PV #LV #SN VSize VFree VG UUID VProfile #VMda VMdaFree VMdaSize #VMdaUse VG Tags
DATA_OTM_VG wz--n- 4.00m 6 1 0 2.05t 1020.00m xK6HXk-xl2O-cqW5-2izb-LI9M-4fV0-dAzfcc 6 507.00k 1020.00k 6
ITM_VG wz--n- 4.00m 1 1 0 16.00g 4.00m nws5dd-INe6-1db6-9U1N-F0G3-S1z2-5XTdO4 1 508.00k 1020.00k 1
ORABIN_OTM_VG wz--n- 4.00m 2 3 0 190.00g 0 hfJwg8-hset-YgUY-X6NJ-gkWE-EunZ-KuCXGP 2 507.50k 1020.00k 2
REDO_OTM_VG wz--n- 4.00m 1 3 0 50.00g 0 Q2YtGy-CWKU-sEYj-mqHk-rbdP-Hzup-wi8jsf 1 507.50k 1020.00k 1
SWAP_OTM_VG wz--n- 4.00m 1 1 0 24.00g 8.00g hAerzZ-U8QU-ICkc-xxCj-N2Ny-rWzq-pmTpWJ 1 508.00k 1020.00k 1
rootvg wz--n- 4.00m 1 6 0 19.51g 1.95g p4tLLb-ikeo-Ankk-2xJ6-iHYf-D4E6-KFCFvr 1 506.50k 1020.00k 1
Reloading config files
Wiping internal VG cache
Attributes:
data (list): List of dicts, each dict containing one row of the table
with column headings as keys.
Examples:
>>> vgs_info = shared[VgsHeadings]
>>> vgs_info.data[2]['VG']
'ORABIN_OTM_VG'
>>> vgs_info.data[2]['VSize']
'190.00g'
"""
PRIMARY_KEY = Vgs.PRIMARY_KEY
def parse_content(self, content):
self.data = parse_fixed_table(content,
heading_ignore=['VG '],
header_substitute=[('VG Tags', 'VG_Tags'),
('VG UUID', 'VG_UUID')],
trailing_ignore=['Reloading', 'Wiping'])
self.data = map_keys(self.data, Vgs.KEYS)
@parser(Specs.lvs_noheadings)
class Lvs(Lvm):
"""
Parse the output of the `/sbin/lvs --nameprefixes --noheadings --separator='|' -a -o lv_all` command.
Parse each line in the output of lvs based on the lvs datasource in
`insights/specs/`:
Output sample of lvs::
LVM2_LV_UUID='KX68JI-8ISN-YedH-ZYDf-yZbK-zkqE-3aVo6m'|LVM2_LV_NAME='docker-poolmeta'|LVM2_LV_FULL_NAME='rhel/docker-poolmeta'|...
LVM2_LV_UUID='123456-8ISN-YedH-ZYDf-yZbK-zkqE-123456'|LVM2_LV_NAME='rhel_root'|LVM2_LV_FULL_NAME='rhel/rhel_root'|LVM2_LV_PATH='/dev/rhel/docker-poolmeta'|...
Return a list, as shown below::
[
{
'LVM2_LV_UUID' : 'KX68JI-8ISN-YedH-ZYDf-yZbK-zkqE-3aVo6m',
'LVM2_LV_NAME' : 'docker-poolmeta',
'LVM2_LV_FULL_NAME' : 'rhel/docker-poolmeta',
...
},
{
'LVM2_LV_UUID' : '123456-8ISN-YedH-ZYDf-yZbK-zkqE-123456',
'LVM2_LV_NAME' : 'rhel_root',
'LVM2_LV_FULL_NAME' : 'rhel/rhel_root',
...
}
]
"""
KEYS = {
"LVM2_POOL_LV_UUID": "Pool_UUID",
"LVM2_LV_PARENT": "Parent",
"LVM2_LV_SKIP_ACTIVATION": "SkipAct",
"LVM2_LV_HEALTH_STATUS": "Health",
"LVM2_LV_KERNEL_MINOR": "KMin",
"LVM2_RAID_WRITE_BEHIND": "WBehind",
"LVM2_LV_ANCESTORS": "Ancestors",
"LVM2_LV_TIME": "Time",
"LVM2_METADATA_PERCENT": "Meta%",
"LVM2_LV_DM_PATH": "DMPath",
"LVM2_LV_INACTIVE_TABLE": "InactiveTable",
"LVM2_LV_UUID": "LV_UUID",
"LVM2_LV_MODULES": "Modules",
"LVM2_DEVICES": "Devices",
"LVM2_LV_ACTIVE_REMOTELY": "ActRemote",
"LVM2_LV_ACTIVE_LOCALLY": "ActLocal",
"LVM2_LV_TAGS": "LV_Tags",
"LVM2_LV_IMAGE_SYNCED": "ImgSynced",
"LVM2_CACHE_WRITE_MISSES": "CacheWriteMisses",
"LVM2_LV_PERMISSIONS": "LPerms",
"LVM2_CACHE_TOTAL_BLOCKS": "CacheTotalBlocks",
"LVM2_LV_ACTIVE_EXCLUSIVELY": "ActExcl",
"LVM2_LV_PATH": "Path",
"LVM2_LV_FULL_NAME": "LV",
"LVM2_LV_READ_AHEAD": "Rahead",
"LVM2_SNAP_PERCENT": "Snap%",
"LVM2_CACHE_WRITE_HITS": "CacheWriteHits",
"LVM2_MIRROR_LOG": "Log",
"LVM2_CACHE_DIRTY_BLOCKS": "CacheDirtyBlocks",
"LVM2_SEG_COUNT": "#Seg",
"LVM2_MOVE_PV": "Move",
"LVM2_LV_FIXED_MINOR": "FixMin",
"LVM2_SYNC_PERCENT": "Cpy%Sync",
"LVM2_LV_METADATA_SIZE": "MSize",
"LVM2_LV_ATTR": "Attr",
"LVM2_RAID_MAX_RECOVERY_RATE": "MaxSync",
"LVM2_LV_DEVICE_OPEN": "DevOpen",
"LVM2_LV_ALLOCATION_POLICY": "AllocPol",
"LVM2_LV_MERGING": "Merging",
"LVM2_LV_SIZE": "LSize",
"LVM2_LV_MAJOR": "Maj",
"LVM2_ORIGIN_SIZE": "OSize",
"LVM2_RAID_SYNC_ACTION": "SyncAction",
"LVM2_MIRROR_LOG_UUID": "Log_UUID",
"LVM2_POOL_LV": "Pool",
"LVM2_COPY_PERCENT": "Cpy%Sync",
"LVM2_CONVERT_LV": "Convert",
"LVM2_LV_KERNEL_READ_AHEAD": "KRahead",
"LVM2_LV_NAME": "LV",
"LVM2_LV_HOST": "Host",
"LVM2_CACHE_USED_BLOCKS": "CacheUsedBlocks",
"LVM2_RAID_MIN_RECOVERY_RATE": "MinSync",
"LVM2_ORIGIN_UUID": "Origin_UUID",
"LVM2_LV_SUSPENDED": "Suspended",
"LVM2_RAID_MISMATCH_COUNT": "Mismatches",
"LVM2_LV_KERNEL_MAJOR": "KMaj",
"LVM2_LV_LAYOUT": "Layout",
"LVM2_LV_PROFILE": "LProfile",
"LVM2_LV_LIVE_TABLE": "LiveTable",
"LVM2_LV_INITIAL_IMAGE_SYNC": "InitImgSync",
"LVM2_LV_CONVERTING": "Converting",
"LVM2_CACHE_READ_HITS": "CacheReadHits",
"LVM2_VG_NAME": "VG",
"LVM2_METADATA_LV": "Meta",
"LVM2_LV_ACTIVE": "Active",
"LVM2_CONVERT_LV_UUID": "Convert",
"LVM2_LV_MERGE_FAILED": "MergeFailed",
"LVM2_METADATA_LV_UUID": "Meta_UUID",
"LVM2_LV_ROLE": "Role",
"LVM2_LV_WHEN_FULL": "WhenFull",
"LVM2_LV_ALLOCATION_LOCKED": "AllocLock",
"LVM2_DATA_PERCENT": "Data%",
"LVM2_LV_LOCKARGS": "Lock_Args",
"LVM2_LV_SNAPSHOT_INVALID": "SnapInvalid",
"LVM2_MOVE_PV_UUID": "Move_UUID",
"LVM2_LV_MINOR": "Min",
"LVM2_ORIGIN": "Origin",
"LVM2_DATA_LV_UUID": "Data_UUID",
"LVM2_DATA_LV": "Data",
"LVM2_CACHE_READ_MISSES": "CacheReadMisses",
"LVM2_LV_DESCENDANTS": "Descendants",
"LVM2_REGION_SIZE": "Region",
"LVM2_SEGTYPE": "SegType",
"LVM2_SEG_MONITOR": "Monitor"
}
PRIMARY_KEY = "LV"
def parse_content(self, content):
super(Lvs, self).parse_content(content)
for item in self.data["content"]:
lv_name = item["LV"]
if "/" in lv_name:
# Reduce full name to just the name
# This is due to the lvs command having *two identical keys*
# with different values
item["LV"] = lv_name.split("/")[1]
def vg(self, name):
"""Return all logical volumes in the given volume group"""
return [i for i in self.data["content"] if i["VG"] == name]
@parser(Specs.lvs_noheadings_all)
class LvsAll(Lvs):
"""
Parse the output of the `/sbin/lvs --nameprefixes --noheadings --separator='|' -a -o lv_name,lv_size,lv_attr,mirror_log,vg_name,devices,region_size,data_percent,metadata_percent --config='global{locking_type=0} devices{filter=["a|.*|"]}'` command.
Uses the ``Lvs`` class defined in this module.
"""
pass
@parser(Specs.lvs)
class LvsHeadings(LvmHeadings):
"""
Process output of the command `/sbin/lvs -a -o +lv_tags,devices --config="global{locking_type=0}"`.
Sample Input data::
WARNING: Locking disabled. Be careful! This could corrupt your metadata.
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert LV Tags Devices
lv_app vg_root -wi-ao---- 71.63g /dev/sda2(7136)
lv_home vg_root -wi-ao---- 2.00g /dev/sda2(2272)
lv_opt vg_root -wi-ao---- 5.00g /dev/sda2(2784)
lv_root vg_root -wi-ao---- 5.00g /dev/sda2(0)
lv_tmp vg_root -wi-ao---- 1.00g /dev/sda2(4064)
lv_usr vg_root -wi-ao---- 5.00g /dev/sda2(4320)
lv_usrlocal vg_root -wi-ao---- 1.00g /dev/sda2(5600)
lv_var vg_root -wi-ao---- 5.00g /dev/sda2(5856)
swap vg_root -wi-ao---- 3.88g /dev/sda2(1280)
Attributes:
data (list): List of dicts, each dict containing one row of the table
with column headings as keys.
Examples:
>>> lvs_info = shared[LvsHeadings]
>>> lvs_info.data[0]
{'LV': 'lv_app', 'VG': 'vg_root', 'Attr': '-wi-ao----', 'LSize': '71.63g',
'Pool': '', 'Origin': '', 'Data%': '', 'Meta%': '', 'Move': '', 'Log': '',
'Cpy%Sync': '', 'Convert': '', 'LV_Tags': '', 'Devices': '/dev/sda2(7136)'}
>>> lvs_info.data[1]['LSize']
'2.00g'
"""
PRIMARY_KEY = Lvs.PRIMARY_KEY
def parse_content(self, content):
self.data = parse_fixed_table(content,
heading_ignore=['LV '],
header_substitute=[('LV Tags', 'LV_Tags')])
self.data = map_keys(self.data, Lvs.KEYS)
KEYS_WITH_SPACES = []
for cls in (Lvs, Pvs, Vgs):
KEYS_WITH_SPACES.extend([k for k in cls.KEYS.values() if " " in k])
LVM_CONF_FILTERS = [
"locking_type", # CMIRROR_PERF_ISSUE
"filter", # LVM_CONF_REMOVE_BOOTDEV HA_LVM_RELOCATE_ISSUE LVM_FILTER_ISSUE
"volume_list" # HA_LVM_RELOCATE_ISSUE
]
add_filter(Specs.lvm_conf, LVM_CONF_FILTERS)
@parser(Specs.lvm_conf)
class LvmConf(LegacyItemAccess, Parser):
"""
Parses contents of the `/etc/lvm/lvm.conf` file.
Sample Input::
locking_type = 1
#locking_type = 2
# volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ]
volume_list = [ "vg2", "vg3/lvol3", "@tag2", "@*" ]
# filter = [ "a|loop|", "r|/dev/hdc|", "a|/dev/ide|", "r|.*|" ]
filter = [ "r/sda[0-9]*$/", "a/sd.*/" ]
filter = [ "a/sda[0-9]*$/", "r/sd.*/" ]
shell {
history_size = 100
}
Examples:
>>> lvm_conf_data = shared[LvmConf]
>>> lvm_conf_data.data
{"locking_type": 1, "volume_list": ["vg1", "vg2/lvol1", "@tag1", "@*"],
"filter": ["a/sda[0-9]*$/", "r/sd.*/"], "history_size": 100}
>>> lvm_conf_data.get("locking_type")
1
"""
def parse_content(self, content):
"""
Returns a dict:
locking_type : 1
filter : ['a/sda[0-9]*$/', 'r/sd.*/']
volume_list : ['vg2', 'vg3/lvol3', '@tag2', '@*']
"""
lvm_conf_dict = {}
for line in get_active_lines(content):
if "=" in line:
(key, value) = [item.strip() for item in line.split('=', 1)]
try:
lvm_conf_dict[key] = json.loads(value)
except Exception:
lvm_conf_dict[key] = value
self.data = lvm_conf_dict
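# --- Hedged illustration (not part of the original file) ---
# Each value is passed through json.loads when possible, so numbers and
# lists come back as Python objects, and anything that is not valid JSON is
# kept as the raw string. For instance, active lines such as
#
#     locking_type = 1
#     filter = [ "a/sda[0-9]*$/", "r/sd.*/" ]
#
# would yield roughly {"locking_type": 1, "filter": ["a/sda[0-9]*$/", "r/sd.*/"]}.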
if __name__ == "__main__":
# This is a quick script to generate the key mappings in each subclass.
# Run each lvm command with --separator="|", --nameprefixes and *not* --noheadings
import sys
from collections import OrderedDict
content = sys.stdin.read().splitlines()
headers = [h.strip().replace(" ", "_") for h in content[0].split("|")]
nameprefixes = [v.split("=")[0].strip() for v in content[1].replace("0 ", "0").split("|")]
pairs = zip(nameprefixes, headers)
print(json.dumps(OrderedDict(sorted(pairs))))
| 41.582067
| 251
| 0.526552
|