Dataset schema (one record per source file):

| column | type | values / range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 value |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 (nullable) | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string (nullable) | length 24 |
| max_stars_repo_stars_event_max_datetime | string (nullable) | length 24 |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 (nullable) | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string (nullable) | length 24 |
| max_issues_repo_issues_event_max_datetime | string (nullable) | length 24 |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 (nullable) | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string (nullable) | length 24 |
| max_forks_repo_forks_event_max_datetime | string (nullable) | length 24 |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
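A minimal sketch of filtering records with this schema, assuming the table has been exported to a Parquet file; the filename code_samples.parquet is hypothetical, and any columnar export with the same columns would work the same way.

import pandas as pd

df = pd.read_parquet("code_samples.parquet")  # hypothetical export of the rows below
python_files = df[df["ext"] == "py"]          # every sample below has ext == "py"
for _, row in python_files.head(3).iterrows():
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"], "bytes")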
5c041cdbf282f4acdfb6599f8aefee37b9cdbc18
| 2,405
|
py
|
Python
|
allennlp-tutorial/venue/venue_reader.py
|
thomakl/BDSC
|
3c4f10126ff7dabe631839c47fc2362bf16e8588
|
[
"MIT"
] | null | null | null |
allennlp-tutorial/venue/venue_reader.py
|
thomakl/BDSC
|
3c4f10126ff7dabe631839c47fc2362bf16e8588
|
[
"MIT"
] | null | null | null |
allennlp-tutorial/venue/venue_reader.py
|
thomakl/BDSC
|
3c4f10126ff7dabe631839c47fc2362bf16e8588
|
[
"MIT"
] | null | null | null |
import json
from typing import Iterator, List, Dict, Optional
from allennlp.data import Instance
from allennlp.data.fields import TextField, SequenceLabelField, LabelField
from allennlp.data.dataset_readers import DatasetReader
from allennlp.common.file_utils import cached_path
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token, Tokenizer, WordTokenizer
from allennlp.data.vocabulary import Vocabulary
@DatasetReader.register("venue_reader")
class PublicationDatasetReader(DatasetReader):
"""
    DatasetReader for publication and venue dataset
"""
def __init__(self,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
lazy: bool = False) -> None:
super().__init__(lazy)
self._tokenizer = tokenizer or WordTokenizer()
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
def _read(self, file_path: str) -> Iterator[Instance]:
"""
Read publication and venue dataset in JSON format
Data is in the following format:
{"title": ..., "paperAbstract": ..., "venue": ...}
"""
with open(cached_path(file_path), "r") as data_file:
for line in data_file:
line = line.strip("\n")
if not line:
continue
paper_json = json.loads(line)
title = paper_json['title']
abstract = paper_json['paperAbstract']
venue = paper_json['venue']
yield self.text_to_instance(title, abstract, venue)
def text_to_instance(self,
title: str,
abstract: str,
                         venue: Optional[str] = None) -> Instance:
"""
Turn title, abstract, and venue to instance
"""
tokenized_title = self._tokenizer.tokenize(title)
tokenized_abstract = self._tokenizer.tokenize(abstract)
title_field = TextField(tokenized_title, self._token_indexers)
abstract_field = TextField(tokenized_abstract, self._token_indexers)
fields = {'title': title_field,
'abstract': abstract_field}
if venue is not None:
fields['label'] = LabelField(venue)
return Instance(fields)
| 40.762712
| 83
| 0.622453
|
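A minimal usage sketch for the reader above, assuming an AllenNLP 0.x environment (the versions that still ship WordTokenizer) and that the file is importable as venue_reader; the title, abstract, and venue strings are made up.

from venue_reader import PublicationDatasetReader  # hypothetical module name

reader = PublicationDatasetReader()
# Build a single Instance directly, mirroring one JSON line of the dataset
# ({"title": ..., "paperAbstract": ..., "venue": ...}).
instance = reader.text_to_instance(
    title="A Study of Dataset Readers",
    abstract="We describe a reader for publication metadata.",
    venue="ACL")
print(instance.fields.keys())  # 'title', 'abstract', 'label'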
584b28c3f07f7802d95e59900f5148b8f733f722
| 2,932
|
py
|
Python
|
upvote/gae/modules/upvote_app/api/handlers/lookups.py
|
cclauss/upvote
|
9d526fec72690cde1575dbd32dacf68cbbab81d1
|
[
"Apache-2.0"
] | null | null | null |
upvote/gae/modules/upvote_app/api/handlers/lookups.py
|
cclauss/upvote
|
9d526fec72690cde1575dbd32dacf68cbbab81d1
|
[
"Apache-2.0"
] | null | null | null |
upvote/gae/modules/upvote_app/api/handlers/lookups.py
|
cclauss/upvote
|
9d526fec72690cde1575dbd32dacf68cbbab81d1
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handlers for looking up extra info on binaries."""
import httplib
import logging
from upvote.gae.datastore.models import base as base_db
from upvote.gae.datastore.models import santa as santa_db
from upvote.gae.modules.upvote_app.api import monitoring
from upvote.gae.modules.upvote_app.api.handlers import base
from upvote.gae.shared.binary_health import binary_health
from upvote.gae.shared.binary_health.virustotal import constants as vt_constants
from upvote.gae.shared.common import handlers
class Lookup(base.BaseHandler):
"""Handler for looking up binary info."""
@property
def RequestCounter(self):
return monitoring.event_requests
@handlers.RecordRequest
def check_virus_total(self, blockable_id):
blockable = base_db.Blockable.get_by_id(blockable_id)
if not blockable:
self.abort(httplib.NOT_FOUND, explanation='Blockable not found')
if isinstance(blockable, santa_db.SantaBundle):
keys = santa_db.SantaBundle.GetBundleBinaryKeys(blockable.key)
all_results = {
'response_code': vt_constants.RESPONSE_CODE.UNKNOWN,
'positives': 0,
'reports': {}}
for key in keys:
try:
results = binary_health.VirusTotalLookup(key.id())
except binary_health.LookupFailure as e: # pylint: disable=broad-except
# NOTE: We suppress all errors here because an omitted entry will be
# considered an error and prevent the response from being considered
# fully analyzed.
logging.warning(str(e))
else:
if 'scans' in results:
del results['scans']
all_results['positives'] += bool(results.get('positives'))
all_results['reports'][key.id()] = results
# If all binaries have reports, set response to ANALYZED.
if (len(all_results['reports']) == len(keys) and
all('total' in report for report in all_results['reports'].values())):
all_results['response_code'] = vt_constants.RESPONSE_CODE.ANALYZED
self.respond_json(all_results)
else:
try:
results = binary_health.VirusTotalLookup(blockable_id)
except binary_health.LookupFailure as e: # pylint: disable=broad-except
logging.exception(str(e))
self.abort(httplib.NOT_FOUND)
else:
self.respond_json(results)
| 38.578947
| 80
| 0.713506
|
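The SantaBundle branch above only marks a response ANALYZED once every bundled binary has a complete VirusTotal report. A stripped-down, dependency-free sketch of that aggregation rule (the integer response codes and the sample report are placeholders, not the real vt_constants values):

RESPONSE_CODE = {'UNKNOWN': 0, 'ANALYZED': 1}  # placeholder values

def aggregate_reports(reports_by_binary, expected_count):
  """Combine per-binary reports the same way the bundle branch above does."""
  result = {'response_code': RESPONSE_CODE['UNKNOWN'], 'positives': 0, 'reports': {}}
  for binary_id, report in reports_by_binary.items():
    result['positives'] += bool(report.get('positives'))
    result['reports'][binary_id] = report
  if (len(result['reports']) == expected_count and
      all('total' in r for r in result['reports'].values())):
    result['response_code'] = RESPONSE_CODE['ANALYZED']
  return result

print(aggregate_reports({'abc123': {'positives': 2, 'total': 70}}, expected_count=1))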
7a83585f06778ea221f4502215e52c2b2b52fce5
| 576
|
py
|
Python
|
tests/token/test_setIssuer.py
|
blockchain-jedi/erc20-contract-template
|
4364d131e5046d0534f6b92304e6f91b5e8adb23
|
[
"MIT"
] | null | null | null |
tests/token/test_setIssuer.py
|
blockchain-jedi/erc20-contract-template
|
4364d131e5046d0534f6b92304e6f91b5e8adb23
|
[
"MIT"
] | null | null | null |
tests/token/test_setIssuer.py
|
blockchain-jedi/erc20-contract-template
|
4364d131e5046d0534f6b92304e6f91b5e8adb23
|
[
"MIT"
] | null | null | null |
import pytest
from brownie import reverts
from tests.fixtures.token import token
from tests.fixtures.accounts import admin, issuer, non_holders
def test_setIssuer_from_non_owner(token, non_holders):
with reverts("dev: missing role"):
token.setIssuer(non_holders[0], {'from': non_holders[0]})
def test_setIssuer_from_issuer(token, issuer, non_holders):
with reverts("dev: missing role"):
token.setIssuer(non_holders[0], {'from': issuer})
def test_setIssuer_from_owner(token, admin, non_holders):
token.setIssuer(non_holders[0], {'from': admin})
| 33.882353
| 65
| 0.751736
|
49db783a5aa85d7bd85fef1e576c1214082745e4
| 537
|
py
|
Python
|
meiduo_mall/meiduo_mall/apps/goods/urls.py
|
tuhao1250/meiduo-mall
|
7f0e64de72a0ad5b883fd8eb568aca3befd6e120
|
[
"MIT"
] | null | null | null |
meiduo_mall/meiduo_mall/apps/goods/urls.py
|
tuhao1250/meiduo-mall
|
7f0e64de72a0ad5b883fd8eb568aca3befd6e120
|
[
"MIT"
] | null | null | null |
meiduo_mall/meiduo_mall/apps/goods/urls.py
|
tuhao1250/meiduo-mall
|
7f0e64de72a0ad5b883fd8eb568aca3befd6e120
|
[
"MIT"
] | null | null | null |
from django.urls import path
from rest_framework.routers import DefaultRouter
from . import views
app_name = 'goods'
urlpatterns = [
    path(r'categories/<int:category_id>/hotskus/', views.HotSKUListView.as_view()),  # return hot-selling SKU data for a category
    path(r'categories/<int:category_id>/skus/', views.SKUListView.as_view()),  # get the SKU list for a category
    path(r'categories/<int:cat_id>/', views.CatView.as_view()),  # get breadcrumb navigation for a category
]
router = DefaultRouter()
router.register('skus/search', views.SKUSearchViewSet, base_name='skus_search')
urlpatterns += router.urls
| 31.588235
| 98
| 0.746741
|
0998faa6975800bce7455b021367e3870c976389
| 535
|
py
|
Python
|
test/test_create_vendor.py
|
oostapenko84/My-project
|
eba54a6f6d58f486b650877417457404962483e0
|
[
"Apache-2.0"
] | null | null | null |
test/test_create_vendor.py
|
oostapenko84/My-project
|
eba54a6f6d58f486b650877417457404962483e0
|
[
"Apache-2.0"
] | null | null | null |
test/test_create_vendor.py
|
oostapenko84/My-project
|
eba54a6f6d58f486b650877417457404962483e0
|
[
"Apache-2.0"
] | null | null | null |
__author__ = 'olga.ostapenko'
from model.vendor import Vendor
def test_create_vendor(app):
app.session.login(username="admin@admin.com", password="12345678")
app.vendor.create(
Vendor(Company_Name="Test Company vv", Phone="12345678", Address="San Francisco", City="San Francisco",
Zip_Code="12345", Web_Site="www.test.com", Description="test", VAD_First_Name="VAD First Name vv",
VAD_Last_Name="VAD Last Name vv", Email="testvv@test.com", VAD_Phone="12345678"))
app.session.logout()
| 44.583333
| 113
| 0.693458
|
c79aed851cbe87c5e43289b0130b14b817dcaa34
| 3,111
|
py
|
Python
|
MEng Portfolio Scripts/PyVISA/main_sweep_script.py
|
aidansmyth95/Masters-Project-Characterization-of-Reflectors-in-a-Wireless-Channel
|
66780e7d0be8b3929e657a3f30455cfa224655f6
|
[
"MIT"
] | null | null | null |
MEng Portfolio Scripts/PyVISA/main_sweep_script.py
|
aidansmyth95/Masters-Project-Characterization-of-Reflectors-in-a-Wireless-Channel
|
66780e7d0be8b3929e657a3f30455cfa224655f6
|
[
"MIT"
] | null | null | null |
MEng Portfolio Scripts/PyVISA/main_sweep_script.py
|
aidansmyth95/Masters-Project-Characterization-of-Reflectors-in-a-Wireless-Channel
|
66780e7d0be8b3929e657a3f30455cfa224655f6
|
[
"MIT"
] | 3
|
2019-10-25T16:32:29.000Z
|
2021-02-28T06:48:40.000Z
|
import csv
import visa
import socket
import vxi11
import time
import sys
import math
import string
from SA_CTRL_function import *
from SG_CTRL_function import *
# sig gen settings
start_freq = 2.0000000 # GHz
end_freq = 3.000000 # GHz
delta_freq = 0.0005000 # GHz
pwr = 14 # dBm
# fsv settings
fspan = '0.5' # scope peak range, MHz
avnum = '128'
# position setting
pos = 61803 # room location
# show GPIB device
print('Resources: ', visa.ResourceManager().list_resources())
sg = SG_CTRL_function('GPIB0::13::INSTR', 0)
print("Connecting to signal generator ...")
sg.connect()
# FSV network connectivity settings.... 'ipconfig /all' on remote host to show its IPV4
resourceString = 'TCPIP::10.198.138.37::INSTR' # Auditorium B, port 159D
LAN = '10.198.138.37'
print("Connecting to FSV ...")
scope = SA_CTRL_function(LAN, resourceString)
scope.fsa_set_fspan_MHz(fspan)
scope.fsa_set_average(avnum)  # average over avnum sweeps on the analyzer
# Clear CSV File
f1 = open('fsa_markers_pos%s.csv' % pos, 'w')
f1.truncate()
f1.close()
# Measuring peaks and record to CSV
print("Starting measurements ...")
SL = int((end_freq - start_freq) / delta_freq) # spectral lines
loops = 1 # loops for repeated sweep averaging
t_sleep = 2 # seconds
results = [0] * SL * (loops+1) # zeros array initialization
print("Estimated run time: ", SL * loops * t_sleep / 60, ' minutes.')
with open('fsa_markers_pos%s.csv' % pos, 'a') as f1:
writer = csv.writer(f1, delimiter='\t', lineterminator='\n', )
    writer.writerow(["Freq TX", "Magnitude"])  # write headings for columns
for i in range(1, loops + 1): # average of several sweeps
print("Sweep number ", i)
freq = start_freq
for j in range(0, SL + 1): # perform single sweep on 80 channels
scope.fsa_set_fc_GHz(str(freq)) # set scope centre frequency
sg.set_sg(freq, pwr) # set signal generator
time.sleep(t_sleep) # enough time to allow average to work better
scope.fsa_measure_peak() # take measurement
time.sleep(0.01) # wait before looping again
print("Freq Tx: ", freq)
print("Freq Rx: ", scope.res_x)
print("Measurement: ", scope.res_y)
results[j] += float(scope.res_y) # increment useful if loops > 1
freq += delta_freq # increment freq prior to next measurement
results = [x / loops for x in results] # average of frequency sweep
for i in range(0, SL + 1): # print results
print(results[i])
        writer.writerow([str(start_freq + i*delta_freq), str(results[i])])
print("Finished measurements!")
| 37.035714
| 98
| 0.575699
|
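A short follow-up sketch for loading the sweep results written above, assuming pandas is available and the tab-separated 'Freq TX' / 'Magnitude' header produced by the script:

import pandas as pd

pos = 61803  # same room location as in the sweep script
sweep = pd.read_csv('fsa_markers_pos%s.csv' % pos, sep='\t')
print(sweep.head())
peak = sweep.loc[sweep['Magnitude'].idxmax()]
print("Strongest response:", peak['Magnitude'], "at", peak['Freq TX'], "GHz")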
96ee38f9fcb432ded99fa0682078966d30627c56
| 1,868
|
py
|
Python
|
main.py
|
M-Faheem-Khan/Flickr-Image-Downloader
|
db3d3e98f2892d8f85b61a925f9848d1c0ae2e10
|
[
"MIT"
] | null | null | null |
main.py
|
M-Faheem-Khan/Flickr-Image-Downloader
|
db3d3e98f2892d8f85b61a925f9848d1c0ae2e10
|
[
"MIT"
] | null | null | null |
main.py
|
M-Faheem-Khan/Flickr-Image-Downloader
|
db3d3e98f2892d8f85b61a925f9848d1c0ae2e10
|
[
"MIT"
] | null | null | null |
import urllib.request
import requests
from bs4 import BeautifulSoup
def flickr_img(search, no_img):
# gets page source
url = "https://www.flickr.com/search/?text="+search
html = urllib.request.urlopen(url)
soup = BeautifulSoup(html, "html.parser")
# gets the urls from the div tags
ls = soup.find_all("div", {"class": "view photo-list-photo-view requiredToShowOnServer awake"})
k = []
for i in ls:
i = str(i)
i_o = i.index("/")
i_p = i[::-1].index(")")
i = i[i_o:-i_p-1]
i = "https:"+i
k.append(i)
# downloads all the images
for i in range(no_img):
with open(str(i)+".jpg", "wb") as handle:
response = requests.get(k[i], stream=True)
if not response.ok:
                print(response)
for block in response.iter_content(1024):
if not block:
break
handle.write(block)
# makes sure that the user enters something
def get_search():
flag = True
while flag:
ui = input("What do you want images of: ")
if len(ui) == 0:
print("Search cannot be empty")
else:
return ui
# makes sure that the user enters an integer
def get_images():
flag = True
while flag:
try:
ui = int(input("Out of 22 how many images do you want to download: "))
if ui <= 22:
return ui
else:
print("Pick a number between 0 and 22")
except ValueError:
print("Invalid Input \n Integers only!!")
# Warning
print("If you choose to download any pictures from the website, please be sure you know who they belong to and the licenses that come with them. \n\n\nUSE AT YOUR OWN RISK")
# Help
print("HELP \nEnter what you are looking for \nEnter the number of images you want to download (22 or fewer, since that is the default number of images the website loads)")
# calling the necessary functions
if __name__ == '__main__':
    search = get_search()
    no_img = get_images()
    flickr_img(search, no_img)
| 27.880597
| 178
| 0.664347
|
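A minimal non-interactive usage sketch, assuming the script above is importable as flickr_downloader (a hypothetical module name), network access is available, and the __main__ guard above keeps the interactive prompts from running on import:

from flickr_downloader import flickr_img

# Download the first 3 search results for an arbitrary query; files land in
# the current working directory as 0.jpg, 1.jpg, 2.jpg.
flickr_img("sunset", 3)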
7572dea154e64a43ca5d0ecca2d2efb4987f7b85
| 315
|
py
|
Python
|
terra_sdk/core/wasm/__init__.py
|
terra-money/terra.py
|
d66de6bb4d9d78f08ad8ffe05ae72d847fc5099d
|
[
"MIT"
] | 66
|
2021-10-21T23:29:38.000Z
|
2022-03-30T15:58:13.000Z
|
terra_sdk/core/wasm/__init__.py
|
terra-money/terra.py
|
d66de6bb4d9d78f08ad8ffe05ae72d847fc5099d
|
[
"MIT"
] | 50
|
2021-10-19T06:11:56.000Z
|
2022-03-31T17:06:57.000Z
|
terra_sdk/core/wasm/__init__.py
|
terra-money/terra.py
|
d66de6bb4d9d78f08ad8ffe05ae72d847fc5099d
|
[
"MIT"
] | 39
|
2021-11-07T17:28:31.000Z
|
2022-03-31T15:03:57.000Z
|
from .msgs import (
MsgClearAdmin,
MsgExecuteContract,
MsgInstantiateContract,
MsgMigrateContract,
MsgStoreCode,
MsgUpdateAdmin,
)
__all__ = [
"MsgStoreCode",
"MsgInstantiateContract",
"MsgExecuteContract",
"MsgMigrateContract",
"MsgUpdateAdmin",
"MsgClearAdmin",
]
| 17.5
| 29
| 0.68254
|
847f001692f213cbce96a8fc0a4df79fe2d7d963
| 9,071
|
py
|
Python
|
src/mca.py
|
jakub-stejskal/mca
|
7344f3b77f2a4d6c0440bc0aaa99945d108162b2
|
[
"BSD-3-Clause"
] | 9
|
2015-01-31T08:22:34.000Z
|
2021-04-14T16:01:27.000Z
|
src/mca.py
|
dataculture/mca
|
7344f3b77f2a4d6c0440bc0aaa99945d108162b2
|
[
"BSD-3-Clause"
] | null | null | null |
src/mca.py
|
dataculture/mca
|
7344f3b77f2a4d6c0440bc0aaa99945d108162b2
|
[
"BSD-3-Clause"
] | 2
|
2017-07-19T18:36:06.000Z
|
2019-03-12T01:00:29.000Z
|
# -*- coding: utf-8 -*-
from scipy.linalg import diagsvd
import numpy as np
import pandas as pd
import functools
def process_df(DF, cols, ncols):
if cols: # if you want us to do the dummy coding
K = len(cols) # the number of categories
X = dummy(DF, cols)
else: # if you want to dummy code it yourself or do all the cols
K = ncols
if ncols is None: # be sure to pass K if you didn't multi-index
K = len(DF.columns) # ... it with mca.dummy()
if not K:
raise ValueError("Your DataFrame has no columns.")
elif not isinstance(ncols, int) or ncols <= 0 or \
ncols > len(DF.columns): # if you dummy coded it yourself
raise ValueError("You must pass a valid number of columns.")
X = DF
J = X.shape[1]
return X, K, J
def dummy(DF, cols=None):
"""Dummy code select columns of a DataFrame."""
return pd.concat((pd.get_dummies(DF[col])
for col in (DF.columns if cols is None else cols)),
axis=1, keys=DF.columns)
def _mul(*args):
"""An internal method to multiply matrices."""
return functools.reduce(np.dot, args)
class MCA(object):
"""Run MCA on selected columns of a pd DataFrame.
    If the columns are specified, assume that they hold
categorical variables that need to be replaced with
dummy indicators, otherwise process the DataFrame as is.
'cols': The columns of the DataFrame to process.
'ncols': The number of columns before dummy coding. To be passed if cols isn't.
'benzecri': Perform Benzécri correction (default: True)
'TOL': value below which to round eigenvalues to zero (default: 1e-4)
"""
def __init__(self, DF, cols=None, ncols=None, benzecri=True, TOL=1e-4):
X, self.K, self.J = process_df(DF, cols, ncols)
S = X.sum().sum()
Z = X / S # correspondence matrix
self.r = Z.sum(axis=1)
self.c = Z.sum()
self._numitems = len(DF)
self.cor = benzecri
self.D_r = np.diag(1/np.sqrt(self.r))
Z_c = Z - np.outer(self.r, self.c) # standardized residuals matrix
self.D_c = np.diag(1/np.sqrt(self.c))
# another option, not pursued here, is sklearn.decomposition.TruncatedSVD
self.P, self.s, self.Q = np.linalg.svd(_mul(self.D_r, Z_c, self.D_c))
self.E = None
E = self._benzecri() if self.cor else self.s**2
self.inertia = sum(E)
self.rank = np.argmax(E < TOL)
self.L = E[:self.rank]
def _benzecri(self):
if self.E is None:
self.E = np.array([(self.K/(self.K-1.)*(_ - 1./self.K))**2
if _ > 1./self.K else 0 for _ in self.s**2])
return self.E
def fs_r(self, percent=0.9, N=None):
"""Get the row factor scores (dimensionality-reduced representation),
choosing how many factors to retain, directly or based on the explained
variance.
'percent': The minimum variance that the retained factors are required
to explain (default: 90% = 0.9)
'N': The number of factors to retain. Overrides 'percent'.
If the rank is less than N, N is ignored.
"""
if not 0 <= percent <= 1:
raise ValueError("Percent should be a real number between 0 and 1.")
if N:
if not isinstance(N, (int, np.int64)) or N <= 0:
raise ValueError("N should be a positive integer.")
N = min(N, self.rank)
# S = np.zeros((self._numitems, N))
# else:
self.k = 1 + np.flatnonzero(np.cumsum(self.L) >= sum(self.L)*percent)[0]
# S = np.zeros((self._numitems, self.k))
# the sign of the square root can be either way; singular value vs. eigenvalue
# np.fill_diagonal(S, -np.sqrt(self.E) if self.cor else self.s)
num2ret = N if N else self.k
s = -np.sqrt(self.L) if self.cor else self.s
S = diagsvd(s[:num2ret], self._numitems, num2ret)
self.F = _mul(self.D_r, self.P, S)
return self.F
def fs_c(self, percent=0.9, N=None):
"""Get the column factor scores (dimensionality-reduced representation),
choosing how many factors to retain, directly or based on the explained
variance.
'percent': The minimum variance that the retained factors are required
to explain (default: 90% = 0.9)
'N': The number of factors to retain. Overrides 'percent'.
If the rank is less than N, N is ignored.
"""
if not 0 <= percent <= 1:
raise ValueError("Percent should be a real number between 0 and 1.")
if N:
if not isinstance(N, (int, np.int64)) or N <= 0:
raise ValueError("N should be a positive integer.")
N = min(N, self.rank) # maybe we should notify the user?
# S = np.zeros((self._numitems, N))
# else:
self.k = 1 + np.flatnonzero(np.cumsum(self.L) >= sum(self.L)*percent)[0]
# S = np.zeros((self._numitems, self.k))
# the sign of the square root can be either way; singular value vs. eigenvalue
# np.fill_diagonal(S, -np.sqrt(self.E) if self.cor else self.s)
num2ret = N if N else self.k
s = -np.sqrt(self.L) if self.cor else self.s
S = diagsvd(s[:num2ret], len(self.Q), num2ret)
self.G = _mul(self.D_c, self.Q.T, S) # important! note the transpose on Q
return self.G
def cos_r(self, N=None): # percent=0.9
"""Return the squared cosines for each row."""
if not hasattr(self, 'F') or self.F.shape[1] < self.rank:
self.fs_r(N=self.rank) # generate F
self.dr = np.linalg.norm(self.F, axis=1)**2
# cheaper than np.diag(self.F.dot(self.F.T))?
return np.apply_along_axis(lambda _: _/self.dr, 0, self.F[:, :N]**2)
def cos_c(self, N=None): # percent=0.9,
"""Return the squared cosines for each column."""
if not hasattr(self, 'G') or self.G.shape[1] < self.rank:
self.fs_c(N=self.rank) # generate
self.dc = np.linalg.norm(self.G, axis=1)**2
# cheaper than np.diag(self.G.dot(self.G.T))?
return np.apply_along_axis(lambda _: _/self.dc, 0, self.G[:, :N]**2)
def cont_r(self, percent=0.9, N=None):
"""Return the contribution of each row."""
if not hasattr(self, 'F'):
self.fs_r(N=self.rank) # generate F
return np.apply_along_axis(lambda _: _/self.L[:N], 1,
np.apply_along_axis(lambda _: _*self.r, 0, self.F[:, :N]**2))
def cont_c(self, percent=0.9, N=None): # bug? check axis number 0 vs 1 here
"""Return the contribution of each column."""
if not hasattr(self, 'G'):
self.fs_c(N=self.rank) # generate G
return np.apply_along_axis(lambda _: _/self.L[:N], 1,
np.apply_along_axis(lambda _: _*self.c, 0, self.G[:, :N]**2))
def expl_var(self, greenacre=True, N=None):
"""
Return proportion of explained inertia (variance) for each factor.
:param greenacre: Perform Greenacre correction (default: True)
"""
if greenacre:
greenacre_inertia = (self.K / (self.K - 1.) * (sum(self.s**4)
- (self.J - self.K) / self.K**2.))
return (self._benzecri() / greenacre_inertia)[:N]
else:
E = self._benzecri() if self.cor else self.s**2
return (E / sum(E))[:N]
def fs_r_sup(self, DF, N=None):
"""Find the supplementary row factor scores.
        N: The number of singular vectors to retain (defaults to the rank).
"""
if not hasattr(self, 'G'):
self.fs_c(N=self.rank) # generate G
if N and (not isinstance(N, int) or N <= 0):
            raise ValueError("N should be a positive integer.")
s = -np.sqrt(self.E) if self.cor else self.s
N = min(N, self.rank) if N else self.rank
S_inv = diagsvd(-1/s[:N], len(self.G.T), N)
# S = scipy.linalg.diagsvd(s[:N], len(self.tau), N)
return _mul(DF.div(DF.sum(axis=1), axis=0), self.G, S_inv)[:, :N]
def fs_c_sup(self, DF, N=None):
"""Find the supplementary column factor scores.
        N: The number of singular vectors to retain (defaults to the rank).
"""
if not hasattr(self, 'F'):
self.fs_r(N=self.rank) # generate F
if N and (not isinstance(N, int) or N <= 0):
            raise ValueError("N should be a positive integer.")
s = -np.sqrt(self.E) if self.cor else self.s
N = min(N, self.rank) if N else self.rank
S_inv = diagsvd(-1/s[:N], len(self.F.T), N)
# S = scipy.linalg.diagsvd(s[:N], len(self.tau), N)
return _mul((DF/DF.sum()).T, self.F, S_inv)[:, :N]
| 41.420091
| 86
| 0.572373
|
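A small usage sketch for the MCA class above on a made-up categorical DataFrame; it assumes the file is importable as mca and that pandas, numpy, and scipy are installed.

import pandas as pd
import mca  # the module above, assumed importable

# Two categorical variables with a strong (made-up) association.
df = pd.DataFrame({
    'color': ['red', 'red', 'blue', 'blue', 'green', 'green', 'red', 'blue'],
    'size':  ['S',   'S',   'M',    'M',    'L',     'L',     'M',   'S'],
})

m = mca.MCA(df, cols=['color', 'size'])
print(m.L)                              # Benzécri-corrected principal inertias of retained factors
print(m.fs_r(N=2))                      # row factor scores on (up to) the first two factors
print(m.expl_var(greenacre=True, N=2))  # proportion of inertia explained by those factors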
3ce562331fb9b252dc47d035be921a9c455d692c
| 103,416
|
py
|
Python
|
core/domain/exp_domain_test.py
|
vibhor98/oppia
|
47aeba6879467ac53e8a9dc89bbf73a4fbb7ebaf
|
[
"Apache-2.0"
] | null | null | null |
core/domain/exp_domain_test.py
|
vibhor98/oppia
|
47aeba6879467ac53e8a9dc89bbf73a4fbb7ebaf
|
[
"Apache-2.0"
] | null | null | null |
core/domain/exp_domain_test.py
|
vibhor98/oppia
|
47aeba6879467ac53e8a9dc89bbf73a4fbb7ebaf
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for exploration domain objects and methods defined on them."""
import copy
import os
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import param_domain
from core.platform import models
from core.tests import test_utils
import feconf
import utils
(exp_models,) = models.Registry.import_models([models.NAMES.exploration])
# Dictionary-like data structures within sample YAML must be formatted
# alphabetically to match string equivalence with the YAML generation
# methods tested below.
#
# If evaluating differences in YAML, conversion to dict form via
# utils.dict_from_yaml can isolate differences quickly.
SAMPLE_YAML_CONTENT = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: %s
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: %d
states:
%s:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: %s
feedback:
audio_translations: {}
html: ''
labelled_as_correct: false
param_changes: []
refresher_exploration_id: null
hints: []
id: null
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: New state
feedback:
audio_translations: {}
html: ''
labelled_as_correct: false
param_changes: []
refresher_exploration_id: null
hints: []
id: null
solution: null
param_changes: []
states_schema_version: %d
tags: []
title: Title
""") % (
feconf.DEFAULT_INIT_STATE_NAME,
exp_domain.Exploration.CURRENT_EXP_SCHEMA_VERSION,
feconf.DEFAULT_INIT_STATE_NAME,
feconf.DEFAULT_INIT_STATE_NAME,
feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION)
SAMPLE_UNTITLED_YAML_CONTENT = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: %s
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: %d
states:
%s:
content:
- type: text
value: ''
interaction:
answer_groups: []
customization_args: {}
default_outcome:
dest: %s
feedback: []
labelled_as_correct: false
param_changes: []
refresher_exploration_id: null
fallbacks: []
id: null
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
customization_args: {}
default_outcome:
dest: New state
feedback: []
labelled_as_correct: false
param_changes: []
refresher_exploration_id: null
fallbacks: []
id: null
param_changes: []
states_schema_version: %d
tags: []
""") % (
feconf.DEFAULT_INIT_STATE_NAME,
exp_domain.Exploration.LAST_UNTITLED_SCHEMA_VERSION,
feconf.DEFAULT_INIT_STATE_NAME,
feconf.DEFAULT_INIT_STATE_NAME,
feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION)
class ExplorationDomainUnitTests(test_utils.GenericTestBase):
"""Test the exploration domain object."""
# TODO(bhenning): The validation tests below should be split into separate
# unit tests. Also, all validation errors should be covered in the tests.
def test_validation(self):
"""Test validation of explorations."""
exploration = exp_domain.Exploration.create_default_exploration('eid')
exploration.init_state_name = ''
exploration.states = {}
exploration.title = 'Hello #'
self._assert_validation_error(exploration, 'Invalid character #')
exploration.title = 'Title'
exploration.category = 'Category'
# Note: If '/' ever becomes a valid state name, ensure that the rule
        # editor frontend template is fixed -- it currently uses '/' as a
# sentinel for an invalid state name.
bad_state = exp_domain.State.create_default_state('/')
exploration.states = {'/': bad_state}
self._assert_validation_error(
exploration, 'Invalid character / in a state name')
new_state = exp_domain.State.create_default_state('ABC')
new_state.update_interaction_id('TextInput')
# The 'states' property must be a non-empty dict of states.
exploration.states = {}
self._assert_validation_error(
exploration, 'exploration has no states')
exploration.states = {'A string #': new_state}
self._assert_validation_error(
exploration, 'Invalid character # in a state name')
exploration.states = {'A string _': new_state}
self._assert_validation_error(
exploration, 'Invalid character _ in a state name')
exploration.states = {'ABC': new_state}
self._assert_validation_error(
exploration, 'has no initial state name')
exploration.init_state_name = 'initname'
self._assert_validation_error(
exploration,
r'There is no state in \[\'ABC\'\] corresponding to '
'the exploration\'s initial state name initname.')
# Test whether a default outcome to a non-existing state is invalid.
exploration.states = {exploration.init_state_name: new_state}
self._assert_validation_error(
exploration, 'destination ABC is not a valid')
# Restore a valid exploration.
init_state = exploration.states[exploration.init_state_name]
default_outcome = init_state.interaction.default_outcome
default_outcome.dest = exploration.init_state_name
exploration.validate()
# Ensure an answer group with two classifier rules is invalid
init_state.interaction.answer_groups.append(
exp_domain.AnswerGroup.from_dict({
'outcome': {
'dest': exploration.init_state_name,
'feedback': {
'html': 'Feedback',
'audio_translations': {}
},
'labelled_as_correct': False,
'param_changes': [],
'refresher_exploration_id': None,
},
'rule_specs': [{
'inputs': {
'training_data': ['Test']
},
'rule_type': 'FuzzyMatches'
}, {
'inputs': {
'training_data': ['Test']
},
'rule_type': 'FuzzyMatches'
}],
})
)
self._assert_validation_error(
exploration, 'AnswerGroups can only have one classifier rule.')
# Restore a valid exploration.
init_state.interaction.answer_groups.pop()
exploration.validate()
# Ensure an invalid destination can also be detected for answer groups.
# Note: The state must keep its default_outcome, otherwise it will
# trigger a validation error for non-terminal states needing to have a
# default outcome. To validate the outcome of the answer group, this
# default outcome must point to a valid state.
init_state = exploration.states[exploration.init_state_name]
default_outcome = init_state.interaction.default_outcome
default_outcome.dest = exploration.init_state_name
init_state.interaction.answer_groups.append(
exp_domain.AnswerGroup.from_dict({
'outcome': {
'dest': exploration.init_state_name,
'feedback': {
'html': 'Feedback',
'audio_translations': {}
},
'labelled_as_correct': False,
'param_changes': [],
'refresher_exploration_id': None,
},
'rule_specs': [{
'inputs': {
'x': 'Test'
},
'rule_type': 'Contains'
}],
})
)
exploration.validate()
interaction = init_state.interaction
answer_groups = interaction.answer_groups
answer_group = answer_groups[0]
answer_group.outcome.dest = 'DEF'
self._assert_validation_error(
exploration, 'destination DEF is not a valid')
# Restore a valid exploration.
exploration.states[exploration.init_state_name].update_interaction_id(
'TextInput')
answer_group.outcome.dest = exploration.init_state_name
exploration.validate()
# Validate RuleSpec.
rule_spec = answer_group.rule_specs[0]
rule_spec.inputs = {}
self._assert_validation_error(
exploration, 'RuleSpec \'Contains\' is missing inputs')
rule_spec.inputs = 'Inputs string'
self._assert_validation_error(
exploration, 'Expected inputs to be a dict')
rule_spec.inputs = {'x': 'Test'}
rule_spec.rule_type = 'FakeRuleType'
self._assert_validation_error(exploration, 'Unrecognized rule type')
rule_spec.inputs = {'x': 15}
rule_spec.rule_type = 'Contains'
with self.assertRaisesRegexp(
Exception, 'Expected unicode string, received 15'
):
exploration.validate()
rule_spec.inputs = {'x': '{{ExampleParam}}'}
self._assert_validation_error(
exploration,
'RuleSpec \'Contains\' has an input with name \'x\' which refers '
'to an unknown parameter within the exploration: ExampleParam')
# Restore a valid exploration.
exploration.param_specs['ExampleParam'] = param_domain.ParamSpec(
'UnicodeString')
exploration.validate()
# Validate Outcome.
outcome = answer_group.outcome
destination = exploration.init_state_name
outcome.dest = None
self._assert_validation_error(
exploration, 'Every outcome should have a destination.')
# Try setting the outcome destination to something other than a string.
outcome.dest = 15
self._assert_validation_error(
exploration, 'Expected outcome dest to be a string')
outcome.dest = destination
outcome.feedback = exp_domain.SubtitledHtml('Feedback', {})
exploration.validate()
outcome.labelled_as_correct = 'hello'
self._assert_validation_error(
exploration, 'The "labelled_as_correct" field should be a boolean')
# Test that labelled_as_correct must be False for self-loops, and that
# this causes a strict validation failure but not a normal validation
# failure.
outcome.labelled_as_correct = True
with self.assertRaisesRegexp(
Exception, 'is labelled correct but is a self-loop.'
):
exploration.validate(strict=True)
exploration.validate()
outcome.labelled_as_correct = False
exploration.validate()
outcome.param_changes = 'Changes'
self._assert_validation_error(
exploration, 'Expected outcome param_changes to be a list')
outcome.param_changes = []
exploration.validate()
outcome.refresher_exploration_id = 12345
self._assert_validation_error(
exploration,
'Expected outcome refresher_exploration_id to be a string')
outcome.refresher_exploration_id = None
exploration.validate()
outcome.refresher_exploration_id = 'valid_string'
exploration.validate()
# Test that refresher_exploration_id must be None for non-self-loops.
new_state_name = 'New state'
exploration.add_states([new_state_name])
outcome.dest = new_state_name
outcome.refresher_exploration_id = 'another_string'
self._assert_validation_error(
exploration,
'has a refresher exploration ID, but is not a self-loop')
outcome.refresher_exploration_id = None
exploration.validate()
exploration.delete_state(new_state_name)
# Validate InteractionInstance.
interaction.id = 15
self._assert_validation_error(
exploration, 'Expected interaction id to be a string')
interaction.id = 'SomeInteractionTypeThatDoesNotExist'
self._assert_validation_error(exploration, 'Invalid interaction id')
interaction.id = 'TextInput'
exploration.validate()
interaction.customization_args = []
self._assert_validation_error(
exploration, 'Expected customization args to be a dict')
interaction.customization_args = {15: ''}
self._assert_validation_error(
exploration, 'Invalid customization arg name')
interaction.customization_args = {'placeholder': ''}
exploration.validate()
interaction.answer_groups = {}
self._assert_validation_error(
exploration, 'Expected answer groups to be a list')
interaction.answer_groups = answer_groups
interaction.id = 'EndExploration'
self._assert_validation_error(
exploration,
'Terminal interactions must not have a default outcome.')
interaction.id = 'TextInput'
interaction.default_outcome = None
self._assert_validation_error(
exploration,
'Non-terminal interactions must have a default outcome.')
interaction.id = 'EndExploration'
self._assert_validation_error(
exploration,
'Terminal interactions must not have any answer groups.')
# A terminal interaction without a default outcome or answer group is
# valid. This resets the exploration back to a valid state.
interaction.answer_groups = []
exploration.validate()
# Restore a valid exploration.
interaction.id = 'TextInput'
interaction.answer_groups = answer_groups
interaction.default_outcome = default_outcome
exploration.validate()
interaction.hints = {}
self._assert_validation_error(
exploration, 'Expected hints to be a list')
# Validate AnswerGroup.
answer_group.rule_specs = {}
self._assert_validation_error(
exploration, 'Expected answer group rules to be a list')
answer_group.rule_specs = []
self._assert_validation_error(
exploration,
'There must be at least one rule for each answer group.')
exploration.states = {
exploration.init_state_name: exp_domain.State.create_default_state(
exploration.init_state_name)
}
exploration.states[exploration.init_state_name].update_interaction_id(
'TextInput')
exploration.validate()
exploration.language_code = 'fake_code'
self._assert_validation_error(exploration, 'Invalid language_code')
exploration.language_code = 'English'
self._assert_validation_error(exploration, 'Invalid language_code')
# TODO(sll): Remove the next two lines once the App Engine search
# service supports 3-letter language codes.
exploration.language_code = 'kab'
self._assert_validation_error(exploration, 'Invalid language_code')
exploration.language_code = 'en'
exploration.validate()
exploration.param_specs = 'A string'
self._assert_validation_error(exploration, 'param_specs to be a dict')
exploration.param_specs = {
'@': param_domain.ParamSpec.from_dict({
'obj_type': 'UnicodeString'
})
}
self._assert_validation_error(
exploration, 'Only parameter names with characters')
exploration.param_specs = {
'notAParamSpec': param_domain.ParamSpec.from_dict(
{'obj_type': 'UnicodeString'})
}
exploration.validate()
def test_hints_validation(self):
"""Test validation of state hints."""
exploration = exp_domain.Exploration.create_default_exploration('eid')
exploration.objective = 'Objective'
init_state = exploration.states[exploration.init_state_name]
init_state.update_interaction_id('TextInput')
exploration.validate()
init_state.update_interaction_hints([{
'hint_content': {
'html': 'hint one',
'audio_translations': {}
},
}])
solution = {
'answer_is_exclusive': False,
'correct_answer': 'helloworld!',
'explanation': {
'html': 'hello_world is a string',
'audio_translations': {}
},
}
init_state.interaction.solution = (
exp_domain.Solution.from_dict(init_state.interaction.id, solution))
exploration.validate()
# Add hint and delete hint
init_state.add_hint(exp_domain.SubtitledHtml('new hint', {}))
self.assertEquals(
init_state.interaction.hints[1].hint_content.html,
'new hint')
init_state.add_hint(exp_domain.SubtitledHtml('hint three', {}))
init_state.delete_hint(1)
self.assertEquals(
len(init_state.interaction.hints),
2)
exploration.validate()
def test_solution_validation(self):
"""Test validation of state solution."""
exploration = exp_domain.Exploration.create_default_exploration('eid')
exploration.objective = 'Objective'
init_state = exploration.states[exploration.init_state_name]
init_state.update_interaction_id('TextInput')
exploration.validate()
# Solution should be set to None as default.
self.assertEquals(init_state.interaction.solution, None)
init_state.add_hint(exp_domain.SubtitledHtml('hint #1', {}))
solution = {
'answer_is_exclusive': False,
'correct_answer': [0, 0],
'explanation': {
'html': 'hello_world is a string',
'audio_translations': {}
}
}
# Object type of answer must match that of correct_answer
with self.assertRaises(AssertionError):
init_state.interaction.solution = (
exp_domain.Solution.from_dict(
init_state.interaction.id, solution))
solution = {
'answer_is_exclusive': False,
'correct_answer': 'hello_world!',
'explanation': {
'html': 'hello_world is a string',
'audio_translations': {}
}
}
init_state.interaction.solution = (
exp_domain.Solution.from_dict(init_state.interaction.id, solution))
exploration.validate()
def test_tag_validation(self):
"""Test validation of exploration tags."""
exploration = exp_domain.Exploration.create_default_exploration('eid')
exploration.objective = 'Objective'
init_state = exploration.states[exploration.init_state_name]
init_state.update_interaction_id('EndExploration')
init_state.interaction.default_outcome = None
exploration.validate()
exploration.tags = 'this should be a list'
self._assert_validation_error(
exploration, 'Expected \'tags\' to be a list')
exploration.tags = [123]
self._assert_validation_error(exploration, 'to be a string')
exploration.tags = ['abc', 123]
self._assert_validation_error(exploration, 'to be a string')
exploration.tags = ['']
self._assert_validation_error(exploration, 'Tags should be non-empty')
exploration.tags = ['123']
self._assert_validation_error(
exploration, 'should only contain lowercase letters and spaces')
exploration.tags = ['ABC']
self._assert_validation_error(
exploration, 'should only contain lowercase letters and spaces')
exploration.tags = [' a b']
self._assert_validation_error(
exploration, 'Tags should not start or end with whitespace')
exploration.tags = ['a b ']
self._assert_validation_error(
exploration, 'Tags should not start or end with whitespace')
exploration.tags = ['a b']
self._assert_validation_error(
exploration, 'Adjacent whitespace in tags should be collapsed')
exploration.tags = ['abc', 'abc']
self._assert_validation_error(
exploration, 'Some tags duplicate each other')
exploration.tags = ['computer science', 'analysis', 'a b c']
exploration.validate()
def test_title_category_and_objective_validation(self):
"""Test that titles, categories and objectives are validated only in
'strict' mode.
"""
self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='', category='',
objective='', end_state_name='End')
exploration = exp_services.get_exploration_by_id('exp_id')
exploration.validate()
with self.assertRaisesRegexp(
utils.ValidationError, 'title must be specified'
):
exploration.validate(strict=True)
exploration.title = 'A title'
with self.assertRaisesRegexp(
utils.ValidationError, 'category must be specified'
):
exploration.validate(strict=True)
exploration.category = 'A category'
with self.assertRaisesRegexp(
utils.ValidationError, 'objective must be specified'
):
exploration.validate(strict=True)
exploration.objective = 'An objective'
exploration.validate(strict=True)
def test_audio_translation_validation(self):
"""Test validation of audio translations."""
audio_translation = exp_domain.AudioTranslation('a.mp3', 20, True)
audio_translation.validate()
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected audio filename to be a string'
):
with self.swap(audio_translation, 'filename', 20):
audio_translation.validate()
with self.assertRaisesRegexp(
utils.ValidationError, 'Invalid audio filename'
):
with self.swap(audio_translation, 'filename', '.invalidext'):
audio_translation.validate()
with self.assertRaisesRegexp(
utils.ValidationError, 'Invalid audio filename'
):
with self.swap(audio_translation, 'filename', 'justanextension'):
audio_translation.validate()
with self.assertRaisesRegexp(
utils.ValidationError, 'Invalid audio filename'
):
with self.swap(audio_translation, 'filename', 'a.invalidext'):
audio_translation.validate()
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected file size to be an int'
):
with self.swap(audio_translation, 'file_size_bytes', 'abc'):
audio_translation.validate()
with self.assertRaisesRegexp(
utils.ValidationError, 'Invalid file size'
):
with self.swap(audio_translation, 'file_size_bytes', -3):
audio_translation.validate()
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected needs_update to be a bool'
):
with self.swap(audio_translation, 'needs_update', 'hello'):
audio_translation.validate()
def test_subtitled_html_validation(self):
"""Test validation of subtitled HTML."""
audio_translation = exp_domain.AudioTranslation(
'a.mp3', 20, True)
subtitled_html = exp_domain.SubtitledHtml('some html', {
'hi-en': audio_translation,
})
subtitled_html.validate()
with self.assertRaisesRegexp(
utils.ValidationError, 'Invalid content HTML'
):
with self.swap(subtitled_html, 'html', 20):
subtitled_html.validate()
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected audio_translations to be a dict'
):
with self.swap(subtitled_html, 'audio_translations', 'not_dict'):
subtitled_html.validate()
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected language code to be a string'
):
with self.swap(subtitled_html, 'audio_translations',
{20: audio_translation}):
subtitled_html.validate()
with self.assertRaisesRegexp(
utils.ValidationError, 'Unrecognized language code'
):
with self.swap(subtitled_html, 'audio_translations',
{'invalid-code': audio_translation}):
subtitled_html.validate()
def test_get_state_names_mapping(self):
"""Test the get_state_names_mapping() method."""
exp_id = 'exp_id1'
test_exp_filepath = os.path.join(
feconf.TESTS_DATA_DIR, 'string_classifier_test.yaml')
yaml_content = utils.get_file_contents(test_exp_filepath)
assets_list = []
exp_services.save_new_exploration_from_yaml_and_assets(
feconf.SYSTEM_COMMITTER_ID, yaml_content, exp_id,
assets_list)
exploration = exp_services.get_exploration_by_id(exp_id)
# Rename a state.
exploration.rename_state('Home', 'Renamed state')
change_list = [{
'cmd': 'rename_state',
'old_state_name': 'Home',
'new_state_name': 'Renamed state'
}]
expected_dict = {
'Renamed state': 'Home',
'End': 'End'
}
actual_dict = exploration.get_state_names_mapping(change_list)
self.assertEqual(actual_dict, expected_dict)
# Add a state
exploration.add_states(['New state'])
exploration.states['New state'] = copy.deepcopy(
exploration.states['Renamed state'])
change_list = [{
'cmd': 'add_state',
'state_name': 'New state',
}]
expected_dict = {
'New state': 'New state',
'Renamed state': 'Renamed state',
'End': 'End'
}
actual_dict = exploration.get_state_names_mapping(change_list)
self.assertEqual(actual_dict, expected_dict)
# Delete state.
exploration.delete_state('New state')
change_list = [{
'cmd': 'delete_state',
'state_name': 'New state'
}]
expected_dict = {
'Renamed state': 'Renamed state',
'End': 'End'
}
actual_dict = exploration.get_state_names_mapping(change_list)
self.assertEqual(actual_dict, expected_dict)
# Test addition and multiple renames.
exploration.add_states(['New state'])
exploration.states['New state'] = copy.deepcopy(
exploration.states['Renamed state'])
exploration.rename_state('New state', 'New state2')
exploration.rename_state('New state2', 'New state3')
change_list = [{
'cmd': 'add_state',
'state_name': 'New state',
}, {
'cmd': 'rename_state',
'old_state_name': 'New state',
'new_state_name': 'New state2'
}, {
'cmd': 'rename_state',
'old_state_name': 'New state2',
'new_state_name': 'New state3'
}]
expected_dict = {
'New state3': 'New state',
'Renamed state': 'Renamed state',
'End': 'End'
}
actual_dict = exploration.get_state_names_mapping(change_list)
self.assertEqual(actual_dict, expected_dict)
# Test addition, rename and deletion.
exploration.add_states(['New state 2'])
exploration.rename_state('New state 2', 'Renamed state 2')
exploration.delete_state('Renamed state 2')
change_list = [{
'cmd': 'add_state',
'state_name': 'New state 2'
}, {
'cmd': 'rename_state',
'old_state_name': 'New state 2',
'new_state_name': 'Renamed state 2'
}, {
'cmd': 'delete_state',
'state_name': 'Renamed state 2'
}]
expected_dict = {
'New state3': 'New state3',
'Renamed state': 'Renamed state',
'End': 'End'
}
actual_dict = exploration.get_state_names_mapping(change_list)
self.assertEqual(actual_dict, expected_dict)
def test_get_trainable_states_dict(self):
"""Test the get_trainable_states_dict() method."""
exp_id = 'exp_id1'
test_exp_filepath = os.path.join(
feconf.TESTS_DATA_DIR, 'string_classifier_test.yaml')
yaml_content = utils.get_file_contents(test_exp_filepath)
assets_list = []
exp_services.save_new_exploration_from_yaml_and_assets(
feconf.SYSTEM_COMMITTER_ID, yaml_content, exp_id,
assets_list)
exploration_model = exp_models.ExplorationModel.get(
exp_id, strict=False)
old_states = exp_services.get_exploration_from_model(
exploration_model).states
exploration = exp_services.get_exploration_by_id(exp_id)
# Rename a state to add it in unchanged answer group.
exploration.rename_state('Home', 'Renamed state')
change_list = [{
'cmd': 'rename_state',
'old_state_name': 'Home',
'new_state_name': 'Renamed state'
}]
expected_dict = {
'state_names_with_changed_answer_groups': [],
'state_names_with_unchanged_answer_groups': ['Renamed state']
}
new_to_old_state_names = exploration.get_state_names_mapping(
change_list)
actual_dict = exploration.get_trainable_states_dict(
old_states, new_to_old_state_names)
self.assertEqual(actual_dict, expected_dict)
# Modify answer groups to trigger change in answer groups.
state = exploration.states['Renamed state']
exploration.states['Renamed state'].interaction.answer_groups.insert(
3, state.interaction.answer_groups[3])
answer_groups = []
for answer_group in state.interaction.answer_groups:
answer_groups.append(answer_group.to_dict())
change_list = [{
'cmd': 'edit_state_property',
'state_name': 'Renamed state',
'property_name': 'answer_groups',
'new_value': answer_groups
}]
expected_dict = {
'state_names_with_changed_answer_groups': ['Renamed state'],
'state_names_with_unchanged_answer_groups': []
}
new_to_old_state_names = exploration.get_state_names_mapping(
change_list)
actual_dict = exploration.get_trainable_states_dict(
old_states, new_to_old_state_names)
self.assertEqual(actual_dict, expected_dict)
# Add new state to trigger change in answer groups.
exploration.add_states(['New state'])
exploration.states['New state'] = copy.deepcopy(
exploration.states['Renamed state'])
change_list = [{
'cmd': 'add_state',
'state_name': 'New state',
}]
expected_dict = {
'state_names_with_changed_answer_groups': [
'New state', 'Renamed state'],
'state_names_with_unchanged_answer_groups': []
}
new_to_old_state_names = exploration.get_state_names_mapping(
change_list)
actual_dict = exploration.get_trainable_states_dict(
old_states, new_to_old_state_names)
self.assertEqual(actual_dict, expected_dict)
# Delete state.
exploration.delete_state('New state')
change_list = [{
'cmd': 'delete_state',
'state_name': 'New state'
}]
expected_dict = {
'state_names_with_changed_answer_groups': ['Renamed state'],
'state_names_with_unchanged_answer_groups': []
}
new_to_old_state_names = exploration.get_state_names_mapping(
change_list)
actual_dict = exploration.get_trainable_states_dict(
old_states, new_to_old_state_names)
self.assertEqual(actual_dict, expected_dict)
# Test addition and multiple renames.
exploration.add_states(['New state'])
exploration.states['New state'] = copy.deepcopy(
exploration.states['Renamed state'])
exploration.rename_state('New state', 'New state2')
exploration.rename_state('New state2', 'New state3')
change_list = [{
'cmd': 'add_state',
'state_name': 'New state',
}, {
'cmd': 'rename_state',
'old_state_name': 'New state',
'new_state_name': 'New state2'
}, {
'cmd': 'rename_state',
'old_state_name': 'New state2',
'new_state_name': 'New state3'
}]
expected_dict = {
'state_names_with_changed_answer_groups': [
'Renamed state', 'New state3'],
'state_names_with_unchanged_answer_groups': []
}
new_to_old_state_names = exploration.get_state_names_mapping(
change_list)
actual_dict = exploration.get_trainable_states_dict(
old_states, new_to_old_state_names)
self.assertEqual(actual_dict, expected_dict)
def test_is_demo_property(self):
"""Test the is_demo property."""
demo = exp_domain.Exploration.create_default_exploration('0')
self.assertEqual(demo.is_demo, True)
notdemo1 = exp_domain.Exploration.create_default_exploration('a')
self.assertEqual(notdemo1.is_demo, False)
notdemo2 = exp_domain.Exploration.create_default_exploration('abcd')
self.assertEqual(notdemo2.is_demo, False)
def test_exploration_export_import(self):
"""Test that to_dict and from_dict preserve all data within an
exploration.
"""
demo = exp_domain.Exploration.create_default_exploration('0')
demo_dict = demo.to_dict()
exp_from_dict = exp_domain.Exploration.from_dict(demo_dict)
self.assertEqual(exp_from_dict.to_dict(), demo_dict)
def test_interaction_with_none_id_is_not_terminal(self):
"""Test that an interaction with an id of None leads to is_terminal
being false.
"""
# Default exploration has a default interaction with an ID of None.
demo = exp_domain.Exploration.create_default_exploration('0')
init_state = demo.states[feconf.DEFAULT_INIT_STATE_NAME]
self.assertFalse(init_state.interaction.is_terminal)
class StateExportUnitTests(test_utils.GenericTestBase):
"""Test export of states."""
def test_export_state_to_dict(self):
"""Test exporting a state to a dict."""
exploration = exp_domain.Exploration.create_default_exploration(
'exp_id')
exploration.add_states(['New state'])
state_dict = exploration.states['New state'].to_dict()
expected_dict = {
'classifier_model_id': None,
'content': {
'html': '',
'audio_translations': {}
},
'interaction': {
'answer_groups': [],
'confirmed_unclassified_answers': [],
'customization_args': {},
'default_outcome': {
'dest': 'New state',
'feedback': {
'html': '',
'audio_translations': {}
},
'labelled_as_correct': False,
'param_changes': [],
'refresher_exploration_id': None,
},
'hints': [],
'id': None,
'solution': None,
},
'param_changes': [],
}
self.assertEqual(expected_dict, state_dict)
class YamlCreationUnitTests(test_utils.GenericTestBase):
"""Test creation of explorations from YAML files."""
EXP_ID = 'An exploration_id'
def test_yaml_import_and_export(self):
"""Test the from_yaml() and to_yaml() methods."""
exploration = exp_domain.Exploration.create_default_exploration(
self.EXP_ID, title='Title', category='Category')
exploration.add_states(['New state'])
self.assertEqual(len(exploration.states), 2)
exploration.validate()
yaml_content = exploration.to_yaml()
self.assertEqual(yaml_content, SAMPLE_YAML_CONTENT)
exploration2 = exp_domain.Exploration.from_yaml('exp2', yaml_content)
self.assertEqual(len(exploration2.states), 2)
yaml_content_2 = exploration2.to_yaml()
self.assertEqual(yaml_content_2, yaml_content)
with self.assertRaises(Exception):
exp_domain.Exploration.from_yaml('exp3', 'No_initial_state_name')
with self.assertRaises(Exception):
exp_domain.Exploration.from_yaml(
'exp4', 'Invalid\ninit_state_name:\nMore stuff')
with self.assertRaises(Exception):
exp_domain.Exploration.from_yaml(
'exp4', 'State1:\n(\nInvalid yaml')
with self.assertRaisesRegexp(
Exception, 'Expected a YAML version >= 10, received: 9'
):
exp_domain.Exploration.from_yaml(
'exp4', SAMPLE_UNTITLED_YAML_CONTENT)
with self.assertRaisesRegexp(
Exception, 'Expected a YAML version <= 9'
):
exp_domain.Exploration.from_untitled_yaml(
'exp4', 'Title', 'Category', SAMPLE_YAML_CONTENT)
class SchemaMigrationMethodsUnitTests(test_utils.GenericTestBase):
"""Tests the presence of appropriate schema migration methods in the
Exploration domain object class.
"""
def test_correct_states_schema_conversion_methods_exist(self):
"""Test that the right states schema conversion methods exist."""
current_states_schema_version = (
feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION)
for version_num in range(current_states_schema_version):
self.assertTrue(hasattr(
exp_domain.Exploration,
'_convert_states_v%s_dict_to_v%s_dict' % (
version_num, version_num + 1)))
self.assertFalse(hasattr(
exp_domain.Exploration,
'_convert_states_v%s_dict_to_v%s_dict' % (
current_states_schema_version,
current_states_schema_version + 1)))
def test_correct_exploration_schema_conversion_methods_exist(self):
"""Test that the right exploration schema conversion methods exist."""
current_exp_schema_version = (
exp_domain.Exploration.CURRENT_EXP_SCHEMA_VERSION)
for version_num in range(1, current_exp_schema_version):
self.assertTrue(hasattr(
exp_domain.Exploration,
'_convert_v%s_dict_to_v%s_dict' % (
version_num, version_num + 1)))
self.assertFalse(hasattr(
exp_domain.Exploration,
'_convert_v%s_dict_to_v%s_dict' % (
current_exp_schema_version, current_exp_schema_version + 1)))
class SchemaMigrationUnitTests(test_utils.GenericTestBase):
"""Test migration methods for yaml content."""
YAML_CONTENT_V1 = ("""default_skin: conversation_v1
param_changes: []
param_specs: {}
schema_version: 1
states:
- content:
- type: text
value: ''
name: (untitled state)
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
- content:
- type: text
value: ''
name: New state
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
""")
YAML_CONTENT_V2 = ("""default_skin: conversation_v1
init_state_name: (untitled state)
param_changes: []
param_specs: {}
schema_version: 2
states:
(untitled state):
content:
- type: text
value: ''
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
New state:
content:
- type: text
value: ''
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
""")
YAML_CONTENT_V3 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 3
skill_tags: []
states:
(untitled state):
content:
- type: text
value: ''
param_changes: []
widget:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
New state:
content:
- type: text
value: ''
param_changes: []
widget:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
""")
YAML_CONTENT_V4 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 4
skill_tags: []
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: TextInput
param_changes: []
""")
YAML_CONTENT_V5 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 5
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: TextInput
param_changes: []
tags: []
""")
YAML_CONTENT_V6 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 6
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
customization_args:
recommendedExplorationIds:
value: []
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: EndExploration
triggers: []
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
states_schema_version: 3
tags: []
""")
YAML_CONTENT_V7 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 7
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
id: EndExploration
triggers: []
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
states_schema_version: 4
tags: []
""")
YAML_CONTENT_V8 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 8
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
id: EndExploration
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
states_schema_version: 5
tags: []
""")
YAML_CONTENT_V9 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 9
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
id: EndExploration
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
states_schema_version: 6
tags: []
""")
YAML_CONTENT_V10 = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 10
skin_customizations:
panels_contents:
bottom: []
states:
(untitled state):
content:
- type: text
value: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
id: EndExploration
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
states_schema_version: 7
tags: []
title: Title
""")
YAML_CONTENT_V11 = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 11
skin_customizations:
panels_contents:
bottom: []
states:
(untitled state):
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
END:
classifier_model_id: null
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
id: EndExploration
param_changes: []
New state:
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
states_schema_version: 8
tags: []
title: Title
""")
YAML_CONTENT_V12 = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 12
skin_customizations:
panels_contents:
bottom: []
states:
(untitled state):
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups:
- correct: false
outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
END:
classifier_model_id: null
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
id: EndExploration
param_changes: []
New state:
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
states_schema_version: 9
tags: []
title: Title
""")
YAML_CONTENT_V13 = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 13
skin_customizations:
panels_contents:
bottom: []
states:
(untitled state):
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups:
- correct: false
outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
hints: []
id: TextInput
solution: {}
param_changes: []
END:
classifier_model_id: null
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
hints: []
id: EndExploration
solution: {}
param_changes: []
New state:
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
hints: []
id: TextInput
solution: {}
param_changes: []
states_schema_version: 10
tags: []
title: Title
""")
YAML_CONTENT_V14 = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 14
skin_customizations:
panels_contents:
bottom: []
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: []
html: ''
interaction:
answer_groups:
- correct: false
outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
hints: []
id: TextInput
solution: {}
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: []
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
hints: []
id: EndExploration
solution: {}
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: []
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
hints: []
id: TextInput
solution: {}
param_changes: []
states_schema_version: 11
tags: []
title: Title
""")
YAML_CONTENT_V15 = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 15
skin_customizations:
panels_contents:
bottom: []
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- correct: false
outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
hints: []
id: TextInput
solution: {}
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
hints: []
id: EndExploration
solution: {}
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
hints: []
id: TextInput
solution: {}
param_changes: []
states_schema_version: 12
tags: []
title: Title
""")
YAML_CONTENT_V16 = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 16
skin_customizations:
panels_contents:
bottom: []
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- correct: false
outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 13
tags: []
title: Title
""")
YAML_CONTENT_V17 = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 17
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- correct: false
outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 13
tags: []
title: Title
""")
YAML_CONTENT_V18 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 18
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- correct: false
outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 13
tags: []
title: Title
""")
YAML_CONTENT_V19 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 19
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- correct: false
outcome:
dest: END
feedback:
audio_translations: {}
html: Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
audio_translations: {}
html: ''
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
audio_translations: {}
html: ''
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 14
tags: []
title: Title
""")
YAML_CONTENT_V20 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 20
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- labelled_as_correct: false
outcome:
dest: END
feedback:
audio_translations: {}
html: Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
audio_translations: {}
html: ''
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
audio_translations: {}
html: ''
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 15
tags: []
title: Title
""")
YAML_CONTENT_V21 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 21
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- labelled_as_correct: false
outcome:
dest: END
feedback:
audio_translations: {}
html: Correct!
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
audio_translations: {}
html: ''
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
audio_translations: {}
html: ''
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 16
tags: []
title: Title
""")
YAML_CONTENT_V22 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 22
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
audio_translations: {}
html: Correct!
labelled_as_correct: false
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
audio_translations: {}
html: ''
labelled_as_correct: false
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
audio_translations: {}
html: ''
labelled_as_correct: false
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 17
tags: []
title: Title
""")
_LATEST_YAML_CONTENT = YAML_CONTENT_V22
def test_load_from_v1(self):
"""Test direct loading from a v1 yaml file."""
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V1)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v2(self):
"""Test direct loading from a v2 yaml file."""
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V2)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v3(self):
"""Test direct loading from a v3 yaml file."""
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V3)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v4(self):
"""Test direct loading from a v4 yaml file."""
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V4)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v5(self):
"""Test direct loading from a v5 yaml file."""
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V5)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v6(self):
"""Test direct loading from a v6 yaml file."""
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V6)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v7(self):
"""Test direct loading from a v7 yaml file."""
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V7)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v8(self):
"""Test direct loading from a v8 yaml file."""
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V8)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v9(self):
"""Test direct loading from a v9 yaml file."""
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V9)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v10(self):
"""Test direct loading from a v10 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V10)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v11(self):
"""Test direct loading from a v11 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V11)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v12(self):
"""Test direct loading from a v12 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V12)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v13(self):
"""Test direct loading from a v13 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V13)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v14(self):
"""Test direct loading from a v14 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V14)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v15(self):
"""Test direct loading from a v15 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V15)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v16(self):
"""Test direct loading from a v16 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V16)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v17(self):
"""Test direct loading from a v17 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V17)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v18(self):
"""Test direct loading from a v18 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V18)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v19(self):
"""Test direct loading from a v19 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V19)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v20(self):
"""Test direct loading from a v20 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V20)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v21(self):
"""Test direct loading from a v21 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V21)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v22(self):
"""Test direct loading from a v22 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V22)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
class ConversionUnitTests(test_utils.GenericTestBase):
"""Test conversion methods."""
def test_convert_exploration_to_player_dict(self):
exp_title = 'Title'
second_state_name = 'first state'
exploration = exp_domain.Exploration.create_default_exploration(
'eid', title=exp_title, category='Category')
exploration.add_states([second_state_name])
def _get_default_state_dict(content_str, dest_name):
return {
'classifier_model_id': None,
'content': {
'audio_translations': {},
'html': content_str,
},
'interaction': {
'answer_groups': [],
'confirmed_unclassified_answers': [],
'customization_args': {},
'default_outcome': {
'dest': dest_name,
'feedback': copy.deepcopy(
exp_domain.
SubtitledHtml.DEFAULT_SUBTITLED_HTML_DICT),
'labelled_as_correct': False,
'param_changes': [],
'refresher_exploration_id': None,
},
'hints': [],
'id': None,
'solution': None,
},
'param_changes': [],
}
self.assertEqual(exploration.to_player_dict(), {
'init_state_name': feconf.DEFAULT_INIT_STATE_NAME,
'title': exp_title,
'states': {
feconf.DEFAULT_INIT_STATE_NAME: _get_default_state_dict(
feconf.DEFAULT_INIT_STATE_CONTENT_STR,
feconf.DEFAULT_INIT_STATE_NAME),
second_state_name: _get_default_state_dict(
'', second_state_name),
},
'param_changes': [],
'param_specs': {},
'language_code': 'en',
'correctness_feedback_enabled': False,
})
class StateOperationsUnitTests(test_utils.GenericTestBase):
"""Test methods operating on states."""
def test_can_undergo_classification(self):
"""Test the can_undergo_classification() function."""
exploration_id = 'eid'
test_exp_filepath = os.path.join(
feconf.TESTS_DATA_DIR, 'string_classifier_test.yaml')
yaml_content = utils.get_file_contents(test_exp_filepath)
assets_list = []
exp_services.save_new_exploration_from_yaml_and_assets(
feconf.SYSTEM_COMMITTER_ID, yaml_content, exploration_id,
assets_list)
exploration = exp_services.get_exploration_by_id(exploration_id)
state_with_training_data = exploration.states['Home']
state_without_training_data = exploration.states['End']
# A state with 786 training examples.
self.assertTrue(
state_with_training_data.can_undergo_classification())
# A state with no training examples.
self.assertFalse(
state_without_training_data.can_undergo_classification())
def test_get_training_data(self):
"""Test retrieval of training data."""
exploration_id = 'eid'
test_exp_filepath = os.path.join(
feconf.SAMPLE_EXPLORATIONS_DIR, 'classifier_demo_exploration.yaml')
yaml_content = utils.get_file_contents(test_exp_filepath)
assets_list = []
exp_services.save_new_exploration_from_yaml_and_assets(
feconf.SYSTEM_COMMITTER_ID, yaml_content, exploration_id,
assets_list)
exploration = exp_services.get_exploration_by_id(exploration_id)
state = exploration.states['text']
expected_training_data = [{
'answer_group_index': 1,
'answers': [u'cheerful', u'merry', u'ecstatic', u'glad',
u'overjoyed', u'pleased', u'thrilled', u'smile']}]
observed_training_data = state.get_training_data()
self.assertEqual(observed_training_data, expected_training_data)
def test_delete_state(self):
"""Test deletion of states."""
exploration = exp_domain.Exploration.create_default_exploration('eid')
exploration.add_states(['first state'])
with self.assertRaisesRegexp(
ValueError, 'Cannot delete initial state'
):
exploration.delete_state(exploration.init_state_name)
exploration.add_states(['second state'])
exploration.delete_state('second state')
with self.assertRaisesRegexp(ValueError, 'fake state does not exist'):
exploration.delete_state('fake state')
def test_state_operations(self):
"""Test adding, updating and checking existence of states."""
exploration = exp_domain.Exploration.create_default_exploration('eid')
self.assertNotIn('invalid_state_name', exploration.states)
self.assertEqual(len(exploration.states), 1)
default_state_name = exploration.init_state_name
exploration.rename_state(default_state_name, 'Renamed state')
self.assertEqual(len(exploration.states), 1)
self.assertEqual(exploration.init_state_name, 'Renamed state')
# Add a new state.
exploration.add_states(['State 2'])
self.assertEqual(len(exploration.states), 2)
# It is OK to rename a state to the same name.
exploration.rename_state('State 2', 'State 2')
# But it is not OK to add or rename a state using a name that already
# exists.
with self.assertRaisesRegexp(ValueError, 'Duplicate state name'):
exploration.add_states(['State 2'])
with self.assertRaisesRegexp(ValueError, 'Duplicate state name'):
exploration.rename_state('State 2', 'Renamed state')
# And it is OK to rename a state to 'END' (the old terminal pseudostate).
# This is exercised throughout this test because a lot of old behavior used
# to be specific to states named 'END'; these checks validate that this is
# no longer the case.
exploration.rename_state('State 2', 'END')
# Should successfully be able to name it back.
exploration.rename_state('END', 'State 2')
# The exploration now has exactly two states.
self.assertNotIn(default_state_name, exploration.states)
self.assertIn('Renamed state', exploration.states)
self.assertIn('State 2', exploration.states)
# Can successfully add 'END' state
exploration.add_states(['END'])
# Should fail to rename like any other state
with self.assertRaisesRegexp(ValueError, 'Duplicate state name'):
exploration.rename_state('State 2', 'END')
# Ensure the other states are connected to END
exploration.states[
'Renamed state'].interaction.default_outcome.dest = 'State 2'
exploration.states['State 2'].interaction.default_outcome.dest = 'END'
# Ensure the other states have interactions
exploration.states['Renamed state'].update_interaction_id('TextInput')
exploration.states['State 2'].update_interaction_id('TextInput')
# Other miscellaneous requirements for validation
exploration.title = 'Title'
exploration.category = 'Category'
exploration.objective = 'Objective'
# The exploration should NOT be terminable even though it has a state
# called 'END' and everything else is connected to it.
with self.assertRaises(Exception):
exploration.validate(strict=True)
# Renaming the node to something other than 'END' and giving it an
# EndExploration is enough to validate it, though it cannot have a
# default outcome or answer groups.
exploration.rename_state('END', 'AnotherEnd')
another_end_state = exploration.states['AnotherEnd']
another_end_state.update_interaction_id('EndExploration')
another_end_state.interaction.default_outcome = None
exploration.validate(strict=True)
# Name it back for final tests
exploration.rename_state('AnotherEnd', 'END')
# Should be able to successfully delete it
exploration.delete_state('END')
self.assertNotIn('END', exploration.states)
class StateIdMappingTests(test_utils.GenericTestBase):
"""Tests for StateIdMapping domain class."""
EXP_ID = 'eid'
def setUp(self):
"""Initialize owner and store default exploration before each test case.
"""
super(StateIdMappingTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
# Create a default exploration.
with self.swap(feconf, 'ENABLE_STATE_ID_MAPPING', True):
self.exploration = self.save_new_valid_exploration(
self.EXP_ID, self.owner_id)
self.mapping = exp_services.get_state_id_mapping(
self.EXP_ID, self.exploration.version)
def test_that_correct_mapping_is_stored_for_new_exp(self):
"""Test that initial state id mapping is correct."""
expected_mapping = {
self.exploration.init_state_name: 0
}
self.assertEqual(self.mapping.exploration_id, self.EXP_ID)
self.assertEqual(self.mapping.exploration_version, 1)
self.assertEqual(
self.mapping.largest_state_id_used, 0)
self.assertDictEqual(self.mapping.state_names_to_ids, expected_mapping)
def test_that_mapping_remains_same_when_exp_params_changes(self):
"""Test that state id mapping is unchanged when exploration params are
changed."""
with self.swap(feconf, 'ENABLE_STATE_ID_MAPPING', True):
exp_services.update_exploration(
self.owner_id, self.EXP_ID, [{
'cmd': 'edit_exploration_property',
'property_name': 'title',
'new_value': 'New title'
}], 'Changes.')
new_exploration = exp_services.get_exploration_by_id(self.EXP_ID)
new_mapping = exp_services.get_state_id_mapping(
self.EXP_ID, new_exploration.version)
expected_mapping = {
new_exploration.init_state_name: 0
}
self.assertEqual(
new_mapping.exploration_version, new_exploration.version)
self.assertEqual(new_mapping.state_names_to_ids, expected_mapping)
self.assertEqual(new_mapping.largest_state_id_used, 0)
def test_that_mapping_is_correct_when_new_state_is_added(self):
"""Test that new state id is added in state id mapping when new state is
added in exploration."""
with self.swap(feconf, 'ENABLE_STATE_ID_MAPPING', True):
exp_services.update_exploration(
self.owner_id, self.EXP_ID, [{
'cmd': exp_domain.CMD_ADD_STATE,
'state_name': 'new state',
}], 'Add state name')
new_exploration = exp_services.get_exploration_by_id(self.EXP_ID)
new_mapping = exp_services.get_state_id_mapping(
self.EXP_ID, new_exploration.version)
expected_mapping = {
new_exploration.init_state_name: 0,
'new state': 1
}
self.assertEqual(
new_mapping.exploration_version, new_exploration.version)
self.assertEqual(new_mapping.state_names_to_ids, expected_mapping)
self.assertEqual(new_mapping.largest_state_id_used, 1)
def test_that_mapping_is_correct_when_old_state_is_deleted(self):
"""Test that state id is removed from state id mapping when the
state is removed from exploration."""
with self.swap(feconf, 'ENABLE_STATE_ID_MAPPING', True):
exp_services.update_exploration(
self.owner_id, self.EXP_ID, [{
'cmd': exp_domain.CMD_ADD_STATE,
'state_name': 'new state',
}], 'Add state name')
exp_services.update_exploration(self.owner_id, self.EXP_ID, [{
'cmd': exp_domain.CMD_DELETE_STATE,
'state_name': 'new state',
}], 'delete state')
new_exploration = exp_services.get_exploration_by_id(self.EXP_ID)
new_mapping = exp_services.get_state_id_mapping(
self.EXP_ID, new_exploration.version)
expected_mapping = {
new_exploration.init_state_name: 0
}
self.assertEqual(
new_mapping.exploration_version, new_exploration.version)
self.assertEqual(new_mapping.state_names_to_ids, expected_mapping)
self.assertEqual(new_mapping.largest_state_id_used, 1)
def test_that_mapping_remains_when_state_is_renamed(self):
"""Test that state id mapping is changed accordingly when a state
is renamed in exploration."""
with self.swap(feconf, 'ENABLE_STATE_ID_MAPPING', True):
exp_services.update_exploration(
self.owner_id, self.EXP_ID, [{
'cmd': exp_domain.CMD_ADD_STATE,
'state_name': 'new state',
}], 'Add state name')
exp_services.update_exploration(self.owner_id, self.EXP_ID, [{
'cmd': exp_domain.CMD_RENAME_STATE,
'old_state_name': 'new state',
'new_state_name': 'state',
}], 'Change state name')
new_exploration = exp_services.get_exploration_by_id(self.EXP_ID)
new_mapping = exp_services.get_state_id_mapping(
self.EXP_ID, new_exploration.version)
expected_mapping = {
new_exploration.init_state_name: 0,
'state': 1
}
self.assertEqual(
new_mapping.exploration_version, new_exploration.version)
self.assertEqual(new_mapping.state_names_to_ids, expected_mapping)
self.assertEqual(new_mapping.largest_state_id_used, 1)
def test_that_mapping_is_changed_when_interaction_id_is_changed(self):
"""Test that state id mapping is changed accordingly when interaction
id of state is changed."""
with self.swap(feconf, 'ENABLE_STATE_ID_MAPPING', True):
exp_services.update_exploration(
self.owner_id, self.EXP_ID, [{
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': self.exploration.init_state_name,
'property_name': exp_domain.STATE_PROPERTY_INTERACTION_ID,
'new_value': 'MultipleChoiceInput'
}], 'Update interaction.')
new_exploration = exp_services.get_exploration_by_id(self.EXP_ID)
new_mapping = exp_services.get_state_id_mapping(
self.EXP_ID, new_exploration.version)
expected_mapping = {
new_exploration.init_state_name: 1,
}
self.assertEqual(
new_mapping.exploration_version, new_exploration.version)
self.assertEqual(new_mapping.state_names_to_ids, expected_mapping)
self.assertEqual(new_mapping.largest_state_id_used, 1)
def test_that_mapping_is_correct_for_series_of_changes(self):
"""Test that state id mapping is changed accordingly for series
of add, rename, remove and update state changes."""
with self.swap(feconf, 'ENABLE_STATE_ID_MAPPING', True):
exp_services.update_exploration(
self.owner_id, self.EXP_ID, [{
'cmd': exp_domain.CMD_ADD_STATE,
'state_name': 'new state',
}, {
'cmd': exp_domain.CMD_RENAME_STATE,
'old_state_name': 'new state',
'new_state_name': 'state'
}, {
'cmd': exp_domain.CMD_ADD_STATE,
'state_name': 'extra state'
}, {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'state',
'property_name': exp_domain.STATE_PROPERTY_INTERACTION_ID,
'new_value': 'MultipleChoiceInput'
}, {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'extra state',
'property_name': exp_domain.STATE_PROPERTY_INTERACTION_ID,
'new_value': 'TextInput'
}, {
'cmd': exp_domain.CMD_ADD_STATE,
'state_name': 'new state',
}, {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'new state',
'property_name': exp_domain.STATE_PROPERTY_INTERACTION_ID,
'new_value': 'TextInput'
}], 'Heavy changes')
new_exploration = exp_services.get_exploration_by_id(self.EXP_ID)
new_mapping = exp_services.get_state_id_mapping(
self.EXP_ID, new_exploration.version)
expected_mapping = {
new_exploration.init_state_name: 0,
'extra state': 1,
'new state': 2,
'state': 3,
}
self.assertEqual(
new_mapping.exploration_version, new_exploration.version)
self.assertEqual(new_mapping.state_names_to_ids, expected_mapping)
self.assertEqual(new_mapping.largest_state_id_used, 3)
with self.swap(feconf, 'ENABLE_STATE_ID_MAPPING', True):
exp_services.update_exploration(
self.owner_id, self.EXP_ID, [{
'cmd': exp_domain.CMD_DELETE_STATE,
'state_name': 'state',
}, {
'cmd': exp_domain.CMD_RENAME_STATE,
'old_state_name': 'extra state',
'new_state_name': 'state'
}, {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'state',
'property_name': exp_domain.STATE_PROPERTY_INTERACTION_ID,
'new_value': 'MultipleChoiceInput'
}, {
'cmd': exp_domain.CMD_ADD_STATE,
'state_name': 'extra state'
}, {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': 'extra state',
'property_name': exp_domain.STATE_PROPERTY_INTERACTION_ID,
'new_value': 'TextInput'
}, {
'cmd': exp_domain.CMD_RENAME_STATE,
'old_state_name': 'new state',
'new_state_name': 'other state'
}], 'Heavy changes 2')
new_exploration = exp_services.get_exploration_by_id(self.EXP_ID)
new_mapping = exp_services.get_state_id_mapping(
self.EXP_ID, new_exploration.version)
expected_mapping = {
new_exploration.init_state_name: 0,
'other state': 2,
'extra state': 4,
'state': 5
}
self.assertEqual(
new_mapping.exploration_version, new_exploration.version)
self.assertEqual(new_mapping.state_names_to_ids, expected_mapping)
self.assertEqual(new_mapping.largest_state_id_used, 5)
| 30.089031
| 80
| 0.602847
|
073a3a60d65039749141c86d3900c24c9d0654e6
| 15,316
|
py
|
Python
|
tensorflow/python/framework/sparse_tensor.py
|
Faagerholm/tensorflow
|
98e30b8748eb018f33836ac9269db67ab60483ab
|
[
"Apache-2.0"
] | 4
|
2021-02-01T01:01:11.000Z
|
2021-07-21T15:22:20.000Z
|
tensorflow/python/framework/sparse_tensor.py
|
Faagerholm/tensorflow
|
98e30b8748eb018f33836ac9269db67ab60483ab
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/framework/sparse_tensor.py
|
Faagerholm/tensorflow
|
98e30b8748eb018f33836ac9269db67ab60483ab
|
[
"Apache-2.0"
] | 5
|
2020-12-22T10:17:47.000Z
|
2021-05-06T14:14:52.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sparse tensors."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_like
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.util.tf_export import tf_export
# pylint: disable=protected-access
_TensorLike = tensor_like._TensorLike
_eval_using_default_session = ops._eval_using_default_session
_override_helper = ops._override_helper
# pylint: enable=protected-access
@tf_export("sparse.SparseTensor", "SparseTensor")
class SparseTensor(_TensorLike, composite_tensor.CompositeTensor):
"""Represents a sparse tensor.
TensorFlow represents a sparse tensor as three separate dense tensors:
`indices`, `values`, and `dense_shape`. In Python, the three tensors are
collected into a `SparseTensor` class for ease of use. If you have separate
`indices`, `values`, and `dense_shape` tensors, wrap them in a `SparseTensor`
object before passing to the ops below.
Concretely, the sparse tensor `SparseTensor(indices, values, dense_shape)`
comprises the following components, where `N` and `ndims` are the number
of values and number of dimensions in the `SparseTensor`, respectively:
* `indices`: A 2-D int64 tensor of dense_shape `[N, ndims]`, which specifies
the indices of the elements in the sparse tensor that contain nonzero
values (elements are zero-indexed). For example, `indices=[[1,3], [2,4]]`
specifies that the elements with indexes of [1,3] and [2,4] have
nonzero values.
* `values`: A 1-D tensor of any type and dense_shape `[N]`, which supplies the
values for each element in `indices`. For example, given
`indices=[[1,3], [2,4]]`, the parameter `values=[18, 3.6]` specifies
that element [1,3] of the sparse tensor has a value of 18, and element
[2,4] of the tensor has a value of 3.6.
* `dense_shape`: A 1-D int64 tensor of dense_shape `[ndims]`, which specifies
the dense_shape of the sparse tensor. Takes a list indicating the number of
elements in each dimension. For example, `dense_shape=[3,6]` specifies a
two-dimensional 3x6 tensor, `dense_shape=[2,3,4]` specifies a
three-dimensional 2x3x4 tensor, and `dense_shape=[9]` specifies a
one-dimensional tensor with 9 elements.
The corresponding dense tensor satisfies:
```python
dense.shape = dense_shape
dense[tuple(indices[i])] = values[i]
```
By convention, `indices` should be sorted in row-major order (or equivalently
lexicographic order on the tuples `indices[i]`). This is not enforced when
`SparseTensor` objects are constructed, but most ops assume correct ordering.
If the ordering of sparse tensor `st` is wrong, a fixed version can be
obtained by calling `tf.sparse.reorder(st)`.
Example: The sparse tensor
```python
SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
```
represents the dense tensor
```python
[[1, 0, 0, 0]
[0, 0, 2, 0]
[0, 0, 0, 0]]
```
"""
@classmethod
def from_value(cls, sparse_tensor_value):
if not is_sparse(sparse_tensor_value):
raise TypeError("Neither a SparseTensor nor SparseTensorValue: %s." %
sparse_tensor_value)
return SparseTensor(
indices=sparse_tensor_value.indices,
values=sparse_tensor_value.values,
dense_shape=sparse_tensor_value.dense_shape)
def __init__(self, indices, values, dense_shape):
"""Creates a `SparseTensor`.
Args:
indices: A 2-D int64 tensor of shape `[N, ndims]`.
values: A 1-D tensor of any type and shape `[N]`.
dense_shape: A 1-D int64 tensor of shape `[ndims]`.
"""
with ops.name_scope(None, "SparseTensor", [indices, values, dense_shape]):
indices = ops.convert_to_tensor(
indices, name="indices", dtype=dtypes.int64)
# TODO(touts): Consider adding mutable_values() when 'values'
# is a VariableOp and updating users of SparseTensor.
values = ops.internal_convert_to_tensor(values, name="values")
dense_shape = ops.convert_to_tensor(
dense_shape, name="dense_shape", dtype=dtypes.int64)
self._indices = indices
self._values = values
self._dense_shape = dense_shape
indices_shape = indices.shape.with_rank(2)
values_shape = values.shape.with_rank(1)
dense_shape_shape = dense_shape.shape.with_rank(1)
# Assert that the number of rows in indices matches the number of elements
# in values.
indices_shape.dims[0].merge_with(values_shape.dims[0])
# Assert number of columns in indices matches the number of elements in
# dense_shape.
indices_shape.dims[1].merge_with(dense_shape_shape.dims[0])
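# Illustrative note (not part of the original module): the two merge_with
# calls above enforce the consistency described in the class docstring. For
# example, indices=[[0, 0], [1, 2]] (shape [2, 2]) requires values of shape
# [2] and dense_shape of shape [2]; a values tensor of shape [3] would fail
# the first merge_with check at construction time.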
def get_shape(self):
"""Get the `TensorShape` representing the shape of the dense tensor.
Returns:
A `TensorShape` object.
"""
return tensor_util.constant_value_as_shape(self._dense_shape)
@property
def indices(self):
"""The indices of non-zero values in the represented dense tensor.
Returns:
A 2-D Tensor of int64 with dense_shape `[N, ndims]`, where `N` is the
number of non-zero values in the tensor, and `ndims` is the rank.
"""
return self._indices
@property
def values(self):
"""The non-zero values in the represented dense tensor.
Returns:
A 1-D Tensor of any data type.
"""
return self._values
@property
def op(self):
"""The `Operation` that produces `values` as an output."""
return self._values.op
@property
def dtype(self):
"""The `DType` of elements in this tensor."""
return self._values.dtype
@property
def dense_shape(self):
"""A 1-D Tensor of int64 representing the shape of the dense tensor."""
return self._dense_shape
@property
def shape(self):
"""Get the `TensorShape` representing the shape of the dense tensor.
Returns:
A `TensorShape` object.
"""
return tensor_util.constant_value_as_shape(self._dense_shape)
@property
def graph(self):
"""The `Graph` that contains the index, value, and dense_shape tensors."""
return self._indices.graph
def __str__(self):
return "SparseTensor(indices=%s, values=%s, dense_shape=%s)" % (
self._indices, self._values, self._dense_shape)
def eval(self, feed_dict=None, session=None):
"""Evaluates this sparse tensor in a `Session`.
Calling this method will execute all preceding operations that
produce the inputs needed for the operation that produces this
tensor.
*N.B.* Before invoking `SparseTensor.eval()`, its graph must have been
launched in a session, and either a default session must be
available, or `session` must be specified explicitly.
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values. See
`tf.Session.run` for a description of the valid feed values.
session: (Optional.) The `Session` to be used to evaluate this sparse
tensor. If none, the default session will be used.
Returns:
A `SparseTensorValue` object.
"""
indices, values, dense_shape = _eval_using_default_session(
[self.indices, self.values, self.dense_shape], feed_dict, self.graph,
session)
return SparseTensorValue(indices, values, dense_shape)
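# Illustrative usage sketch (not from the original module), assuming the
# TF1-style tf.compat.v1.Session API described in the docstring above:
#
#   with tf.compat.v1.Session():
#     sp = SparseTensor(indices=[[0, 0]], values=[1], dense_shape=[2, 2])
#     sp_value = sp.eval()   # returns a SparseTensorValue namedtuple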
@staticmethod
def _override_operator(operator, func):
_override_helper(SparseTensor, operator, func)
@property
def _type_spec(self):
return SparseTensorSpec(self.shape, self.dtype)
def _shape_invariant_to_type_spec(self, shape):
# From the tf.while_loop docs: "If a loop variable is a SparseTensor, the
# shape invariant must be TensorShape([r]) where r is the rank of the dense
# tensor represented by the sparse tensor. It means the shapes of the three
# tensors of the SparseTensor are ([None], [None, r], [r]). NOTE: The shape
# invariant here is the shape of the SparseTensor.dense_shape property. It
# must be the shape of a vector.
if shape.ndims is not None and shape.ndims != 1:
raise ValueError("Expected a shape with 1 dimension")
rank = tensor_shape.dimension_value(shape[0])
return SparseTensorSpec(tensor_shape.unknown_shape(rank), self.dtype)
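# Illustrative note (not part of the original module): per the tf.while_loop
# convention quoted above, a SparseTensor loop variable whose dense tensor
# has rank r takes a vector shape invariant. A hedged sketch:
#
#   tf.while_loop(cond, body, [sp_tensor],
#                 shape_invariants=[tf.TensorShape([2])])  # r == 2 here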
def consumers(self):
return self._consumers()
SparseTensorValue = collections.namedtuple("SparseTensorValue",
["indices", "values", "dense_shape"])
tf_export(v1=["SparseTensorValue"])(SparseTensorValue)
pywrap_tensorflow.RegisterType("SparseTensorValue", SparseTensorValue)
# TODO(b/133606651) Export this as tf.SparseTensorSpec.
class SparseTensorSpec(type_spec.BatchableTypeSpec):
"""Type specification for a `tf.SparseTensor`."""
__slots__ = ["_dense_shape", "_dtype"]
value_type = property(lambda self: SparseTensor)
def __init__(self, dense_shape=None, dtype=dtypes.float32):
"""Constructs a type specification for a `tf.SparseTensor`.
Args:
dense_shape: The dense shape of the `SparseTensor`, or `None` to allow
any dense shape.
dtype: `tf.DType` of values in the `SparseTensor`.
"""
self._dense_shape = tensor_shape.as_shape(dense_shape)
self._dtype = dtypes.as_dtype(dtype)
def _serialize(self):
return (self._dense_shape, self._dtype)
@property
def _component_specs(self):
rank = self._dense_shape.ndims
num_values = None
return [
tensor_spec.TensorSpec([num_values, rank], dtypes.int64),
tensor_spec.TensorSpec([num_values], self._dtype),
tensor_spec.TensorSpec([rank], dtypes.int64)]
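  # Editor's note, a concrete example of the specs above: for
  # SparseTensorSpec(dense_shape=[None, 10], dtype=tf.float32) the rank is 2,
  # so the component specs are TensorSpec([None, 2], tf.int64) for indices,
  # TensorSpec([None], tf.float32) for values, and TensorSpec([2], tf.int64)
  # for dense_shape.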
def _to_components(self, value):
if isinstance(value, SparseTensorValue):
value = SparseTensor.from_value(value)
return [value.indices, value.values, value.dense_shape]
def _from_components(self, tensor_list):
return SparseTensor(*tensor_list)
# The SparseTensorSpec tensor_list encoding uses (de)serialize_sparse ops
# to (un)box the component tensors in a way that allows for batching &
# unbatching.
@property
def _flat_tensor_specs(self):
# NOTE(mrry): The default flat shape of a boxed `SparseTensor` is `(3,)`,
# but a `SparseTensorSpec` can also represent a batch of boxed
# `SparseTensor` objects with shape `(..., 3)` (and batches of batches,
# etc.), so the flat shape must be unknown.
return [tensor_spec.TensorSpec(None, dtypes.variant)]
def _to_tensor_list(self, value):
value = SparseTensor.from_value(value)
return [gen_sparse_ops.serialize_sparse(
value.indices, value.values, value.dense_shape,
out_type=dtypes.variant)]
def _to_batched_tensor_list(self, value):
dense_shape = tensor_util.constant_value_as_shape(value.dense_shape)
if self._dense_shape.merge_with(dense_shape).ndims == 0:
raise ValueError(
"Unbatching a sparse tensor is only supported for rank >= 1")
return [gen_sparse_ops.serialize_many_sparse(
value.indices, value.values, value.dense_shape,
out_type=dtypes.variant)]
def _from_compatible_tensor_list(self, tensor_list):
tensor_list = gen_sparse_ops.deserialize_sparse(tensor_list[0], self._dtype)
result = SparseTensor(*tensor_list)
rank = self._dense_shape.ndims
result.indices.set_shape([None, rank])
result.dense_shape.set_shape([rank])
return result
def _batch(self, batch_size):
return SparseTensorSpec(
tensor_shape.TensorShape([batch_size]).concatenate(self._dense_shape),
self._dtype)
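  # Editor's note, illustrating _batch/_unbatch: batching
  # SparseTensorSpec([10], tf.float32) with batch_size=32 yields
  # SparseTensorSpec([32, 10], tf.float32), and unbatching that spec
  # recovers SparseTensorSpec([10], tf.float32).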
def _unbatch(self):
if self._dense_shape.ndims == 0:
raise ValueError("Unbatching a tensor is only supported for rank >= 1")
return SparseTensorSpec(self._dense_shape[1:], self._dtype)
def _to_legacy_output_types(self):
return self._dtype
def _to_legacy_output_shapes(self):
return self._dense_shape
def _to_legacy_output_classes(self):
return SparseTensor
@classmethod
def from_value(cls, value):
if isinstance(value, SparseTensor):
return cls(value.shape, value.dtype)
if isinstance(value, SparseTensorValue):
if isinstance(value.values, np.ndarray):
return cls(value.dense_shape, value.values.dtype)
else:
return cls.from_value(SparseTensor.from_value(value))
else:
raise TypeError("Expected SparseTensor or SparseTensorValue")
# TODO(b/133606651) Delete the SparseTensor registration when CompositeTensor
# is updated to define a _type_spec field (since registration will be
# automatic). Do *not* delete the SparseTensorValue registration.
type_spec.register_type_spec_from_value_converter(
SparseTensor, SparseTensorSpec.from_value)
type_spec.register_type_spec_from_value_converter(
SparseTensorValue, SparseTensorSpec.from_value)
@tf_export(v1=["convert_to_tensor_or_sparse_tensor"])
def convert_to_tensor_or_sparse_tensor(value, dtype=None, name=None):
"""Converts value to a `SparseTensor` or `Tensor`.
Args:
value: A `SparseTensor`, `SparseTensorValue`, or an object whose type has a
registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing, the type
is inferred from the type of `value`.
name: Optional name to use if a new `Tensor` is created.
Returns:
A `SparseTensor` or `Tensor` based on `value`.
Raises:
RuntimeError: If result type is incompatible with `dtype`.
"""
if dtype is not None:
dtype = dtypes.as_dtype(dtype)
if isinstance(value, SparseTensorValue):
value = SparseTensor.from_value(value)
if isinstance(value, SparseTensor):
if dtype and not dtype.is_compatible_with(value.dtype):
raise RuntimeError("Sparse dtype: requested = %s, actual = %s" %
(dtype.name, value.dtype.name))
return value
return ops.internal_convert_to_tensor(value, dtype=dtype, name=name)
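# Usage sketch (editor's addition, not part of the original module):
# convert_to_tensor_or_sparse_tensor(SparseTensorValue(...)) returns the
# equivalent SparseTensor, while passing a Python list or NumPy array falls
# through to the regular tensor conversion and returns a dense Tensor. If a
# SparseTensor is passed together with an incompatible `dtype`, a RuntimeError
# is raised rather than an implicit cast being performed.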
def is_sparse(x):
"""Check whether `x` is sparse.
Check whether an object is a `tf.SparseTensor` or
`tf.compat.v1.SparseTensorValue`.
Args:
x: A python object to check.
Returns:
`True` iff `x` is a `tf.SparseTensor` or `tf.compat.v1.SparseTensorValue`.
"""
return isinstance(x, (SparseTensor, SparseTensorValue))
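# Editor's note, a quick example of is_sparse: it returns True for both
# SparseTensor instances and legacy SparseTensorValue namedtuples, and False
# for everything else, including dense Tensors and ragged tensors.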
| 36.906024 | 80 | 0.717159 |
9bf61f81628046eaa73b588c7fdb26e08998dadc | 67,925 | py | Python |
Incident-Response/Tools/grr/grr/server/grr_response_server/databases/db_paths_test.py | sn0b4ll/Incident-Playbook | cf519f58fcd4255674662b3620ea97c1091c1efb | ["MIT"] | 1 | 2021-07-24T17:22:50.000Z | 2021-07-24T17:22:50.000Z |
Incident-Response/Tools/grr/grr/server/grr_response_server/databases/db_paths_test.py | sn0b4ll/Incident-Playbook | cf519f58fcd4255674662b3620ea97c1091c1efb | ["MIT"] | 2 | 2022-02-28T03:40:31.000Z | 2022-02-28T03:40:52.000Z |
Incident-Response/Tools/grr/grr/server/grr_response_server/databases/db_paths_test.py | sn0b4ll/Incident-Playbook | cf519f58fcd4255674662b3620ea97c1091c1efb | ["MIT"] | 2 | 2022-02-25T08:34:51.000Z | 2022-03-16T17:29:44.000Z |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import hashlib
from grr_response_core.lib import rdfvalue
from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
from grr_response_core.lib.rdfvalues import crypto as rdf_crypto
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_server.databases import db
from grr_response_server.databases import db_test_utils
from grr_response_server.rdfvalues import objects as rdf_objects
class DatabaseTestPathsMixin(object):
"""An abstract class for testing db.Database implementations.
This mixin adds methods to test the handling of GRR path data.
"""
def testWritePathInfosValidatesClientId(self):
path = ["usr", "local"]
with self.assertRaises(ValueError):
self.db.WritePathInfos("", [rdf_objects.PathInfo.OS(components=path)])
def testWritePathInfosValidatesPathType(self):
path = ["usr", "local"]
client_id = db_test_utils.InitializeClient(self.db)
with self.assertRaises(ValueError):
self.db.WritePathInfos(client_id, [rdf_objects.PathInfo(components=path)])
def testWritePathInfosValidatesClient(self):
client_id = "C.0123456789012345"
with self.assertRaises(db.UnknownClientError) as context:
self.db.WritePathInfos(
client_id, [rdf_objects.PathInfo.OS(components=[], directory=True)])
self.assertEqual(context.exception.client_id, client_id)
def testWritePathInfosValidateConflictingWrites(self):
client_id = db_test_utils.InitializeClient(self.db)
with self.assertRaises(ValueError):
self.db.WritePathInfos(client_id, [
rdf_objects.PathInfo.OS(components=["foo", "bar"], directory=False),
rdf_objects.PathInfo.OS(components=["foo", "bar"], directory=True),
])
def testWritePathInfosMetadata(self):
client_id = db_test_utils.InitializeClient(self.db)
self.db.WritePathInfos(
client_id,
[rdf_objects.PathInfo.TSK(components=["foo", "bar"], directory=True)])
results = self.db.ReadPathInfos(client_id,
rdf_objects.PathInfo.PathType.TSK,
[("foo", "bar")])
result_path_info = results[("foo", "bar")]
self.assertEqual(result_path_info.path_type,
rdf_objects.PathInfo.PathType.TSK)
self.assertEqual(result_path_info.components, ["foo", "bar"])
self.assertEqual(result_path_info.directory, True)
def testWritePathInfosMetadataTimestampUpdate(self):
now = self.db.Now
client_id = db_test_utils.InitializeClient(self.db)
timestamp_0 = now()
self.db.WritePathInfos(client_id,
[rdf_objects.PathInfo.OS(components=["foo"])])
result = self.db.ReadPathInfo(
client_id, rdf_objects.PathInfo.PathType.OS, components=("foo",))
self.assertEqual(result.components, ["foo"])
self.assertGreater(result.timestamp, timestamp_0)
self.assertLess(result.timestamp, now())
self.assertEqual(result.last_stat_entry_timestamp, None)
self.assertEqual(result.last_hash_entry_timestamp, None)
timestamp_1 = now()
stat_entry = rdf_client_fs.StatEntry(st_mode=42)
self.db.WritePathInfos(
client_id,
[rdf_objects.PathInfo.OS(components=["foo"], stat_entry=stat_entry)])
result = self.db.ReadPathInfo(
client_id, rdf_objects.PathInfo.PathType.OS, components=("foo",))
self.assertEqual(result.components, ["foo"])
self.assertEqual(result.stat_entry.st_mode, 42)
self.assertGreater(result.timestamp, timestamp_1)
self.assertLess(result.timestamp, now())
self.assertGreater(result.last_stat_entry_timestamp, timestamp_1)
self.assertLess(result.last_stat_entry_timestamp, now())
timestamp_2 = now()
hash_entry = rdf_crypto.Hash(sha256=b"foo")
self.db.WritePathInfos(
client_id,
[rdf_objects.PathInfo.OS(components=["foo"], hash_entry=hash_entry)])
result = self.db.ReadPathInfo(
client_id, rdf_objects.PathInfo.PathType.OS, components=("foo",))
self.assertEqual(result.components, ["foo"])
self.assertEqual(result.hash_entry.sha256, b"foo")
self.assertGreater(result.timestamp, timestamp_2)
self.assertLess(result.timestamp, now())
self.assertGreater(result.last_hash_entry_timestamp, timestamp_2)
self.assertLess(result.last_hash_entry_timestamp, now())
timestamp_3 = now()
self.db.WritePathInfos(
client_id,
[rdf_objects.PathInfo.OS(components=["foo"], directory=True)])
result = self.db.ReadPathInfo(
client_id, rdf_objects.PathInfo.PathType.OS, components=("foo",))
self.assertEqual(result.components, ["foo"])
self.assertEqual(result.stat_entry.st_mode, 42)
self.assertEqual(result.hash_entry.sha256, b"foo")
self.assertTrue(result.directory)
self.assertGreater(result.timestamp, timestamp_3)
self.assertLess(result.timestamp, now())
self.assertGreater(result.last_stat_entry_timestamp, timestamp_1)
self.assertLess(result.last_stat_entry_timestamp, timestamp_2)
self.assertGreater(result.last_hash_entry_timestamp, timestamp_2)
self.assertLess(result.last_hash_entry_timestamp, timestamp_3)
timestamp_4 = now()
path_info = rdf_objects.PathInfo.OS(components=["foo"])
path_info.stat_entry.st_mode = 108
path_info.hash_entry.sha256 = b"norf"
self.db.WritePathInfos(client_id, [path_info])
result = self.db.ReadPathInfo(
client_id, rdf_objects.PathInfo.PathType.OS, components=("foo",))
self.assertEqual(result.components, ["foo"])
self.assertEqual(result.stat_entry.st_mode, 108)
self.assertEqual(result.hash_entry.sha256, b"norf")
self.assertGreater(result.timestamp, timestamp_4)
self.assertGreater(result.last_stat_entry_timestamp, timestamp_4)
self.assertGreater(result.last_hash_entry_timestamp, timestamp_4)
self.assertLess(result.timestamp, now())
self.assertLess(result.last_stat_entry_timestamp, now())
self.assertLess(result.last_hash_entry_timestamp, now())
def testWritePathInfosStatEntry(self):
client_id = db_test_utils.InitializeClient(self.db)
stat_entry = rdf_client_fs.StatEntry()
stat_entry.pathspec.path = "foo/bar"
stat_entry.pathspec.pathtype = rdf_paths.PathSpec.PathType.OS
stat_entry.st_mode = 1337
stat_entry.st_mtime = 108
stat_entry.st_atime = 4815162342
path_info = rdf_objects.PathInfo.FromStatEntry(stat_entry)
self.db.WritePathInfos(client_id, [path_info])
results = self.db.ReadPathInfos(client_id, rdf_objects.PathInfo.PathType.OS,
[
(),
("foo",),
("foo", "bar"),
])
root_path_info = results[()]
self.assertFalse(root_path_info.HasField("stat_entry"))
foo_path_info = results[("foo",)]
self.assertFalse(foo_path_info.HasField("stat_entry"))
foobar_path_info = results[("foo", "bar")]
self.assertTrue(foobar_path_info.HasField("stat_entry"))
self.assertFalse(foobar_path_info.HasField("hash_entry"))
self.assertEqual(foobar_path_info.stat_entry.st_mode, 1337)
self.assertEqual(foobar_path_info.stat_entry.st_mtime, 108)
self.assertEqual(foobar_path_info.stat_entry.st_atime, 4815162342)
def testWritePathInfosHashEntry(self):
client_id = db_test_utils.InitializeClient(self.db)
hash_entry = rdf_crypto.Hash()
hash_entry.sha256 = hashlib.sha256(b"foo").digest()
hash_entry.md5 = hashlib.md5(b"foo").digest()
hash_entry.num_bytes = len(b"foo")
path_info = rdf_objects.PathInfo.OS(
components=["foo", "bar", "baz"], hash_entry=hash_entry)
self.db.WritePathInfos(client_id, [path_info])
result = self.db.ReadPathInfo(
client_id,
rdf_objects.PathInfo.PathType.OS,
components=("foo", "bar", "baz"))
self.assertEqual(result.components, ["foo", "bar", "baz"])
self.assertTrue(result.HasField("hash_entry"))
self.assertFalse(result.HasField("stat_entry"))
self.assertEqual(result.hash_entry.sha256, hashlib.sha256(b"foo").digest())
self.assertEqual(result.hash_entry.md5, hashlib.md5(b"foo").digest())
self.assertEqual(result.hash_entry.num_bytes, len(b"foo"))
def testWritePathInfosValidatesHashEntry(self):
client_id = db_test_utils.InitializeClient(self.db)
hash_entry = rdf_crypto.Hash()
hash_entry.md5 = hashlib.md5(b"foo").digest()
hash_entry.sha1 = hashlib.sha1(b"bar").digest()
path_info = rdf_objects.PathInfo.OS(
components=("foo", "bar", "baz"), hash_entry=hash_entry)
with self.assertRaises(ValueError):
self.db.WritePathInfos(client_id, [path_info])
def testWriteMultiplePathInfosHashEntry(self):
def SHA256(data: bytes) -> bytes:
return hashlib.sha256(data).digest()
def MD5(data: bytes) -> bytes:
return hashlib.md5(data).digest()
client_id = db_test_utils.InitializeClient(self.db)
files = {
"foo": b"4815162342",
"BAR": b"\xff\x00\xff",
"bAz": b"\x00" * 42,
"żółć": "Wpłynąłem na suchego przestwór oceanu".encode("utf-8"),
}
path_infos = []
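    # Note (editor's comment): the loops below re-bind `content` to the UTF-8
    # encoded file *name*, so the hash entries written and later verified are
    # derived from the names rather than from the byte strings declared in
    # `files` above.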
for name, content in files.items():
content = name.encode("utf-8")
hash_entry = rdf_crypto.Hash()
hash_entry.sha256 = SHA256(content)
hash_entry.md5 = MD5(content)
hash_entry.num_bytes = len(content)
path_infos.append(
rdf_objects.PathInfo.OS(
components=["foo", "bar", "baz", name], hash_entry=hash_entry))
self.db.WritePathInfos(client_id, path_infos)
for name, content in files.items():
content = name.encode("utf-8")
result = self.db.ReadPathInfo(
client_id,
rdf_objects.PathInfo.PathType.OS,
components=("foo", "bar", "baz", name))
self.assertEqual(result.components, ["foo", "bar", "baz", name])
self.assertTrue(result.HasField("hash_entry"))
self.assertFalse(result.HasField("stat_entry"))
self.assertEqual(result.hash_entry.sha256, SHA256(content))
self.assertEqual(result.hash_entry.md5, MD5(content))
self.assertEqual(result.hash_entry.num_bytes, len(content))
def testWritePathInfosHashAndStatEntry(self):
client_id = db_test_utils.InitializeClient(self.db)
stat_entry = rdf_client_fs.StatEntry(st_mode=1337)
hash_entry = rdf_crypto.Hash(sha256=hashlib.sha256(b"foo").digest())
path_info = rdf_objects.PathInfo.OS(
components=["foo", "bar", "baz"],
stat_entry=stat_entry,
hash_entry=hash_entry)
self.db.WritePathInfos(client_id, [path_info])
result = self.db.ReadPathInfo(
client_id,
rdf_objects.PathInfo.PathType.OS,
components=("foo", "bar", "baz"))
self.assertEqual(result.components, ["foo", "bar", "baz"])
self.assertTrue(result.HasField("stat_entry"))
self.assertTrue(result.HasField("hash_entry"))
self.assertEqual(result.stat_entry, stat_entry)
self.assertEqual(result.hash_entry, hash_entry)
def testWritePathInfoHashAndStatEntrySeparateWrites(self):
client_id = db_test_utils.InitializeClient(self.db)
stat_entry = rdf_client_fs.StatEntry(st_mode=1337)
stat_entry_path_info = rdf_objects.PathInfo.OS(
components=["foo"], stat_entry=stat_entry)
stat_entry_timestamp = self.db.Now()
self.db.WritePathInfos(client_id, [stat_entry_path_info])
hash_entry = rdf_crypto.Hash(sha256=hashlib.sha256(b"foo").digest())
hash_entry_path_info = rdf_objects.PathInfo.OS(
components=["foo"], hash_entry=hash_entry)
hash_entry_timestamp = self.db.Now()
self.db.WritePathInfos(client_id, [hash_entry_path_info])
result = self.db.ReadPathInfo(
client_id, rdf_objects.PathInfo.PathType.OS, components=("foo",))
now = self.db.Now()
self.assertEqual(result.components, ["foo"])
self.assertTrue(result.HasField("stat_entry"))
self.assertTrue(result.HasField("hash_entry"))
self.assertEqual(result.stat_entry, stat_entry)
self.assertEqual(result.hash_entry, hash_entry)
self.assertGreater(result.last_stat_entry_timestamp, stat_entry_timestamp)
self.assertLess(result.last_stat_entry_timestamp, hash_entry_timestamp)
self.assertGreater(result.last_hash_entry_timestamp, hash_entry_timestamp)
self.assertLess(result.last_hash_entry_timestamp, now)
def testWritePathInfosExpansion(self):
client_id = db_test_utils.InitializeClient(self.db)
self.db.WritePathInfos(client_id, [
rdf_objects.PathInfo.OS(components=["foo", "bar", "baz"]),
])
results = self.db.ReadPathInfos(client_id, rdf_objects.PathInfo.PathType.OS,
[
("foo",),
("foo", "bar"),
("foo", "bar", "baz"),
])
self.assertLen(results, 3)
foo = results[("foo",)]
self.assertEqual(foo.components, ["foo"])
self.assertTrue(foo.directory)
foobar = results[("foo", "bar")]
self.assertEqual(foobar.components, ["foo", "bar"])
self.assertTrue(foobar.directory)
foobarbaz = results[("foo", "bar", "baz")]
self.assertEqual(foobarbaz.components, ["foo", "bar", "baz"])
self.assertFalse(foobarbaz.directory)
def testWritePathInfosTypeSeparated(self):
client_id = db_test_utils.InitializeClient(self.db)
self.db.WritePathInfos(client_id, [
rdf_objects.PathInfo.OS(components=["foo"], directory=True),
rdf_objects.PathInfo.TSK(components=["foo"], directory=False),
])
os_results = self.db.ReadPathInfos(client_id,
rdf_objects.PathInfo.PathType.OS,
[("foo",)])
self.assertLen(os_results, 1)
self.assertTrue(os_results[("foo",)].directory)
tsk_results = self.db.ReadPathInfos(client_id,
rdf_objects.PathInfo.PathType.TSK,
[("foo",)])
self.assertLen(tsk_results, 1)
self.assertFalse(tsk_results[("foo",)].directory)
def testWritePathInfosUpdates(self):
client_id = db_test_utils.InitializeClient(self.db)
self.db.WritePathInfos(client_id, [
rdf_objects.PathInfo.OS(
components=["foo", "bar", "baz"], directory=False),
])
self.db.WritePathInfos(client_id, [
rdf_objects.PathInfo.OS(
components=["foo", "bar", "baz"], directory=True),
])
results = self.db.ReadPathInfos(client_id, rdf_objects.PathInfo.PathType.OS,
[("foo", "bar", "baz")])
result_path_info = results[("foo", "bar", "baz")]
self.assertTrue(result_path_info.directory)
def testWritePathInfosUpdatesAncestors(self):
client_id = db_test_utils.InitializeClient(self.db)
self.db.WritePathInfos(client_id, [
rdf_objects.PathInfo.OS(components=["foo"], directory=False),
])
self.db.WritePathInfos(client_id, [
rdf_objects.PathInfo.OS(components=["foo", "bar"]),
])
results = self.db.ReadPathInfos(client_id, rdf_objects.PathInfo.PathType.OS,
[("foo",)])
self.assertLen(results, 1)
self.assertTrue(results[("foo",)].directory)
def testWritePathInfosDuplicatedData(self):
client_id = db_test_utils.InitializeClient(self.db)
self.db.WritePathInfos(client_id, [
rdf_objects.PathInfo.OS(components=["foo", "bar"]),
])
self.db.WritePathInfos(client_id, [
rdf_objects.PathInfo.OS(components=["foo", "bar"]),
])
results = self.db.ReadPathInfos(client_id, rdf_objects.PathInfo.PathType.OS,
[("foo", "bar")])
self.assertLen(results, 1)
result_path_info = results[("foo", "bar")]
self.assertEqual(result_path_info.components, ["foo", "bar"])
self.assertEqual(result_path_info.directory, False)
def testWritePathInfosStoresCopy(self):
client_id = db_test_utils.InitializeClient(self.db)
path_info = rdf_objects.PathInfo.OS(components=["foo", "bar"])
path_info.stat_entry.st_size = 1337
path_info.hash_entry.sha256 = b"foo"
self.db.WritePathInfos(client_id, [path_info])
timestamp_1 = self.db.Now()
path_info.stat_entry.st_size = 42
path_info.hash_entry.sha256 = b"bar"
self.db.WritePathInfos(client_id, [path_info])
timestamp_2 = self.db.Now()
result_1 = self.db.ReadPathInfo(
client_id,
rdf_objects.PathInfo.PathType.OS,
components=("foo", "bar"),
timestamp=timestamp_1)
self.assertEqual(result_1.stat_entry.st_size, 1337)
self.assertEqual(result_1.hash_entry.sha256, b"foo")
result_2 = self.db.ReadPathInfo(
client_id,
rdf_objects.PathInfo.PathType.OS,
components=("foo", "bar"),
timestamp=timestamp_2)
self.assertEqual(result_2.stat_entry.st_size, 42)
self.assertEqual(result_2.hash_entry.sha256, b"bar")
def testMultiWritePathInfos(self):
client_a_id = db_test_utils.InitializeClient(self.db)
client_b_id = db_test_utils.InitializeClient(self.db)
path_info_a_1 = rdf_objects.PathInfo.OS(components=["foo", "bar"])
path_info_a_1.stat_entry.st_size = 42
path_info_a_2 = rdf_objects.PathInfo.OS(components=["foo", "baz"])
path_info_a_2.hash_entry.md5 = b"aaa"
path_info_a_2.hash_entry.sha256 = b"ccc"
path_info_b_1 = rdf_objects.PathInfo.TSK(components=["norf", "thud"])
path_info_b_1.hash_entry.sha1 = b"ddd"
path_info_b_1.hash_entry.sha256 = b"bbb"
path_info_b_2 = rdf_objects.PathInfo.TSK(components=["quux", "blargh"])
path_info_b_2.stat_entry.st_mode = 1337
path_infos = {
client_a_id: [path_info_a_1, path_info_a_2],
client_b_id: [path_info_b_1, path_info_b_2],
}
self.db.MultiWritePathInfos(path_infos)
path_infos_a = self.db.ReadPathInfos(client_a_id,
rdf_objects.PathInfo.PathType.OS, [
("foo", "bar"),
("foo", "baz"),
])
self.assertEqual(path_infos_a[("foo", "bar")].stat_entry.st_size, 42)
self.assertEqual(path_infos_a[("foo", "baz")].hash_entry.md5, b"aaa")
self.assertEqual(path_infos_a[("foo", "baz")].hash_entry.sha256, b"ccc")
path_infos_b = self.db.ReadPathInfos(client_b_id,
rdf_objects.PathInfo.PathType.TSK, [
("norf", "thud"),
("quux", "blargh"),
])
self.assertEqual(path_infos_b[("norf", "thud")].hash_entry.sha1, b"ddd")
self.assertEqual(path_infos_b[("norf", "thud")].hash_entry.sha256, b"bbb")
self.assertEqual(path_infos_b[("quux", "blargh")].stat_entry.st_mode, 1337)
def testReadPathInfosEmptyComponentsList(self):
client_id = db_test_utils.InitializeClient(self.db)
results = self.db.ReadPathInfos(client_id, rdf_objects.PathInfo.PathType.OS,
[])
self.assertEqual(results, {})
def testReadPathInfosNonExistent(self):
client_id = db_test_utils.InitializeClient(self.db)
self.db.WritePathInfos(client_id, [
rdf_objects.PathInfo.OS(components=["foo", "bar"]),
])
results = self.db.ReadPathInfos(client_id, rdf_objects.PathInfo.PathType.OS,
[
("foo", "bar"),
("foo", "baz"),
("quux", "norf"),
])
self.assertLen(results, 3)
self.assertIsNotNone(results[("foo", "bar")])
self.assertIsNone(results[("foo", "baz")])
self.assertIsNone(results[("quux", "norf")])
def testReadPathInfoValidatesTimestamp(self):
client_id = db_test_utils.InitializeClient(self.db)
with self.assertRaises(TypeError):
self.db.ReadPathInfo(
client_id,
rdf_objects.PathInfo.PathType.REGISTRY,
components=("foo", "bar", "baz"),
timestamp=rdfvalue.Duration.From(10, rdfvalue.SECONDS))
def testReadPathInfoNonExistent(self):
client_id = db_test_utils.InitializeClient(self.db)
with self.assertRaises(db.UnknownPathError) as ctx:
self.db.ReadPathInfo(
client_id,
rdf_objects.PathInfo.PathType.OS,
components=("foo", "bar", "baz"))
self.assertEqual(ctx.exception.client_id, client_id)
self.assertEqual(ctx.exception.path_type, rdf_objects.PathInfo.PathType.OS)
self.assertEqual(ctx.exception.components, ("foo", "bar", "baz"))
def testReadPathInfoTimestampStatEntry(self):
client_id = db_test_utils.InitializeClient(self.db)
pathspec = rdf_paths.PathSpec(
path="foo/bar/baz", pathtype=rdf_paths.PathSpec.PathType.OS)
stat_entry = rdf_client_fs.StatEntry(pathspec=pathspec, st_size=42)
self.db.WritePathInfos(client_id,
[rdf_objects.PathInfo.FromStatEntry(stat_entry)])
timestamp_1 = self.db.Now()
stat_entry = rdf_client_fs.StatEntry(pathspec=pathspec, st_size=101)
self.db.WritePathInfos(client_id,
[rdf_objects.PathInfo.FromStatEntry(stat_entry)])
timestamp_2 = self.db.Now()
stat_entry = rdf_client_fs.StatEntry(pathspec=pathspec, st_size=1337)
self.db.WritePathInfos(client_id,
[rdf_objects.PathInfo.FromStatEntry(stat_entry)])
timestamp_3 = self.db.Now()
path_info_last = self.db.ReadPathInfo(
client_id,
rdf_objects.PathInfo.PathType.OS,
components=("foo", "bar", "baz"))
self.assertEqual(path_info_last.stat_entry.st_size, 1337)
self.assertEqual(path_info_last.components, ["foo", "bar", "baz"])
path_info_1 = self.db.ReadPathInfo(
client_id,
rdf_objects.PathInfo.PathType.OS,
components=("foo", "bar", "baz"),
timestamp=timestamp_1)
self.assertEqual(path_info_1.stat_entry.st_size, 42)
    self.assertEqual(path_info_1.components, ["foo", "bar", "baz"])
path_info_2 = self.db.ReadPathInfo(
client_id,
rdf_objects.PathInfo.PathType.OS,
components=("foo", "bar", "baz"),
timestamp=timestamp_2)
self.assertEqual(path_info_2.stat_entry.st_size, 101)
    self.assertEqual(path_info_2.components, ["foo", "bar", "baz"])
path_info_3 = self.db.ReadPathInfo(
client_id,
rdf_objects.PathInfo.PathType.OS,
components=("foo", "bar", "baz"),
timestamp=timestamp_3)
self.assertEqual(path_info_3.stat_entry.st_size, 1337)
    self.assertEqual(path_info_3.components, ["foo", "bar", "baz"])
def testReadPathInfoTimestampHashEntry(self):
client_id = db_test_utils.InitializeClient(self.db)
path_info = rdf_objects.PathInfo.OS(components=["foo"])
path_info.hash_entry = rdf_crypto.Hash(sha256=b"bar")
self.db.WritePathInfos(client_id, [path_info])
bar_timestamp = self.db.Now()
path_info.hash_entry = rdf_crypto.Hash(sha256=b"baz")
self.db.WritePathInfos(client_id, [path_info])
baz_timestamp = self.db.Now()
path_info.hash_entry = rdf_crypto.Hash(sha256=b"quux")
self.db.WritePathInfos(client_id, [path_info])
quux_timestamp = self.db.Now()
bar_path_info = self.db.ReadPathInfo(
client_id,
rdf_objects.PathInfo.PathType.OS,
components=("foo",),
timestamp=bar_timestamp)
self.assertEqual(bar_path_info.hash_entry.sha256, b"bar")
baz_path_info = self.db.ReadPathInfo(
client_id,
rdf_objects.PathInfo.PathType.OS,
components=("foo",),
timestamp=baz_timestamp)
self.assertEqual(baz_path_info.hash_entry.sha256, b"baz")
quux_path_info = self.db.ReadPathInfo(
client_id,
rdf_objects.PathInfo.PathType.OS,
components=("foo",),
timestamp=quux_timestamp)
self.assertEqual(quux_path_info.hash_entry.sha256, b"quux")
def testReadPathInfosMany(self):
client_id = db_test_utils.InitializeClient(self.db)
path_info_1 = rdf_objects.PathInfo.OS(components=["foo", "bar"])
path_info_1.stat_entry.st_mode = 42
path_info_1.hash_entry.md5 = b"foo"
path_info_1.hash_entry.sha256 = b"bar"
path_info_2 = rdf_objects.PathInfo.OS(components=["baz", "quux", "norf"])
path_info_2.hash_entry.sha256 = b"bazquuxnorf"
path_info_3 = rdf_objects.PathInfo.OS(components=["blargh"], directory=True)
path_info_3.stat_entry.st_size = 1337
self.db.WritePathInfos(client_id, [path_info_1, path_info_2, path_info_3])
results = self.db.ReadPathInfos(client_id, rdf_objects.PathInfo.PathType.OS,
[
("foo", "bar"),
("baz", "quux", "norf"),
("blargh",),
])
result_path_info_1 = results[("foo", "bar")]
self.assertEqual(result_path_info_1.components, ["foo", "bar"])
self.assertEqual(result_path_info_1.stat_entry.st_mode, 42)
self.assertEqual(result_path_info_1.hash_entry.md5, b"foo")
self.assertEqual(result_path_info_1.hash_entry.sha256, b"bar")
result_path_info_2 = results[("baz", "quux", "norf")]
self.assertEqual(result_path_info_2.components, ["baz", "quux", "norf"])
self.assertEqual(result_path_info_2.hash_entry.sha256, b"bazquuxnorf")
result_path_info_3 = results[("blargh",)]
self.assertEqual(result_path_info_3.components, ["blargh"])
self.assertEqual(result_path_info_3.stat_entry.st_size, 1337)
self.assertEqual(result_path_info_3.directory, True)
def testReadPathInfoTimestampStatAndHashEntry(self):
client_id = db_test_utils.InitializeClient(self.db)
path_info = rdf_objects.PathInfo.OS(components=["foo"])
path_info.stat_entry = rdf_client_fs.StatEntry(st_mode=42)
path_info.hash_entry = None
self.db.WritePathInfos(client_id, [path_info])
timestamp_1 = self.db.Now()
path_info.stat_entry = None
path_info.hash_entry = rdf_crypto.Hash(sha256=b"quux")
self.db.WritePathInfos(client_id, [path_info])
timestamp_2 = self.db.Now()
path_info.stat_entry = rdf_client_fs.StatEntry(st_mode=1337)
path_info.hash_entry = None
self.db.WritePathInfos(client_id, [path_info])
timestamp_3 = self.db.Now()
path_info.stat_entry = rdf_client_fs.StatEntry(st_mode=4815162342)
path_info.hash_entry = rdf_crypto.Hash(sha256=b"norf")
self.db.WritePathInfos(client_id, [path_info])
timestamp_4 = self.db.Now()
path_info_1 = self.db.ReadPathInfo(
client_id,
rdf_objects.PathInfo.PathType.OS,
components=("foo",),
timestamp=timestamp_1)
self.assertEqual(path_info_1.stat_entry.st_mode, 42)
self.assertFalse(path_info_1.HasField("hash_entry"))
path_info_2 = self.db.ReadPathInfo(
client_id,
rdf_objects.PathInfo.PathType.OS,
components=("foo",),
timestamp=timestamp_2)
self.assertEqual(path_info_2.stat_entry.st_mode, 42)
self.assertEqual(path_info_2.hash_entry.sha256, b"quux")
path_info_3 = self.db.ReadPathInfo(
client_id,
rdf_objects.PathInfo.PathType.OS,
components=("foo",),
timestamp=timestamp_3)
self.assertEqual(path_info_3.stat_entry.st_mode, 1337)
self.assertEqual(path_info_3.hash_entry.sha256, b"quux")
path_info_4 = self.db.ReadPathInfo(
client_id,
rdf_objects.PathInfo.PathType.OS,
components=("foo",),
timestamp=timestamp_4)
self.assertEqual(path_info_4.stat_entry.st_mode, 4815162342)
self.assertEqual(path_info_4.hash_entry.sha256, b"norf")
def testReadPathInfoOlder(self):
client_id = db_test_utils.InitializeClient(self.db)
path_info = rdf_objects.PathInfo.OS(components=["foo"])
path_info.stat_entry.st_mode = 42
path_info.hash_entry.sha256 = b"foo"
self.db.WritePathInfos(client_id, [path_info])
path_info = rdf_objects.PathInfo.OS(components=["bar"])
path_info.stat_entry.st_mode = 1337
path_info.hash_entry.sha256 = b"bar"
self.db.WritePathInfos(client_id, [path_info])
path_info = self.db.ReadPathInfo(
client_id, rdf_objects.PathInfo.PathType.OS, components=("foo",))
self.assertEqual(path_info.stat_entry.st_mode, 42)
self.assertEqual(path_info.hash_entry.sha256, b"foo")
path_info = self.db.ReadPathInfo(
client_id, rdf_objects.PathInfo.PathType.OS, components=("bar",))
self.assertEqual(path_info.stat_entry.st_mode, 1337)
self.assertEqual(path_info.hash_entry.sha256, b"bar")
def testListDescendantPathInfosAlwaysSucceedsOnRoot(self):
client_id = db_test_utils.InitializeClient(self.db)
results = self.db.ListDescendantPathInfos(
client_id, rdf_objects.PathInfo.PathType.OS, components=())
self.assertEmpty(results)
def testListDescendantPathInfosNonexistentDirectory(self):
client_id = db_test_utils.InitializeClient(self.db)
with self.assertRaises(db.UnknownPathError):
self.db.ListDescendantPathInfos(
client_id, rdf_objects.PathInfo.PathType.OS, components=("foo",))
def testListDescendantPathInfosNotDirectory(self):
client_id = db_test_utils.InitializeClient(self.db)
path_info = rdf_objects.PathInfo.OS(components=("foo",), directory=False)
self.db.WritePathInfos(client_id, [path_info])
with self.assertRaises(db.NotDirectoryPathError):
self.db.ListDescendantPathInfos(
client_id, rdf_objects.PathInfo.PathType.OS, components=("foo",))
def testListDescendantPathInfosEmptyResult(self):
client_id = db_test_utils.InitializeClient(self.db)
path_info = rdf_objects.PathInfo.OS(components=("foo",), directory=True)
self.db.WritePathInfos(client_id, [path_info])
results = self.db.ListDescendantPathInfos(
client_id, rdf_objects.PathInfo.PathType.OS, components=("foo",))
self.assertEmpty(results)
def testListDescendantPathInfosSingleResult(self):
client_id = db_test_utils.InitializeClient(self.db)
self.db.WritePathInfos(client_id, [
rdf_objects.PathInfo.OS(components=["foo", "bar"]),
])
results = self.db.ListDescendantPathInfos(
client_id, rdf_objects.PathInfo.PathType.OS, components=("foo",))
self.assertLen(results, 1)
self.assertEqual(results[0].components, ("foo", "bar"))
def testListDescendantPathInfosSingle(self):
client_id = db_test_utils.InitializeClient(self.db)
self.db.WritePathInfos(client_id, [
rdf_objects.PathInfo.OS(components=["foo", "bar", "baz", "quux"]),
])
results = self.db.ListDescendantPathInfos(
client_id, rdf_objects.PathInfo.PathType.OS, components=("foo",))
self.assertLen(results, 3)
self.assertEqual(results[0].components, ("foo", "bar"))
self.assertEqual(results[1].components, ("foo", "bar", "baz"))
self.assertEqual(results[2].components, ("foo", "bar", "baz", "quux"))
def testListDescendantPathInfosBranching(self):
client_id = db_test_utils.InitializeClient(self.db)
self.db.WritePathInfos(client_id, [
rdf_objects.PathInfo.OS(components=["foo", "bar", "quux"]),
rdf_objects.PathInfo.OS(components=["foo", "baz"]),
])
results = self.db.ListDescendantPathInfos(
client_id, rdf_objects.PathInfo.PathType.OS, components=("foo",))
self.assertLen(results, 3)
self.assertEqual(results[0].components, ("foo", "bar"))
self.assertEqual(results[1].components, ("foo", "bar", "quux"))
self.assertEqual(results[2].components, ("foo", "baz"))
def testListDescendantPathInfosLimited(self):
client_id = db_test_utils.InitializeClient(self.db)
self.db.WritePathInfos(client_id, [
rdf_objects.PathInfo.OS(components=["foo", "bar", "baz", "quux"]),
rdf_objects.PathInfo.OS(components=["foo", "bar", "blargh"]),
rdf_objects.PathInfo.OS(components=["foo", "norf", "thud", "plugh"]),
])
results = self.db.ListDescendantPathInfos(
client_id,
rdf_objects.PathInfo.PathType.OS,
components=("foo",),
max_depth=2)
components = [tuple(path_info.components) for path_info in results]
self.assertIn(("foo", "bar"), components)
self.assertIn(("foo", "bar", "baz"), components)
self.assertIn(("foo", "bar", "blargh"), components)
self.assertNotIn(("foo", "bar", "baz", "quux"), components)
self.assertNotIn(("foo", "norf", "thud", "plugh"), components)
def testListDescendantPathInfosTypeSeparated(self):
client_id = db_test_utils.InitializeClient(self.db)
self.db.WritePathInfos(client_id, [
rdf_objects.PathInfo.OS(components=["usr", "bin", "javac"]),
rdf_objects.PathInfo.TSK(components=["usr", "bin", "gdb"]),
])
os_results = self.db.ListDescendantPathInfos(
client_id, rdf_objects.PathInfo.PathType.OS, components=("usr", "bin"))
self.assertLen(os_results, 1)
self.assertEqual(os_results[0].components, ("usr", "bin", "javac"))
tsk_results = self.db.ListDescendantPathInfos(
client_id, rdf_objects.PathInfo.PathType.TSK, components=("usr", "bin"))
self.assertLen(tsk_results, 1)
self.assertEqual(tsk_results[0].components, ("usr", "bin", "gdb"))
def testListDescendantPathInfosAll(self):
client_id = db_test_utils.InitializeClient(self.db)
self.db.WritePathInfos(client_id, [
rdf_objects.PathInfo.OS(components=["foo", "bar"]),
rdf_objects.PathInfo.OS(components=["baz", "quux"]),
])
results = self.db.ListDescendantPathInfos(
client_id, rdf_objects.PathInfo.PathType.OS, components=())
self.assertEqual(results[0].components, ("baz",))
self.assertEqual(results[1].components, ("baz", "quux"))
self.assertEqual(results[2].components, ("foo",))
self.assertEqual(results[3].components, ("foo", "bar"))
def testListDescendantPathInfosLimitedDirectory(self):
client_id = db_test_utils.InitializeClient(self.db)
path_info_1 = rdf_objects.PathInfo.OS(components=["foo", "bar", "baz"])
path_info_1.stat_entry.st_mode = 108
path_info_2 = rdf_objects.PathInfo.OS(components=["foo", "bar"])
path_info_2.stat_entry.st_mode = 1337
path_info_3 = rdf_objects.PathInfo.OS(components=["foo", "norf", "quux"])
path_info_3.stat_entry.st_mode = 707
self.db.WritePathInfos(client_id, [path_info_1, path_info_2, path_info_3])
results = self.db.ListDescendantPathInfos(
client_id, rdf_objects.PathInfo.PathType.OS, components=(), max_depth=2)
self.assertLen(results, 3)
self.assertEqual(results[0].components, ("foo",))
self.assertEqual(results[1].components, ("foo", "bar"))
self.assertEqual(results[2].components, ("foo", "norf"))
self.assertEqual(results[1].stat_entry.st_mode, 1337)
def testListDescendantPathInfosDepthZero(self):
client_id = db_test_utils.InitializeClient(self.db)
path_info_1 = rdf_objects.PathInfo.OS(components=("foo",))
path_info_2 = rdf_objects.PathInfo.OS(components=("foo", "bar"))
path_info_3 = rdf_objects.PathInfo.OS(components=("baz",))
self.db.WritePathInfos(client_id, [path_info_1, path_info_2, path_info_3])
results = self.db.ListDescendantPathInfos(
client_id=client_id,
path_type=rdf_objects.PathInfo.PathType.OS,
components=(),
max_depth=0)
self.assertEmpty(results)
def testListDescendantPathInfosTimestampNow(self):
client_id = db_test_utils.InitializeClient(self.db)
path_info = rdf_objects.PathInfo.OS(components=["foo", "bar", "baz"])
path_info.stat_entry.st_size = 1337
self.db.WritePathInfos(client_id, [path_info])
results = self.db.ListDescendantPathInfos(
client_id=client_id,
path_type=rdf_objects.PathInfo.PathType.OS,
components=(),
timestamp=self.db.Now())
self.assertLen(results, 3)
self.assertEqual(results[0].components, ("foo",))
self.assertEqual(results[1].components, ("foo", "bar"))
self.assertEqual(results[2].components, ("foo", "bar", "baz"))
self.assertEqual(results[2].stat_entry.st_size, 1337)
def testListDescendantPathInfosTimestampMultiple(self):
client_id = db_test_utils.InitializeClient(self.db)
timestamp_0 = self.db.Now()
path_info_1 = rdf_objects.PathInfo.OS(components=["foo", "bar", "baz"])
path_info_1.stat_entry.st_size = 1
self.db.WritePathInfos(client_id, [path_info_1])
timestamp_1 = self.db.Now()
path_info_2 = rdf_objects.PathInfo.OS(components=["foo", "quux", "norf"])
path_info_2.stat_entry.st_size = 2
self.db.WritePathInfos(client_id, [path_info_2])
timestamp_2 = self.db.Now()
path_info_3 = rdf_objects.PathInfo.OS(components=["foo", "quux", "thud"])
path_info_3.stat_entry.st_size = 3
self.db.WritePathInfos(client_id, [path_info_3])
timestamp_3 = self.db.Now()
results_0 = self.db.ListDescendantPathInfos(
client_id=client_id,
path_type=rdf_objects.PathInfo.PathType.OS,
components=(),
timestamp=timestamp_0)
self.assertEmpty(results_0)
results_1 = self.db.ListDescendantPathInfos(
client_id=client_id,
path_type=rdf_objects.PathInfo.PathType.OS,
components=(),
timestamp=timestamp_1)
self.assertLen(results_1, 3)
self.assertEqual(results_1[0].components, ("foo",))
self.assertEqual(results_1[1].components, ("foo", "bar"))
self.assertEqual(results_1[2].components, ("foo", "bar", "baz"))
results_2 = self.db.ListDescendantPathInfos(
client_id=client_id,
path_type=rdf_objects.PathInfo.PathType.OS,
components=(),
timestamp=timestamp_2)
self.assertLen(results_2, 5)
self.assertEqual(results_2[0].components, ("foo",))
self.assertEqual(results_2[1].components, ("foo", "bar"))
self.assertEqual(results_2[2].components, ("foo", "bar", "baz"))
self.assertEqual(results_2[3].components, ("foo", "quux"))
self.assertEqual(results_2[4].components, ("foo", "quux", "norf"))
results_3 = self.db.ListDescendantPathInfos(
client_id=client_id,
path_type=rdf_objects.PathInfo.PathType.OS,
components=(),
timestamp=timestamp_3)
self.assertLen(results_3, 6)
self.assertEqual(results_3[0].components, ("foo",))
self.assertEqual(results_3[1].components, ("foo", "bar"))
self.assertEqual(results_3[2].components, ("foo", "bar", "baz"))
self.assertEqual(results_3[3].components, ("foo", "quux"))
self.assertEqual(results_3[4].components, ("foo", "quux", "norf"))
self.assertEqual(results_3[5].components, ("foo", "quux", "thud"))
def testListDescendantPathInfosTimestampStatValue(self):
client_id = db_test_utils.InitializeClient(self.db)
timestamp_0 = self.db.Now()
path_info = rdf_objects.PathInfo.OS(components=("foo", "bar"))
path_info.stat_entry.st_size = 1337
self.db.WritePathInfos(client_id, [path_info])
timestamp_1 = self.db.Now()
path_info.stat_entry.st_size = 42
self.db.WritePathInfos(client_id, [path_info])
timestamp_2 = self.db.Now()
results_0 = self.db.ListDescendantPathInfos(
client_id=client_id,
path_type=rdf_objects.PathInfo.PathType.OS,
components=("foo",),
timestamp=timestamp_0)
self.assertEmpty(results_0)
results_1 = self.db.ListDescendantPathInfos(
client_id=client_id,
path_type=rdf_objects.PathInfo.PathType.OS,
components=("foo",),
timestamp=timestamp_1)
self.assertLen(results_1, 1)
self.assertEqual(results_1[0].components, ("foo", "bar"))
self.assertEqual(results_1[0].stat_entry.st_size, 1337)
results_2 = self.db.ListDescendantPathInfos(
client_id=client_id,
path_type=rdf_objects.PathInfo.PathType.OS,
components=("foo",),
timestamp=timestamp_2)
self.assertLen(results_2, 1)
self.assertEqual(results_2[0].components, ("foo", "bar"))
self.assertEqual(results_2[0].stat_entry.st_size, 42)
def testListDescendantPathInfosTimestampHashValue(self):
client_id = db_test_utils.InitializeClient(self.db)
timestamp_0 = self.db.Now()
path_info = rdf_objects.PathInfo.OS(components=("foo",))
path_info.hash_entry.md5 = b"quux"
path_info.hash_entry.sha256 = b"thud"
self.db.WritePathInfos(client_id, [path_info])
timestamp_1 = self.db.Now()
path_info.hash_entry.md5 = b"norf"
path_info.hash_entry.sha256 = b"blargh"
self.db.WritePathInfos(client_id, [path_info])
timestamp_2 = self.db.Now()
results_0 = self.db.ListDescendantPathInfos(
client_id=client_id,
path_type=rdf_objects.PathInfo.PathType.OS,
components=(),
timestamp=timestamp_0)
self.assertEmpty(results_0)
results_1 = self.db.ListDescendantPathInfos(
client_id=client_id,
path_type=rdf_objects.PathInfo.PathType.OS,
components=(),
timestamp=timestamp_1)
self.assertLen(results_1, 1)
self.assertEqual(results_1[0].components, ("foo",))
self.assertEqual(results_1[0].hash_entry.md5, b"quux")
self.assertEqual(results_1[0].hash_entry.sha256, b"thud")
results_2 = self.db.ListDescendantPathInfos(
client_id=client_id,
path_type=rdf_objects.PathInfo.PathType.OS,
components=(),
timestamp=timestamp_2)
self.assertLen(results_2, 1)
self.assertEqual(results_2[0].components, ("foo",))
self.assertEqual(results_2[0].hash_entry.md5, b"norf")
self.assertEqual(results_2[0].hash_entry.sha256, b"blargh")
def testListDescendantPathInfosWildcards(self):
client_id = db_test_utils.InitializeClient(self.db)
self.db.WritePathInfos(client_id, [
rdf_objects.PathInfo.OS(components=("foo", "quux")),
rdf_objects.PathInfo.OS(components=("bar", "norf")),
rdf_objects.PathInfo.OS(components=("___", "thud")),
rdf_objects.PathInfo.OS(components=("%%%", "ztesch")),
])
results = self.db.ListDescendantPathInfos(
client_id=client_id,
path_type=rdf_objects.PathInfo.PathType.OS,
components=("___",))
self.assertLen(results, 1)
self.assertEqual(results[0].components, ("___", "thud"))
results = self.db.ListDescendantPathInfos(
client_id=client_id,
path_type=rdf_objects.PathInfo.PathType.OS,
components=("%%%",))
self.assertLen(results, 1)
self.assertEqual(results[0].components, ("%%%", "ztesch"))
def testListDescendantPathInfosManyWildcards(self):
client_id = db_test_utils.InitializeClient(self.db)
self.db.WritePathInfos(client_id, [
rdf_objects.PathInfo.OS(components=("%", "%%", "%%%")),
rdf_objects.PathInfo.OS(components=("%", "%%%", "%")),
rdf_objects.PathInfo.OS(components=("%%", "%", "%%%")),
rdf_objects.PathInfo.OS(components=("%%", "%%%", "%")),
rdf_objects.PathInfo.OS(components=("%%%", "%%", "%%")),
rdf_objects.PathInfo.OS(components=("__", "%%", "__")),
])
results = self.db.ListDescendantPathInfos(
client_id=client_id,
path_type=rdf_objects.PathInfo.PathType.OS,
components=("%",))
self.assertLen(results, 4)
self.assertEqual(results[0].components, ("%", "%%"))
self.assertEqual(results[1].components, ("%", "%%", "%%%"))
self.assertEqual(results[2].components, ("%", "%%%"))
self.assertEqual(results[3].components, ("%", "%%%", "%"))
results = self.db.ListDescendantPathInfos(
client_id=client_id,
path_type=rdf_objects.PathInfo.PathType.OS,
components=("%%",))
self.assertLen(results, 4)
self.assertEqual(results[0].components, ("%%", "%"))
self.assertEqual(results[1].components, ("%%", "%", "%%%"))
self.assertEqual(results[2].components, ("%%", "%%%"))
self.assertEqual(results[3].components, ("%%", "%%%", "%"))
results = self.db.ListDescendantPathInfos(
client_id=client_id,
path_type=rdf_objects.PathInfo.PathType.OS,
components=("__",))
self.assertLen(results, 2)
self.assertEqual(results[0].components, ("__", "%%"))
self.assertEqual(results[1].components, ("__", "%%", "__"))
def testListDescendantPathInfosWildcardsWithMaxDepth(self):
client_id = db_test_utils.InitializeClient(self.db)
self.db.WritePathInfos(client_id, [
rdf_objects.PathInfo.OS(components=("%", "%%foo", "%%%bar", "%%%%")),
rdf_objects.PathInfo.OS(components=("%", "%%foo", "%%%baz", "%%%%")),
rdf_objects.PathInfo.OS(components=("%", "%%quux", "%%%norf", "%%%%")),
rdf_objects.PathInfo.OS(components=("%", "%%quux", "%%%thud", "%%%%")),
rdf_objects.PathInfo.OS(components=("%%", "%%bar", "%%%quux")),
rdf_objects.PathInfo.OS(components=("%%", "%%baz", "%%%norf")),
rdf_objects.PathInfo.OS(components=("__", "__bar__", "__quux__")),
rdf_objects.PathInfo.OS(components=("__", "__baz__", "__norf__")),
rdf_objects.PathInfo.OS(components=("blargh",)),
rdf_objects.PathInfo.OS(components=("ztesch",)),
])
results = self.db.ListDescendantPathInfos(
client_id=client_id,
path_type=rdf_objects.PathInfo.PathType.OS,
components=("%",),
max_depth=2)
self.assertLen(results, 6)
self.assertEqual(results[0].components, ("%", "%%foo"))
self.assertEqual(results[1].components, ("%", "%%foo", "%%%bar"))
self.assertEqual(results[2].components, ("%", "%%foo", "%%%baz"))
self.assertEqual(results[3].components, ("%", "%%quux"))
self.assertEqual(results[4].components, ("%", "%%quux", "%%%norf"))
self.assertEqual(results[5].components, ("%", "%%quux", "%%%thud"))
results = self.db.ListDescendantPathInfos(
client_id=client_id,
path_type=rdf_objects.PathInfo.PathType.OS,
components=("%%",),
max_depth=1)
self.assertLen(results, 2)
self.assertEqual(results[0].components, ("%%", "%%bar"))
self.assertEqual(results[1].components, ("%%", "%%baz"))
results = self.db.ListDescendantPathInfos(
client_id=client_id,
path_type=rdf_objects.PathInfo.PathType.OS,
components=("__",),
max_depth=1)
self.assertLen(results, 2)
self.assertEqual(results[0].components, ("__", "__bar__"))
self.assertEqual(results[1].components, ("__", "__baz__"))
def testListChildPathInfosRoot(self):
client_id = db_test_utils.InitializeClient(self.db)
self.db.WritePathInfos(client_id, [
rdf_objects.PathInfo.OS(components=["foo", "bar"]),
rdf_objects.PathInfo.OS(components=["foo", "baz"]),
rdf_objects.PathInfo.OS(components=["quux", "norf"]),
])
results = self.db.ListChildPathInfos(
client_id, rdf_objects.PathInfo.PathType.OS, components=())
self.assertEqual(results[0].components, ("foo",))
self.assertTrue(results[0].directory)
self.assertEqual(results[1].components, ("quux",))
self.assertTrue(results[1].directory)
def testListChildPathInfosRootDeeper(self):
client_id = db_test_utils.InitializeClient(self.db)
self.db.WritePathInfos(client_id, [
rdf_objects.PathInfo.OS(components=("foo", "bar", "baz")),
rdf_objects.PathInfo.OS(components=("foo", "bar", "quux")),
rdf_objects.PathInfo.OS(components=("foo", "bar", "norf", "thud")),
])
results = self.db.ListChildPathInfos(
client_id, rdf_objects.PathInfo.PathType.OS, components=())
self.assertLen(results, 1)
self.assertEqual(results[0].components, ("foo",))
self.assertTrue(results[0].directory)
def testListChildPathInfosDetails(self):
client_id = db_test_utils.InitializeClient(self.db)
path_info = rdf_objects.PathInfo.OS(components=["foo", "bar"])
path_info.stat_entry.st_size = 42
self.db.WritePathInfos(client_id, [path_info])
path_info = rdf_objects.PathInfo.OS(components=["foo", "baz"])
path_info.hash_entry.md5 = b"quux"
path_info.hash_entry.sha256 = b"norf"
self.db.WritePathInfos(client_id, [path_info])
results = self.db.ListChildPathInfos(
client_id, rdf_objects.PathInfo.PathType.OS, components=("foo",))
self.assertEqual(results[0].components, ("foo", "bar"))
self.assertEqual(results[0].stat_entry.st_size, 42)
self.assertEqual(results[1].components, ("foo", "baz"))
self.assertEqual(results[1].hash_entry.md5, b"quux")
self.assertEqual(results[1].hash_entry.sha256, b"norf")
def testListChildPathInfosDeepSorted(self):
client_id = db_test_utils.InitializeClient(self.db)
self.db.WritePathInfos(client_id, [
rdf_objects.PathInfo.OS(components=["foo", "bar", "baz", "quux"]),
rdf_objects.PathInfo.OS(components=["foo", "bar", "baz", "norf"]),
rdf_objects.PathInfo.OS(components=["foo", "bar", "baz", "thud"]),
])
results = self.db.ListChildPathInfos(
client_id,
rdf_objects.PathInfo.PathType.OS,
components=("foo", "bar", "baz"))
self.assertEqual(results[0].components, ("foo", "bar", "baz", "norf"))
self.assertEqual(results[1].components, ("foo", "bar", "baz", "quux"))
self.assertEqual(results[2].components, ("foo", "bar", "baz", "thud"))
def testListChildPathInfosTimestamp(self):
client_id = db_test_utils.InitializeClient(self.db)
timestamp_0 = self.db.Now()
path_info_1 = rdf_objects.PathInfo.OS(components=("foo", "bar"))
path_info_1.stat_entry.st_size = 1
self.db.WritePathInfos(client_id, [path_info_1])
timestamp_1 = self.db.Now()
path_info_2 = rdf_objects.PathInfo.OS(components=("foo", "baz"))
path_info_2.stat_entry.st_size = 2
self.db.WritePathInfos(client_id, [path_info_2])
timestamp_2 = self.db.Now()
results_0 = self.db.ListChildPathInfos(
client_id=client_id,
path_type=rdf_objects.PathInfo.PathType.OS,
components=("foo",),
timestamp=timestamp_0)
self.assertEmpty(results_0)
results_1 = self.db.ListChildPathInfos(
client_id=client_id,
path_type=rdf_objects.PathInfo.PathType.OS,
components=("foo",),
timestamp=timestamp_1)
self.assertLen(results_1, 1)
self.assertEqual(results_1[0].components, ("foo", "bar"))
self.assertEqual(results_1[0].stat_entry.st_size, 1)
results_2 = self.db.ListChildPathInfos(
client_id=client_id,
path_type=rdf_objects.PathInfo.PathType.OS,
components=("foo",),
timestamp=timestamp_2)
self.assertLen(results_2, 2)
self.assertEqual(results_2[0].components, ("foo", "bar"))
self.assertEqual(results_2[0].stat_entry.st_size, 1)
self.assertEqual(results_2[1].components, ("foo", "baz"))
self.assertEqual(results_2[1].stat_entry.st_size, 2)
def testListChildPathInfosTimestampStatAndHashValue(self):
client_id = db_test_utils.InitializeClient(self.db)
path_info = rdf_objects.PathInfo.OS(components=("foo", "bar", "baz"))
path_info.stat_entry.st_size = 42
path_info.hash_entry.sha256 = b"quux"
self.db.WritePathInfos(client_id, [path_info])
timestamp_1 = self.db.Now()
path_info = rdf_objects.PathInfo.OS(components=("foo", "bar", "baz"))
path_info.stat_entry.st_size = 108
path_info.hash_entry.sha256 = b"norf"
self.db.WritePathInfos(client_id, [path_info])
timestamp_2 = self.db.Now()
path_info = rdf_objects.PathInfo.OS(components=("foo", "bar", "baz"))
path_info.stat_entry.st_size = 1337
path_info.hash_entry.sha256 = b"thud"
self.db.WritePathInfos(client_id, [path_info])
timestamp_3 = self.db.Now()
results_1 = self.db.ListChildPathInfos(
client_id=client_id,
path_type=rdf_objects.PathInfo.PathType.OS,
components=("foo", "bar"),
timestamp=timestamp_1)
self.assertLen(results_1, 1)
self.assertEqual(results_1[0].components, ("foo", "bar", "baz"))
self.assertEqual(results_1[0].stat_entry.st_size, 42)
self.assertEqual(results_1[0].hash_entry.sha256, b"quux")
results_2 = self.db.ListChildPathInfos(
client_id=client_id,
path_type=rdf_objects.PathInfo.PathType.OS,
components=("foo", "bar"),
timestamp=timestamp_2)
self.assertLen(results_2, 1)
self.assertEqual(results_2[0].components, ("foo", "bar", "baz"))
self.assertEqual(results_2[0].stat_entry.st_size, 108)
self.assertEqual(results_2[0].hash_entry.sha256, b"norf")
results_3 = self.db.ListChildPathInfos(
client_id=client_id,
path_type=rdf_objects.PathInfo.PathType.OS,
components=("foo", "bar"),
timestamp=timestamp_3)
self.assertLen(results_3, 1)
self.assertEqual(results_3[0].components, ("foo", "bar", "baz"))
self.assertEqual(results_3[0].stat_entry.st_size, 1337)
self.assertEqual(results_3[0].hash_entry.sha256, b"thud")
def testListChildPathInfosBackslashes(self):
client_id = db_test_utils.InitializeClient(self.db)
path_info_1 = rdf_objects.PathInfo.OS(components=("\\", "\\\\", "\\\\\\"))
path_info_2 = rdf_objects.PathInfo.OS(components=("\\", "\\\\\\", "\\\\"))
path_info_3 = rdf_objects.PathInfo.OS(components=("\\", "foo\\bar", "baz"))
self.db.WritePathInfos(client_id, [path_info_1, path_info_2, path_info_3])
results_0 = self.db.ListChildPathInfos(
client_id=client_id,
path_type=rdf_objects.PathInfo.PathType.OS,
components=("\\",))
self.assertLen(results_0, 3)
self.assertEqual(results_0[0].components, ("\\", "\\\\"))
self.assertEqual(results_0[1].components, ("\\", "\\\\\\"))
self.assertEqual(results_0[2].components, ("\\", "foo\\bar"))
results_1 = self.db.ListChildPathInfos(
client_id=client_id,
path_type=rdf_objects.PathInfo.PathType.OS,
components=("\\", "\\\\"))
self.assertLen(results_1, 1)
self.assertEqual(results_1[0].components, ("\\", "\\\\", "\\\\\\"))
results_2 = self.db.ListChildPathInfos(
client_id=client_id,
path_type=rdf_objects.PathInfo.PathType.OS,
components=("\\", "\\\\\\"))
self.assertLen(results_2, 1)
self.assertEqual(results_2[0].components, ("\\", "\\\\\\", "\\\\"))
results_3 = self.db.ListChildPathInfos(
client_id=client_id,
path_type=rdf_objects.PathInfo.PathType.OS,
components=("\\", "foo\\bar"))
self.assertLen(results_3, 1)
self.assertEqual(results_3[0].components, ("\\", "foo\\bar", "baz"))
def testListChildPathInfosTSKRootVolume(self):
client_id = db_test_utils.InitializeClient(self.db)
volume = "\\\\?\\Volume{2d4fbbd3-0000-0000-0000-100000000000}"
path_info = rdf_objects.PathInfo.TSK(components=(volume, "foobar.txt"))
path_info.stat_entry.st_size = 42
self.db.WritePathInfos(client_id, [path_info])
results = self.db.ListChildPathInfos(
client_id=client_id,
path_type=rdf_objects.PathInfo.PathType.TSK,
components=(volume,))
self.assertLen(results, 1)
self.assertEqual(results[0].components, (volume, "foobar.txt"))
self.assertEqual(results[0].stat_entry.st_size, 42)
def testReadPathInfosHistoriesEmpty(self):
client_id = db_test_utils.InitializeClient(self.db)
result = self.db.ReadPathInfosHistories(client_id,
rdf_objects.PathInfo.PathType.OS,
[])
self.assertEqual(result, {})
def testReadPathInfosHistoriesDoesNotRaiseOnUnknownClient(self):
results = self.db.ReadPathInfosHistories("C.FFFF111122223333",
rdf_objects.PathInfo.PathType.OS,
[("foo",)])
self.assertEqual(results[("foo",)], [])
def testReadPathInfosHistoriesWithSingleFileWithSingleHistoryItem(self):
client_id = db_test_utils.InitializeClient(self.db)
path_info = rdf_objects.PathInfo.OS(components=["foo"])
path_info.stat_entry.st_size = 42
path_info.hash_entry.sha256 = b"quux"
then = self.db.Now()
self.db.WritePathInfos(client_id, [path_info])
now = self.db.Now()
path_infos = self.db.ReadPathInfosHistories(
client_id, rdf_objects.PathInfo.PathType.OS, [("foo",)])
self.assertLen(path_infos, 1)
pi = path_infos[("foo",)]
self.assertLen(pi, 1)
self.assertEqual(pi[0].stat_entry.st_size, 42)
self.assertEqual(pi[0].hash_entry.sha256, b"quux")
self.assertBetween(pi[0].timestamp, then, now)
def testReadPathInfosHistoriesWithTwoFilesWithSingleHistoryItemEach(self):
client_id = db_test_utils.InitializeClient(self.db)
path_info_1 = rdf_objects.PathInfo.OS(components=["foo"])
path_info_1.stat_entry.st_mode = 1337
path_info_2 = rdf_objects.PathInfo.OS(components=["bar"])
path_info_2.hash_entry.sha256 = b"quux"
then = self.db.Now()
self.db.WritePathInfos(client_id, [path_info_1, path_info_2])
now = self.db.Now()
path_infos = self.db.ReadPathInfosHistories(
client_id, rdf_objects.PathInfo.PathType.OS, [("foo",), ("bar",)])
self.assertLen(path_infos, 2)
pi = path_infos[("bar",)]
self.assertLen(pi, 1)
self.assertEqual(pi[0].components, ("bar",))
self.assertEqual(pi[0].hash_entry.sha256, b"quux")
self.assertBetween(pi[0].timestamp, then, now)
pi = path_infos[("foo",)]
self.assertLen(pi, 1)
self.assertEqual(pi[0].components, ("foo",))
self.assertEqual(pi[0].stat_entry.st_mode, 1337)
self.assertBetween(pi[0].timestamp, then, now)
def testReadPathInfosHistoriesWithTwoFilesWithTwoHistoryItems(self):
client_id = db_test_utils.InitializeClient(self.db)
path_info_1 = rdf_objects.PathInfo.OS(components=["foo"])
path_info_2 = rdf_objects.PathInfo.OS(components=["bar"])
timestamp_1 = self.db.Now()
path_info_1.stat_entry.st_mode = 1337
self.db.WritePathInfos(client_id, [path_info_1])
timestamp_2 = self.db.Now()
path_info_1.stat_entry.st_mode = 1338
self.db.WritePathInfos(client_id, [path_info_1])
timestamp_3 = self.db.Now()
path_info_2.stat_entry.st_mode = 109
self.db.WritePathInfos(client_id, [path_info_2])
timestamp_4 = self.db.Now()
path_info_2.stat_entry.st_mode = 110
self.db.WritePathInfos(client_id, [path_info_2])
timestamp_5 = self.db.Now()
path_infos = self.db.ReadPathInfosHistories(
client_id, rdf_objects.PathInfo.PathType.OS, [("foo",), ("bar",)])
self.assertLen(path_infos, 2)
pi = path_infos[("bar",)]
self.assertLen(pi, 2)
self.assertEqual(pi[0].components, ("bar",))
self.assertEqual(pi[0].stat_entry.st_mode, 109)
self.assertBetween(pi[0].timestamp, timestamp_3, timestamp_4)
self.assertEqual(pi[1].components, ("bar",))
self.assertEqual(pi[1].stat_entry.st_mode, 110)
self.assertBetween(pi[1].timestamp, timestamp_4, timestamp_5)
pi = path_infos[("foo",)]
self.assertLen(pi, 2)
self.assertEqual(pi[0].components, ("foo",))
self.assertEqual(pi[0].stat_entry.st_mode, 1337)
self.assertBetween(pi[0].timestamp, timestamp_1, timestamp_2)
self.assertEqual(pi[1].components, ("foo",))
self.assertEqual(pi[1].stat_entry.st_mode, 1338)
self.assertBetween(pi[1].timestamp, timestamp_2, timestamp_3)
def testReadPathInfoHistoryTimestamp(self):
client_id = db_test_utils.InitializeClient(self.db)
path_info = rdf_objects.PathInfo.OS(components=["foo"])
path_info.stat_entry.st_size = 0
self.db.WritePathInfos(client_id, [path_info])
path_info.stat_entry.st_size = 1
self.db.WritePathInfos(client_id, [path_info])
path_info.stat_entry.st_size = 2
self.db.WritePathInfos(client_id, [path_info])
cutoff = self.db.Now()
path_info.stat_entry.st_size = 1337
self.db.WritePathInfos(client_id, [path_info])
path_infos = self.db.ReadPathInfoHistory(
client_id=client_id,
path_type=rdf_objects.PathInfo.PathType.OS,
components=("foo",),
cutoff=cutoff)
self.assertLen(path_infos, 3)
self.assertEqual(path_infos[0].stat_entry.st_size, 0)
self.assertEqual(path_infos[1].stat_entry.st_size, 1)
self.assertEqual(path_infos[2].stat_entry.st_size, 2)
def _WriteBlobReferences(self):
blob_ref_1 = rdf_objects.BlobReference(
offset=0, size=42, blob_id=rdf_objects.BlobID(b"01234567" * 4))
blob_ref_2 = rdf_objects.BlobReference(
offset=42, size=42, blob_id=rdf_objects.BlobID(b"01234568" * 4))
hash_id_1 = rdf_objects.SHA256HashID(b"0a1b2c3d" * 4)
hash_id_2 = rdf_objects.SHA256HashID(b"0a1b2c3e" * 4)
data = {
hash_id_1: [blob_ref_1],
hash_id_2: [blob_ref_1, blob_ref_2],
}
self.db.WriteHashBlobReferences(data)
return hash_id_1, hash_id_2
def testReadLatestPathInfosReturnsNothingForNonExistingPaths(self):
client_a_id = db_test_utils.InitializeClient(self.db)
client_b_id = db_test_utils.InitializeClient(self.db)
path_1 = db.ClientPath.OS(client_a_id, components=("foo", "baz"))
path_2 = db.ClientPath.TSK(client_b_id, components=("foo", "baz"))
results = self.db.ReadLatestPathInfosWithHashBlobReferences(
[path_1, path_2])
self.assertEqual(results, {path_1: None, path_2: None})
def testReadLatestPathInfosReturnsNothingWhenNoFilesCollected(self):
client_a_id = db_test_utils.InitializeClient(self.db)
client_b_id = db_test_utils.InitializeClient(self.db)
path_info_1 = rdf_objects.PathInfo.OS(components=("foo", "bar"))
self.db.WritePathInfos(client_a_id, [path_info_1])
path_info_2 = rdf_objects.PathInfo.TSK(components=("foo", "baz"))
self.db.WritePathInfos(client_b_id, [path_info_2])
path_1 = db.ClientPath.OS(client_a_id, components=("foo", "bar"))
path_2 = db.ClientPath.TSK(client_b_id, components=("foo", "baz"))
results = self.db.ReadLatestPathInfosWithHashBlobReferences(
[path_1, path_2])
self.assertEqual(results, {path_1: None, path_2: None})
def testReadLatestPathInfosFindsTwoCollectedFilesWhenTheyAreTheOnlyEntries(
self):
client_a_id = db_test_utils.InitializeClient(self.db)
client_b_id = db_test_utils.InitializeClient(self.db)
hash_id_1, hash_id_2 = self._WriteBlobReferences()
path_info_1 = rdf_objects.PathInfo.OS(components=("foo", "bar"))
path_info_1.hash_entry.sha256 = hash_id_1.AsBytes()
self.db.WritePathInfos(client_a_id, [path_info_1])
path_info_2 = rdf_objects.PathInfo.TSK(components=("foo", "baz"))
path_info_2.hash_entry.sha256 = hash_id_2.AsBytes()
self.db.WritePathInfos(client_b_id, [path_info_2])
path_1 = db.ClientPath.OS(client_a_id, components=("foo", "bar"))
path_2 = db.ClientPath.TSK(client_b_id, components=("foo", "baz"))
results = self.db.ReadLatestPathInfosWithHashBlobReferences(
[path_1, path_2])
self.assertCountEqual(results.keys(), [path_1, path_2])
self.assertEqual(results[path_1].hash_entry, path_info_1.hash_entry)
self.assertEqual(results[path_2].hash_entry, path_info_2.hash_entry)
self.assertTrue(results[path_1].timestamp)
self.assertTrue(results[path_2].timestamp)
def testReadLatestPathInfosCorrectlyFindsCollectedFileWithNonLatestEntry(
self):
client_id = db_test_utils.InitializeClient(self.db)
hash_id, _ = self._WriteBlobReferences()
path_info_1 = rdf_objects.PathInfo.OS(components=("foo", "bar"))
path_info_1.hash_entry.sha256 = hash_id.AsBytes()
self.db.WritePathInfos(client_id, [path_info_1])
path_info_2 = rdf_objects.PathInfo.OS(components=("foo", "bar"))
self.db.WritePathInfos(client_id, [path_info_2])
path = db.ClientPath.OS(client_id, components=("foo", "bar"))
results = self.db.ReadLatestPathInfosWithHashBlobReferences([path])
self.assertCountEqual(results.keys(), [path])
self.assertEqual(results[path].hash_entry, path_info_1.hash_entry)
self.assertTrue(results[path].timestamp)
def testReadLatestPathInfosCorrectlyFindsLatestOfTwoCollectedFiles(self):
client_id = db_test_utils.InitializeClient(self.db)
hash_id_1, hash_id_2 = self._WriteBlobReferences()
path_info_1 = rdf_objects.PathInfo.OS(components=("foo", "bar"))
path_info_1.hash_entry.sha256 = hash_id_1.AsBytes()
self.db.WritePathInfos(client_id, [path_info_1])
path_info_2 = rdf_objects.PathInfo.OS(components=("foo", "bar"))
path_info_2.hash_entry.sha256 = hash_id_2.AsBytes()
self.db.WritePathInfos(client_id, [path_info_2])
path = db.ClientPath.OS(client_id, components=("foo", "bar"))
results = self.db.ReadLatestPathInfosWithHashBlobReferences([path])
self.assertCountEqual(results.keys(), [path])
self.assertEqual(results[path].hash_entry, path_info_2.hash_entry)
self.assertTrue(results[path].timestamp)
def testReadLatestPathInfosCorrectlyFindsLatestCollectedFileBeforeTimestamp(
self):
client_id = db_test_utils.InitializeClient(self.db)
hash_id_1, hash_id_2 = self._WriteBlobReferences()
path_info_1 = rdf_objects.PathInfo.OS(components=("foo", "bar"))
path_info_1.hash_entry.sha256 = hash_id_1.AsBytes()
self.db.WritePathInfos(client_id, [path_info_1])
time_checkpoint = self.db.Now()
path_info_2 = rdf_objects.PathInfo.OS(components=("foo", "bar"))
path_info_2.hash_entry.sha256 = hash_id_2.AsBytes()
self.db.WritePathInfos(client_id, [path_info_2])
path = db.ClientPath.OS(client_id, components=("foo", "bar"))
results = self.db.ReadLatestPathInfosWithHashBlobReferences(
[path], max_timestamp=time_checkpoint)
self.assertCountEqual(results.keys(), [path])
self.assertEqual(results[path].hash_entry, path_info_1.hash_entry)
self.assertTrue(results[path].timestamp)
def testReadLatestPathInfosIncludesStatEntryIfThereIsOneWithSameTimestamp(
self):
client_id = db_test_utils.InitializeClient(self.db)
hash_id, _ = self._WriteBlobReferences()
path_info = rdf_objects.PathInfo.OS(components=("foo", "bar"))
path_info.stat_entry = rdf_client_fs.StatEntry(st_mode=42)
path_info.hash_entry.sha256 = hash_id.AsBytes()
self.db.WritePathInfos(client_id, [path_info])
path = db.ClientPath.OS(client_id, components=("foo", "bar"))
results = self.db.ReadLatestPathInfosWithHashBlobReferences([path])
self.assertCountEqual(results.keys(), [path])
self.assertEqual(results[path].stat_entry, path_info.stat_entry)
self.assertEqual(results[path].hash_entry, path_info.hash_entry)
self.assertTrue(results[path].timestamp)
def testWriteLongPathInfosWithCommonPrefix(self):
client_id = db_test_utils.InitializeClient(self.db)
prefix = ("foobarbaz",) * 303
quux_components = prefix + ("quux",)
norf_components = prefix + ("norf",)
self.db.WritePathInfos(client_id, [
rdf_objects.PathInfo.OS(components=quux_components),
rdf_objects.PathInfo.OS(components=norf_components),
])
quux_path_info = self.db.ReadPathInfo(
client_id=client_id,
path_type=rdf_objects.PathInfo.PathType.OS,
components=quux_components)
self.assertEqual(quux_path_info.components, quux_components)
norf_path_info = self.db.ReadPathInfo(
client_id=client_id,
path_type=rdf_objects.PathInfo.PathType.OS,
components=norf_components)
self.assertEqual(norf_path_info.components, norf_components)
# This file is a test library and thus does not require a __main__ block.
| 38.59375
| 80
| 0.681693
|
67e5f4d81ec655b27d240c9001979a3e6f1a44d0
| 3,453
|
py
|
Python
|
ajaxuploader/views/base.py
|
skoczen/django-ajax-uploader
|
90cbd93c3d80d047da596e2348edb9619dee3852
|
[
"BSD-3-Clause"
] | 75
|
2015-02-09T22:49:57.000Z
|
2021-01-31T23:47:39.000Z
|
ajaxuploader/views/base.py
|
skoczen/django-ajax-uploader
|
90cbd93c3d80d047da596e2348edb9619dee3852
|
[
"BSD-3-Clause"
] | 13
|
2015-02-27T03:01:30.000Z
|
2020-11-18T10:11:53.000Z
|
ajaxuploader/views/base.py
|
skoczen/django-ajax-uploader
|
90cbd93c3d80d047da596e2348edb9619dee3852
|
[
"BSD-3-Clause"
] | 29
|
2015-02-09T22:50:16.000Z
|
2019-12-25T06:41:43.000Z
|
try:
import json
except ImportError:
from django.utils import simplejson as json
from django.core.files.base import File
from django.core.serializers.json import DjangoJSONEncoder
from django.http import HttpResponse, HttpResponseBadRequest, Http404, HttpResponseNotAllowed
from ajaxuploader.backends.local import LocalUploadBackend
from ajaxuploader.signals import file_uploaded
class AjaxFileUploader(object):
def __init__(self, backend=None, **kwargs):
if backend is None:
backend = LocalUploadBackend
self.get_backend = lambda: backend(**kwargs)
def __call__(self, request, *args, **kwargs):
return self._ajax_upload(request, *args, **kwargs)
def _ajax_upload(self, request, *args, **kwargs):
if request.method == "POST":
if request.is_ajax():
# the file is stored raw in the request
upload = request
is_raw = True
# AJAX Upload will pass the filename in the querystring if it
# is the "advanced" ajax upload
try:
if 'qqfile' in request.GET:
filename = request.GET['qqfile']
else:
filename = request.REQUEST['qqfilename']
except KeyError:
return HttpResponseBadRequest("AJAX request not valid")
# not an ajax upload, so it was the "basic" iframe version with
# submission via form
else:
is_raw = False
if len(request.FILES) == 1:
# FILES is a dictionary in Django but Ajax Upload gives
# the uploaded file an ID based on a random number, so it
# cannot be guessed here in the code. Rather than editing
# Ajax Upload to pass the ID in the querystring, observe
# that each upload is a separate request, so FILES should
# only have one entry. Thus, we can just grab the first
# (and only) value in the dict.
                    upload = list(request.FILES.values())[0]
else:
raise Http404("Bad Upload")
filename = upload.name
backend = self.get_backend()
# custom filename handler
filename = (backend.update_filename(request, filename, *args, **kwargs)
or filename)
# save the file
backend.setup(filename, *args, **kwargs)
success = backend.upload(upload, filename, is_raw, *args, **kwargs)
if success:
file_uploaded.send(sender=self.__class__, backend=backend, request=request)
# callback
extra_context = backend.upload_complete(request, filename, *args, **kwargs)
# let Ajax Upload know whether we saved it or not
ret_json = {'success': success, 'filename': filename}
if extra_context is not None:
ret_json.update(extra_context)
# although "application/json" is the correct content type, IE throws a fit
return HttpResponse(json.dumps(ret_json, cls=DjangoJSONEncoder), content_type='text/html; charset=utf-8')
else:
response = HttpResponseNotAllowed(['POST'])
response.write("ERROR: Only POST allowed")
return response
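# Usage sketch (illustrative, not part of the original module): the uploader is
# normally exposed as a view in a Django URLconf. The URL path and pattern name
# below are assumptions; AjaxFileUploader() falls back to LocalUploadBackend when
# no backend class is given.
def _example_urlconf():
    from django.conf.urls import url
    uploader = AjaxFileUploader()  # LocalUploadBackend with its default kwargs
    return [
        url(r'^ajax-upload/$', uploader, name='ajax_upload'),
    ]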
| 42.109756
| 117
| 0.584419
|
ef491a4dd2d23e78f303972dac0bb5b86f25ea5d
| 5,415
|
py
|
Python
|
inference/utils/common_utils.py
|
videetparekh/latentai-sdk-examples
|
2104c097045105957ef7403b09b5a2c114677147
|
[
"Apache-2.0"
] | null | null | null |
inference/utils/common_utils.py
|
videetparekh/latentai-sdk-examples
|
2104c097045105957ef7403b09b5a2c114677147
|
[
"Apache-2.0"
] | null | null | null |
inference/utils/common_utils.py
|
videetparekh/latentai-sdk-examples
|
2104c097045105957ef7403b09b5a2c114677147
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2019 by LatentAI Inc.
# All rights reserved.
# This file is part of the LEIP(tm) SDK,
# and is released under the "LatentAI Commercial Software License".
# Please see the LICENSE file that should have been included as part of
# this package.
#
# @file common_utils.py
#
# @author Videet Parekh
#
# @date Wed, 16 Dec 20
from .preprocessors import ImagePreprocessor
import json
import glob
import os
from collections import OrderedDict
import numpy as np
from PIL import Image
import logging
import tvm
from tvm import relay
from tvm.contrib import graph_runtime
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '4'
import tensorflow as tf # noqa: E402
def collect_image(test_path, data_type, preprocessor, shape):
im = Image.open(test_path)
rgb_im = im.convert('RGB')
rgb_im = rgb_im.resize(shape[1:3])
data = np.array(rgb_im)[np.newaxis, :].astype(data_type)
return preprocessor(data)
def set_json_field(json_obj, field, value):
if json_obj is None:
json_obj = {}
segments = field.split(".")
assert(len(segments) > 0), \
"field cannot be empty: {}".format(field)
field = segments.pop()
ptr = json_obj
for i in segments:
        if i not in ptr:
ptr[i] = {}
if type(ptr[i]).__name__ != "dict":
ptr[i] = {}
ptr = ptr[i]
ptr[field] = value
return json_obj
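# Illustrative example (field path and value are arbitrary): set_json_field walks
# the dotted path, creating nested dicts as needed, and sets the final key.
def _example_set_json_field():
    cfg = set_json_field({}, "model.input.batch_size", 1)
    # cfg == {"model": {"input": {"batch_size": 1}}}
    return cfg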
def load_json(path, ordered=False):
if os.path.exists(path):
with open(path, 'r') as f:
return json.load(f, object_pairs_hook=OrderedDict) if ordered else json.load(f)
else:
return {}
def write_json(path, data):
with open(path, 'w') as f:
return json.dump(data, f, indent=4)
def collect_preprocessor(preprocessor):
imgPreprocessor = ImagePreprocessor()
return getattr(imgPreprocessor, preprocessor.lower())
def parse_input_shapes(shape_str, batch_size=1):
shape = [part.strip() for part in shape_str.split(",")]
shape[0] = batch_size
return tuple(map(int, shape))
def load_index(path):
"""
    Load a testset file with newline-separated tests,
    where each line is an input image path,
    followed by a space, followed by the class number.
    Return the full parsed list of [image_path, class_number] pairs.
"""
base = '/'.join(path.split("/")[0:-1])
parsed_index = []
with open(path, "r") as test_set_file:
testset = test_set_file.read()
testset = testset.strip()
testset = testset.split("\n")
for line in testset:
line = line.split(" ")
if len(line) == 2:
line[0] = os.path.join(base, line[0])
line[1] = int(line[1])
parsed_index.append(line)
return parsed_index
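# Illustrative example (file names and labels are made up): write a two-line index
# file and parse it with load_index; image paths are resolved relative to the
# directory containing the index file.
def _example_load_index(tmp_dir="/tmp"):
    index_path = os.path.join(tmp_dir, "example_index.txt")
    with open(index_path, "w") as f:
        f.write("images/cat_001.jpg 0\nimages/dog_042.jpg 1\n")
    # -> [['/tmp/images/cat_001.jpg', 0], ['/tmp/images/dog_042.jpg', 1]]
    return load_index(index_path)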
def get_numeric_loglevel(loglevel):
numeric_level = getattr(logging, loglevel.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % loglevel)
return numeric_level
def load_keras_model(path):
return tf.keras.models.load_model(path)
def create_leip_runtime_module(base, context):
# Collect TVM Context Definitions for execution engine
ctx = get_tvm_context(context)
# Check for legacy runtime artifacts
if os.path.isfile(os.path.join(base, "modelDescription.json")) and os.path.isfile(os.path.join(base, "modelParams.params")):
# Load the graph, functions and weights of the function into memory
return create_multi_artifact_runtime(base, ctx)
return create_single_artifact_runtime(base, ctx)
def create_multi_artifact_runtime(base, ctx):
graph = load_json(os.path.join(base, "modelDescription.json"))
lib = tvm.runtime.load_module(os.path.join(base, "modelLibrary.so"))
params = read_binary_file(os.path.join(base, "modelParams.params"))
# Reimann
cast_params = get_cast_params(params, base, os.path.isfile(os.path.join(base, "quantParams.params")))
graph = json.dumps(graph)
# Create TVM runtime module and load weights
module = graph_runtime.create(graph, lib, ctx)
module.load_params(cast_params)
return module
def create_single_artifact_runtime(base, ctx):
lib = tvm.runtime.load_module(os.path.join(base, "modelLibrary.so"))
return tvm.contrib.graph_runtime.GraphModule(lib['default'](ctx))
def get_tvm_context(context):
return tvm.device(context) if context in ['cuda', 'cpu', 'gpu'] else tvm.cpu(0)
def get_cast_params(loaded_params, base, quant_params_exist):
if quant_params_exist:
quantization_file = glob.glob(os.path.join(base, "quantParams.params"))[0]
loaded_params_qparams = read_binary_file(quantization_file)
return dequantize(loaded_params, loaded_params_qparams)
return loaded_params
def read_binary_file(path):
with open(path, "rb") as f:
bindata = bytearray(f.read())
return bindata
def dequantize(params, q_params):
dequantized_dict = {}
q_params_dict = relay.load_param_dict(q_params)
params_dict = relay.load_param_dict(params)
for k, v in params_dict.items():
quant_arr = params_dict[k].asnumpy()
q_params = q_params_dict[k].asnumpy()
scale = q_params[3]
zpoint = q_params[4]
dequant_array = np.multiply(scale, (quant_arr - zpoint)).astype(np.float32)
dequantized_dict[k] = tvm.runtime.ndarray.array(dequant_array)
return relay.save_param_dict(dequantized_dict)
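# End-to-end sketch (model directory, image path, shape string and preprocessor
# name are assumptions): prepare one image and run it through a compiled LEIP/TVM
# runtime module using the helpers above.
def _example_prepare_and_run(model_dir="model_bin", image_path="sample.jpg"):
    shape = parse_input_shapes("1, 224, 224, 3")      # -> (1, 224, 224, 3)
    preprocessor = collect_preprocessor("imagenet")   # must be a method of ImagePreprocessor
    data = collect_image(image_path, "float32", preprocessor, shape)
    module = create_leip_runtime_module(model_dir, "cpu")
    module.set_input(0, data)
    module.run()
    return module.get_output(0).asnumpy()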
| 29.752747
| 128
| 0.682733
|
ca65a1d31a020f1618a22db5745636441be1d2dd
| 8,245
|
py
|
Python
|
vae_restore_seg.py
|
mevah/Unsupervised-Lesion-Detection-via-Image-Restoration-with-a-Normative-Prior
|
bbedc446cfe6b028e6e9f9d150931e1e1cccf367
|
[
"MIT"
] | null | null | null |
vae_restore_seg.py
|
mevah/Unsupervised-Lesion-Detection-via-Image-Restoration-with-a-Normative-Prior
|
bbedc446cfe6b028e6e9f9d150931e1e1cccf367
|
[
"MIT"
] | null | null | null |
vae_restore_seg.py
|
mevah/Unsupervised-Lesion-Detection-via-Image-Restoration-with-a-Normative-Prior
|
bbedc446cfe6b028e6e9f9d150931e1e1cccf367
|
[
"MIT"
] | 1
|
2022-02-19T11:25:14.000Z
|
2022-02-19T11:25:14.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Restoration on VAE
Created on Fri Dec 28 15:26:16 2018
@author: syou
Modified on Thu Jan 30 16:03:45 2020
@author: himeva
"""
import numpy as np
import tensorflow as tf
import os
import pandas as pd
from vars_layers import input_set
from vae_graph import q_zx, p_xz, loss
from utils import num2str, path_make, data_load
from datetime import datetime
import h5py
# Assign GPU no
os.environ["CUDA_VISIBLE_DEVICES"]=os.environ['SGE_GPU'] #<GPU_NO>
# Parameter Settings
sequence_number = 1
mode = 'FsTVRestoration'
model = 'VanillaVAESegloss'
start_time = datetime.utcnow().isoformat()
rho = np.array([2.2])
batchsize = 60 # restoration batch
imageshape = [240,240]
dim_z = 1 # latent variable z's channel
dim_x = imageshape[0]*imageshape[1] # dimension of input
clipstd = [0.0,1.0]
interval = 500 # the interval to restore the output
niter = 1 # number of intervals
restore_steps = niter * interval
stepa = 99 # step of changing alpha
alphas = np.array([5e-3,2e-3]) # step size
gradient_clip = True
clipbound = 50
dataset_train = 'BraTS2018train'
dataset_test = 'BraTS2018test'
datapath = path_make('/scratch_net/biwidl201/himeva/MasterThesis-Meva/','data','')
datapath = path_make('','data','')
"""
The same VAE graph
"""
# input
tf.reset_default_graph()
x, x_reshape = input_set(dim_x, imageshape[0], imageshape[1])
x_p, x_p_reshape = input_set(dim_x, imageshape[0], imageshape[1])
bmask_in = tf.placeholder(shape = [None]+ imageshape , dtype = tf.float32)
bmask_reshape = tf.reshape(bmask_in, shape = [-1, dim_x])
rho_ = tf.placeholder(tf.float32, shape=())
# the same graph applied
qzx_kernels = np.tile([3,3,3,3,3,3,1], [2,1]) #
qzx_channels = [64,64,64,64,64,64]
z_sampled, z_mean, z_std = q_zx(x_reshape,dim_z,qzx_kernels[0],qzx_kernels[1],qzx_channels)
pxz_kernels = np.tile([1,3,3,3,3,3,3], [2,1])
pxz_channels = [64, 64, 64, 64, 64, 64, 1]
xz_mean, xz_logvarinv = p_xz(z_sampled, pxz_kernels[0], pxz_kernels[1], pxz_channels, clipstd)
_, z_loss, _ = loss(z_mean, z_std, xz_mean, xz_logvarinv, x_reshape)
l2 = tf.reduce_sum(tf.squared_difference(tf.reshape(xz_mean, [-1, dim_x]), x), axis = 1)
xz_std = tf.exp(- 0.5 * xz_logvarinv)
# the loss for the restoration
TV = tf.image.total_variation(tf.subtract(x_p_reshape, x_reshape))
loss1 = -1*(l2 + z_loss)
loss2 = - rho_ * TV
# Gradient dloss/dy
grads = tf.gradients([loss1,loss2], [x])[0]
upperbound = tf.cast(tf.fill(tf.shape(grads), clipbound), dtype = tf.float32)
lowerbound = tf.cast( tf.fill(tf.shape(grads),-1*clipbound), dtype = tf.float32)
clipgrads = tf.clip_by_value(grads, lowerbound , upperbound, name = 'cliped_updating_gradient')
gradimage = tf.reshape(clipgrads,shape = tf.stack([-1] + imageshape))
gradsmasked = tf.multiply(clipgrads , bmask_reshape)
# tensorflow saver setting
saver = tf.train.Saver(tf.trainable_variables())
tvars = tf.trainable_variables()
sess = tf.Session()
# set trained model path
modelpath = os.path.join(path_make(model,'CamCANT2','TrainModel'+ num2str(sequence_number)) , 'model','')
# load trained model
if os.path.exists(modelpath):
    file = open(modelpath+'model/checkpoint','r') # read the first line of the checkpoint file; change if a different checkpoint is needed
checkpoint = file.readline()
file.close()
sess.run(tf.global_variables_initializer())
saver.restore(sess,checkpoint[24:-2])
ckpt = tf.train.get_checkpoint_state(modelpath)
trained_step = int(os.path.basename(ckpt.model_checkpoint_path).split('-')[1])
else:
raise ValueError('No Trained Model Imported')
# setup tensorboard log
merged = tf.summary.merge_all()
# gathering information of parameter settings
info = [['mode', 'slice' + mode], ['sequence number',sequence_number],
['batch size',batchsize], ['dimension of latent variable',dim_z],
['dimension of input image patch',dim_x], ['std lowerbound @ p_x_z output',clipstd],
['trained step of the model',trained_step], ['lambda(s)', rho],
['step size, alpha', alphas], ['restoration steps', restore_steps],
['gradient_clip', gradient_clip], ['simulation',True],
['clipbound for restoration gradient', clipbound],
['model path', modelpath], ['start_time(UTC)',start_time]]
infodf = pd.DataFrame(info, columns = ['name','value'])
# Restore BraTSLGG
restoredata = 'BraTSLGG'
MRtest, MRlabel = data_load(process = 'Test', dataset = 'BraTSLGG', datapath = datapath)
bmasks = h5py.File( datapath + 'BraTSLGG_data.hdf5', 'r')
for rho0 in rho:
batchsize = 60
    batch = bmasks['Mask'].shape[0] // batchsize  # integer number of full batches
indeces = np.arange(batch*batchsize)
    print('restore for rho', rho0)
savepath = path_make(model,restoredata, 'Dataslicehe0.06' + mode + num2str(sequence_number)+ '/' + str(rho0))
if not os.path.exists(savepath):
os.makedirs(savepath)
infodf.to_csv(savepath + 'Parameter_settings.csv', index=False)
np.save(os.path.join(savepath, 'index.npy'), indeces)
for k in np.arange(batch+1):
if k == batch:
batchsize2 = bmasks['Label'].shape[0] - batch*batchsize +1
index = range(bmasks['Label'].shape[0]-batchsize2, bmasks['Label'].shape[0])
batchsize = batchsize2
else:
index = list(indeces[k*batchsize:(k+1)*batchsize] )
bmask = bmasks['Mask'][list(index)][:,22:180,17:215]
rawData = MRtest[list(index)][:,22:180,17:215]
labels = MRlabel[list(index)][:,22:180,17:215]
step_restore = np.zeros(list(rawData.shape) + [niter])
MC2restore = rawData.reshape(-1,dim_x)
MC_paint = MC2restore.copy()
        print('Start restoration of image batch', k)
for step in range(restore_steps):
if step > stepa:
alpha = alphas[-1]
else:
alpha = alphas[0]
MC2restore += alpha * sess.run(gradsmasked,feed_dict = {x: MC2restore, x_p: MC_paint, rho_: rho0, bmask_in: bmask})
if step == restore_steps - 1:
step_restore[:,:,:,-1] = MC2restore.reshape(batchsize,imageshape[0],imageshape[1])
np.save(os.path.join(savepath, 'restored_images' + str(k) +'.npy'), step_restore)
# Restore BraTSHGG
restoredata = 'BraTSHGG'
MRtest, MRlabel = data_load(process = 'Test', dataset = 'BraTSHGG', datapath = datapath)
for rho0 in rho:
batchsize = 60
    batch = bmasks['Mask'].shape[0] // batchsize  # integer number of full batches
indeces = np.arange(batch*batchsize)
    print('restore for rho', rho0)
savepath = path_make(model,restoredata, 'Dataslicehe0.06' + mode + num2str(sequence_number)+ '/' + str(rho0))
if not os.path.exists(savepath):
os.makedirs(savepath)
infodf.to_csv(savepath + 'Parameter_settings.csv', index=False)
np.save(os.path.join(savepath, 'index.npy'), indeces)
for k in np.arange(batch+1):
if k == batch:
batchsize2 = bmasks['Label'].shape[0] - batch*batchsize
index = range(bmasks['Label'].shape[0]-batchsize2, bmasks['Label'].shape[0])
batchsize = batchsize2
else:
index = list(indeces[k*batchsize:(k+1)*batchsize] )
bmask = bmasks['Mask'][list(index)][:,22:180,17:215]
rawData = MRtest[list(index)][:,22:180,17:215]
labels = MRlabel[list(index)][:,22:180,17:215]
step_restore = np.zeros(list(rawData.shape) + [niter])
MC2restore = rawData.reshape(-1,dim_x)
MC_paint = MC2restore.copy()
        print('Start restoration of image batch', k)
for step in range(restore_steps):
if step > stepa:
alpha = alphas[-1]
else:
alpha = alphas[0]
MC2restore += alpha * sess.run(gradsmasked,feed_dict = {x: MC2restore, x_p: MC_paint, rho_: rho0, bmask_in: bmask})
if step == restore_steps - 1:
step_restore[:,:,:,-1] = MC2restore.reshape(batchsize,imageshape[0],imageshape[1])
np.save(os.path.join(savepath, 'restored_images' + str(k) +'.npy'), step_restore)
print('Restoration Finished')
sess.close()
| 38.348837
| 131
| 0.654821
|
87e00060c54c376d4c1793eca6a3c0a219a7ce92
| 15,312
|
py
|
Python
|
verto/tests/InteractiveTagTest.py
|
uccser/verto
|
d36aa88b208f1700fafc033679bd1e9775496d25
|
[
"MIT"
] | 4
|
2017-04-10T06:09:54.000Z
|
2019-05-04T02:07:40.000Z
|
verto/tests/InteractiveTagTest.py
|
uccser/verto
|
d36aa88b208f1700fafc033679bd1e9775496d25
|
[
"MIT"
] | 268
|
2017-04-03T20:40:46.000Z
|
2022-02-04T20:10:08.000Z
|
verto/tests/InteractiveTagTest.py
|
uccser/kordac
|
d36aa88b208f1700fafc033679bd1e9775496d25
|
[
"MIT"
] | 1
|
2019-01-07T15:46:31.000Z
|
2019-01-07T15:46:31.000Z
|
import markdown
from unittest.mock import Mock
from collections import defaultdict
from verto.VertoExtension import VertoExtension
from verto.processors.InteractiveTagBlockProcessor import InteractiveTagBlockProcessor
from verto.errors.ArgumentMissingError import ArgumentMissingError
from verto.errors.ArgumentValueError import ArgumentValueError
from verto.tests.ProcessorTest import ProcessorTest
class InteractiveTagTest(ProcessorTest):
'''The interactive processor is a simple tag with a complex
output that relies on external systems.
    When writing tests, whether the thumbnail is externally or
    internally linked will change the output. If the thumbnail is
    internal then the required files must be modified to include
    this image.
'''
def __init__(self, *args, **kwargs):
'''Set processor name in class for file names.
'''
ProcessorTest.__init__(self, *args, **kwargs)
self.processor_name = 'interactive-tag'
self.tag_argument = 'interactive'
self.ext = Mock()
self.ext.jinja_templates = {'interactive': ProcessorTest.loadJinjaTemplate(self, 'interactive')}
self.ext.processor_info = ProcessorTest.loadProcessorInfo(self)
self.ext.required_files = defaultdict(set)
def test_whole_page_text(self):
'''Test whole page interactive with text is ignored.
'''
test_string = self.read_test_file(self.processor_name, 'whole_page_text.md')
blocks = self.to_blocks(test_string)
self.assertListEqual([False, False, False], [InteractiveTagBlockProcessor(self.ext, self.md.parser).test(blocks, block) for block in blocks], msg='"{}"'.format(test_string))
converted_test_string = markdown.markdown(test_string, extensions=[self.verto_extension])
expected_string = self.read_test_file(self.processor_name, 'whole_page_text_expected.html', strip=True)
self.assertEqual(expected_string, converted_test_string)
required_files = {
'interactives': set(),
'images': set(),
'page_scripts': set(),
}
self.assertEqual(self.verto_extension.required_files, required_files)
def test_iframe_parameters(self):
'''Test iframe interactive with parameters is correctly parsed.
'''
test_string = self.read_test_file(self.processor_name, 'iframe_parameters.md')
blocks = self.to_blocks(test_string)
self.assertListEqual([True], [InteractiveTagBlockProcessor(self.ext, self.md.parser).test(blocks, block) for block in blocks], msg='"{}"'.format(test_string))
converted_test_string = markdown.markdown(test_string, extensions=[self.verto_extension])
expected_string = self.read_test_file(self.processor_name, 'iframe_parameters_expected.html', strip=True)
self.assertEqual(expected_string, converted_test_string)
required_files = {
'interactives': {
"binary-cards"
},
'images': set(),
'page_scripts': set(),
}
self.assertEqual(self.verto_extension.required_files, required_files)
def test_in_page_missing_name(self):
'''Test ArgumentMissingError is raised when interactive name is not given.
'''
test_string = self.read_test_file(self.processor_name, 'in_page_missing_name.md')
blocks = self.to_blocks(test_string)
self.assertListEqual([True], [InteractiveTagBlockProcessor(self.ext, self.md.parser).test(blocks, block) for block in blocks], msg='"{}"'.format(test_string))
self.assertRaises(ArgumentMissingError, lambda x: markdown.markdown(x, extensions=[self.verto_extension]), test_string)
def test_missing_type(self):
'''Test ArgumentMissingError is raised when interactive type is not given.
'''
test_string = self.read_test_file(self.processor_name, 'missing_type.md')
blocks = self.to_blocks(test_string)
self.assertListEqual([True], [InteractiveTagBlockProcessor(self.ext, self.md.parser).test(blocks, block) for block in blocks], msg='"{}"'.format(test_string))
self.assertRaises(ArgumentMissingError, lambda x: markdown.markdown(x, extensions=[self.verto_extension]), test_string)
def test_invalid_type(self):
'''Test ArgumentValueError is raised when interactive type is not valid.
'''
test_string = self.read_test_file(self.processor_name, 'invalid_type.md')
blocks = self.to_blocks(test_string)
self.assertListEqual([True], [InteractiveTagBlockProcessor(self.ext, self.md.parser).test(blocks, block) for block in blocks], msg='"{}"'.format(test_string))
self.assertRaises(ArgumentValueError, lambda x: markdown.markdown(x, extensions=[self.verto_extension]), test_string)
def test_multiple_interactives(self):
'''Test multiple interactives in one file are all correctly parsed.
'''
test_string = self.read_test_file(self.processor_name, 'multiple_interactives.md')
blocks = self.to_blocks(test_string)
self.assertListEqual([False, True, False, True, False, True, False], [InteractiveTagBlockProcessor(self.ext, self.md.parser).test(blocks, block) for block in blocks], msg='"{}"'.format(test_string))
converted_test_string = markdown.markdown(test_string, extensions=[self.verto_extension])
expected_string = self.read_test_file(self.processor_name, 'multiple_interactives_expected.html', strip=True)
self.assertEqual(expected_string, converted_test_string)
required_files = {
'interactives': {
'binary-cards',
'arrows',
'flying-boxes'
},
'images': {
'binarycards.png'
},
'page_scripts': {
'interactive/flying-boxes/scripts.html'
},
}
self.assertEqual(self.verto_extension.required_files, required_files)
def test_default_thumbnail_in_required_files(self):
'''Test the thumbnail for a whole page interactive is required.'''
verto_extension_default = VertoExtension(
processors=[self.processor_name],
)
test_string = self.read_test_file(self.processor_name, 'whole_page_without_thumbnail_parameter.md')
converted_test_string = markdown.markdown(test_string, extensions=[verto_extension_default])
self.assertEqual(
verto_extension_default.required_files['images'],
set(['interactives/binary-cards/img/thumbnail.png'])
)
def test_default_thumbnail_not_in_required_files_with_override(self):
'''Test the thumbnail for a whole page interactive is not required when overriden.'''
verto_extension_default_thumbnail_override = VertoExtension(
processors=[self.processor_name],
settings={'add_default_interactive_thumbnails_to_required_files': False}
)
test_string = self.read_test_file(self.processor_name, 'whole_page_without_thumbnail_parameter.md')
converted_test_string = markdown.markdown(test_string, extensions=[verto_extension_default_thumbnail_override])
self.assertEqual(
verto_extension_default_thumbnail_override.required_files['images'],
set()
)
def test_custom_thumbnail_in_required_files(self):
'''Test the custom thumbnail for a whole page interactive is required.'''
verto_extension_default = VertoExtension(
processors=[self.processor_name],
)
test_string = self.read_test_file(self.processor_name, 'whole_page_with_thumbnail_parameter.md')
converted_test_string = markdown.markdown(test_string, extensions=[verto_extension_default])
self.assertEqual(
verto_extension_default.required_files['images'],
set(['binarycards.png'])
)
def test_custom_thumbnail_not_in_required_files_with_override(self):
'''Test the custom thumbnail for a whole page interactive is not required when overriden.'''
verto_extension_custom_thumbnail_override = VertoExtension(
processors=[self.processor_name],
settings={'add_custom_interactive_thumbnails_to_required_files': False}
)
test_string = self.read_test_file(self.processor_name, 'whole_page_with_thumbnail_parameter.md')
converted_test_string = markdown.markdown(test_string, extensions=[verto_extension_custom_thumbnail_override])
self.assertEqual(
verto_extension_custom_thumbnail_override.required_files['images'],
set()
)
def test_custom_arguments_parameters_true(self):
'''Tests to ensure that interactive tag is rendered correctly when parameters argument is required.
'''
settings = {
'processor_argument_overrides': {
'interactive-tag': {
'parameters': True
}
}
}
verto_extension_custom_rules = VertoExtension(
processors=[self.processor_name],
settings=settings
)
test_string = self.read_test_file(self.processor_name, 'parameters_true.md')
blocks = self.to_blocks(test_string)
self.assertListEqual([False, True, False], [InteractiveTagBlockProcessor(self.ext, self.md.parser).test(blocks, block) for block in blocks], msg='"{}"'.format(test_string))
converted_test_string = markdown.markdown(test_string, extensions=[verto_extension_custom_rules])
expected_string = self.read_test_file(self.processor_name, 'parameters_true_expected.html', strip=True)
self.assertEqual(expected_string, converted_test_string)
def test_custom_arguments_thumbnail_true(self):
'''Tests to ensure that interactive tag is rendered correctly when thumbnail argument is required.
'''
settings = {
'processor_argument_overrides': {
'interactive-tag': {
'thumbnail': True
}
}
}
verto_extension_custom_rules = VertoExtension(
processors=[self.processor_name],
settings=settings
)
test_string = self.read_test_file(self.processor_name, 'thumbnail_true.md')
blocks = self.to_blocks(test_string)
self.assertListEqual([False, True, False], [InteractiveTagBlockProcessor(self.ext, self.md.parser).test(blocks, block) for block in blocks], msg='"{}"'.format(test_string))
converted_test_string = markdown.markdown(test_string, extensions=[verto_extension_custom_rules])
expected_string = self.read_test_file(self.processor_name, 'thumbnail_true_expected.html', strip=True)
self.assertEqual(expected_string, converted_test_string)
def test_custom_arguments_parameters_and_thumbnail_true(self):
'''Tests to ensure that interactive tag is rendered correctly when type argument is not required and parameters argument is required.
'''
settings = {
'processor_argument_overrides': {
'interactive-tag': {
'parameters': True,
'thumbnail': True
}
}
}
verto_extension_custom_rules = VertoExtension(
processors=[self.processor_name],
settings=settings
)
test_string = self.read_test_file(self.processor_name, 'parameters_and_thumbnail_true.md')
blocks = self.to_blocks(test_string)
self.assertListEqual([False, True, False], [InteractiveTagBlockProcessor(self.ext, self.md.parser).test(blocks, block) for block in blocks], msg='"{}"'.format(test_string))
converted_test_string = markdown.markdown(test_string, extensions=[verto_extension_custom_rules])
expected_string = self.read_test_file(self.processor_name, 'parameters_and_thumbnail_true_expected.html', strip=True)
self.assertEqual(expected_string, converted_test_string)
# ~
# Doc Tests
# ~
def test_doc_example_in_page(self):
'''Example of an in-page interactive.
'''
test_string = self.read_test_file(self.processor_name, 'doc_example_in_page_usage.md')
blocks = self.to_blocks(test_string)
self.assertListEqual([True], [InteractiveTagBlockProcessor(self.ext, self.md.parser).test(blocks, block) for block in blocks], msg='"{}"'.format(test_string))
converted_test_string = markdown.markdown(test_string, extensions=[self.verto_extension])
expected_string = self.read_test_file(self.processor_name, 'doc_example_in_page_usage_expected.html', strip=True)
self.assertEqual(expected_string, converted_test_string)
required_files = {
'interactives': {
'binary-cards'
},
'images': set(),
'page_scripts': {
'interactive/binary-cards/scripts.html'
},
}
self.assertEqual(self.verto_extension.required_files, required_files)
def test_doc_example_iframe(self):
'''Example of an iframe interactive.
'''
test_string = self.read_test_file(self.processor_name, 'doc_example_iframe_usage.md')
blocks = self.to_blocks(test_string)
self.assertListEqual([True], [InteractiveTagBlockProcessor(self.ext, self.md.parser).test(blocks, block) for block in blocks], msg='"{}"'.format(test_string))
converted_test_string = markdown.markdown(test_string, extensions=[self.verto_extension])
expected_string = self.read_test_file(self.processor_name, 'doc_example_iframe_usage_expected.html', strip=True)
self.assertEqual(expected_string, converted_test_string)
required_files = {
'interactives': {
'binary-cards'
},
'images': set(),
'page_scripts': set(),
}
self.assertEqual(self.verto_extension.required_files, required_files)
def test_doc_example_override_html(self):
'''Example showing overriding the html-template.
'''
test_string = self.read_test_file(self.processor_name, 'doc_example_override_html.md')
blocks = self.to_blocks(test_string)
self.assertListEqual([True], [InteractiveTagBlockProcessor(self.ext, self.md.parser).test(blocks, block) for block in blocks], msg='"{}"'.format(test_string))
html_template = self.read_test_file(self.processor_name, 'doc_example_override_html_template.html', strip=True)
verto_extension = VertoExtension([self.processor_name], html_templates={self.tag_argument: html_template})
converted_test_string = markdown.markdown(test_string, extensions=[verto_extension])
expected_string = self.read_test_file(self.processor_name, 'doc_example_override_html_expected.html', strip=True)
self.assertEqual(expected_string, converted_test_string)
required_files = {
'interactives': {
'binary-cards'
},
'images': {
'binarycards.png'
},
'page_scripts': set(),
}
self.assertEqual(verto_extension.required_files, required_files)
| 46.969325
| 206
| 0.685867
|
50b46680f9e1bf708b16f8142498e38c2e805278
| 2,812
|
py
|
Python
|
covid-chestxray-dataset-master/scripts/browser.py
|
dhruv-varshney/dhrCOVID-19-Testing-Using-X-Ray-Images-Web-App-Development
|
4c37df7e47d32af255c26729d6c454759819b8c3
|
[
"MIT"
] | 2
|
2021-10-06T15:17:33.000Z
|
2021-11-03T05:20:55.000Z
|
covid-chestxray-dataset-master/scripts/browser.py
|
dhruv-varshney/dhrCOVID-19-Testing-Using-X-Ray-Images-Web-App-Development
|
4c37df7e47d32af255c26729d6c454759819b8c3
|
[
"MIT"
] | null | null | null |
covid-chestxray-dataset-master/scripts/browser.py
|
dhruv-varshney/dhrCOVID-19-Testing-Using-X-Ray-Images-Web-App-Development
|
4c37df7e47d32af255c26729d6c454759819b8c3
|
[
"MIT"
] | null | null | null |
import os
import shutil
from selenium import webdriver
import time
import tempfile
chromedriver_path = os.path.abspath(
os.path.join(
os.path.dirname(__file__),
"../../chromedriver"
)
)
class Browser(webdriver.Chrome):
def __init__(self, downloads_dir=None):
self.tempdir = None
if downloads_dir is None:
self.tempdir = tempfile.TemporaryDirectory()
self.downloads_dir = self.tempdir.name
print("Using temporary directory", self.downloads_dir)
else:
self.downloads_dir = downloads_dir
options = webdriver.ChromeOptions()
if self.downloads_dir is not None:
options.add_experimental_option("prefs",{"download.default_directory":self.downloads_dir})
options.gpu = False
options.headless = False
options.add_extension('extension.crx')
#print(chromedriver_path)
desired = options.to_capabilities()
desired['loggingPrefs'] = { 'performance': 'ALL'}
super().__init__(
desired_capabilities=desired,
executable_path = chromedriver_path
)
def get_local(self, path):
self.get("file://"+ os.path.abspath(path))
def save_mhtml(self, filename):
wait_time = 10
start_time = time.time()
#https://stackoverflow.com/questions/39327032/how-to-get-the-latest-file-in-a-folder-using-python
print("Saving from MHTML")
files_before = os.listdir(self.downloads_dir)
print("listed files before")
self.execute_script("""
var data = { type: "FROM_PAGE", text: "page.mhtml" };
window.postMessage(data, "*");
""")
print("executed script")
while True:
files_after = os.listdir(self.downloads_dir)
new_files = list(set(files_after).difference(set(files_before)))
if len(new_files) > 1:
print("Too many files!")
time.sleep(5)
continue
elif len(new_files) == 1:
most_recent_download = os.path.join(self.downloads_dir, new_files[0])
if most_recent_download.endswith("crdownload"):
#Still downloading
continue
else:
print("waiting five seconds just in case")
downloaded = True
time.sleep(5)
shutil.move(most_recent_download, filename)
break
else:
if time.time() - start_time > wait_time:
#Start over
return self.save_mhtml(filename)
def close(self):
super().close()
if self.tempdir is not None:
self.tempdir.cleanup()
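# Usage sketch (the page path and output name are placeholders): open a local HTML
# file and save it as MHTML through the bundled extension.
def _example_capture(page="page.html", out="page.mhtml"):
    browser = Browser()  # downloads land in a temporary directory
    try:
        browser.get_local(page)
        browser.save_mhtml(out)
    finally:
        browser.close()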
| 33.879518
| 105
| 0.575391
|
776d42043b5d8a85e12297cc6fba24583c7e873b
| 9,590
|
py
|
Python
|
VoiceAssistant.py
|
Lee-Kevin/VoiceAssistant
|
da37dc6b2bb57d7def5595d515502a37786ff853
|
[
"MIT"
] | null | null | null |
VoiceAssistant.py
|
Lee-Kevin/VoiceAssistant
|
da37dc6b2bb57d7def5595d515502a37786ff853
|
[
"MIT"
] | null | null | null |
VoiceAssistant.py
|
Lee-Kevin/VoiceAssistant
|
da37dc6b2bb57d7def5595d515502a37786ff853
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from evernote.api.client import EvernoteClient
from HTMLParser import HTMLParser
import talkey
from weather import weatherReport
import threading
import time
# import library using grovepi
from grovepi import *
from grove_oled import *
from Nettime import TimeUpdate
logging.basicConfig(level='INFO')
# define a global threading lock
Global_Lock = threading.Lock()
class MyHTMLParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.ToDo = []
self.Flag = None
def handle_starttag(self, tag, attrs):
logging.info("Encountered a start tag: %s, %s", tag,attrs)
if tag == "en-todo":
logging.info( "this is to do tag:")
            if len(attrs) == 0: # an unchecked item that still needs to be done
                self.Flag = True
                logging.info("Here is an item that still needs to be done")
else:
if (attrs[0][0] == "checked" and attrs[0][1] == "true"):
logging.info("Here is already done")
def handle_data(self, data):
#print("Encountered some data :", data)
if self.Flag == True:
logging.info(data)
self.Flag = False
self.ToDo.append(data)
else:
pass
def GetResult(self):
result = self.ToDo
self.ToDo = []
return result
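# Illustrative example (the ENML snippet is made up): MyHTMLParser collects the
# text of unchecked <en-todo/> items and GetResult() returns and clears that list.
def _example_parse_todos():
    parser = MyHTMLParser()
    parser.feed('<en-note><en-todo/>Buy milk<en-todo checked="true"/>Call Bob</en-note>')
    return parser.GetResult()  # -> ['Buy milk']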
# 3bee4c0c-2caf-413c-9e49-d51da6fcdc8c
dev_token = "S=s1:U=92b7b:E=15d39d06877:C=155e21f3928:P=1cd:A=en-devtoken:V=2:H=1304173954fbc76d7432cdf262f7b228"
noteGuid = "1e77d88b-49e6-4410-aaf5-c85c3bb70a0d"
tts = talkey.Talkey()
tts.say("This is a test")
# Sign in the Evernote
client = None
noteStore = None
def SignInEvernote():
global client,noteStore
result = False
try:
client = EvernoteClient(token=dev_token)
userStore = client.get_user_store()
user = userStore.getUser() # here will throw an error
logging.info(user.username)
noteStore = client.get_note_store()
result = True
except Exception, e:
logging.warn(e)
return result
def GetNoteContent(noteGuid):
global noteStore
content = None
try:
content = noteStore.getNoteContent(noteGuid)
except Exception,e:
logging.warn(e)
return content
#parser = MyHTMLParser()
#parser.feed(content)
#This is the Time Out var.
TimeOutIndex = 0
weatherSpeach = None
city = "shenzhen"
weather = weatherReport(city)
def weatherInformation(weather):
speach = None
if weather.getWeather() == True:
speach = ("The weather is %s. Temperature: %.1f. Humidity: %.1f%%. Wind speed: %.1f meters per second" % (weather.weather_desc,weather.temperature,weather.humidity,weather.wind_speed))
logging.info(speach)
return speach
# A new class that used to manage the thread
class GetWeatherInfoThread(threading.Thread):
def __init__(self,timeout = 1.0):
threading.Thread.__init__(self)
self.timeout = timeout
self._running = True
self.weatherSpeach = None
self.subthread = None
def terminate(self):
self._running = False
def runloop(self,TimeInterval):
self._running = True
def TargetFun(self, _TimeInterval):
while self._running:
global weather
speach = weatherInformation(weather)
if speach != None:
global Global_Lock
Global_Lock.acquire()
self.weatherSpeach = speach
Global_Lock.release()
else:
pass
import time
time.sleep(_TimeInterval)
self.subthread = threading.Thread(target=TargetFun,args=(self, TimeInterval,))
self.subthread.start()
def isRunning(self):
if self.subthread.is_alive():
return True
else:
return False
# A new class that used to manage the thread
class GetEvernoteThread(threading.Thread):
def __init__(self,timeout = 1.0):
threading.Thread.__init__(self)
self.timeout = timeout
self._running = True
self.content = None
self.subthread = None
def terminate(self):
self._running = False
def runloop(self,TimeInterval,noteGuid):
self._running = True
def TargetFun(self, _TimeInterval,_noteGuid):
while self._running:
content = GetNoteContent(_noteGuid)
if content != None:
global Global_Lock
Global_Lock.acquire()
self.content = content
Global_Lock.release()
else:
pass
import time
time.sleep(_TimeInterval)
self.subthread = threading.Thread(target=TargetFun,args=(self, TimeInterval,noteGuid))
self.subthread.start()
def isRunning(self):
if self.subthread.is_alive():
return True
else:
return False
class OLEDUpdateThread(threading.Thread):
def __init__(self,timeout = 1.0):
threading.Thread.__init__(self)
oled_init()
oled_clearDisplay()
oled_setNormalDisplay()
oled_setVerticalMode()
time.sleep(.1)
oled_setTextXY(0,1) #Print "WEATHER" at line 1
oled_putString("Assistant")
oled_setTextXY(1,0) #Print "WEATHER" at line 1
oled_putString("------------")
oled_setTextXY(3,1) #Print "WEATHER" at line 1
oled_putString("----- --:--")
oled_setTextXY(5,0) #Print "WEATHER" at line 1
oled_putString("------------")
self.timeout = timeout
self._running = True
self.subthread = None
def terminate(self):
self._running = False
def runloop(self,TimeInterval):
self._running = True
def TargetFun(self,_TimeInterval):
while self._running:
try:
global weather
oled_setTextXY(3,1) #Print "WEATHER" at line 1
oled_putString(TimeUpdate())
oled_setTextXY(6,1) #Print "WEATHER" at line 1
oled_putString(weather.weather_desc)
oled_setTextXY(8,1) #Print "WEATHER" at line 1
oled_putString(str(weather.temperature)+"*C")
oled_setTextXY(10,1) #Print "WEATHER" at line 1
oled_putString("Power@Seeed")
import time
time.sleep(_TimeInterval)
logging.info("This is OLED threading")
except Exception,e:
logging.warn(e)
self.subthread = threading.Thread(target=TargetFun,args=(self, TimeInterval))
self.subthread.start()
def isRunning(self):
if self.subthread.is_alive():
return True
else:
return False
# The handle of door port
HandlePort = 14 # A0 Port
pinMode(HandlePort,"INPUT")
def ifButtonPressed():
global HandlePort,Task1,Task2
try:
sensor_value = analogRead(HandlePort)
# logging.info("The Sensor Value: %s",str(sensor_value))
if sensor_value > 800:
return True
else:
return False
except Exception,e:
logging.warn(e)
if __name__ == "__main__":
Task1Weather = GetWeatherInfoThread()
    Task1Weather.runloop(50) # refresh the weather report every 50 seconds
Task3OLED = OLEDUpdateThread()
Task3OLED.runloop(55)
SignResult = SignInEvernote()
while False == SignResult:
TimeOutIndex = TimeOutIndex + 1
logging.info(TimeOutIndex)
if TimeOutIndex == 10:
logging.warn("Still Can't Sign in the Evernote")
TimeOutIndex = 0
break
SignResult = SignInEvernote()
time.sleep(5)
logging.warn("Can't Sign in the Evernote")
# At here we're successed sign in the evernote
Task2Evernote = GetEvernoteThread()
if True == SignResult:
Task2Evernote.runloop(10,noteGuid)
else:
pass
parser = MyHTMLParser()
logging.info("Next we'll in loopever")
while True:
try:
if True == ifButtonPressed():
logging.info("There is someone want to go out")
logging.info(Task1Weather.weatherSpeach)
if Task1Weather.weatherSpeach != None:
tts.say(Task1Weather.weatherSpeach)
else:
pass
if Task2Evernote.content != None:
parser.feed(Task2Evernote.content)
content = parser.GetResult()
for result in content:
logging.info("The result is :%s",result)
tts.say(result)
if not SignResult:
tts.say("Don't forget take out the garbage")
time.sleep(30)
else :
pass
time.sleep(.1)
except KeyboardInterrupt:
Task1Weather.terminate()
Task2Evernote.terminate()
Task3OLED.terminate()
exit()
except Exception, e:
logging.info(e)
| 31.136364
| 192
| 0.565589
|
a667fcd4135d13e37e601156e5153914d11cb4bb
| 1,599
|
py
|
Python
|
knowledge_repo/app/deploy/gunicorn.py
|
johri21/knowledge-repo
|
d22a459ed129bd2eebbfec32e180069bcaf83d9a
|
[
"Apache-2.0"
] | 1
|
2019-04-09T20:11:15.000Z
|
2019-04-09T20:11:15.000Z
|
knowledge_repo/app/deploy/gunicorn.py
|
johri21/knowledge-repo
|
d22a459ed129bd2eebbfec32e180069bcaf83d9a
|
[
"Apache-2.0"
] | null | null | null |
knowledge_repo/app/deploy/gunicorn.py
|
johri21/knowledge-repo
|
d22a459ed129bd2eebbfec32e180069bcaf83d9a
|
[
"Apache-2.0"
] | 1
|
2019-05-06T13:22:14.000Z
|
2019-05-06T13:22:14.000Z
|
""" gunicorn.py
Utilities for running the knowledge app via gunicorn.
Adapted from example in http://docs.gunicorn.org/en/stable/custom.html.
"""
from __future__ import absolute_import
from gunicorn.app.base import BaseApplication
from .common import KnowledgeDeployer
class GunicornDeployer(BaseApplication, KnowledgeDeployer):
_registry_keys = ['gunicorn']
def __init__(self, *args, **kwargs):
KnowledgeDeployer.__init__(self, *args, **kwargs)
BaseApplication.__init__(self)
def load_config(self):
env_args = self.cfg.parser().parse_args(self.cfg.get_cmd_args_from_env())
# Load up environment configuration.
for key, value in vars(env_args).items():
if key != 'args' and value is not None:
self.cfg.set(key, value)
# Update the configuration with the options specified via KnowledgeDeployer
options = {
'bind': u'{}:{}'.format(self.host, self.port),
'workers': self.workers,
'timeout': self.timeout,
}
if self.app.config['DEPLOY_HTTPS']:
options['certfile'] = self.app.config['SSL_CERT']['cert']
options['keyfile'] = self.app.config['SSL_CERT']['key']
for key, value in options.items():
self.cfg.set(key, value)
def load(self):
return self.builder_func()
def run(self):
if not self.app.check_thread_support():
raise RuntimeError("Database configuration is not suitable for deployment (not thread-safe).")
return BaseApplication.run(self)
| 31.98
| 106
| 0.644153
|
3cda8396fd1d40f0796a5f2653bbd5d9e74679e8
| 895
|
py
|
Python
|
tlssecondopinion/urls.py
|
MiWCryptAnalytics/tlssecondopinion
|
f1eebf753cc898ba546bf1371f3ce1ea848d17d6
|
[
"BSD-2-Clause"
] | null | null | null |
tlssecondopinion/urls.py
|
MiWCryptAnalytics/tlssecondopinion
|
f1eebf753cc898ba546bf1371f3ce1ea848d17d6
|
[
"BSD-2-Clause"
] | 4
|
2017-04-13T02:51:42.000Z
|
2017-04-13T02:53:12.000Z
|
tlssecondopinion/urls.py
|
MiWCryptAnalytics/tlssecondopinion
|
f1eebf753cc898ba546bf1371f3ce1ea848d17d6
|
[
"BSD-2-Clause"
] | null | null | null |
"""tlssecondopinion URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from . import views
import tlsscanner
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^admin/', include(admin.site.urls)),
url(r'^scan/', include('tlsscanner.urls')),
]
| 34.423077
| 77
| 0.699441
|
0a0209ad5dbb303b1a095721bb81092d757703f0
| 399
|
py
|
Python
|
numba_compile.py
|
saturdays/hyperlearn
|
42b80c2b83c43d8bb84730efc3a9614e6affc01d
|
[
"BSD-3-Clause"
] | null | null | null |
numba_compile.py
|
saturdays/hyperlearn
|
42b80c2b83c43d8bb84730efc3a9614e6affc01d
|
[
"BSD-3-Clause"
] | null | null | null |
numba_compile.py
|
saturdays/hyperlearn
|
42b80c2b83c43d8bb84730efc3a9614e6affc01d
|
[
"BSD-3-Clause"
] | null | null | null |
print("******* Now compiling Numba and LLVM code..... *******")
print("******* This can be VERY SLOW. Please wait.... *******\n"
"Progress: |||||||||||||||", end = "")
from hyperlearn.numba.funcs import *
print("|||||||||||||||", end = "")
from hyperlearn.utils import *
print("|||||||||||||||")
from hyperlearn.stats import *
print("******* Code has been successfully compiled!:) *******")
| 23.470588
| 64
| 0.516291
|
eef2d22aab0bd829ef7e53418473d6a91f19fa09
| 6,219
|
py
|
Python
|
uuv_control/uuv_control_utils/scripts/set_thruster_state.py
|
jpliquid/testActions2
|
6f314fa1430f654e5943e47ac278101b9c24f938
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
uuv_control/uuv_control_utils/scripts/set_thruster_state.py
|
jpliquid/testActions2
|
6f314fa1430f654e5943e47ac278101b9c24f938
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
uuv_control/uuv_control_utils/scripts/set_thruster_state.py
|
jpliquid/testActions2
|
6f314fa1430f654e5943e47ac278101b9c24f938
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2020 The Plankton Authors.
# All rights reserved.
#
# This source code is derived from UUV Simulator
# (https://github.com/uuvsimulator/uuv_simulator)
# Copyright (c) 2016-2019 The UUV Simulator Authors
# licensed under the Apache license, Version 2.0
# cf. 3rd-party-licenses.txt file in the root directory of this source tree.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import rclpy
from uuv_gazebo_ros_plugins_msgs.srv import SetThrusterState
from plankton_utils.time import time_in_float_sec
from plankton_utils.time import is_sim_time
def build_service_name(ns, thruster_id, service_name) -> str :
return '/%s/thrusters/id_%d/%s' % (ns, thruster_id, service_name)
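# Example (vehicle namespace and thruster index are illustrative):
#   build_service_name('rexrov', 2, 'set_thruster_state')
#   -> '/rexrov/thrusters/id_2/set_thruster_state'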
#==============================================================================
def main():
rclpy.init()
sim_time_param = is_sim_time()
node = rclpy.create_node(
'set_thrusters_states',
allow_undeclared_parameters=True,
automatically_declare_parameters_from_overrides=True,
parameter_overrides=[sim_time_param])
# sim_time = rclpy.parameter.Parameter('use_sim_time', rclpy.Parameter.Type.BOOL, True)
# node.set_parameters([sim_time])
node.get_logger().info('Set the state of thrusters for vehicle, namespace=' + node.get_namespace())
starting_time = 0.0
if node.has_parameter('starting_time'):
starting_time = node.get_parameter('starting_time').value
node.get_logger().info('Starting time={} s'.format(starting_time))
duration = -1.0
if node.has_parameter('duration'):
duration = node.get_parameter('duration').value
if duration <= 0.0:
raise RuntimeError('Duration not set or negative, leaving node...')
    node.get_logger().info('Duration [s]=' + ('Inf.' if duration < 0 else str(duration)))
if node.has_parameter('is_on'):
is_on = bool(node.get_parameter('is_on').get_parameter_value().bool_value)
else:
raise RuntimeError('is_on state flag not provided')
thruster_id = -1
if node.has_parameter('thruster_id'):
if node.get_parameter('thruster_id').type_ == rclpy.Parameter.Type.INTEGER:
            thruster_id = node.get_parameter('thruster_id').get_parameter_value().integer_value
else:
raise RuntimeError('Thruster ID not given')
if thruster_id < 0:
raise RuntimeError('Invalid thruster ID')
node.get_logger().info('Setting state of thruster #{} as {}'.format(thruster_id, 'ON' if is_on else 'OFF'))
vehicle_name = node.get_namespace().replace('/', '')
srv_name = build_service_name(vehicle_name, thruster_id, 'set_thruster_state')
try:
set_state = node.create_client(SetThrusterState, srv_name)
except Exception as e:
raise RuntimeError('Service call failed, error=' + str(e))
if not set_state.wait_for_service(timeout_sec=2):
raise RuntimeError('Service %s not available! Closing node...' %(srv_name))
FREQ = 100
rate = node.create_rate(FREQ)
thread = threading.Thread(target=rclpy.spin, args=(node,), daemon=True)
thread.start()
while time_in_float_sec(node.get_clock().now()) < starting_time:
#Just a guard for really short timeouts
if 1.0 / FREQ < starting_time:
rate.sleep()
# rate = node.create_rate(100)
# while time_in_float_sec(node.get_clock().now()) < starting_time:
# rate.sleep()
req = SetThrusterState.Request()
req.on = is_on
    future = set_state.call_async(req)
    # NB: spinning is done from another thread
    while not future.done():
        rate.sleep()
    try:
        response = future.result()
    except Exception as e:
        node.get_logger().error('Service call ' + srv_name + ' failed, error=' + str(e))
    else:
        node.get_logger().info('Time={} s'.format(time_in_float_sec(node.get_clock().now())))
        node.get_logger().info('Current state of thruster #{}={}'.format(thruster_id, 'ON' if is_on else 'OFF'))
# if success:
# print('Time={} s'.format(time_in_float_sec(node.get_clock().now().get_time())))
# print('Current state of thruster #{}={}'.format(thruster_id, 'ON' if is_on else 'OFF'))
if duration > 0:
while time_in_float_sec(node.get_clock().now()) < starting_time + duration:
if 1.0 / FREQ < starting_time + duration:
rate.sleep()
# rate = node.create_rate(100)
# while time_in_float_sec(node.get_clock().now()) < starting_time + duration:
# rate.sleep()
req.on = not is_on
        future = set_state.call_async(req)
        while not future.done():
            rate.sleep()
        try:
            response = future.result()
        except Exception as e:
            node.get_logger().error('Service call ' + srv_name + ' failed, error=' + str(e))
        else:
            node.get_logger().info('Time={} s'.format(time_in_float_sec(node.get_clock().now())))
            node.get_logger().info('Returning to previous state of thruster #{}={}'.format(thruster_id, 'ON' if not is_on else 'OFF'))
# if success:
# print('Time={} s'.format(time_in_float_sec(node.get_clock().now().get_time())))
# print('Returning to previous state of thruster #{}={}'.format(thruster_id, 'ON' if not is_on else 'OFF'))
node.get_logger().info('Leaving node...')
node.destroy_node()
rclpy.shutdown()
thread.join()
#==============================================================================
if __name__ == '__main__':
try:
main()
except Exception as e:
print('Something went wrong: ' + str(e))
| 38.153374
| 138
| 0.644798
|
992511a0d9d3aa34cdce51276881f3f775e4493f
| 439
|
py
|
Python
|
dice_prog/mult_diceroll.py
|
fededev01/programs-py
|
62c633d4958540f98c0914020ad123fda55504dc
|
[
"Apache-2.0"
] | null | null | null |
dice_prog/mult_diceroll.py
|
fededev01/programs-py
|
62c633d4958540f98c0914020ad123fda55504dc
|
[
"Apache-2.0"
] | null | null | null |
dice_prog/mult_diceroll.py
|
fededev01/programs-py
|
62c633d4958540f98c0914020ad123fda55504dc
|
[
"Apache-2.0"
] | null | null | null |
import random
import time
roll_again = "yes"
while roll_again == "yes" or roll_again == "y":
print("Rolling the dices...")
time.sleep(2)
fir = random.randint(1,6)
sec = random.randint(1,6)
print("Your first number is: ")
time.sleep(2)
print(fir)
print("Your second number is: ")
time.sleep(2)
print(sec)
roll_again=input("Vuoi continuare a giocare? (yes/y)")
print("You ended the game!")
| 23.105263
| 58
| 0.624146
|
0713dfcc7991ceaf2417e066944d9c380f603416
| 953
|
py
|
Python
|
security_monkey/watchers/iam/iam_user.py
|
cncoder/security_monkey
|
7d14c00e6c18a0edf87830ff4191007b6296b945
|
[
"Apache-2.0"
] | null | null | null |
security_monkey/watchers/iam/iam_user.py
|
cncoder/security_monkey
|
7d14c00e6c18a0edf87830ff4191007b6296b945
|
[
"Apache-2.0"
] | null | null | null |
security_monkey/watchers/iam/iam_user.py
|
cncoder/security_monkey
|
7d14c00e6c18a0edf87830ff4191007b6296b945
|
[
"Apache-2.0"
] | 1
|
2019-06-15T14:03:34.000Z
|
2019-06-15T14:03:34.000Z
|
from security_monkey.cloudaux_watcher import CloudAuxWatcher
from cloudaux.aws.iam import list_users
from cloudaux.orchestration.aws.iam.user import get_user
class IAMUser(CloudAuxWatcher):
index = 'iamuser'
i_am_singular = 'IAM User'
i_am_plural = 'IAM Users'
def __init__(self, *args, **kwargs):
super(IAMUser, self).__init__(*args, **kwargs)
self.honor_ephemerals = True
self.ephemeral_paths = [
"PasswordLastUsed",
"AccessKeys$*$LastUsedDate",
"AccessKeys$*$Region",
"AccessKeys$*$ServiceName",
"_version"]
self.override_region = 'universal'
def get_name_from_list_output(self, item):
return item['UserName']
def _get_regions(self):
return ['cn-north-1']
def list_method(self, **kwargs):
return list_users(**kwargs)
def get_method(self, item, **kwargs):
return get_user(item, **kwargs)
| 28.878788
| 60
| 0.644281
|
cd4a875e9eb95fab65008f88e3afc6bf01fc7cd4
| 3,553
|
py
|
Python
|
test/test_heartbeat_monitoring.py
|
Namyalg/mongo-python-driver
|
fc85a24888e3c1fe556eb8f755aeecb053b5815e
|
[
"Apache-2.0"
] | null | null | null |
test/test_heartbeat_monitoring.py
|
Namyalg/mongo-python-driver
|
fc85a24888e3c1fe556eb8f755aeecb053b5815e
|
[
"Apache-2.0"
] | null | null | null |
test/test_heartbeat_monitoring.py
|
Namyalg/mongo-python-driver
|
fc85a24888e3c1fe556eb8f755aeecb053b5815e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the monitoring of the server heartbeats."""
import sys
import threading
sys.path[0:0] = [""]
from test import IntegrationTest, client_knobs, unittest
from test.utils import HeartbeatEventListener, MockPool, single_client, wait_until
from pymongo.errors import ConnectionFailure
from pymongo.hello import Hello, HelloCompat
from pymongo.monitor import Monitor
class TestHeartbeatMonitoring(IntegrationTest):
def create_mock_monitor(self, responses, uri, expected_results):
listener = HeartbeatEventListener()
with client_knobs(
heartbeat_frequency=0.1, min_heartbeat_interval=0.1, events_queue_frequency=0.1
):
class MockMonitor(Monitor):
def _check_with_socket(self, *args, **kwargs):
if isinstance(responses[1], Exception):
raise responses[1]
return Hello(responses[1]), 99
m = single_client(
h=uri, event_listeners=(listener,), _monitor_class=MockMonitor, _pool_class=MockPool
)
expected_len = len(expected_results)
# Wait for *at least* expected_len number of results. The
# monitor thread may run multiple times during the execution
# of this test.
wait_until(lambda: len(listener.events) >= expected_len, "publish all events")
try:
# zip gives us len(expected_results) pairs.
for expected, actual in zip(expected_results, listener.events):
self.assertEqual(expected, actual.__class__.__name__)
self.assertEqual(actual.connection_id, responses[0])
if expected != "ServerHeartbeatStartedEvent":
if isinstance(actual.reply, Hello):
self.assertEqual(actual.duration, 99)
self.assertEqual(actual.reply._doc, responses[1])
else:
self.assertEqual(actual.reply, responses[1])
finally:
m.close()
def test_standalone(self):
responses = (
("a", 27017),
{HelloCompat.LEGACY_CMD: True, "maxWireVersion": 4, "minWireVersion": 0, "ok": 1},
)
uri = "mongodb://a:27017"
expected_results = ["ServerHeartbeatStartedEvent", "ServerHeartbeatSucceededEvent"]
self.create_mock_monitor(responses, uri, expected_results)
def test_standalone_error(self):
responses = (("a", 27017), ConnectionFailure("SPECIAL MESSAGE"))
uri = "mongodb://a:27017"
# _check_with_socket failing results in a second attempt.
expected_results = [
"ServerHeartbeatStartedEvent",
"ServerHeartbeatFailedEvent",
"ServerHeartbeatStartedEvent",
"ServerHeartbeatFailedEvent",
]
self.create_mock_monitor(responses, uri, expected_results)
if __name__ == "__main__":
unittest.main()
| 37.797872
| 100
| 0.650155
|
ff0a6249fe000bcd8206c203319547d893f8d93d
| 8,559
|
py
|
Python
|
YOLO/Stronger-yolo-pytorch/dataset/augment/image.py
|
ForrestPi/ObjectDetection
|
54e0821e73f67be5360c36f01229a123c34ab3b3
|
[
"MIT"
] | 12
|
2020-03-25T01:24:22.000Z
|
2021-09-18T06:40:16.000Z
|
YOLO/Stronger-yolo-pytorch/dataset/augment/image.py
|
ForrestPi/ObjectDetection
|
54e0821e73f67be5360c36f01229a123c34ab3b3
|
[
"MIT"
] | 1
|
2020-04-22T07:52:36.000Z
|
2020-04-22T07:52:36.000Z
|
YOLO/Stronger-yolo-pytorch/dataset/augment/image.py
|
ForrestPi/ObjectDetection
|
54e0821e73f67be5360c36f01229a123c34ab3b3
|
[
"MIT"
] | 4
|
2020-03-25T01:24:26.000Z
|
2020-09-20T11:29:09.000Z
|
import cv2
import numpy as np
import random
from PIL import Image
import matplotlib.pyplot as plt
def fixed_crop(src, x0, y0, w, h, size=None, interp=2):
"""Crop src at fixed location, and (optionally) resize it to size.
Parameters
----------
src : NDArray
Input image
x0 : int
Left boundary of the cropping area
y0 : int
Top boundary of the cropping area
w : int
Width of the cropping area
h : int
Height of the cropping area
size : tuple of (w, h)
Optional, resize to new size after cropping
interp : int, optional, default=2
Interpolation method. See resize_short for details.
Returns
-------
NDArray
An `NDArray` containing the cropped image.
"""
    img = src[y0:y0 + h, x0:x0 + w, :]
    if size is not None:
        img = cv2.resize(img, size, interpolation=interp)
    return img
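# Illustrative usage (hypothetical array name): crop an 80x50 patch whose top-left
# corner is at (x0=10, y0=20), then resize the crop to 224x224:
#   patch = fixed_crop(frame, x0=10, y0=20, w=80, h=50, size=(224, 224))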
def random_flip(src, px=0, py=0, copy=False):
"""Randomly flip image along horizontal and vertical with probabilities.
Parameters
----------
src : mxnet.nd.NDArray
Input image with HWC format.
px : float
Horizontal flip probability [0, 1].
py : float
Vertical flip probability [0, 1].
copy : bool
If `True`, return a copy of input
Returns
-------
mxnet.nd.NDArray
Augmented image.
tuple
Tuple of (flip_x, flip_y), records of whether flips are applied.
"""
flip_y = np.random.choice([False, True], p=[1 - py, py])
flip_x = np.random.choice([False, True], p=[1 - px, px])
if flip_y:
src = np.flipud(src)
if flip_x:
src = np.fliplr(src)
if copy:
src = src.copy()
return src, (flip_x, flip_y)
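# Illustrative usage (hypothetical array name): flip horizontally with probability
# 0.5, never vertically, and record which flips were applied:
#   aug, (flipped_x, flipped_y) = random_flip(frame, px=0.5, py=0.0)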
def random_color_distort(src, brightness_delta=32, contrast_low=0.5, contrast_high=1.5,
saturation_low=0.5, saturation_high=1.5, hue_delta=18):
"""Randomly distort image color space.
Note that input image should in original range [0, 255].
Parameters
----------
src : mxnet.nd.NDArray
Input image as HWC format.
brightness_delta : int
Maximum brightness delta. Defaults to 32.
contrast_low : float
Lowest contrast. Defaults to 0.5.
contrast_high : float
Highest contrast. Defaults to 1.5.
saturation_low : float
Lowest saturation. Defaults to 0.5.
saturation_high : float
Highest saturation. Defaults to 1.5.
hue_delta : int
Maximum hue delta. Defaults to 18.
Returns
-------
mxnet.nd.NDArray
Distorted image in HWC format.
"""
def brightness(src, delta, p=0.5):
"""Brightness distortion."""
if np.random.uniform(0, 1) > p:
delta = np.random.uniform(-delta, delta)
src += delta
return src
return src
def contrast(src, low, high, p=0.5):
"""Contrast distortion"""
if np.random.uniform(0, 1) > p:
alpha = np.random.uniform(low, high)
src *= alpha
return src
return src
def saturation(src, low, high, p=0.5):
"""Saturation distortion."""
if np.random.uniform(0, 1) > p:
alpha = np.random.uniform(low, high)
gray = src * np.array([[[0.299, 0.587, 0.114]]])
gray = np.sum(gray, axis=2, keepdims=True)
gray *= (1.0 - alpha)
src *= alpha
src += gray
return src
return src
def hue(src, delta, p=0.5):
"""Hue distortion"""
if np.random.uniform(0, 1) > p:
alpha = random.uniform(-delta, delta)
u = np.cos(alpha * np.pi)
w = np.sin(alpha * np.pi)
bt = np.array([[1.0, 0.0, 0.0],
[0.0, u, -w],
[0.0, w, u]])
tyiq = np.array([[0.299, 0.587, 0.114],
[0.596, -0.274, -0.321],
[0.211, -0.523, 0.311]])
ityiq = np.array([[1.0, 0.956, 0.621],
[1.0, -0.272, -0.647],
[1.0, -1.107, 1.705]])
t = np.dot(np.dot(ityiq, bt), tyiq).T
src = np.dot(src, np.array(t))
return src
return src
src = src.astype('float32')
# brightness
src = brightness(src, brightness_delta)
# color jitter
if np.random.randint(0, 2):
src = contrast(src, contrast_low, contrast_high)
src = saturation(src, saturation_low, saturation_high)
src = hue(src, hue_delta)
else:
src = saturation(src, saturation_low, saturation_high)
src = hue(src, hue_delta)
src = contrast(src, contrast_low, contrast_high)
return src
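# Illustrative usage (hypothetical array name): jitter brightness, contrast,
# saturation and hue of an image given in the original [0, 255] range:
#   distorted = random_color_distort(frame)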
def impad_to_square(img, pad_size):
'''Pad an image to ensure each edge to equal to pad_size.
Args
---
img: [height, width, channels]. Image to be padded
pad_size: Int.
Returns
---
ndarray: The padded image with shape of
[pad_size, pad_size, channels].
'''
shape = (pad_size, pad_size, img.shape[-1])
pad = np.zeros(shape, dtype=img.dtype)
pad[:img.shape[0], :img.shape[1], ...] = img
return pad
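# Illustrative example: a (300, 400, 3) image padded with pad_size=512 becomes a
# (512, 512, 3) array whose top-left 300x400 block is the original image and whose
# remaining entries are zero.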
def impad_to_multiple(img, divisor):
'''Pad an image to ensure each edge to be multiple to some number.
Args
---
img: [height, width, channels]. Image to be padded.
divisor: Int. Padded image edges will be multiple to divisor.
Returns
---
ndarray: The padded image.
'''
pad_h = int(np.ceil(img.shape[0] / divisor)) * divisor
pad_w = int(np.ceil(img.shape[1] / divisor)) * divisor
shape = (pad_h, pad_w, img.shape[-1])
pad = np.zeros(shape, dtype=img.dtype)
pad[:img.shape[0], :img.shape[1], ...] = img
return pad
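# Illustrative example: for a (375, 500, 3) image and divisor=32 the padded shape
# is (384, 512, 3), since ceil(375/32)*32 = 384 and ceil(500/32)*32 = 512.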
def img_resize(img, out_size):
'''Resize image while keeping the aspect ratio.
Args
---
img: [height, width, channels]. The input image.
out_size: Tuple of 2 integers. the image will be rescaled
as large as possible within the scale,(w,h)
Returns
---
np.ndarray: the scaled image.
'''
# h, w = img.shape[:2]
# max_long_edge = max(out_size)
# max_short_edge = min(out_size)
# scale_factor = min(max_long_edge / max(h, w),
# max_short_edge / min(h, w))
#
# new_size = (int(w * float(scale_factor) + 0.5),
# int(h * float(scale_factor) + 0.5))
rescaled_img = cv2.resize(
img, out_size, interpolation=cv2.INTER_LINEAR)
return rescaled_img
def imnormalize(img, mean, std):
'''Normalize the image.
Args
---
img: [height, width, channel]
mean: Tuple or np.ndarray. [3]
std: Tuple or np.ndarray. [3]
Returns
---
np.ndarray: the normalized image.
'''
img=img/255.0
img = (img - mean) / std
return img.astype(np.float32)
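# Illustrative example (assumed statistics, not mandated by this module): with the
# commonly used ImageNet values mean=(0.485, 0.456, 0.406) and std=(0.229, 0.224, 0.225),
# a pixel value of 255 in the first channel maps to (255/255.0 - 0.485) / 0.229 ~= 2.25.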
def imdenormalize(norm_img, mean, std):
'''Denormalize the image.
Args
---
norm_img: [height, width, channel]
mean: Tuple or np.ndarray. [3]
std: Tuple or np.ndarray. [3]
Returns
---
np.ndarray: the denormalized image.
'''
img = norm_img * std + mean
return img.astype(np.float32)
def random_expand(src, max_ratio=2, keep_ratio=True):
"""Random expand original image with borders, this is identical to placing
the original image on a larger canvas.
Parameters
----------
src : mxnet.nd.NDArray
The original image with HWC format.
max_ratio : int or float
Maximum ratio of the output image on both direction(vertical and horizontal)
fill : int or float or array-like
The value(s) for padded borders. If `fill` is numerical type, RGB channels
will be padded with single value. Otherwise `fill` must have same length
as image channels, which resulted in padding with per-channel values.
keep_ratio : bool
If `True`, will keep output image the same aspect ratio as input.
Returns
-------
mxnet.nd.NDArray
Augmented image.
tuple
Tuple of (offset_x, offset_y, new_width, new_height)
"""
if max_ratio <= 1:
return src, (0, 0, src.shape[1], src.shape[0])
h, w, c = src.shape
ratio_x = random.uniform(1, max_ratio)
if keep_ratio:
ratio_y = ratio_x
else:
ratio_y = random.uniform(1, max_ratio)
oh, ow = int(h * ratio_y), int(w * ratio_x)
off_y = random.randint(0, oh - h)
off_x = random.randint(0, ow - w)
dst=np.zeros(shape=(oh,ow,c))
dst[off_y:off_y + h, off_x:off_x + w, :] = src
return dst, (off_x, off_y, ow, oh)
def makeImgPyramids(imgs,scales,flip=False):
rescaled_imgs=[]
for scale in scales:
rescaled_img=[]
for img in imgs:
scaled_img=cv2.resize(img,dsize=(scale,scale))
rescaled_img.append(scaled_img)
rescaled_imgs.append(np.array(rescaled_img))
if not flip:
return rescaled_imgs
else:
fliped_imgs=[]
for pyramid in rescaled_imgs:
fliped_img=[np.fliplr(img) for img in pyramid]
fliped_imgs.append(np.array(fliped_img))
return rescaled_imgs+fliped_imgs
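# Illustrative example (hypothetical batch): for a list of N HxWx3 images and
# scales=[320, 416], makeImgPyramids returns two arrays of shape (N, 320, 320, 3) and
# (N, 416, 416, 3); with flip=True the horizontally flipped pyramids are appended
# after them, doubling the length of the returned list.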
| 26.416667
| 87
| 0.621217
|
7c03113b5d02d4bd5598e8556d08e3de523476de
| 15,215
|
py
|
Python
|
CurrUncertEllipses_ChiSqProb.py
|
hadjt/CurrUncertEllipses
|
d64dca9f4d18e2526748bb5980bf793c74fbe3cc
|
[
"CC0-1.0"
] | null | null | null |
CurrUncertEllipses_ChiSqProb.py
|
hadjt/CurrUncertEllipses
|
d64dca9f4d18e2526748bb5980bf793c74fbe3cc
|
[
"CC0-1.0"
] | null | null | null |
CurrUncertEllipses_ChiSqProb.py
|
hadjt/CurrUncertEllipses
|
d64dca9f4d18e2526748bb5980bf793c74fbe3cc
|
[
"CC0-1.0"
] | null | null | null |
from netCDF4 import Dataset,num2date
import pdb
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime#,timedelta
#import xarray
#lon = np.arange(-19.888889,12.99967+1/9.,1/9.)
#lat = np.arange(40.066669,65+1/15.,1/15.)
#import os
from scipy.stats import chi2
from CurrUncertEllipses import *
def main():
curr_uncert_prob_threshold_perc_data_in_xsd_table()
fig = curr_uncert_prob_threshold_perc_data_in_xsd_figure()
def curr_uncert_prob_threshold_perc_data_in_xsd_figure():
# Calculate and plot the percentage of data within an uncertainty ellipse
# of a given size (in terms of standard deviations).
#
    # As well as deriving these values from the chi-squared distribution table, two
    # numerical methods are used (see Tinker et al. (2022) for details).
#
# Produces Appendix Figure 7 in Tinker et al. 2022
#Array of Standard deviations
n_std_mat = np.arange(0,3.2,0.1)
    # percentage of data within an ellipse of a given standard deviation size, using the:
    # Statistical theoretical method (using chi-squared probability tables)
stat_sd_plev_mat = data_within_xsd_chi_sq(n_std_mat = n_std_mat)
#Gaussian distribution method (Integrating a bivariate Gaussian distribution within the ellipse)
gauss_sd_plev_mat = data_within_xsd_gauss_integ(n_std_mat = n_std_mat)
    # Random data method (asking what proportion of a random bivariate Gaussian data set falls within an ellipse).
rand_sd_plev_mat = data_within_xsd_random_cnt(n_std_mat = n_std_mat,npnts = 10000)# 100 = 1min, 1000 = 1 min # 10000 = 2 mins
print('Start plotting',datetime.now())
fig = plt.figure()
fig.set_figheight(4)
fig.set_figwidth(6.0)
plt.subplots_adjust(top=0.95,bottom=0.15,left=0.15,right=0.95,hspace=0.2,wspace=0.2)
plt.plot([0,nstd_cutoff(90),nstd_cutoff(90)],[90,90,0],'0.75')
plt.plot([0,nstd_cutoff(95),nstd_cutoff(95)],[95,95,0],'0.75')
plt.text(0.1,95,'95%', ha = 'left', va = 'center')
plt.text(0.1,90,'90%', ha = 'left', va = 'center')
plt.text(nstd_cutoff(90),5,'%.2f'%nstd_cutoff(90), ha = 'center', va = 'center')
plt.text(nstd_cutoff(95),5,'%.2f'%nstd_cutoff(95), ha = 'center', va = 'center')
plt.plot(n_std_mat, 100.*rand_sd_plev_mat.mean(axis = 1),'r', lw = 2, label = 'Random')
plt.plot(n_std_mat, 100.*rand_sd_plev_mat.mean(axis = 1) + 2*rand_sd_plev_mat.std(axis = 1),'r-', lw = 1)
plt.plot(n_std_mat, 100.*rand_sd_plev_mat.mean(axis = 1) - 2*rand_sd_plev_mat.std(axis = 1),'r-', lw = 1)
plt.plot(n_std_mat, 100.*gauss_sd_plev_mat.mean(axis = 1),'b', lw = 2, label = 'Distr Integ')
plt.plot(n_std_mat, 100.*gauss_sd_plev_mat.mean(axis = 1) + 2*gauss_sd_plev_mat.std(axis = 1),'b-', lw = 1)
plt.plot(n_std_mat, 100.*gauss_sd_plev_mat.mean(axis = 1) - 2*gauss_sd_plev_mat.std(axis = 1),'b-', lw = 1)
plt.plot(n_std_mat, 100.*stat_sd_plev_mat,'k--', lw = 2, label = 'Chi Sq')
plt.xlabel('Size of uncertainty ellipse\n(number of standard deviation)')
plt.ylabel('% Data within uncertainty ellipse')
plt.ylim([0,100])
plt.xlim([0,3])
plt.legend()
print('Return handle',datetime.now())
return fig
def curr_uncert_prob_threshold_perc_data_in_xsd_table():
# Produce a table of probabilty thresholds for ellipse size.
# Produces Appendix Table 1 in Tinker et al. 2022
perc_lev_mat = np.array([50, 75,90, 95, 97.5, 99,99.5 ])
p_lev = 1-(perc_lev_mat/100.)
chi_sq_table_vals_mat = nstd_cutoff(perc_lev_mat)**2
nstd_thresh_size_mat = nstd_cutoff(perc_lev_mat)
print('')
print('------------------------------------------------------------------------------------')
print('')
print('Uncertainty Ellipse size (in standard deviations) and data coverage (%),,Chi Squared Distribution Table (with 2 degrees of freedom),,')
print('Percentage of data within Uncertainty Ellipse,Size of uncertainty ellipse (# standard deviations),Critical value,Probability of exceeding the critical value')
for ii,jj,kk,ll in zip(perc_lev_mat,nstd_thresh_size_mat,p_lev,chi_sq_table_vals_mat,):print('%.1f%%,%.4f,%.3f,%.3f'%(ii,jj,kk,ll))
print('')
print('------------------------------------------------------------------------------------')
print('')
def nstd_cutoff(percent_val):
#For a given percentage value, how big (in standard deviations)
    # must the ellipse be to capture that percentage of data
#
# Based on the Chi-squared inverse survival function
nstd = np.sqrt(chi2.isf(1-percent_val/100, 2))
return nstd
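# Illustrative check (the same critical values appear in the chi-squared table below):
# chi2.isf(0.10, 2) ~= 4.605 and chi2.isf(0.05, 2) ~= 5.991, so
# nstd_cutoff(90) ~= sqrt(4.605) ~= 2.15 and nstd_cutoff(95) ~= sqrt(5.991) ~= 2.45.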
def data_within_xsd_chi_sq(n_std_mat = np.arange(0,3,0.1)):
# following:
#https://www.visiondummy.com/2014/04/draw-error-ellipse-representing-covariance-matrix
# To calculate the amount of data within an ellipse of size x std devs,
# we can use the chi squared probabilty table.
#chi squared probability table:
#https://people.richland.edu/james/lecture/m170/tbl-chi.html
#c&p 2df row and headers:
chi2_prob_2df = np.array([0.010,0.020,0.051,0.103,0.211,4.605,5.991,7.378,9.210,10.597])
chi2_prob_plev = np.array([0.995, 0.99, 0.975, 0.95, 0.90, 0.10, 0.05, 0.025, 0.01, 0.005])
# this can be created with python scipy.stats chi2:
# https://stackoverflow.com/questions/32301698/how-to-build-a-chi-square-distribution-table
chi_sq_prob_2df_table = chi2.isf(chi2_prob_plev, 2)
    # plotting 1-chi2_prob_plev against np.sqrt(chi_sq_prob_2df_table) gives you the
    # required number of std devs (sqrt of the critical value) to encapsulate x % of
    # data (1-chi2_prob_plev).
# for a given array of standard deviations, we can use this approach to
# calculate the percentage data within the corresponding ellipse.
# rather than using the inverse survival function, we now use the
# survival function
chi2_pval_mat = 1-chi2.sf(n_std_mat**2, 2)
return chi2_pval_mat #, chi2_prob_plev, chi_sq_prob_2df_table
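# Illustrative check: with 2 degrees of freedom chi2.sf(x, 2) = exp(-x/2), so a
# 1-standard-deviation ellipse contains 1 - exp(-0.5) ~= 39.3% of the data and a
# 2-standard-deviation ellipse contains 1 - exp(-2) ~= 86.5%.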
def data_within_xsd_gauss_integ_val(U_mean = 0.,U_var = 1.,V_mean = 0.,V_var = 1.,UV_corr = 0.5, n_std = 1.96, plotting = False, verbose = True, npnt_counting = 151, n_std_limits = 2):
# To calculate the amount of data within an ellipse of size x std devs,
# we can integrate a bivariate gaussian distribution surface within the ellipse.
# We do this numerically, so this is a semi-numerical semi-analytical method.
#
    # We create a discretised bivariate Gaussian distribution surface
    # (for given means, variances and covariance (actually correlation)).
# We find the (near constant) value of the surface around the ellipse, and
# then (numerically) integrate the values of the surface that are greater
# than this value.
#Covariance from Pearsons Correlation.
UV_cov = UV_corr*np.sqrt(U_var)*np.sqrt(V_var)
#details of the ellipse
X_elip_amp,Y_elip_amp,X_elip_phi,Y_elip_phi,X_elip_phi_cos,Y_elip_phi_cos = confidence_ellipse_uv_stats_parametric_equation(U_mean,V_mean,U_var, V_var, UV_cov)
twoaltone = np.array(([-1,1]))
ang = np.linspace(-np.pi,np.pi, 100)
#limits of the Gaussian surface
Xlim_val = n_std_limits*n_std*(X_elip_amp)
Ylim_val = n_std_limits*n_std*(Y_elip_amp)
if Xlim_val <(4*(X_elip_amp)):Xlim_val = (4*(X_elip_amp))
if Ylim_val <(4*(Y_elip_amp)):Ylim_val = (4*(Y_elip_amp))
Xlim = Xlim_val*twoaltone+U_mean
Ylim = Ylim_val*twoaltone+V_mean
# x and y mesh for the surface
tmpx_test = np.linspace(np.min((Xlim)),np.max((Xlim)),npnt_counting)
tmpy_test = np.linspace(np.min((Ylim)),np.max((Ylim)),npnt_counting)
tmpx_test_mat,tmpy_test_mat = np.meshgrid(tmpx_test,tmpy_test)
tmpdx = np.diff(tmpx_test).mean()
tmpdy = np.diff(tmpy_test).mean()
# the uncertainty ellipse
Xo = n_std*(X_elip_amp*np.sin(ang + X_elip_phi))+U_mean
Yo = n_std*(Y_elip_amp*np.sin(ang + Y_elip_phi))+V_mean
#Calcuate the Gaussian Surface over the x and y mesh, and around the ellipse
gauss = gauss_func_2d(tmpx_test_mat,tmpy_test_mat,U_mean,V_mean,U_var,V_var,UV_cov)[0]
gauss_ell = gauss_func_2d(Xo,Yo,U_mean,V_mean,U_var,V_var,UV_cov)[0]
    # find the distribution values that are greater than the (mean)
# ellipse distribution value
ind_inside_ell = gauss>=gauss_ell.mean()
    # The infinite bivariate distribution surface should integrate to 1.
    # By integrating the full discretised distribution, we get an idea of the
# error term
p_val_full_decrete_dist = gauss.sum()*tmpdx*tmpdy
# Integrating the values greater than the ellipse values is equivalent to
# integrating the values within the ellipse.
p_val = gauss[ind_inside_ell].sum()*tmpdx*tmpdy
if plotting:
ax = []
ax.append(plt.subplot(2,2,1))
plt.pcolormesh(tmpx_test_mat,tmpy_test_mat,gauss)
plt.contour(tmpx_test_mat,tmpy_test_mat,gauss, [gauss_ell.mean()], colors = 'y')
plt.plot(Xo,Yo,'r--')
ax.append(plt.subplot(2,2,2))
plt.pcolormesh(tmpx_test_mat,tmpy_test_mat,ind_inside_ell)
plt.contour(tmpx_test_mat,tmpy_test_mat,gauss, [gauss_ell.mean()], colors = 'y')
plt.plot(Xo,Yo,'r--')
ax.append(plt.subplot(2,2,3))
        plt.pcolormesh(tmpx_test_mat, tmpy_test_mat, gauss * ind_inside_ell)  # assumed intent: show only the part of the surface inside the ellipse
plt.contour(tmpx_test_mat,tmpy_test_mat,gauss, [gauss_ell.mean()], colors = 'y')
plt.plot(Xo,Yo,'r--')
if verbose: print(n_std, p_val)
return p_val, p_val_full_decrete_dist
#plt.show()
def data_within_xsd_random_cnt_val(U_mean = 0,U_var = 1,V_mean = 0,V_var = 1,UV_corr=0., npnts = 100000,n_std_mat = np.arange(0,3,0.01)):
# To calculate the amount of data within an ellipse of size x std devs,
# we can create a random data with a bivariate normal distribution for a
# given set of means, variance and covariance (actually correlation).
# We can then fit an ellipse to these data (for a given number of standard
    # deviations), and calculate the percentage of points within the ellipse.
# We then cycle through a range of standard deviations (n_std_mat)
#Covariance from Pearsons Correlation.
UV_cov = UV_corr*np.sqrt(U_var)*np.sqrt(V_var)
#Create a random data with a bivariate normal distribution
U_mat,V_mat = np.random.multivariate_normal([U_mean,V_mean], [[U_var,UV_cov],[UV_cov,V_var]], npnts).T
#cycle through a range of elipses sizes of varying standard deviations
n_perc_joint_mat = n_std_mat.copy()*0.
for ni,n_std in enumerate(n_std_mat):
#for a given standard deviation:
#find the uncertainty ellipse, a details of it:
X_elip_amp,Y_elip_amp,X_elip_phi,Y_elip_phi,X_elip_phi_cos,Y_elip_phi_cos = confidence_ellipse_uv_mat_parametric_equation(U_mat.reshape(-1,1,1),V_mat.reshape(-1,1,1), n_std = n_std)
qmax,qmin, ecc, theta_max, zero_ang = ellipse_parameters_from_parametric_equation(X_elip_amp,Y_elip_amp,X_elip_phi,Y_elip_phi,U_mean,V_mean)
# find the ellipse foci (important for asking whether a point is within an ellipse or not)
foci_max,foci_x_1,foci_y_1,foci_x_2,foci_y_2 = find_parameteric_ellipse_foci(qmax, qmin,theta_max,U_mean,V_mean,n_std)
# Ask which of our random data set are within the ellipse
pnt_inside_ell_sig_1,foci_pnt_foci_dist_sig = point_inside_parameteric_ellipse(U_mat.reshape(-1,1,1),V_mat.reshape(-1,1,1),n_std, foci_x_1,foci_y_1,foci_x_2,foci_y_2,qmax)
# Record the percentage of data within our ellipse.
n_perc_joint_mat[ni] = pnt_inside_ell_sig_1.sum()/pnt_inside_ell_sig_1.size
    # Repeat for a univariate normal distribution.
# ask which points are within x standard deviation of the mean
n_perc_single_mat = n_std_mat.copy()*0.
U_std = U_mat.std()
for ni,n_std in enumerate(n_std_mat):n_perc_single_mat[ni] = (np.abs((U_mat[:]-U_mean)/U_std)<=n_std).sum()/U_mat.size#((np.abs(U_mat)/U_std)<n_std).sum()/U_mat.size
return n_perc_joint_mat, n_perc_single_mat
################################################################################
def data_within_xsd_gauss_integ(n_std_mat = np.arange(0,3,0.1), U_mean_mat = np.arange(-1.5,1.,0.5), V_mean_mat = np.arange(-1.5,2,0.5), U_var_mat = np.arange(0.25,1.5,0.25), V_var_mat = np.arange(0.25,1.5,0.25), UV_corr_mat = np.arange(-0.75,1.00,0.25)):
# To calculate the amount of data within an ellipse of size x std devs,
# we can integrate a bivariate gaussian distribution surface within the ellipse.
#
    # Here we cycle through a range of values of mean, variance and covariance
# (actually correlations) and apply data_within_xsd_gauss_integ_val to
# create an ensemble of results, to show that there is very little dependence
# on the shape and location of the ellipse.
uv_ms_c_lst = [(U_mean,U_var,V_mean,V_var,UV_corr) for U_mean in U_mean_mat for V_mean in V_mean_mat for U_var in U_var_mat for V_var in V_var_mat for UV_corr in UV_corr_mat]
uv_ms_c_mat =np.array(uv_ms_c_lst)
print('Start Gaussian method',datetime.now()) # 2min run time
gauss_sd_plev_lst = []
for (U_mean,U_var,V_mean,V_var,UV_corr) in uv_ms_c_lst[:]:
gauss_sd_plev_lst_curr_it = []
for n_std in n_std_mat:
gauss_sd_plev_lst_curr_it.append(data_within_xsd_gauss_integ_val(U_mean = U_mean,U_var = U_var,V_mean = V_mean,V_var = V_var,UV_corr=UV_corr,n_std = n_std, plotting = False, verbose = False)[0])
gauss_sd_plev_lst.append(gauss_sd_plev_lst_curr_it)
gauss_sd_plev_mat = np.array(gauss_sd_plev_lst)
print('Stop Gaussian method',datetime.now())
return gauss_sd_plev_mat.T
def data_within_xsd_random_cnt(n_std_mat = np.arange(0,3,0.1),npnts = 1000, U_mean_mat = np.arange(-1.5,1.,0.5), V_mean_mat = np.arange(-1.5,2,0.5), U_var_mat = np.arange(0.25,1.5,0.25), V_var_mat = np.arange(0.25,1.5,0.25), UV_corr_mat = np.arange(-0.75,1.00,0.25)): # 1e4 = 10 mins, 1e3 = 2 mins
# To calculate the amount of data within an ellipse of size x std devs,
# we can create a random data with a bivariate normal distribution for a
# given set of means, variance and covariance (actually correlation).
#
    # Here we cycle through a range of values of mean, variance and covariance
    # (actually correlations) and apply data_within_xsd_random_cnt_val to
# create an ensemble of results, to show that there is very little dependence
# on the shape and location of the ellipse.
uv_ms_c_lst = [(U_mean,U_var,V_mean,V_var,UV_corr) for U_mean in U_mean_mat for V_mean in V_mean_mat for U_var in U_var_mat for V_var in V_var_mat for UV_corr in UV_corr_mat]
uv_ms_c_mat =np.array(uv_ms_c_lst)
print('Start random method',datetime.now()) # 2min run time
rand_sd_plev_lst = []
for (U_mean,U_var,V_mean,V_var,UV_corr) in uv_ms_c_lst[:]:
rand_sd_plev_lst.append(data_within_xsd_random_cnt_val(U_mean = U_mean,U_var = U_var,V_mean = V_mean,V_var = V_var,UV_corr=UV_corr,npnts = npnts,n_std_mat = n_std_mat)[0])
rand_sd_plev_mat = np.array(rand_sd_plev_lst)
print('Stop random method',datetime.now()) # 2min run time
return rand_sd_plev_mat.T
###################################################################
if __name__ == "__main__":
main()
| 45.553892
| 297
| 0.700559
|
b1ab1e18a0d320ad1c67938a99877338a8f1515e
| 3,680
|
py
|
Python
|
tests/platonpm/_utils/test_registry_utils.py
|
shinnng/platon.py
|
3197fac3839896290210da04dd0d45f0bdc731ce
|
[
"MIT"
] | null | null | null |
tests/platonpm/_utils/test_registry_utils.py
|
shinnng/platon.py
|
3197fac3839896290210da04dd0d45f0bdc731ce
|
[
"MIT"
] | null | null | null |
tests/platonpm/_utils/test_registry_utils.py
|
shinnng/platon.py
|
3197fac3839896290210da04dd0d45f0bdc731ce
|
[
"MIT"
] | null | null | null |
import pytest
from platonpm.exceptions import (
PlatonPMValidationError,
)
from platonpm.validation.uri import (
validate_registry_uri,
)
@pytest.mark.parametrize(
"uri",
(
# no package id in uri
("erc1319://zeppelinos.platon"),
("erc1319://zeppelinos.platon:1"),
("erc1319://zeppelinos.platon:1/"),
("erc1319://packages.zeppelinos.platon"),
("erc1319://packages.zeppelinos.platon:1"),
("erc1319://packages.zeppelinos.platon:1/"),
("erc1319://0xd3CdA913deB6f67967B99D67aCDFa1712C293601"),
("erc1319://0xd3CdA913deB6f67967B99D67aCDFa1712C293601:1"),
("erc1319://0xd3CdA913deB6f67967B99D67aCDFa1712C293601:1/"),
# with package id in uri
("erc1319://zeppelinos.platon/erc20/"),
("erc1319://zeppelinos.platon:1/erc20/"),
("erc1319://zeppelinos.platon:1/erc20//"),
("erc1319://zeppelinos.platon/erc20@1.0.0"),
("erc1319://zeppelinos.platon:1/erc20@1.0.0"),
("erc1319://zeppelinos.platon:1/erc20@1.0.0/"),
("erc1319://packages.zeppelinos.platon/erc20@"),
("erc1319://packages.zeppelinos.platon:1/erc20@"),
("erc1319://packages.zeppelinos.platon:1/erc20@/"),
("erc1319://packages.zeppelinos.platon/erc20@1.0.0"),
("erc1319://packages.zeppelinos.platon:1/erc20@1.0.0"),
("erc1319://packages.zeppelinos.platon:1/erc20@1.0.0/"),
("erc1319://packages.platon.platon/greeter@%3E%3D1.0.2%2C%3C2"),
("erc1319://packages.platon.platon:1/greeter@%3E%3D1.0.2%2C%3C2"),
("erc1319://0xd3CdA913deB6f67967B99D67aCDFa1712C293601/erc20@1.0.0"),
("erc1319://0xd3CdA913deB6f67967B99D67aCDFa1712C293601:1/erc20@1.0.0"),
("erc1319://0xd3CdA913deB6f67967B99D67aCDFa1712C293601:1/erc20@1.0.0/"),
("erc1319://0xd3CdA913deB6f67967B99D67aCDFa1712C293601:1/erc20@1.0.0/deployments/ERC139")
),
)
def test_is_registry_uri_validates(uri):
assert validate_registry_uri(uri) is None
@pytest.mark.parametrize(
"uri",
(
# invalid authority
("erc1319://zeppelinos.platon:333/erc20@1.0.0"),
("erc1319://packages.zeppelinos.com:1/erc20@1.0.0"),
("erc1319://package.manager.zeppelinos.platon:1/erc20@1.0.0"),
("erc1319://packageszeppelinoseth:1/erc20@1.0.0"),
("erc1319://0xd3cda913deb6f67967b99d67acdfa1712c293601:1/erc20@1.0.0"),
# invalid package name
("erc1319://packages.zeppelinos.platon/@1.0.0"),
("erc1319://packages.zeppelinos.platon:1/@1.0.0"),
("erc1319://packages.zeppelinos.platon:1/@1.0.0/"),
("erc1319://packages.zeppelinos.platon/!rc20?@1.0.0"),
("erc1319://packages.zeppelinos.platon:1/!rc20?@1.0.0"),
("erc1319://packages.zeppelinos.platon:1/!rc20?@1.0.0/"),
# malformed
("erc1319packageszeppelinosetherc20@1.0.0"),
("erc1319:packages.zeppelinos.platon:1/erc20@1.0.0"),
("erc1319:packages.zeppelinos.platon:1/erc20@1.0.0/"),
("erc1319:/packages.zeppelinos.platon:1/erc20@1.0.0"),
("erc1319:/packages.zeppelinos.platon:1/erc20@1.0.0/"),
("erc1319/packages.zeppelinos.platon:1/erc20@1.0.0"),
("erc1319//packages.zeppelinos.platon:1/erc20@1.0.0"),
("erc1319packages.zeppelinos.platon:1/erc20@1.0.0"),
# wrong scheme
("http://packages.zeppelinos.platon:1/erc20@1.0.0"),
("ercXX://packages.zeppelinos.platon:1/erc20@1.0.0"),
# no path
("erc1319://"),
("1234"),
),
)
def test_is_registry_uri_raises_exception_for_invalid_uris(uri):
with pytest.raises(PlatonPMValidationError):
validate_registry_uri(uri)
| 43.294118
| 97
| 0.63913
|
8fc1fca748815b002b23b5bed79698a778a5feac
| 15,554
|
py
|
Python
|
core/language_core.py
|
macwinlin-studio/GithuT
|
fc00080f9d65f585a78cebca936fb53a7441f718
|
[
"Apache-2.0"
] | null | null | null |
core/language_core.py
|
macwinlin-studio/GithuT
|
fc00080f9d65f585a78cebca936fb53a7441f718
|
[
"Apache-2.0"
] | null | null | null |
core/language_core.py
|
macwinlin-studio/GithuT
|
fc00080f9d65f585a78cebca936fb53a7441f718
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
import sqlite3
from os.path import isfile as os_path_isfile
from os.path import isdir as os_path_isdir
from os import rmdir as os_rmdir
from locale import getdefaultlocale as getlanguage
from platform import platform
from os import popen as os_popen
# project: GitHub Tools Language Core
# file: language_core.py
# author: MacWinLin Studio CGK Team
# email: githut@macwinlin.ml
# version: 22w24a
# Publish only on GitHub and MacWinLin Studio's GitLab.
# Copyright 2022 MacWinLin Studio.All rights reserved.
# Code
# Define Read Language
def readLanguage():
con = sqlite3.connect('.mwl-githut-data.db')
cur = con.cursor()
cur.execute('select * from data')
cache = cur.fetchone()
cur.close()
con.close()
return cache[1]
# Define Create Databese
def cdatabase():
con = sqlite3.connect('.mwl-githut-data.db')
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS data(id INTEGER PRIMARY KEY,language TEXT NOT NULL DEFAULT 'en-us',htoken INTEGER NOT NULL DEFAULT 0,token TEXT,alogin INTEGER NOT NULL DEFAULT 0,developM INTEGER NOT NULL DEFAULT 0,version TEXT NOT NULL DEFAULT 'a0.2-22w23a',updateT INTEGER NOT NULL DEFAULT 1,feedback INTEGER NOT NULL DEFAULT 0,feedback_admin TEXT,feedback_link TEXT NOT NULL DEFAULT 'https://githut.macwinlin.ml/feedback.json',clearToken INTEGER NOT NULL DEFAULT 0,ups TEXT NOT NULL DEFAULT 'https://githut.macwinlin.ml/update',debug INTEGER NOT NULL DEFAULT 0)")
cur.execute("INSERT INTO data values (1,'en-us',0,'123456',0,0,'a0.2-22w24a',1,0,'123456','https://githut.macwinlin.ml',0,'https://githut.macwinlin.ml/update',0)")
con.commit()
# Change Default Language,Only Support Simplified Chinese,Can Add
if 'zh' in getlanguage()[0]:
cur.execute("UPDATE data SET language='zh-cn' WHERE id=1")
con.commit()
cur.close()
con.close()
if 'Windows' in platform():
os_popen('attrib +H .mwl-githut-data.db')
# Define Read Database
def rdatabase():
return ['id','language','htoken','token','alogin','developM','version','updateT','feedback','feedback_admin']
# Create Database
if not os_path_isfile('.mwl-githut-data.db'):
if os_path_isdir('.mwl-githut-data.db'):
os_rmdir('.mwl-githut-data.db')
cdatabase()
# Language Function
class languageC():
def __init__(self):
# Add Language Please Change This List
# Language Module Won't Load Language,Please Use Reload Function To Load Language
self.lList = ['zh-cn','en-us']
def reload(self):
if readLanguage() == 'zh-cn':
self.aboutPMT = '关于程序'
self.aboutPMTgtv = '版本:alpha 0.2'
self.aboutPMTpgv = 'PyGithub版本:1.55'
self.aboutPMTpyv = '上次测试Python版本:3.9.1'
self.aboutPMTgithub = '打开GitHub仓库,请使用githut -po'
self.aboutAMT = '关于作者'
self.aboutAMTauthor = '作者:MacWinLin工作室'
self.aboutAMTmainD = '主开发者:Xinxin2021'
self.aboutAMTverN = '版本更新时间:2022年5月21日'
self.aboutAMTblog = '打开MWL工作室博客,请使用githut -ao'
self.githutH = '''您可以使用这些命令:
-a | --author 关于作者
-ao | --author-open 打开MacWinLin工作室博客
-p | --program 关于程序
-po | --program-open 打开GitHub仓库
-l | --license 查看开源协议
-c | --copyright 查看版权信息'''
self.help = '''你可以使用这些命令:
help 帮助
githut 关于
config 配置信息
login 登录
redata 重新生成数据库
backup 备份数据库
create 创建任何
import 导入备份
feedback 反馈至CGK团队'''
self.peToken = '请输入Token!'
self.hToken = '请先删除Token。'
self.nhToken = '请先添加Token。'
self.configH = '''您可以使用这些命令:
language <language> 更改GithuT语言
token [add | remove] <github-token> 更改GitHub Token
autologin [-y/--yes | -n/--no] 启用/禁用自动登录
develop [-y/--yes | -n/--no] 启用/禁用开发模式
feedback <link> 更改反馈服务器链接
ct [-y/--yes | -n/--no] 退出前清除Token
update [-y/--yes | -n/--no] 启用/禁用自动检查更新
UPS <link> 更改更新服务器链接'''
self.licenseE = '无法加载协议,请确认您连接到网络。'
self.accountH = '''您可以使用这些命令:
-u | --user 设置账号类型为个人
-o | --organization 设置账号类型为组织'''
self.iso = '账号类型已设置为组织。'
self.isu = '账号类型已设置为个人。'
self.terror = '无法登陆,请确认您的Token正确!'
self.loginS = '登录成功!'
self.loginSU = '用户名:'
self.Lname = '。名称:'
self.alY = '自动登录已启用!'
self.alN = '自动登录已禁用!'
self.NalY = '请先禁用自动登录!'
self.NalN = '请先启用自动登录!'
self.alogin = '开始自动登录!'
self.rdata = '已删除数据库文件!'
self.adata = '已重新生成数据库!'
self.errorInfo = '错误信息:'
self.ploginT = '请先登录!'
self.cerror = '命令错误。'
self.notc = '不是命令。'
self.createH = '''您可以使用以下命令:
repo <RepoName> 创建GitHub仓库'''
self.crepoE = '无法新建仓库!'
self.crepoS = '仓库新建成功!'
self.drepoE = '无法删除仓库!'
self.drepoS = '仓库删除成功!'
self.dmY = '开发模式已启用!'
self.dmN = '开发模式已禁用!'
self.NdmY = '请先禁用开发模式!'
self.NdmN = '请先启用开发模式!'
self.deleteH = '''您可以使用以下命令:
repo <RepoName> 删除GitHub仓库'''
self.couldtGetFeedbackServerInfo = '无法获取反馈服务器信息。'
self.feedbackServerClose = '反馈服务器已关闭。'
self.notSupportIPv6 = '您的电脑不支持IPv6。'
self.feedbackGetAdmin = '请输入您的反馈用户名:'
self.feedbackGetPassword = '请输入您的反馈密码:'
self.couldtLogin = '无法登录。'
self.loginSuccess = '登录成功。'
self.passwordError = '密码错误,请输入正确的密码。'
self.userNotFound = '此用户不存在。'
self.serverError = '反馈服务器错误。'
self.unknownError = '未知错误。'
self.feedbackType = '请选择反馈类型(bug/warn/debug):'
self.feedbackInfo = '请输入反馈信息(单独输入":w"以退出):\n'
self.couldtFeedback = '无法反馈至"'
self.feedbackSuccess = '反馈成功。'
self.blocked = '您的反馈账号已被封禁。'
self.ctY = '清除Token已启用!'
self.ctN = '清除Token已禁用!'
self.NctY = '请先禁用清除Token!'
self.NctN = '请先启用清除Token!'
self.acuY = '自动检查更新已启用!'
self.acuN = '自动检查更新已禁用!'
self.NacuY = '请先禁用自动检查更新!'
self.NacuN = '请先启用自动检查更新!'
else:
# About Program Part
self.aboutPMT = 'About Program'
self.aboutPMTgtv = 'Version:alpha 0.2'
self.aboutPMTpgv = 'PyGithub Version:1.55'
self.aboutPMTpyv = 'Last Test Python Version:3.9.1'
self.aboutPMTgithub = 'Open GitHub Repository,Please Use githut -po'
# About Author Part
self.aboutAMT = 'About Author'
self.aboutAMTauthor = 'Author:MacWinLin Studio'
self.aboutAMTmainD = 'Main Developer:Xinxin2021'
self.aboutAMTverN = 'Version Updated Time:May 21, 2022'
self.aboutAMTblog = 'Open MWL Studio\'s Blog,Please Use githut -am'
# Githut Helper Text
self.githutH = '''You can use these command:
-a | --author About Author
-ao | --author-open Open MacWinLin Studio's Blog
-p | --program About Program
-po | --program-open Open GitHub Repository
-l | --license See This Project's License
-c | --copyright See This Project's Copyright'''
# Global Helper Text
self.help = '''You can use these command:
help Helper
githut About
config Config Information
login Login GitHub Account
redata Rebuild Database
backup Backup Database
create Create Any
import Import Backup
feedback Feedback to MWL CGK Team'''
# About Token Text
self.peToken = 'Please enter token!'
self.hToken = 'Please delete the token first.'
self.nhToken = 'Please add the token first.'
self.configH = '''You can use these command:
language <language> Change GithuT Language
token [add | remove] <github-token> Change GitHub Token
autologin [-y/--yes | -n/--no] Enable/Disable Autologin
develop [-y/--yes | -n/--no] Enable/Disable Develop Mode
feedback <link> Change Feedback Server Link
ct [-y/--yes | -n/--no] Clear Token In Exit Program
update [-y/--yes | -n/--no] Enable/Disable Auto Check Update
UPS <link> Change Update Server
debug [-y/--yes | -n/--no] Enable/Disable Debug Mode'''
            self.licenseE = 'Couldn\'t load license, please confirm you are connected to the network.'
self.accountH = '''You can use these command:
-u | --user Set The Account Type To Individual
-o | --organization Set The Account Type To Organization'''
# Set Account Type
self.iso = 'The Account Type Is Set To Organization.'
self.isu = 'The Account Type Is Set To Individual.'
# Login
            self.terror = 'Couldn\'t login, please confirm your token is right!'
self.loginS = 'Login successful!'
self.loginSU = 'Username:'
self.Lname = '.Name:'
# Autologin
self.alY = 'Autologin is enable now!'
self.alN = 'Autologin is disable now!'
self.NalY = 'Please disable autologin first!'
self.NalN = 'Please enable autologin first!'
self.alogin = 'Autologin started!'
# Rebuild Database
self.rdata = 'Removed database file!'
self.adata = 'Rebuilded database!'
# All
self.errorInfo = 'Error Info:'
self.ploginT = 'Please login first!'
self.cerror = 'Command error.'
self.notc = ' is not a command.'
# Create
self.createH = '''You can use these command:
repo <RepoName> Create GitHub Repository'''
# =Repo
            self.crepoE = 'Couldn\'t create repository.'
self.crepoS = 'The repository was created successfully!'
# Develop Mode
self.dmY = 'Develop mode is enable now!'
self.dmN = 'Develop mode is disable now!'
self.NdmY = 'Please disable develop mode first!'
self.NdmN = 'Please enable develop mode first!'
# Delete
self.deleteH = '''You can use these command:
repo <RepoName> Delete GitHub Repository'''
# =Repo
            self.drepoE = 'Couldn\'t delete repository.'
self.drepoS = 'The repository was deleted successfully!'
# Feedback
            self.couldtGetFeedbackServerInfo = 'Couldn\'t get feedback server information.'
            self.feedbackServerClose = 'Feedback server is closed.'
            self.notSupportIPv6 = 'Your computer does not support IPv6.'
            self.feedbackGetAdmin = 'Please enter your feedback username:'
            self.feedbackGetPassword = 'Please enter your feedback password:'
            self.couldtLogin = 'Couldn\'t login.'
            self.loginSuccess = 'Login successful.'
            self.passwordError = 'Password error, please enter the right password.'
            self.userNotFound = 'This user does not exist.'
            self.serverError = 'Feedback server error.'
            self.unknownError = 'Unknown error.'
            self.feedbackType = 'Please select feedback type (bug/warn/debug):'
            self.feedbackInfo = 'Please enter feedback info (enter ":w" on its own line to exit):\n'
            self.couldtFeedback = 'Couldn\'t feedback to "'
self.feedbackSuccess = 'Feedback successful.'
self.blocked = 'Your feedback account was blocked.'
# Clear Token
self.ctY = 'Clear token is enable now!'
self.ctN = 'Clear token is disable now!'
self.NctY = 'Please disable clear token first!'
self.NctN = 'Please enable clear token first!'
# Auto Check Update
self.acuY = 'Auto check update is enable now!'
self.acuN = 'Auto check update is disable now!'
self.NacuY = 'Please disable auto check update first!'
self.NacuN = 'Please enable auto check update first!'
# Debug Mode
self.debugY = 'Debug mode is enable now!'
self.debugN = 'Debug mode is disable now!'
self.NdebugY = 'Please disable debug mode first!'
self.NdebugN = 'Please enable debug mode first!'
class BackupLanguage():
def __init__(self):
self.lList = ['zh-cn','en-us']
def reload(self):
if readLanguage() == 'zh-cn':
self.replace = '你想覆盖原来的文件吗?(y/n)'
self.replaceE = '请输入正确的选项!'
self.filename = '保存成功。文件:'
self.errorInfo = '错误信息:'
self.openE = '打开备份文件错误!'
self.path = '请输入备份文件路径:(示例:/home/githut/GithuT/backup-2022-4-23-305931.backup)'
self.pathE = '路径错误,请输入正确的路径!'
self.isBackup = '√ 它是一个备份文件。'
self.structrue = '√ 备份文件结构正确。'
self.itCanUse = '√ 它有备份数据。'
self.backupC = '备份已取消。'
self.canOpen = '√ 备份文件已打开。'
self.cantOpen = '× 备份文件无法打开。'
self.numberE = '× 它的文件数量不为2。'
self.structrueE = '× 备份文件结构错误。'
self.sha256E = '× 无法验证备份文件。'
self.jsonE = '× 无法加载备份数据。'
self.json = '√ 加载备份数据。'
self.readE = '× 无法读取数据。'
self.read = '√ 读取备份数据。'
self.verE = '× 备份文件版本低。'
self.ver = '√ 支持这个备份文件版本。'
self.importS = '√ 导入备份文件至数据库。'
self.importE = '× 无法导入备份文件至数据库。'
self.lenE = '× 备份文件数据过大/过小。'
else:
self.replace = 'Do you want to replace the previous?(y/n)'
self.replaceE = 'Please enter right option!'
            self.filename = 'Saved successfully. File:'
self.errorInfo = 'Error Info:'
self.openE = 'Open backup file error!'
self.path = 'Please enter backup file path:(e.g. /home/githut/GithuT/backup-2022-4-23-305931.backup)'
            self.pathE = 'Path error, please enter the right path!'
self.backupC = 'Backup was cancelled.'
self.canOpen = '√ The backup file can open.'
            self.cantOpen = '× The backup file couldn\'t open.'
self.isBackup = '√ It\'s a backup file.'
self.structrue = '√ The backup file structure is correct.'
            self.itCanUse = '√ It has backup data.'
            self.numberE = '× Its file count is not two.'
            self.structrueE = '× The backup file structure is incorrect.'
            self.sha256E = '× Couldn\'t verify backup file.'
            self.jsonE = '× Couldn\'t load backup data.'
            self.json = '√ Loaded backup data.'
            self.readE = '× Couldn\'t read data.'
self.read = '√ Read backup datas.'
self.verE = '× Backup file version is low.'
self.ver = '√ Support this backup file version.'
self.importS = '√ Imported backup file to database.'
self.importE = '× Could\'t import backup file to database.'
            self.lenE = '× Backup file data count is too big/too small.'
class UpdateLanguage():
def __init__(self):
self.lList = ['zh-cn','en-us']
def reload(self):
if readLanguage() == 'zh-cn':
self.haveNew = '有新版本,是否安装?(y/n)'
self.downloadE = '无法获取更新。'
else:
            self.haveNew = 'A new version is available, install it? (y/n)'
            self.downloadE = 'Couldn\'t get the update.'
| 45.479532
| 577
| 0.577986
|
0ac49721987d44c1e81cab0aae66fe6470c2d844
| 1,692
|
py
|
Python
|
adminmgr/media/code/A3/task3/BD_114_345_411_727_XQgmU0b.py
|
IamMayankThakur/test-bigdata
|
cef633eb394419b955bdce479699d0115d8f99c3
|
[
"Apache-2.0"
] | 9
|
2019-11-08T02:05:27.000Z
|
2021-12-13T12:06:35.000Z
|
adminmgr/media/code/A3/task3/BD_114_345_411_727_XQgmU0b.py
|
IamMayankThakur/test-bigdata
|
cef633eb394419b955bdce479699d0115d8f99c3
|
[
"Apache-2.0"
] | 6
|
2019-11-27T03:23:16.000Z
|
2021-06-10T19:15:13.000Z
|
adminmgr/media/code/A3/task3/BD_114_345_411_727_XQgmU0b.py
|
IamMayankThakur/test-bigdata
|
cef633eb394419b955bdce479699d0115d8f99c3
|
[
"Apache-2.0"
] | 4
|
2019-11-26T17:04:27.000Z
|
2021-12-13T11:57:03.000Z
|
import findspark
findspark.init()
from pyspark import SparkConf,SparkContext
from pyspark.streaming import StreamingContext
from pyspark.sql import Row,SQLContext
import sys
import requests
def splitHashtags(hashtags):
for i in hashtags:
yield (i,1)
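# Illustrative example (not part of the original job): splitHashtags is a generator,
# so list(splitHashtags(['#spark', '#bigdata'])) evaluates to
# [('#spark', 1), ('#bigdata', 1)], i.e. one (hashtag, 1) pair per tag.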
def sprint(p):
if not p.isEmpty():
out=p.collect()
j=0
top5=""
for i in out:
if(j<5):
top5=top5+str(i[0])+","#+str(i[1])+" "
j=j+1
else:
break
top5=top5[:-1]
print(top5)
conf=SparkConf()
conf.setAppName("BigData")
sc=SparkContext(conf=conf)
ssc=StreamingContext(sc,1)
ssc.checkpoint("~/Downloads/checkpoint_BIGDATA")
dataStream=ssc.socketTextStream("localhost",9009)
hashTags=dataStream.flatMap(lambda x: splitHashtags(x.split(';')[7].split(','))).\
filter(lambda y: len(y[0])>=1)
hashTagsCount=hashTags.\
reduceByKeyAndWindow(lambda agg,obj:agg+obj,None,int(sys.argv[1]),int(sys.argv[2]))
commonHashTags=hashTagsCount.\
transform(lambda rdd: rdd.sortBy(lambda x: x[0]))\
.map(lambda x: (x[1],x[0]))\
.transform(lambda rdd: rdd.sortByKey(False))\
.map(lambda y:(y[1],y[0]))
'''
commonHashTags=hashTagsCount.\
transform(lambda rdd: rdd.sortBy(lambda x:x[0].swapcase()))\
.map(lambda x: (x[1],x[0]))\
.transform(lambda rdd: rdd.sortByKey(False))\
.map(lambda y:(y[1],y[0]))
'''
commonHashTags.foreachRDD(sprint)
ssc.start()
ssc.awaitTermination(25)
ssc.stop()
| 29.172414
| 91
| 0.565603
|
42ba5c247744521660ea54e804287b7a1b7b72ab
| 287
|
py
|
Python
|
torch_tools/models/util/lambda_fn.py
|
gregunz/TorchTools
|
19a33f2e4cd38f86b74bd732949516df66f9e24f
|
[
"MIT"
] | null | null | null |
torch_tools/models/util/lambda_fn.py
|
gregunz/TorchTools
|
19a33f2e4cd38f86b74bd732949516df66f9e24f
|
[
"MIT"
] | null | null | null |
torch_tools/models/util/lambda_fn.py
|
gregunz/TorchTools
|
19a33f2e4cd38f86b74bd732949516df66f9e24f
|
[
"MIT"
] | null | null | null |
from torch import nn, Tensor
class Lambda(nn.Module):
def __init__(self, lambda_fn, *modules):
super().__init__()
self.lambda_fn = lambda_fn
        # Store any extra modules under a name that does not shadow nn.Module.modules().
        self.wrapped_modules = nn.ModuleList(modules)
def forward(self, x: Tensor) -> Tensor:
return self.lambda_fn(x)
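# Illustrative usage (assumed, not taken from the original repository): wrap an
# arbitrary tensor function so it can sit inside an nn.Sequential, e.g. a flatten step:
#   flatten = Lambda(lambda x: x.view(x.size(0), -1))
#   model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.AdaptiveAvgPool2d(1), flatten)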
| 23.916667
| 45
| 0.644599
|
517ac628d27ce111b6dee99a810dabab81af6b0d
| 1,174
|
py
|
Python
|
third_party/gpus/compress_find_cuda_config.py
|
storypku/tensorflow
|
71fdb8266f5c0b099993b09e616a8eddeb664f04
|
[
"Apache-2.0"
] | null | null | null |
third_party/gpus/compress_find_cuda_config.py
|
storypku/tensorflow
|
71fdb8266f5c0b099993b09e616a8eddeb664f04
|
[
"Apache-2.0"
] | null | null | null |
third_party/gpus/compress_find_cuda_config.py
|
storypku/tensorflow
|
71fdb8266f5c0b099993b09e616a8eddeb664f04
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Compresses the contents of find_cuda_config.py.oss.
The compressed file is what is actually being used. It works around remote
config not being able to upload files yet.
"""
import base64
import zlib
def main():
with open('find_cuda_config.py', 'rb') as f:
data = f.read()
compressed = zlib.compress(data)
b64encoded = base64.b64encode(compressed)
with open('find_cuda_config.py.gz.base64', 'wb') as f:
f.write(b64encoded)
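# For reference only, a minimal sketch of the reverse operation (this helper is an
# assumption for illustration and is not part of the original file): consumers would
# base64-decode and then zlib-decompress the generated artifact.
def _decompress_example(path='find_cuda_config.py.gz.base64'):
  with open(path, 'rb') as f:
    b64encoded = f.read()
  return zlib.decompress(base64.b64decode(b64encoded))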
if __name__ == '__main__':
main()
| 31.72973
| 80
| 0.691652
|
ef9bbded2a4a7a5c4f6824e261e2d8ef26909b96
| 48,359
|
py
|
Python
|
tensorflow/contrib/lite/python/lite_test.py
|
caszkgui/tensorflow
|
2db20be49c660a0c475cb57fe0935791d66433ed
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/contrib/lite/python/lite_test.py
|
caszkgui/tensorflow
|
2db20be49c660a0c475cb57fe0935791d66433ed
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/contrib/lite/python/lite_test.py
|
caszkgui/tensorflow
|
2db20be49c660a0c475cb57fe0935791d66433ed
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lite.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import numpy as np
from tensorflow.contrib.lite.python import lite
from tensorflow.contrib.lite.python import lite_constants
from tensorflow.contrib.lite.python.interpreter import Interpreter
from tensorflow.python import keras
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.variables import global_variables_initializer as _global_variables_initializer
from tensorflow.python.platform import gfile
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
from tensorflow.python.saved_model import saved_model
from tensorflow.python.training.training_util import write_graph
class FromConstructor(test_util.TensorFlowTestCase):
# Tests invalid constructors using a dummy value for the GraphDef.
def testInvalidConstructor(self):
message = ('If input_tensors and output_tensors are None, both '
'input_arrays_with_shape and output_arrays must be defined.')
# `output_arrays` is not defined.
with self.assertRaises(ValueError) as error:
lite.TFLiteConverter(
None, None, [], input_arrays_with_shape=[('input', [3, 9])])
self.assertEqual(message, str(error.exception))
# `input_arrays_with_shape` is not defined.
with self.assertRaises(ValueError) as error:
lite.TFLiteConverter(None, [], None, output_arrays=['output'])
self.assertEqual(message, str(error.exception))
# Tests valid constructors using a dummy value for the GraphDef.
def testValidConstructor(self):
converter = lite.TFLiteConverter(
None,
None,
None,
input_arrays_with_shape=[('input', [3, 9])],
output_arrays=['output'])
self.assertFalse(converter._has_valid_tensors())
self.assertEqual(converter.get_input_arrays(), ['input'])
with self.assertRaises(ValueError) as error:
converter._set_batch_size(1)
self.assertEqual(
'The batch size cannot be set for this model. Please use '
'input_shapes parameter.', str(error.exception))
converter = lite.TFLiteConverter(None, ['input_tensor'], ['output_tensor'])
self.assertTrue(converter._has_valid_tensors())
class FromSessionTest(test_util.TensorFlowTestCase):
def testFloat(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testQuantization(self):
in_tensor_1 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputA')
in_tensor_2 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputB')
out_tensor = array_ops.fake_quant_with_min_max_args(
in_tensor_1 + in_tensor_2, min=0., max=1., name='output')
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(
sess, [in_tensor_1, in_tensor_2], [out_tensor])
converter.inference_type = lite_constants.QUANTIZED_UINT8
converter.quantized_input_stats = {
'inputA': (0., 1.),
'inputB': (0., 1.)
} # mean, std_dev
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(2, len(input_details))
self.assertEqual('inputA', input_details[0]['name'])
self.assertEqual(np.uint8, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((1., 0.),
input_details[0]['quantization']) # scale, zero_point
self.assertEqual('inputB', input_details[1]['name'])
self.assertEqual(np.uint8, input_details[1]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
self.assertEqual((1., 0.),
input_details[1]['quantization']) # scale, zero_point
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('output', output_details[0]['name'])
self.assertEqual(np.uint8, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertTrue(output_details[0]['quantization'][0] > 0) # scale
def testQuantizationInvalid(self):
in_tensor_1 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputA')
in_tensor_2 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputB')
out_tensor = array_ops.fake_quant_with_min_max_args(
in_tensor_1 + in_tensor_2, min=0., max=1., name='output')
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(
sess, [in_tensor_1, in_tensor_2], [out_tensor])
converter.inference_type = lite_constants.QUANTIZED_UINT8
converter.quantized_input_stats = {'inputA': (0., 1.)} # mean, std_dev
with self.assertRaises(ValueError) as error:
converter.convert()
self.assertEqual(
'Quantization input stats are not available for input tensors '
'\'inputB\'.', str(error.exception))
def testSizeNoneInvalid(self):
in_tensor = array_ops.placeholder(dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Test invalid shape. No shape is provided for the input tensor.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
with self.assertRaises(ValueError) as error:
converter.convert()
self.assertEqual('Provide an input shape for input array \'Placeholder\'.',
str(error.exception))
def testBatchSizeInvalid(self):
in_tensor = array_ops.placeholder(
shape=[1, None, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Test invalid shape. None after 1st dimension.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
with self.assertRaises(ValueError) as error:
converter.convert()
self.assertEqual(
'None is only supported in the 1st dimension. Tensor '
'\'Placeholder\' has invalid shape \'[1, None, 16, 3]\'.',
str(error.exception))
def testBatchSizeValid(self):
in_tensor = array_ops.placeholder(
shape=[None, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testFreezeGraph(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
var = variable_scope.get_variable(
'weights', shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + var
sess = session.Session()
sess.run(_global_variables_initializer())
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
# TODO(nupurgarg): Verify value of contents in GraphViz.
def testGraphviz(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
converter.output_format = lite_constants.GRAPHVIZ_DOT
graphviz_output = converter.convert()
self.assertTrue(graphviz_output)
# TODO(nupurgarg): Verify value of contents in GraphViz.
def testDumpGraphviz(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
graphviz_dir = self.get_temp_dir()
converter.dump_graphviz_dir = graphviz_dir
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Ensure interpreter is able to allocate and check graphviz data.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
num_items_graphviz = len(os.listdir(graphviz_dir))
self.assertTrue(num_items_graphviz)
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
graphviz_dir = self.get_temp_dir()
converter.dump_graphviz_dir = graphviz_dir
converter.dump_graphviz_video = True
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Ensure graphviz folder has more data after using video flag.
num_items_graphviz_video = len(os.listdir(graphviz_dir))
self.assertTrue(num_items_graphviz_video > num_items_graphviz)
def testInferenceInputType(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
converter.inference_input_type = lite_constants.QUANTIZED_UINT8
converter.quantized_input_stats = {'Placeholder': (0., 1.)} # mean, std_dev
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.uint8, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((1., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
def testDefaultRangesStats(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
converter.inference_type = lite_constants.QUANTIZED_UINT8
converter.quantized_input_stats = {'Placeholder': (0., 1.)} # mean, std_dev
converter.default_ranges_stats = (0, 6) # min, max
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.uint8, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((1., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.uint8, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertTrue(output_details[0]['quantization'][0] > 0) # scale
def testPostTrainingQuantize(self):
np.random.seed(0)
# We need the tensor to have more than 1024 elements for quantize_weights
# to kick in. Thus, the [33, 33] shape.
in_tensor_1 = array_ops.placeholder(
shape=[33, 33], dtype=dtypes.float32, name='inputA')
in_tensor_2 = constant_op.constant(
np.random.uniform(low=-10., high=10., size=(33, 33)),
shape=[33, 33],
dtype=dtypes.float32,
name='inputB')
out_tensor = math_ops.matmul(in_tensor_1, in_tensor_2, name='output')
sess = session.Session()
# Convert float model.
float_converter = lite.TFLiteConverter.from_session(sess, [in_tensor_1],
[out_tensor])
float_tflite = float_converter.convert()
self.assertTrue(float_tflite)
# Convert quantized weights model.
quantized_converter = lite.TFLiteConverter.from_session(
sess, [in_tensor_1], [out_tensor])
quantized_converter.post_training_quantize = True
quantized_tflite = quantized_converter.convert()
self.assertTrue(quantized_tflite)
# Ensure that the quantized weights tflite model is smaller.
self.assertTrue(len(quantized_tflite) < len(float_tflite))
def testFlexMode(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
converter.converter_mode = lite.ConverterMode.TOCO_FLEX_ALL
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Ensures the model contains TensorFlow ops.
# TODO(nupurgarg): Check values once there is a Python delegate interface.
interpreter = Interpreter(model_content=tflite_model)
with self.assertRaises(RuntimeError) as error:
interpreter.allocate_tensors()
self.assertIn(
'Regular TensorFlow ops are not supported by this interpreter. Make '
'sure you invoke the Flex delegate before inference.',
str(error.exception))
def testFloatTocoConverter(self):
"""Tests deprecated test TocoConverter."""
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Ensure the interpreter is able to load.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
class FromFrozenGraphFile(test_util.TensorFlowTestCase):
def testFloat(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
sess.close()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_frozen_graph(graph_def_file,
['Placeholder'], ['add'])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testFloatWithShapesArray(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
sess.close()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_frozen_graph(
graph_def_file, ['Placeholder'], ['add'],
input_shapes={'Placeholder': [1, 16, 16, 3]})
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
def testFreezeGraph(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
var = variable_scope.get_variable(
'weights', shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + var
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
sess.close()
# Ensure the graph with variables cannot be converted.
with self.assertRaises(ValueError) as error:
lite.TFLiteConverter.from_frozen_graph(graph_def_file, ['Placeholder'],
['add'])
self.assertEqual('Please freeze the graph using freeze_graph.py.',
str(error.exception))
def testPbtxt(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pbtxt')
write_graph(sess.graph_def, '', graph_def_file, True)
sess.close()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_frozen_graph(graph_def_file,
['Placeholder'], ['add'])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testInvalidFileNotFound(self):
with self.assertRaises(IOError) as error:
lite.TFLiteConverter.from_frozen_graph('invalid_file', ['Placeholder'],
['add'])
self.assertEqual('File \'invalid_file\' does not exist.',
str(error.exception))
def testInvalidFileBadData(self):
graph_def_file = os.path.join(self.get_temp_dir(), 'invalid_file')
with gfile.Open(graph_def_file, 'wb') as temp_file:
temp_file.write('bad data')
temp_file.flush()
# Attempts to convert the invalid model.
with self.assertRaises(IOError) as error:
lite.TFLiteConverter.from_frozen_graph(graph_def_file, ['Placeholder'],
['add'])
self.assertEqual(
'Unable to parse input file \'{}\'.'.format(graph_def_file),
str(error.exception))
def _initObjectDetectionArgs(self):
# Initializes the arguments required for the object detection model.
# Looks for the model file which is saved in a different location internally
# and externally.
filename = resource_loader.get_path_to_datafile('testdata/tflite_graph.pb')
if not os.path.exists(filename):
filename = os.path.join(
resource_loader.get_root_dir_with_all_resources(),
'../tflite_mobilenet_ssd_quant_protobuf/tflite_graph.pb')
if not os.path.exists(filename):
raise IOError("File '{0}' does not exist.".format(filename))
self._graph_def_file = filename
self._input_arrays = ['normalized_input_image_tensor']
self._output_arrays = [
'TFLite_Detection_PostProcess', 'TFLite_Detection_PostProcess:1',
'TFLite_Detection_PostProcess:2', 'TFLite_Detection_PostProcess:3'
]
self._input_shapes = {'normalized_input_image_tensor': [1, 300, 300, 3]}
def testTFLiteGraphDef(self):
# Tests the object detection model that cannot be loaded in TensorFlow.
self._initObjectDetectionArgs()
converter = lite.TFLiteConverter.from_frozen_graph(
self._graph_def_file, self._input_arrays, self._output_arrays,
self._input_shapes)
converter.allow_custom_ops = True
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('normalized_input_image_tensor', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 300, 300, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(4, len(output_details))
self.assertEqual('TFLite_Detection_PostProcess', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 10, 4] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
self.assertEqual('TFLite_Detection_PostProcess:1',
output_details[1]['name'])
self.assertTrue(([1, 10] == output_details[1]['shape']).all())
self.assertEqual('TFLite_Detection_PostProcess:2',
output_details[2]['name'])
self.assertTrue(([1, 10] == output_details[2]['shape']).all())
self.assertEqual('TFLite_Detection_PostProcess:3',
output_details[3]['name'])
self.assertTrue(([1] == output_details[3]['shape']).all())
def testTFLiteGraphDefMissingShape(self):
# Tests invalid cases for the model that cannot be loaded in TensorFlow.
self._initObjectDetectionArgs()
# Missing `input_shapes`.
with self.assertRaises(ValueError) as error:
lite.TFLiteConverter.from_frozen_graph(
self._graph_def_file, self._input_arrays, self._output_arrays)
self.assertEqual('input_shapes must be defined for this model.',
str(error.exception))
def testTFLiteGraphDefInvalidShape(self):
# Tests invalid cases for the model that cannot be loaded in TensorFlow.
self._initObjectDetectionArgs()
# `input_shapes` does not contain the names in `input_arrays`.
with self.assertRaises(ValueError) as error:
lite.TFLiteConverter.from_frozen_graph(
self._graph_def_file,
self._input_arrays,
self._output_arrays,
input_shapes={'invalid-value': [1, 19]})
self.assertEqual(
'input_shapes must contain a value for each item in input_array.',
str(error.exception))
def testFloatTocoConverter(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
sess.close()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_frozen_graph(graph_def_file,
['Placeholder'], ['add'])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Ensure the model is able to load.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
class FromSavedModelTest(test_util.TensorFlowTestCase):
def _createSavedModel(self, shape):
"""Create a simple SavedModel."""
saved_model_dir = os.path.join(self.get_temp_dir(), 'simple_savedmodel')
with session.Session() as sess:
in_tensor_1 = array_ops.placeholder(
shape=shape, dtype=dtypes.float32, name='inputB')
in_tensor_2 = array_ops.placeholder(
shape=shape, dtype=dtypes.float32, name='inputA')
out_tensor = in_tensor_1 + in_tensor_2
inputs = {'x': in_tensor_1, 'y': in_tensor_2}
outputs = {'z': out_tensor}
saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
return saved_model_dir
def testSimpleModel(self):
"""Test a SavedModel."""
saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(2, len(input_details))
self.assertEqual('inputA', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
self.assertEqual('inputB', input_details[1]['name'])
self.assertEqual(np.float32, input_details[1]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
self.assertEqual((0., 0.), input_details[1]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testNoneBatchSize(self):
"""Test a SavedModel, with None in input tensor's shape."""
saved_model_dir = self._createSavedModel(shape=[None, 16, 16, 3])
converter = lite.TFLiteConverter.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(2, len(input_details))
self.assertEqual('inputA', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
self.assertEqual('inputB', input_details[1]['name'])
self.assertEqual(np.float32, input_details[1]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
self.assertEqual((0., 0.), input_details[1]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testOrderInputArrays(self):
"""Test a SavedModel ordering of input arrays."""
saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])
converter = lite.TFLiteConverter.from_saved_model(
saved_model_dir, input_arrays=['inputB', 'inputA'])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(2, len(input_details))
self.assertEqual('inputA', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
self.assertEqual('inputB', input_details[1]['name'])
self.assertEqual(np.float32, input_details[1]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
self.assertEqual((0., 0.), input_details[1]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testSubsetInputArrays(self):
"""Test a SavedModel with a subset of the input array names of the model."""
saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])
# Check case where input shape is given.
converter = lite.TFLiteConverter.from_saved_model(
saved_model_dir,
input_arrays=['inputA'],
input_shapes={'inputA': [1, 16, 16, 3]})
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check case where input shape is None.
converter = lite.TFLiteConverter.from_saved_model(
saved_model_dir, input_arrays=['inputA'], input_shapes={'inputA': None})
tflite_model = converter.convert()
self.assertTrue(tflite_model)
def testSimpleModelTocoConverter(self):
"""Test a SavedModel with deprecated TocoConverter."""
saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Ensure the model is able to load.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
class FromKerasFile(test_util.TensorFlowTestCase):
def setUp(self):
keras.backend.clear_session()
def _getSequentialModel(self):
with session.Session().as_default():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.RepeatVector(3))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
model.compile(
loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(),
metrics=[keras.metrics.categorical_accuracy],
sample_weight_mode='temporal')
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
model.predict(x)
try:
fd, keras_file = tempfile.mkstemp('.h5')
keras.models.save_model(model, keras_file)
finally:
os.close(fd)
return keras_file
def testSequentialModel(self):
"""Test a Sequential tf.keras model with default inputs."""
keras_file = self._getSequentialModel()
converter = lite.TFLiteConverter.from_keras_model_file(keras_file)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check tensor details of converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('dense_input', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('time_distributed/Reshape_1', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 3, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
# Check inference of converted model.
input_data = np.array([[1, 2, 3]], dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
tflite_result = interpreter.get_tensor(output_details[0]['index'])
keras_model = keras.models.load_model(keras_file)
keras_result = keras_model.predict(input_data)
np.testing.assert_almost_equal(tflite_result, keras_result, 5)
os.remove(keras_file)
def testSequentialModelInputArray(self):
"""Test a Sequential tf.keras model testing input arrays argument."""
keras_file = self._getSequentialModel()
# Invalid input array raises error.
with self.assertRaises(ValueError) as error:
lite.TFLiteConverter.from_keras_model_file(
keras_file, input_arrays=['invalid-input'])
self.assertEqual("Invalid tensors 'invalid-input' were found.",
str(error.exception))
# Valid input array.
converter = lite.TFLiteConverter.from_keras_model_file(
keras_file, input_arrays=['dense_input'])
tflite_model = converter.convert()
os.remove(keras_file)
self.assertTrue(tflite_model)
def testSequentialModelInputShape(self):
"""Test a Sequential tf.keras model testing input shapes argument."""
keras_file = self._getSequentialModel()
# Passing in the shape of an invalid input array has no impact as long as all
# input arrays have a shape.
converter = lite.TFLiteConverter.from_keras_model_file(
keras_file, input_shapes={'invalid-input': [2, 3]})
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Passing in shape of valid input array.
converter = lite.TFLiteConverter.from_keras_model_file(
keras_file, input_shapes={'dense_input': [2, 3]})
tflite_model = converter.convert()
os.remove(keras_file)
self.assertTrue(tflite_model)
# Check input shape from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('dense_input', input_details[0]['name'])
self.assertTrue(([2, 3] == input_details[0]['shape']).all())
def testSequentialModelOutputArray(self):
"""Test a Sequential tf.keras model testing output arrays argument."""
keras_file = self._getSequentialModel()
# Invalid output array raises error.
with self.assertRaises(ValueError) as error:
lite.TFLiteConverter.from_keras_model_file(
keras_file, output_arrays=['invalid-output'])
self.assertEqual("Invalid tensors 'invalid-output' were found.",
str(error.exception))
# Valid output array.
converter = lite.TFLiteConverter.from_keras_model_file(
keras_file, output_arrays=['time_distributed/Reshape_1'])
tflite_model = converter.convert()
os.remove(keras_file)
self.assertTrue(tflite_model)
def testFunctionalModel(self):
"""Test a Functional tf.keras model with default inputs."""
with session.Session().as_default():
inputs = keras.layers.Input(shape=(3,), name='input')
x = keras.layers.Dense(2)(inputs)
output = keras.layers.Dense(3)(x)
model = keras.models.Model(inputs, output)
model.compile(
loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(),
metrics=[keras.metrics.categorical_accuracy])
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
model.predict(x)
fd, keras_file = tempfile.mkstemp('.h5')
try:
keras.models.save_model(model, keras_file)
finally:
os.close(fd)
# Convert to TFLite model.
converter = lite.TFLiteConverter.from_keras_model_file(keras_file)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check tensor details of converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('input', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('dense_1/BiasAdd', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
# Check inference of converted model.
input_data = np.array([[1, 2, 3]], dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
tflite_result = interpreter.get_tensor(output_details[0]['index'])
keras_model = keras.models.load_model(keras_file)
keras_result = keras_model.predict(input_data)
np.testing.assert_almost_equal(tflite_result, keras_result, 5)
os.remove(keras_file)
def testFunctionalModelMultipleInputs(self):
"""Test a Functional tf.keras model with multiple inputs and outputs."""
with session.Session().as_default():
a = keras.layers.Input(shape=(3,), name='input_a')
b = keras.layers.Input(shape=(3,), name='input_b')
dense = keras.layers.Dense(4, name='dense')
c = dense(a)
d = dense(b)
e = keras.layers.Dropout(0.5, name='dropout')(c)
model = keras.models.Model([a, b], [d, e])
model.compile(
loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(),
metrics=[keras.metrics.mae],
loss_weights=[1., 0.5])
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_d_np = np.random.random((10, 4))
output_e_np = np.random.random((10, 4))
model.train_on_batch([input_a_np, input_b_np], [output_d_np, output_e_np])
model.predict([input_a_np, input_b_np], batch_size=5)
fd, keras_file = tempfile.mkstemp('.h5')
try:
keras.models.save_model(model, keras_file)
finally:
os.close(fd)
# Convert to TFLite model.
converter = lite.TFLiteConverter.from_keras_model_file(keras_file)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
os.remove(keras_file)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(2, len(input_details))
self.assertEqual('input_a', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
self.assertEqual('input_b', input_details[1]['name'])
self.assertEqual(np.float32, input_details[1]['dtype'])
self.assertTrue(([1, 3] == input_details[1]['shape']).all())
self.assertEqual((0., 0.), input_details[1]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(2, len(output_details))
self.assertEqual('dense_1/BiasAdd', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 4] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
self.assertEqual('dropout/Identity', output_details[1]['name'])
self.assertEqual(np.float32, output_details[1]['dtype'])
self.assertTrue(([1, 4] == output_details[1]['shape']).all())
self.assertEqual((0., 0.), output_details[1]['quantization'])
def testFunctionalSequentialModel(self):
"""Test a Functional tf.keras model containing a Sequential model."""
with session.Session().as_default():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.RepeatVector(3))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
model = keras.models.Model(model.input, model.output)
model.compile(
loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(),
metrics=[keras.metrics.categorical_accuracy],
sample_weight_mode='temporal')
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
model.predict(x)
model.predict(x)
fd, keras_file = tempfile.mkstemp('.h5')
try:
keras.models.save_model(model, keras_file)
finally:
os.close(fd)
# Convert to TFLite model.
converter = lite.TFLiteConverter.from_keras_model_file(keras_file)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check tensor details of converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('dense_input', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('time_distributed/Reshape_1', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 3, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
# Check inference of converted model.
input_data = np.array([[1, 2, 3]], dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
tflite_result = interpreter.get_tensor(output_details[0]['index'])
keras_model = keras.models.load_model(keras_file)
keras_result = keras_model.predict(input_data)
np.testing.assert_almost_equal(tflite_result, keras_result, 5)
os.remove(keras_file)
def testSequentialModelTocoConverter(self):
"""Test a Sequential tf.keras model with deprecated TocoConverter."""
keras_file = self._getSequentialModel()
converter = lite.TocoConverter.from_keras_model_file(keras_file)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Ensure the model is able to load.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
if __name__ == '__main__':
test.main()
| 41.226769
| 105
| 0.687876
|
d4e1f9d735a4cdce9505227516f57a581ec32e6e
| 24,092
|
py
|
Python
|
python/mnrl.py
|
Agnishom/mnrl
|
11c94ab3c984971a87fce5fc3fb0274dc007e5ff
|
[
"BSD-3-Clause"
] | 8
|
2017-06-06T19:55:20.000Z
|
2021-11-14T16:55:43.000Z
|
python/mnrl.py
|
Agnishom/mnrl
|
11c94ab3c984971a87fce5fc3fb0274dc007e5ff
|
[
"BSD-3-Clause"
] | null | null | null |
python/mnrl.py
|
Agnishom/mnrl
|
11c94ab3c984971a87fce5fc3fb0274dc007e5ff
|
[
"BSD-3-Clause"
] | 4
|
2017-08-03T18:06:18.000Z
|
2021-06-23T18:22:23.000Z
|
# Kevin Angstadt
# angstadt {at} umich.edu
# University of Virginia
#
# Python objects for manipulating MNRL files
import json
import mnrlerror
import jsonschema
import os
def loadMNRL(filename):
with open(os.path.dirname(os.path.abspath(__file__))+"/mnrl-schema.json", "r") as s:
schema = json.load(s)
with open(filename, "r") as f:
json_string = f.read()
try:
jsonschema.validate(json.loads(json_string),schema)
except jsonschema.exceptions.ValidationError as e:
print "ERROR:", e
return None
# parse into MNRL
d = MNRLDecoder()
return d.decode(json_string)
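# --- Illustrative example (editor's sketch, not part of the original library) ---
# Shows the intended entry point: load a MNRL file, then walk the resulting
# network. The file name "example.mnrl" is hypothetical; any JSON document that
# validates against mnrl-schema.json will do.
def _example_load(filename="example.mnrl"):
    """Load a MNRL file and print a one-line summary of each node."""
    network = loadMNRL(filename)
    if network is None:
        # schema validation failed; loadMNRL already printed the error
        return None
    for node_id, node in network.nodes.iteritems():
        print node_id, MNRLDefs.toMNRLEnable(node.enable)
    return network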
class MNRLDefs(object):
(ENABLE_ON_ACTIVATE_IN,
ENABLE_ON_START_AND_ACTIVATE_IN,
ENABLE_ALWAYS,
ENABLE_ON_LAST,
TRIGGER_ON_THRESHOLD,
HIGH_ON_THRESHOLD,
ROLLOVER_ON_THRESHOLD) = range(7)
H_STATE_INPUT = STATE_INPUT = "i"
H_STATE_OUTPUT = UP_COUNTER_OUTPUT = BOOLEAN_OUTPUT = "o"
UP_COUNTER_COUNT = "cnt"
UP_COUNTER_RESET = "rst"
BOOLEAN_TYPES = {
'and': 1,
'or': 1,
'nor': 1,
'not': 1,
'nand': 1
}
@staticmethod
def fromMNRLEnable(en):
if en == "onActivateIn":
return MNRLDefs.ENABLE_ON_ACTIVATE_IN
elif en == "onStartAndActivateIn":
return MNRLDefs.ENABLE_ON_START_AND_ACTIVATE_IN
elif en == "onLast":
return MNRLDefs.ENABLE_ON_LAST
elif en == "always":
return MNRLDefs.ENABLE_ALWAYS
else:
raise mnrlerror.EnableError(en)
@staticmethod
def fromMNRLReportEnable(en):
if en == "always":
return MNRLDefs.ENABLE_ALWAYS
elif en == "onLast":
return MNRLDefs.ENABLE_ON_LAST
else:
raise mnrlerror.ReportEnableError(en)
@staticmethod
def toMNRLEnable(en):
if en == MNRLDefs.ENABLE_ON_ACTIVATE_IN:
return "onActivateIn"
elif en == MNRLDefs.ENABLE_ON_START_AND_ACTIVATE_IN:
return "onStartAndActivateIn"
elif en == MNRLDefs.ENABLE_ALWAYS:
return "always"
elif en == MNRLDefs.ENABLE_ON_LAST:
return "onLast"
else:
raise mnrlerror.EnableError(en)
@staticmethod
def toMNRLReportEnable(en):
if en == MNRLDefs.ENABLE_ALWAYS:
return "always"
elif en == MNRLDefs.ENABLE_ON_LAST:
return "onLast"
else:
raise mnrlerror.ReportEnableError(en)
@staticmethod
def fromMNRLCounterMode(m):
if m == "trigger":
return MNRLDefs.TRIGGER_ON_THRESHOLD
elif m == "high":
return MNRLDefs.HIGH_ON_THRESHOLD
elif m == "rollover":
return MNRLDefs.ROLLOVER_ON_THRESHOLD
else:
raise mnrlerror.UpCounterModeError(m)
@staticmethod
def toMNRLCounterMode(m):
if m == MNRLDefs.TRIGGER_ON_THRESHOLD:
return "trigger"
elif m == MNRLDefs.HIGH_ON_THRESHOLD:
return "high"
elif m == MNRLDefs.ROLLOVER_ON_THRESHOLD:
return "rollover"
else:
raise mnrlerror.UpCounterModeError(m)
class MNRLNetwork(object):
"""Represents the top level of a MNRL file."""
def __init__(self, id):
"""Create a MNRL Network with an id set to 'id'"""
self.id = id
self.nodes = dict()
self._nodes_added = 0
def toJSON(self):
return json.dumps({
'id' : self.id,
'nodes' : [json.loads(e.toJSON()) for _,e in self.nodes.iteritems()]
})
def exportToFile(self, filename):
"""Save the MNRL Network to filename"""
with open(filename,"w") as f:
json.dump(json.loads(self.toJSON()), f, indent=2)
def getNodeById(self, id):
"""Return the element from the MNRL network with the given ID"""
try:
return self.nodes[id]
except KeyError:
raise mnrlerror.UnknownNode(id)
def addNode(self,theNode):
"""Add a MNRL Node object to the Network. Note that this may assign an
ID to the node if none exists."""
theNode.id = self._getUniqueNodeId(theNode.id)
self.nodes[theNode.id] = theNode
return theNode
def addState(self,
outputSymbols,
enable = MNRLDefs.ENABLE_ON_ACTIVATE_IN,
id = None,
report = False,
reportEnable = MNRLDefs.ENABLE_ALWAYS,
reportId = None,
latched = False,
attributes = {}
):
"""Create a state, add it to the network, and return it"""
id = self._getUniqueNodeId(id)
state = State(outputSymbols, enable=enable, id=id, report=report, reportEnable=reportEnable, reportId=reportId, latched=latched, attributes=attributes)
self.nodes[id] = state
return state
def addHState(self,
symbols,
enable = MNRLDefs.ENABLE_ON_ACTIVATE_IN,
id = None,
report = False,
reportEnable = MNRLDefs.ENABLE_ALWAYS,
reportId = None,
latched = False,
attributes = {}
):
"""Create a homogenous state, add it to the network, and return it"""
id = self._getUniqueNodeId(id)
hState = HState(symbols,enable=enable,id=id,report=report,reportEnable=reportEnable,reportId=reportId,latched=latched,attributes=attributes)
self.nodes[id] = hState
return hState
def addUpCounter(self,
threshold,
mode = MNRLDefs.HIGH_ON_THRESHOLD,
id = None,
report = False,
reportEnable = MNRLDefs.ENABLE_ALWAYS,
reportId = None,
attributes = {}
):
"""Create an up counter, add it to the network, and return it"""
id = self._getUniqueNodeId(id)
new_counter = UpCounter(threshold, mode=mode, id=id, report=report,reportEnable=reportEnable, reportId=reportId, attributes=attributes)
self.nodes[id] = new_counter
return new_counter
def addBoolean(self,
booleanType,
id = None,
report = False,
reportEnable = MNRLDefs.ENABLE_ALWAYS,
enable = MNRLDefs.ENABLE_ON_START_AND_ACTIVATE_IN,
reportId = None,
attributes = {}
):
"""Create a Boolean node, add it to the network, and return it"""
id = self._getUniqueNodeId(id)
try:
number_of_ports = MNRLDefs.BOOLEAN_TYPES[booleanType]
except KeyError:
raise mnrlerror.InvalidGateType(booleanType)
boolean = Boolean(booleanType,portCount=number_of_ports,id=id,enable=enable,report=report,reportEnable=reportEnable,reportId=reportId,attributes=attributes)
self.nodes[id] = boolean
return boolean
def addConnection(self, source, destination):
"""Add a connection between node 'source' (id,port) and 'destination' (id,port)"""
(s_id,
s_port,
s_node,
s_output_width,
s_output,
d_id,
d_port,
d_node,
d_input_width,
d_input) = self.__getConnectionNodeInformation(source, destination)
if s_output_width != d_input_width:
raise mnrlerror.PortWidthMismatch(s_output_width, d_input_width)
s_output.append({
'id': d_id,
'portId': d_port
})
d_input.append({
'id': s_id,
'portId': s_port
})
def removeConnection(self, source, destination):
"""Remove a connection between 'source' (id,port) and 'destination'
(id,port). If no connection exists, do nothing."""
(s_id,
s_port,
s_node,
s_output_width,
s_output,
d_id,
d_port,
d_node,
d_input_width,
d_input) = self.__getConnectionNodeInformation(source, destination)
# remove the connection
try:
s_output.remove({
'id': d_id,
'portId': d_port
})
except ValueError:
pass # don't care
try:
d_input.remove({
'id': s_id,
'portId': s_port
})
except ValueError:
pass # don't care
def _getUniqueNodeId(self,id):
"""return a unique ID for the MNRL network. If an ID is passed in and is
unique, it will be returned."""
if id is None:
id = "_" + str(self._nodes_added)
self._nodes_added += 1
if id in self.nodes:
raise mnrlerror.MNRLDuplicateIdError('This MNRL id already exists: ' + id)
return id
def __getConnectionNodeInformation(self, source, destination):
try:
s_id, s_port = source
d_id, d_port = destination
except ValueError:
raise mnrlerror.InvalidConnection()
s_node = self.getNodeById(s_id)
d_node = self.getNodeById(d_id)
try:
s_output_width, s_output = s_node.outputDefs[s_port]
except KeyError:
raise mnrlerror.UnknownPort(s_id,s_port)
try:
d_input_width, d_input = d_node.inputDefs[d_port]
except KeyError:
raise mnrlerror.UnknownPort(d_id,d_port)
return (s_id,
s_port,
s_node,
s_output_width,
s_output,
d_id,
d_port,
d_node,
d_input_width,
d_input)
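# --- Illustrative example (editor's sketch, not part of the original library) ---
# Shows how the MNRLNetwork methods above fit together: build a tiny two-state
# machine, wire the states, and export it. All ids, symbols, and the output
# file name are made up for illustration.
def _example_build_network():
    """Build a network that matches 'a' then 'b' and reports on the 'b'."""
    net = MNRLNetwork("example_network")
    s0 = net.addHState("a", enable=MNRLDefs.ENABLE_ON_START_AND_ACTIVATE_IN, id="s0")
    s1 = net.addHState("b", id="s1", report=True)
    # both ports are width 1, so this connection passes the width check
    net.addConnection((s0.id, MNRLDefs.H_STATE_OUTPUT),
                      (s1.id, MNRLDefs.H_STATE_INPUT))
    net.exportToFile("example_network.mnrl")
    return net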
class MNRLNode(object):
def __init__(self,
id = None,
enable = MNRLDefs.ENABLE_ON_ACTIVATE_IN,
report = False,
reportEnable = MNRLDefs.ENABLE_ALWAYS,
inputDefs = [],
outputDefs = [],
attributes = {}
):
self.id = id
if enable not in [
MNRLDefs.ENABLE_ALWAYS,
MNRLDefs.ENABLE_ON_ACTIVATE_IN,
MNRLDefs.ENABLE_ON_START_AND_ACTIVATE_IN,
MNRLDefs.ENABLE_ON_LAST
]:
raise mnrlerror.EnableError(enable)
self.enable = enable
self.report = report
if reportEnable not in [
MNRLDefs.ENABLE_ALWAYS,
MNRLDefs.ENABLE_ON_LAST
]:
raise mnrlerror.ReportEnableError(reportEnable)
self.reportEnable = reportEnable
#validate input ports
self.inputDefs = self.__validate_ports(inputDefs,"input")
#validate output ports
self.outputDefs = self.__validate_ports(outputDefs,"output")
self.attributes = attributes
def toJSON(self):
# define the enable string
enable_string = MNRLDefs.toMNRLEnable(self.enable)
# properly define input ports (drop the connections)
inputDefs = list()
for port_id,(width,_) in self.inputDefs.iteritems():
inputDefs.append({
'portId': port_id,
'width': width
})
# properly define output ports
outputDefs = list()
for port_id,(width,connection_list) in self.outputDefs.iteritems():
outputDefs.append({
'portId': port_id,
'width': width,
'activate': connection_list
})
dump_obj = {
'id' : self.id,
'report' : self.report,
'enable' : enable_string,
'inputDefs' : inputDefs,
'outputDefs' : outputDefs,
'attributes' : self.attributes
}
if self.reportEnable != MNRLDefs.ENABLE_ALWAYS:
dump_obj['reportEnable'] = MNRLDefs.toMNRLReportEnable(self.reportEnable)
return json.dumps(dump_obj)
def getOutputConnections(self):
"""Returns the output connections dict of portid => (width, conn_list)"""
return self.outputDefs
def getInputConnections(self):
"""Returns the input connections dict of portid => (width, conn_list)"""
return self.inputDefs
def __validate_ports(self,port_def,inout):
'''Returns a dictionary of ports. Keys are the port id's; each maps to a
width and list of connections tuple.'''
portDefs = dict()
try:
for port_id,width in port_def:
# check that the port_id is a string
if isinstance(port_id, basestring):
if port_id in portDefs:
raise mnrlerror.DuplicatePortId(port_id)
else:
if isinstance(width, int):
portDefs[port_id] = (width, [])
else:
raise mnrlerror.InvalidPortWidth(width)
else:
raise mnrlerror.PortIdError(port_id, "the ID is not a string")
except ValueError:
raise mnrlerror.PortDefError(inout)
return portDefs
class State(MNRLNode):
"""A state has one input port and multiple output ports. Output ports are
enabled by separate symbol sets"""
def __init__(self,
outputSymbols,
enable = MNRLDefs.ENABLE_ON_ACTIVATE_IN,
id = None,
report = False,
reportEnable = MNRLDefs.ENABLE_ALWAYS,
latched = False,
reportId = None,
attributes = {}
):
symbolSet = dict()
# outputSymbols is a tuple:
# ("outputId","symbolSet")
outputDefs = []
try:
for output_id, symbol_set in outputSymbols:
if isinstance(output_id, basestring):
symbolSet[output_id] = symbol_set
outputDefs.append((output_id,1))
else:
raise mnrlerror.PortDefError("output")
except ValueError:
raise mnrlerror.InvalidStateOutputSymbols()
super(State,self).__init__(
id = id,
enable = enable,
report = report,
reportEnable = reportEnable,
inputDefs = [(MNRLDefs.H_STATE_INPUT,1)],
outputDefs = outputDefs,
attributes = attributes
)
self.reportId = reportId
self.latched = latched
self.outputSymbols = symbolSet
def toJSON(self):
j = json.loads(super(State, self).toJSON())
j.update({'type' : 'state'})
if self.reportId is not None:
j['attributes'].update({'reportId':self.reportId})
j['attributes'].update({
'latched' : self.latched,
'symbolSet' : self.outputSymbols
})
return json.dumps(j)
class HState(MNRLNode):
"""Object representation of a homogeneous state. A homogenous state only has
one input port and one output port."""
def __init__(self,
symbols,
enable = MNRLDefs.ENABLE_ON_ACTIVATE_IN,
id = None,
report = False,
reportEnable = MNRLDefs.ENABLE_ALWAYS,
latched = False,
reportId = None,
attributes = {}
):
super(HState, self).__init__(
id = id,
enable = enable,
report = report,
reportEnable = reportEnable,
inputDefs = [(MNRLDefs.H_STATE_INPUT,1)],
outputDefs = [(MNRLDefs.H_STATE_OUTPUT,1)],
attributes = attributes
)
self.latched = latched
self.reportId = reportId
self.symbols = symbols
def toJSON(self):
j = json.loads(super(HState, self).toJSON())
j.update({'type' : 'hState'})
if self.reportId is not None:
j['attributes'].update({'reportId':self.reportId})
j['attributes'].update({
'latched' : self.latched,
'symbolSet' : self.symbols
})
return json.dumps(j)
class UpCounter(MNRLNode):
def __init__(self,
threshold,
mode = MNRLDefs.HIGH_ON_THRESHOLD,
id = None,
report = False,
reportEnable = MNRLDefs.ENABLE_ALWAYS,
reportId = None,
attributes = {}
):
#validate that the threshold is a non-negative int
if not (isinstance(threshold, int) and threshold >= 0):
raise mnrlerror.UpCounterThresholdError(threshold)
#validate mode
if mode not in [
MNRLDefs.TRIGGER_ON_THRESHOLD,
MNRLDefs.HIGH_ON_THRESHOLD,
MNRLDefs.ROLLOVER_ON_THRESHOLD
]:
raise mnrlerror.UpCounterModeError(mode)
super(UpCounter,self).__init__(
id = id,
enable = MNRLDefs.ENABLE_ON_START_AND_ACTIVATE_IN, #a counter is always active
report = report,
reportEnable = reportEnable,
inputDefs = [
(MNRLDefs.UP_COUNTER_COUNT, 1),
(MNRLDefs.UP_COUNTER_RESET, 1)
],
outputDefs = [
(MNRLDefs.UP_COUNTER_OUTPUT, 1)
],
attributes = attributes
)
self.reportId = reportId
self.threshold = threshold
self.mode = mode
def toJSON(self):
j = json.loads(super(UpCounter, self).toJSON())
j.update({'type' : 'upCounter'})
if self.reportId is not None:
j['attributes'].update({'reportId':self.reportId})
j['attributes'].update({
'mode' : MNRLDefs.toMNRLCounterMode(self.mode),
'threshold' : self.threshold,
})
return json.dumps(j)
class Boolean(MNRLNode):
def __init__(self,
gateType,
portCount = 1,
id = None,
enable = MNRLDefs.ENABLE_ON_START_AND_ACTIVATE_IN,
report = False,
reportEnable = MNRLDefs.ENABLE_ALWAYS,
reportId = None,
attributes = {}
):
if isinstance(gateType, basestring):
if not (isinstance(portCount, int) and portCount > 0):
raise mnrlerror.InvalidGatePortCount(portCount)
# seems semi-valid, let's create it
inputDefs = []
for i in range(portCount):
inputDefs.append(("b" + str(i),1))
super(Boolean, self).__init__(
id = id,
enable = enable,
report = report,
reportEnable = reportEnable,
inputDefs = inputDefs,
outputDefs = [(MNRLDefs.BOOLEAN_OUTPUT, 1)],
attributes = attributes
)
self.gateType = gateType
self.reportId = reportId
else:
raise mnrlerror.InvalidGateFormat()
def toJSON(self):
j = json.loads(super(Boolean, self).toJSON())
j.update({'type' : 'boolean'})
if self.reportId is not None:
j['attributes'].update({'reportId':self.reportId})
j['attributes'].update({
'gateType' : self.gateType,
})
return json.dumps(j)
class MNRLDecoder(json.JSONDecoder):
def decode(self, json_string):
default_obj = super(MNRLDecoder,self).decode(json_string)
# build up a proper MNRL representation
mnrl_obj = MNRLNetwork(default_obj['id'])
# build up the mnrl network in two passes
# 1. add all the nodes
# 2. add all the connections
for n in default_obj['nodes']:
# for each node in the network, add it to the network
if n['type'] == "state":
node = State(
n['attributes']['symbolSet'],
enable = MNRLDefs.fromMNRLEnable(n['enable']),
id = n['id'],
report = n['report'],
reportEnable = MNRLDefs.fromMNRLReportEnable(n.get('reportEnable', "always")),
latched = n['attributes']['latched'] if 'latched' in n['attributes'] else False,
reportId = n['attributes']['reportId'] if 'reportId' in n['attributes'] else None,
attributes = n['attributes']
)
elif n['type'] == "hState":
node = HState(
n['attributes']['symbolSet'],
enable = MNRLDefs.fromMNRLEnable(n['enable']),
id = n['id'],
report = n['report'],
reportEnable = MNRLDefs.fromMNRLReportEnable(n.get('reportEnable', "always")),
latched = n['attributes']['latched'] if 'latched' in n['attributes'] else False,
reportId = n['attributes']['reportId'] if 'reportId' in n['attributes'] else None,
attributes = n['attributes']
)
elif n['type'] == "upCounter":
node = UpCounter(
n['attributes']['threshold'],
mode = MNRLDefs.fromMNRLCounterMode(n['attributes']['mode']),
id = n['id'],
report = n['report'],
reportEnable = MNRLDefs.fromMNRLReportEnable(n.get('reportEnable', "always")),
reportId = n['attributes']['reportId'] if 'reportId' in n['attributes'] else None,
attributes = n['attributes']
)
elif n['type'] == "Boolean":
if n['attributes']['gateType'] not in MNRLDefs.BOOLEAN_TYPES:
raise mnrlerror.InvalidGateType(n['attributes']['gateType'])
node = Boolean(
n['attributes']['gateType'],
portCount = MNRLDefs.BOOLEAN_TYPES[n['attributes']['gateType']],
id = n['id'],
enable = MNRLDefs.fromMNRLEnable(n['enable']),
report = n['report'],
reportEnable = MNRLDefs.fromMNRLReportEnable(n.get('reportEnable', "always")),
reportId = n['attributes']['reportId'] if 'reportId' in n['attributes'] else None,
attributes = n['attributes']
)
else:
# convert input defs into format needed for constructor
ins = list()
for k in n['inputDefs']:
ins.append((k['portId'],k['width']))
# convert output defs into format needed for constructor
outs = list()
for k in n['outputDefs']:
outs.append((k['portId'], k['width']))
node = MNRLNode(
id = n['id'],
enable = MNRLDefs.fromMNRLEnable(n['enable']),
report = n['report'],
reportEnable = MNRLDefs.fromMNRLReportEnable(n.get('reportEnable', "always")),
inputDefs = ins,
outputDefs = outs,
attributes = n['attributes']
)
# add the node to the network
mnrl_obj.addNode(node)
for n in default_obj['nodes']:
# for each node, add all the connections
for k in n['outputDefs']:
# for each output port
for c in k['activate']:
mnrl_obj.addConnection(
(n['id'],k['portId']),
(c['id'],c['portId'])
)
return mnrl_obj
| 33.461111
| 164
| 0.53358
|
3ff8778f3ad263a88a6ad0fbf5059949d9c7d727
| 855
|
py
|
Python
|
build/piman.app/pysnmp/proto/secmod/rfc3414/auth/base.py
|
jackgisel/team-athens
|
91e2aa810c0064f8b6b39ee53c3b05f037e0aeb0
|
[
"Apache-2.0"
] | null | null | null |
build/piman.app/pysnmp/proto/secmod/rfc3414/auth/base.py
|
jackgisel/team-athens
|
91e2aa810c0064f8b6b39ee53c3b05f037e0aeb0
|
[
"Apache-2.0"
] | null | null | null |
build/piman.app/pysnmp/proto/secmod/rfc3414/auth/base.py
|
jackgisel/team-athens
|
91e2aa810c0064f8b6b39ee53c3b05f037e0aeb0
|
[
"Apache-2.0"
] | null | null | null |
#
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pysnmp/license.html
#
from pysnmp.proto import errind, error
class AbstractAuthenticationService(object):
serviceID = None
def hashPassphrase(self, authKey):
raise error.ProtocolError(errind.noAuthentication)
def localizeKey(self, authKey, snmpEngineID):
raise error.ProtocolError(errind.noAuthentication)
@property
def digestLength(self):
raise error.ProtocolError(errind.noAuthentication)
# 7.2.4.1
def authenticateOutgoingMsg(self, authKey, wholeMsg):
raise error.ProtocolError(errind.noAuthentication)
# 7.2.4.2
def authenticateIncomingMsg(self, authKey, authParameters, wholeMsg):
raise error.ProtocolError(errind.noAuthentication)
| 28.5
| 73
| 0.733333
|
2b339cf605facc99f1e0af6e3dd9b7481aac3a42
| 844
|
py
|
Python
|
bot/persist.py
|
dronesoc/dronebot
|
ed4bb8a8f774eb59ac0d9a3a7e7e3a4d8ded7264
|
[
"MIT"
] | null | null | null |
bot/persist.py
|
dronesoc/dronebot
|
ed4bb8a8f774eb59ac0d9a3a7e7e3a4d8ded7264
|
[
"MIT"
] | null | null | null |
bot/persist.py
|
dronesoc/dronebot
|
ed4bb8a8f774eb59ac0d9a3a7e7e3a4d8ded7264
|
[
"MIT"
] | null | null | null |
import requests
import os
import json
"""
Defines a connection to a Persist Key/Value Store
"""
class PersistStore:
def __init__(self):
self.base_url = 'https://beepboophq.com/api/v1/persist/kv'
self.token = os.getenv("BEEPBOOP_TOKEN", "")
def list_keys(self):
r = requests.get(self.base_url, headers={'Authorization': 'Bearer {}'.format(self.token)})
return r.json()
def set_value(self, key, value):
r = requests.put(self.base_url + '/{}'.format(key), headers={'Authorization': 'Bearer {}'.format(self.token)}, data=json.dumps({'value': value}))
if r.status_code != 200:
print(r.text)
def get_value(self, key):
r = requests.get(self.base_url + '/{}'.format(key), headers={'Authorization': 'Bearer {}'.format(self.token)})
return r.json()['value']
| 32.461538
| 153
| 0.626777
|
65752e430072e99d3cb3aaad99b6af5d0e83dd7e
| 1,025
|
py
|
Python
|
cyborg/accelerator/drivers/nic/intel/driver.py
|
NeCTAR-RC/cyborg
|
e0fab29181467c0c72667ea26a8b04c53b238ddb
|
[
"Apache-2.0"
] | 37
|
2017-03-23T02:10:35.000Z
|
2021-11-25T07:57:36.000Z
|
cyborg/accelerator/drivers/nic/intel/driver.py
|
openstack/nomad
|
8cd846a16c2da04c2f204b02b90db814e32acd63
|
[
"Apache-2.0"
] | null | null | null |
cyborg/accelerator/drivers/nic/intel/driver.py
|
openstack/nomad
|
8cd846a16c2da04c2f204b02b90db814e32acd63
|
[
"Apache-2.0"
] | 27
|
2017-07-14T02:26:24.000Z
|
2022-01-19T07:55:10.000Z
|
# Copyright 2020 Intel, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cyborg Intel NIC driver implementation.
"""
from cyborg.accelerator.drivers.nic.base import NICDriver
from cyborg.accelerator.drivers.nic.intel import sysinfo
class IntelNICDriver(NICDriver):
"""Class for Intel NIC drivers.
Vendor should implement their specific drivers in this class.
"""
VENDOR = "intel"
def __init__(self, *args, **kwargs):
pass
def discover(self):
return sysinfo.nic_tree()
| 29.285714
| 75
| 0.730732
|
67a0e3d96e072d7671b47efd1217891ad09dd9b8
| 4,866
|
py
|
Python
|
python/Client.py
|
Taren-Ko/Taren-Ko.github.io
|
7e5890e96113236577087145d96035c68d3cb265
|
[
"MIT"
] | null | null | null |
python/Client.py
|
Taren-Ko/Taren-Ko.github.io
|
7e5890e96113236577087145d96035c68d3cb265
|
[
"MIT"
] | null | null | null |
python/Client.py
|
Taren-Ko/Taren-Ko.github.io
|
7e5890e96113236577087145d96035c68d3cb265
|
[
"MIT"
] | null | null | null |
import sys
from socket import *
def mail_reader():
# Tests for proper number of arguments passed
# Then passes arguments to hostname and port for Socket
if len(sys.argv) != 3:
sys.stderr.write("Error: Too many or too few arguments.\n")
sys.stderr.write("QUIT\n")
sys.exit(0)
else:
hostname = sys.argv[1]
port = int(sys.argv[2])
# Asks for From Address and stores it if properly formatted
print("Enter From")
while True:
from_piece = raw_input()
if mail_addr_test(from_piece) == False:
print("Invalid Address, try again.")
else:
break
# Same as the last one but asks for Recipients.
# A comma separated list will be split up and placed into an array
# If any address is invalid we start over from the top of Enter Recipients
print("Enter recipients")
while True:
to_piece = raw_input()
to_set = to_piece.split(',')
to_total = len(to_set)
i = 0
to_bool = False
while i < to_total:
if mail_addr_test(to_set[i]) == False:
print("Invalid Address, try again.")
break
elif i == to_total-1:
to_bool = True
i = i+1
else:
i = i+1
if to_bool == True:
break
# Same as the last two but can not be invalid
print("Enter subject")
subject_piece = raw_input()
# Takes in the message until a <CRLF>.<CRLF>
# Stores these into data[]
print("Enter message. Terminate with a period on an empty line.")
i = 0
data = [None] * 1024
dataLen = 0
while True:
dataset = raw_input()
if dataset == ".":
dataLen = i
break
else:
data[i] = dataset
i = i+1
msglines = i
# From here the user has finished input so we start the socket to the Server
clientSocket = socket(AF_INET, SOCK_STREAM)
clientSocket.connect((hostname, port))
# confirming response from server then sending a friendly HELO
response = clientSocket.recv(1024).decode()
# print(response)
if not response:
clientSocket.close()
print("Error: Invalid hostname or port.")
sys.exit(0)
else:
clientSocket.send("HELO <cs.unc.edu>".encode())
response = clientSocket.recv(1024)
# Package the from response into a valid SMTP command and send to server
fromMsg = "MAIL FROM: <" + from_piece + ">"
clientSocket.send(fromMsg.encode())
response = clientSocket.recv(1024)
response = clientSocket.recv(1024).decode()
# print(response)
if response != "250 OK":
clientSocket.close()
print("Connection encountered SMTP Error: MAIL FROM")
sys.exit(0)
# Ditto for the recipients
i=0
while i<to_total:
toMsg = "RCPT TO: <" + to_set[i] + ">"
clientSocket.send(toMsg.encode())
response = clientSocket.recv(1024)
response = clientSocket.recv(1024).decode()
# print(response)
if response != "250 OK":
clientSocket.close()
print("Connection encountered SMTP Error: RCPT TO")
sys.exit(0)
else:
i= i+1
# DATA transmission begins. Start by formatting properly with From: To: Subject: \n
# After this send a part of data[], receive it back,
# repeat until finished then send the terminating character.
# response = clientSocket.recv(1024).decode()
clientSocket.send("DATA".encode())
response = clientSocket.recv(1024)
response = clientSocket.recv(1024).decode()
# print(response)
if response[0:3] != "354":
clientSocket.close()
print("Connection encountered SMTP Error: DATA")
sys.exit(0)
fromMsg = "From: " + from_piece
clientSocket.send(fromMsg.encode())
response = clientSocket.recv(1024)
# print(response)
toMsg = "To: " + to_piece
clientSocket.send(toMsg.encode())
response = clientSocket.recv(1024)
# print(response)
subMsg = "Subject: " + subject_piece
clientSocket.send(subMsg.encode())
response = clientSocket.recv(1024)
# print(response)
clientSocket.send(" ".encode())
response = clientSocket.recv(1024)
# print(response)
i=0
while i<dataLen:
# print(data[i])
clientSocket.send(data[i].encode())
response = clientSocket.recv(1024)
# print(response)
i= i+1
clientSocket.send(".".encode())
response = clientSocket.recv(1024)
response = clientSocket.recv(1024).decode()
# print(response)
# Final response received, if it's a 250 OK then we are done and exit
if response != "250 OK":
clientSocket.close()
print("Connection encountered SMTP Error: Data Termination")
sys.exit(0)
clientSocket.send("QUIT".encode())
clientSocket.close()
sys.exit(0)
def mail_addr_test(str):
"""Tests for a mailing address with a singular @ symbol"""
testcase = str
if testcase.find('<') != -1 or testcase.find('>') != -1:
return False
if testcase.find(' ') != -1:
return False
if testcase.find('@') == -1:
return False
else:
endpt = testcase.find('@')
if testcase[endpt+1:].find('@') != -1:
return False
else:
return True
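# Illustrative calls (comments only; the addresses are made-up examples):
#   mail_addr_test("user@example.com")   -> True   (exactly one '@', no '<', '>' or spaces)
#   mail_addr_test("user@@example.com")  -> False  (more than one '@')
#   mail_addr_test("<user@example.com>") -> False  (angle brackets are rejected)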
while True:
try:
mail_reader()
except EOFError:
break
| 25.746032
| 84
| 0.672626
|
12a850a8564bdc3c0e7b934e6b4093f9a8fdcde4
| 509
|
py
|
Python
|
app_sirene/migrations/0029_auto_20191228_1401.py
|
cavaliba/sirene
|
ccbf67ba169fb65f7d51550bd86b6fe5a217ef63
|
[
"BSD-3-Clause"
] | null | null | null |
app_sirene/migrations/0029_auto_20191228_1401.py
|
cavaliba/sirene
|
ccbf67ba169fb65f7d51550bd86b6fe5a217ef63
|
[
"BSD-3-Clause"
] | null | null | null |
app_sirene/migrations/0029_auto_20191228_1401.py
|
cavaliba/sirene
|
ccbf67ba169fb65f7d51550bd86b6fe5a217ef63
|
[
"BSD-3-Clause"
] | null | null | null |
# Generated by Django 3.0 on 2019-12-28 13:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app_sirene', '0028_auto_20191228_1400'),
]
operations = [
migrations.RemoveField(
model_name='info',
name='hide',
),
migrations.AddField(
model_name='info',
name='visible',
field=models.BooleanField(default=True, verbose_name='Masquer'),
),
]
| 22.130435
| 76
| 0.577603
|
9f2ff455945bd337b35f3d27f0e6c6818d84c45f
| 42
|
py
|
Python
|
openmdao/__init__.py
|
garobed1/OpenMDAO
|
6b93aca2ce235a8ebdf2ba65a9b798f2a807c1a5
|
[
"Apache-2.0"
] | null | null | null |
openmdao/__init__.py
|
garobed1/OpenMDAO
|
6b93aca2ce235a8ebdf2ba65a9b798f2a807c1a5
|
[
"Apache-2.0"
] | null | null | null |
openmdao/__init__.py
|
garobed1/OpenMDAO
|
6b93aca2ce235a8ebdf2ba65a9b798f2a807c1a5
|
[
"Apache-2.0"
] | null | null | null |
__version__ = '2.7.1'
INF_BOUND = 1.0E30
| 10.5
| 21
| 0.666667
|
8c4394e29ef3476e4d50a87586c9076e6253a2ab
| 499
|
py
|
Python
|
Books/GodOfPython/P00_OriginalSource/ch14/gui_07_2.py
|
Tim232/Python-Things
|
05f0f373a4cf298e70d9668c88a6e3a9d1cd8146
|
[
"MIT"
] | 2
|
2020-12-05T07:42:55.000Z
|
2021-01-06T23:23:18.000Z
|
Books/GodOfPython/P00_OriginalSource/ch14/gui_07_2.py
|
Tim232/Python-Things
|
05f0f373a4cf298e70d9668c88a6e3a9d1cd8146
|
[
"MIT"
] | null | null | null |
Books/GodOfPython/P00_OriginalSource/ch14/gui_07_2.py
|
Tim232/Python-Things
|
05f0f373a4cf298e70d9668c88a6e3a9d1cd8146
|
[
"MIT"
] | null | null | null |
# gui_07_2.py
import tkinter
SMILE = """
#define smiley_width 16
#define smiley_height 16
static unsigned char smiley_bits[] = {
0xc0, 0x07, 0x30, 0x18, 0x08, 0x20, 0x04, 0x40, 0x44, 0x44, 0x02, 0x80,
0x02, 0x80, 0x02, 0x80, 0x22, 0x88, 0x62, 0x8c, 0xc4, 0x47, 0x04, 0x40,
0x08, 0x20, 0x30, 0x18, 0xc0, 0x07, 0x00, 0x00};
"""
root = tkinter.Tk()
cvs = tkinter.Canvas(root, width=50, height=30)
cvs.pack()
img = tkinter.BitmapImage(data=SMILE)
cvs.create_image(25, 15, image=img)
root.mainloop()
| 26.263158
| 71
| 0.705411
|
5847f68d5c5a8bdb335ca8b1e11cba3bac26388e
| 4,369
|
py
|
Python
|
tools/cfgparser.py
|
dp92987/nginx-amplify-agent
|
1b2eed6eab52a82f35974928d75044451b4bedaf
|
[
"BSD-2-Clause"
] | 308
|
2015-11-17T13:15:33.000Z
|
2022-03-24T12:03:40.000Z
|
tools/cfgparser.py
|
dp92987/nginx-amplify-agent
|
1b2eed6eab52a82f35974928d75044451b4bedaf
|
[
"BSD-2-Clause"
] | 211
|
2015-11-16T15:27:41.000Z
|
2022-03-28T16:20:15.000Z
|
tools/cfgparser.py
|
dp92987/nginx-amplify-agent
|
1b2eed6eab52a82f35974928d75044451b4bedaf
|
[
"BSD-2-Clause"
] | 80
|
2015-11-16T18:20:30.000Z
|
2022-03-02T12:47:56.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import json
import os
import sys
import time
# make amplify libs available
script_location = os.path.abspath(os.path.expanduser(__file__))
agent_repo_path = os.path.dirname(os.path.dirname(script_location))
agent_config_file = os.path.join(agent_repo_path, 'etc', 'agent.conf.development')
sys.path.append(agent_repo_path)
# setup agent config
from amplify.agent.common.context import context
context.setup(app='agent', config_file=agent_config_file)
context.app_config['daemon']['cpu_sleep'] = 0.0
from amplify.agent.objects.nginx.config.config import NginxConfig
__author__ = "Mike Belov"
__copyright__ = "Copyright (C) Nginx, Inc. All rights reserved."
__license__ = ""
__maintainer__ = "Mike Belov"
__email__ = "dedm@nginx.com"
def parse_args():
from argparse import ArgumentParser
parser = ArgumentParser(description='A tool for using the NGINX Amplify config parser')
parser.add_argument('-c', '--config', metavar='file', required=True, help='path to nginx config file')
group = parser.add_mutually_exclusive_group()
group.add_argument('--light', action='store_true', help='light parse (find all files)')
group.add_argument('--simple', action='store_true', help='print the simplified config')
group.add_argument('--dirmap', action='store_true', help='print directory and file map')
group.add_argument('--payload', action='store_true', help='print entire config payload')
group = parser.add_mutually_exclusive_group()
group.add_argument('--pretty', action='store_true', help='pretty print json payloads')
group.add_argument('-q', '--quiet', action='store_true', help='print only elapsed time')
args = parser.parse_args()
args.config = os.path.abspath(os.path.expanduser(args.config))
if not os.path.exists(args.config):
parser.error('config: No such file or directory')
return args
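# Example invocation (comment only; the config path is hypothetical):
#   python tools/cfgparser.py -c /etc/nginx/nginx.conf --simple --pretty
# parses the given nginx config and pretty-prints the simplified subtree.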
def main():
args = parse_args()
def dump(heading, *payloads):
if heading:
print '\033[32m{} for {}\033[0m'.format(heading, args.config)
for x in payloads:
if isinstance(x, dict) and args.pretty:
print json.dumps(x, indent=4, sort_keys=True)
elif isinstance(x, dict):
print json.dumps(x, separators=(',', ':'), sort_keys=True)
else:
print json.dumps(x) # never prettify print lists
print
start = time.time()
cfg = NginxConfig(filename=args.config)
if args.light:
structure = cfg.collect_structure(include_ssl_certs=True)
else:
cfg.full_parse()
runtime = time.time() - start
if args.quiet:
print 'Parsed in %s seconds' % runtime
return
if args.light:
dump(None, *structure)
elif args.simple:
dump(None, cfg.subtree)
elif args.dirmap:
dump('Config files', cfg.files)
dump('Config directories', cfg.directories)
dump('Config directory map', cfg.directory_map)
dump('Config errors', cfg.parser_errors)
elif args.payload:
cfg.run_ssl_analysis()
payload = {
'tree': cfg.tree,
'directory_map': cfg.directory_map,
'files': cfg.files,
'directories': cfg.directories,
'ssl_certificates': cfg.ssl_certificates,
'access_logs': cfg.access_logs,
'error_logs': cfg.error_logs,
'errors': {
'parser': len(cfg.parser_errors),
'test': len(cfg.test_errors)
}
}
dump(None, payload)
else:
cfg.run_ssl_analysis()
dump('Config tree', cfg.tree)
dump('Config files', cfg.files)
dump('Config directory map', cfg.directory_map)
dump('SSL certificates', cfg.ssl_certificates)
dump(
'Stub status/plus status/api urls',
cfg.stub_status_urls,
cfg.plus_status_external_urls,
cfg.plus_status_internal_urls,
cfg.api_external_urls,
cfg.api_internal_urls
)
dump('Access logs', cfg.access_logs)
dump('Error logs', cfg.error_logs)
dump('Log formats', cfg.log_formats)
dump('Config errors', cfg.parser_errors)
print '\033[32mParsed in %s seconds\033[0m' % runtime
if __name__ == '__main__':
main()
| 33.607692
| 106
| 0.642939
|
8eca3f84be810b3a58fff4a148b13b05b368ced3
| 3,030
|
py
|
Python
|
omok/gui/gui.py
|
NohGod/BMT_Projects
|
c8bb9dc06a1693db7f9e9eeb524f5ef307fac2b8
|
[
"MIT"
] | null | null | null |
omok/gui/gui.py
|
NohGod/BMT_Projects
|
c8bb9dc06a1693db7f9e9eeb524f5ef307fac2b8
|
[
"MIT"
] | null | null | null |
omok/gui/gui.py
|
NohGod/BMT_Projects
|
c8bb9dc06a1693db7f9e9eeb524f5ef307fac2b8
|
[
"MIT"
] | 1
|
2021-08-28T04:16:48.000Z
|
2021-08-28T04:16:48.000Z
|
from tkinter import Tk, Frame, Label, Button, PhotoImage
from omok.core.board import Board
from omok.gui.omokslot import OmokSlot
class GUI:
"""Omok GUI created with tkinter"""
status_text = {Board.BLACK_TURN: "Black's turn",
Board.BLACK_WIN: 'Black wins!',
Board.WHITE_TURN: "White's turn",
Board.WHITE_WIN: 'White wins!',
Board.DRAW: 'Draw!'}
res_path = 'omok/res/'
img_name = {Board.EMPTY_SLOT: 'empty.gif',
Board.BLACK_SLOT: 'black.gif',
Board.WHITE_SLOT: 'white.gif'}
def __init__(self, board, windowtitle='Omok'):
self.board = board
self.board.lock.acquire()
self.window = Tk()
self.window.title(windowtitle)
self.img = {}
for key, name in GUI.img_name.items():
self.img[key] = PhotoImage(file=GUI.res_path + name)
self.windowheight = self.board.height * self.img[Board.EMPTY_SLOT].height()
self.windowwidth = self.board.width * self.img[Board.EMPTY_SLOT].width()
self.window.geometry(str(self.windowwidth+1) + 'x' + str(self.windowheight+68) + '+100+100')
self.window.resizable(True, True)
self.labelframe = Frame(self.window, height=20, bd=0)
self.labelframe.pack(side='top', fill='x')
self.resetbutton = Button(self.labelframe, text='Reset', font=("Arial", 25), command=self.board.reset)
self.resetbutton.pack(side='left', fill='y')
self.statuslabel = Label(self.labelframe, text=GUI.status_text[self.board.status], font=("Arial", 25), height=0, width=0)
self.statuslabel.pack(side='right', fill='y')
self.gameframe = Frame(self.window, bd=0)
self.gameframe.pack(expand=True, fill='both')
self.board_gui = []
for i in range(self.board.height):
self.board_gui.append([])
for j in range(self.board.width):
self.board_gui[i].append(OmokSlot(self.gameframe, i=i, j=j, bd=0, padx=0, pady=0,
image=self.img[self.board.board[i][j]],
height=self.img[self.board.board[i][j]].height(),
width=self.img[self.board.board[i][j]].width()))
self.board_gui[i][j].bind('<Button-1>', lambda x: self.board.place(x.widget.i, x.widget.j))
self.board_gui[i][j].grid(row=i, column=j)
self.board.load_gui(self)
self.board.lock.release()
self.window.mainloop()
def update(self, i=None, j=None):
self.statuslabel['text'] = GUI.status_text[self.board.status]
if i is None or j is None:
for i in range(self.board.height):
for j in range(self.board.width):
self.board_gui[i][j]['image'] = self.img[self.board.board[i][j]]
else:
self.board_gui[i][j]['image'] = self.img[self.board.board[i][j]]
| 44.558824
| 129
| 0.568317
|
7cf7888816e4558607b47a718b63fd3a1e6d39a8
| 14,133
|
py
|
Python
|
contrib/dummy_tv.py
|
Tigger2014/async_upnp_client
|
fa10aac4a98290559b4c258e23ba31d7b1177d5f
|
[
"Apache-2.0"
] | null | null | null |
contrib/dummy_tv.py
|
Tigger2014/async_upnp_client
|
fa10aac4a98290559b4c258e23ba31d7b1177d5f
|
[
"Apache-2.0"
] | null | null | null |
contrib/dummy_tv.py
|
Tigger2014/async_upnp_client
|
fa10aac4a98290559b4c258e23ba31d7b1177d5f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Dummy TV supporting DLNA/DMR."""
# Instructions:
# - Change `SOURCE``. When using IPv6, be sure to set the scope_id, the last value in the tuple.
# - Run this module.
# - Run upnp-client (change IP to your own IP):
# upnp-client call-action 'http://0.0.0.0:8000/device.xml' \
# RC/GetVolume InstanceID=0 Channel=Master
import asyncio
import logging
import xml.etree.ElementTree as ET
from typing import Dict
from async_upnp_client.client import UpnpRequester, UpnpStateVariable
from async_upnp_client.const import (
STATE_VARIABLE_TYPE_MAPPING,
DeviceInfo,
ServiceInfo,
StateVariableTypeInfo,
)
from .server import (
UpnpServerDevice,
UpnpServerService,
callable_action,
run_server,
)
logging.basicConfig(level=logging.DEBUG)
LOGGER = logging.getLogger("dummy_tv")
LOGGER_SSDP_TRAFFIC = logging.getLogger("async_upnp_client.traffic")
LOGGER_SSDP_TRAFFIC.setLevel(logging.WARNING)
SOURCE = ("0.0.0.0", 0) # Your IP here!
HTTP_PORT = 8000
class MediaRendererDevice(UpnpServerDevice):
"""Media Renderer device."""
DEVICE_DEFINITION = DeviceInfo(
device_type="urn:schemas-upnp-org:device:MediaRenderer:1",
friendly_name="Dummy TV",
manufacturer="Steven",
model_name="DummyTV v1",
udn="uuid:ea2181c0-c677-4a09-80e6-f9e69a951284",
model_description="Dummy TV DMR",
model_number="v0.0.1",
serial_number="0000001",
url="/device.xml",
icons=[],
xml=ET.Element("server_device"),
)
def __init__(self, requester: UpnpRequester, base_uri: str) -> None:
"""Initialize."""
services = [
RenderingControlService(requester=requester),
AVTransportService(requester=requester),
ConnectionManagerService(requester=requester),
]
super().__init__(
requester=requester,
base_uri=base_uri,
services=services,
embedded_devices=[],
)
class RenderingControlService(UpnpServerService):
"""Rendering Control service."""
SERVICE_DEFINITION = ServiceInfo(
service_id="urn:upnp-org:serviceId:RenderingControl",
service_type="urn:schemas-upnp-org:service:RenderingControl:1",
control_url="/upnp/control/RenderingControl1",
event_sub_url="/upnp/event/RenderingControl1",
scpd_url="/RenderingControl_1.xml",
xml=ET.Element("server_service"),
)
STATE_VARIABLE_DEFINITIONS = {
"Volume": StateVariableTypeInfo(
data_type="ui2",
data_type_mapping=STATE_VARIABLE_TYPE_MAPPING["ui2"],
default_value="0",
allowed_value_range={
"min": "0",
"max": "100",
},
allowed_values=None,
xml=ET.Element("server_stateVariable"),
),
"Mute": StateVariableTypeInfo(
data_type="boolean",
data_type_mapping=STATE_VARIABLE_TYPE_MAPPING["boolean"],
default_value="0",
allowed_value_range={},
allowed_values=["0", "1", ],
xml=ET.Element("server_stateVariable"),
),
"A_ARG_TYPE_InstanceID": StateVariableTypeInfo(
data_type="ui4",
data_type_mapping=STATE_VARIABLE_TYPE_MAPPING["ui4"],
default_value=None,
allowed_value_range={},
allowed_values=None,
xml=ET.Element("server_stateVariable"),
),
"A_ARG_TYPE_Channel": StateVariableTypeInfo(
data_type="string",
data_type_mapping=STATE_VARIABLE_TYPE_MAPPING["string"],
default_value=None,
allowed_value_range={},
allowed_values=None,
xml=ET.Element("server_stateVariable"),
),
}
@callable_action(
name="GetVolume",
in_args={
"InstanceID": "A_ARG_TYPE_InstanceID",
"Channel": "A_ARG_TYPE_Channel",
},
out_args={
"CurrentVolume": "Volume",
},
)
async def get_volume(
self, InstanceID: int, Channel: str
) -> Dict[str, UpnpStateVariable]:
"""Get Volume."""
# pylint: disable=invalid-name, unused-argument
return {
"CurrentVolume": self.state_variable("Volume"),
}
@callable_action(
name="SetVolume",
in_args={
"InstanceID": "A_ARG_TYPE_InstanceID",
"Channel": "A_ARG_TYPE_Channel",
"DesiredVolume": "Volume",
},
out_args={},
)
async def set_volume(
self, InstanceID: int, Channel: str, DesiredVolume: int
) -> Dict[str, UpnpStateVariable]:
"""Set Volume."""
# pylint: disable=invalid-name, unused-argument
volume = self.state_variable("Volume")
volume.value = DesiredVolume
return {}
@callable_action(
name="GetMute",
in_args={
"InstanceID": "A_ARG_TYPE_InstanceID",
"Channel": "A_ARG_TYPE_Channel",
},
out_args={
"CurrentMute": "Mute",
},
)
async def get_mute(
self, InstanceID: int, Channel: str
) -> Dict[str, UpnpStateVariable]:
"""Get Mute."""
# pylint: disable=invalid-name, unused-argument
return {
"CurrentMute": self.state_variable("Mute"),
}
@callable_action(
name="SetMute",
in_args={
"InstanceID": "A_ARG_TYPE_InstanceID",
"Channel": "A_ARG_TYPE_Channel",
"DesiredMute": "Mute",
},
out_args={},
)
async def set_mute(
self, InstanceID: int, Channel: str, DesiredMute: bool
) -> Dict[str, UpnpStateVariable]:
"""Set Volume."""
# pylint: disable=invalid-name, unused-argument
volume = self.state_variable("Mute")
volume.value = DesiredMute
return {}
class AVTransportService(UpnpServerService):
"""AVTransport service."""
SERVICE_DEFINITION = ServiceInfo(
service_id="urn:upnp-org:serviceId:AVTransport",
service_type="urn:schemas-upnp-org:service:AVTransport:1",
control_url="/upnp/control/AVTransport1",
event_sub_url="/upnp/event/AVTransport1",
scpd_url="/AVTransport_1.xml",
xml=ET.Element("server_service"),
)
STATE_VARIABLE_DEFINITIONS = {
"A_ARG_TYPE_InstanceID": StateVariableTypeInfo(
data_type="ui4",
data_type_mapping=STATE_VARIABLE_TYPE_MAPPING["ui4"],
default_value=None,
allowed_value_range={},
allowed_values=None,
xml=ET.Element("server_stateVariable"),
),
"CurrentTrackURI": StateVariableTypeInfo(
data_type="string",
data_type_mapping=STATE_VARIABLE_TYPE_MAPPING["string"],
default_value="",
allowed_value_range={},
allowed_values=None,
xml=ET.Element("server_stateVariable"),
),
"CurrentTrack": StateVariableTypeInfo(
data_type="ui4",
data_type_mapping=STATE_VARIABLE_TYPE_MAPPING["ui4"],
default_value=None,
allowed_value_range={},
allowed_values=None,
xml=ET.Element("server_stateVariable"),
),
"AVTransportURI": StateVariableTypeInfo(
data_type="string",
data_type_mapping=STATE_VARIABLE_TYPE_MAPPING["string"],
default_value="",
allowed_value_range={},
allowed_values=None,
xml=ET.Element("server_stateVariable"),
),
"TransportState": StateVariableTypeInfo(
data_type="string",
data_type_mapping=STATE_VARIABLE_TYPE_MAPPING["string"],
default_value="STOPPED",
allowed_value_range={},
allowed_values=["STOPPED", "PLAYING", "PAUSED_PLAYBACK", "TRANSITIONING", ],
xml=ET.Element("server_stateVariable"),
),
"TransportStatus": StateVariableTypeInfo(
data_type="string",
data_type_mapping=STATE_VARIABLE_TYPE_MAPPING["string"],
default_value="",
allowed_value_range={},
allowed_values=None,
xml=ET.Element("server_stateVariable"),
),
"TransportPlaySpeed": StateVariableTypeInfo(
data_type="string",
data_type_mapping=STATE_VARIABLE_TYPE_MAPPING["string"],
default_value="1",
allowed_value_range={},
allowed_values=["1"],
xml=ET.Element("server_stateVariable"),
),
"PossiblePlaybackStorageMedia": StateVariableTypeInfo(
data_type="string",
data_type_mapping=STATE_VARIABLE_TYPE_MAPPING["string"],
default_value="NOT_IMPLEMENTED",
allowed_value_range={},
allowed_values=["NOT_IMPLEMENTED"],
xml=ET.Element("server_stateVariable"),
),
"PossibleRecordStorageMedia": StateVariableTypeInfo(
data_type="string",
data_type_mapping=STATE_VARIABLE_TYPE_MAPPING["string"],
default_value="NOT_IMPLEMENTED",
allowed_value_range={},
allowed_values=["NOT_IMPLEMENTED"],
xml=ET.Element("server_stateVariable"),
),
"PossibleRecordQualityModes": StateVariableTypeInfo(
data_type="string",
data_type_mapping=STATE_VARIABLE_TYPE_MAPPING["string"],
default_value="NOT_IMPLEMENTED",
allowed_value_range={},
allowed_values=["NOT_IMPLEMENTED"],
xml=ET.Element("server_stateVariable"),
),
"CurrentPlayMode": StateVariableTypeInfo(
data_type="string",
data_type_mapping=STATE_VARIABLE_TYPE_MAPPING["string"],
default_value="NORMAL",
allowed_value_range={},
allowed_values=["NORMAL"],
xml=ET.Element("server_stateVariable"),
),
"CurrentRecordQualityMode": StateVariableTypeInfo(
data_type="string",
data_type_mapping=STATE_VARIABLE_TYPE_MAPPING["string"],
default_value="NOT_IMPLEMENTED",
allowed_value_range={},
allowed_values=["NOT_IMPLEMENTED"],
xml=ET.Element("server_stateVariable"),
),
}
@callable_action(
name="GetTransportInfo",
in_args={
"InstanceID": "A_ARG_TYPE_InstanceID",
},
out_args={
"CurrentTransportState": "TransportState",
"CurrentTransportStatus": "TransportStatus",
"CurrentSpeed": "TransportPlaySpeed",
},
)
async def get_transport_info(self, InstanceID: int) -> Dict[str, UpnpStateVariable]:
"""Get Transport Info."""
# pylint: disable=invalid-name, unused-argument
return {
"CurrentTransportState": self.state_variable("TransportState"),
"CurrentTransportStatus": self.state_variable("TransportStatus"),
"CurrentSpeed": self.state_variable("TransportPlaySpeed"),
}
@callable_action(
name="GetMediaInfo",
in_args={
"InstanceID": "A_ARG_TYPE_InstanceID",
},
out_args={
"CurrentURI": "AVTransportURI",
},
)
async def get_media_info(self, InstanceID: int) -> Dict[str, UpnpStateVariable]:
"""Get Media Info."""
# pylint: disable=invalid-name, unused-argument
return {
"CurrentURI": self.state_variable("AVTransportURI"),
}
@callable_action(
name="GetDeviceCapabilities",
in_args={
"InstanceID": "A_ARG_TYPE_InstanceID",
},
out_args={
"PlayMedia": "PossiblePlaybackStorageMedia",
"RecMedia": "PossibleRecordStorageMedia",
"RecQualityModes": "PossibleRecordQualityModes",
},
)
async def get_device_capabilities(self, InstanceID: int) -> Dict[str, UpnpStateVariable]:
"""Get Device Capabilities."""
# pylint: disable=invalid-name, unused-argument
return {
"PlayMedia": self.state_variable("PossiblePlaybackStorageMedia"),
"RecMedia": self.state_variable("PossibleRecordStorageMedia"),
"RecQualityModes": self.state_variable("PossibleRecordQualityModes"),
}
@callable_action(
name="GetTransportSettings",
in_args={
"InstanceID": "A_ARG_TYPE_InstanceID",
},
out_args={
"PlayMode": "CurrentPlayMode",
"RecQualityMode": "CurrentRecordQualityMode",
},
)
async def get_transport_settings(self, InstanceID: int) -> Dict[str, UpnpStateVariable]:
"""Get Transport Settings."""
# pylint: disable=invalid-name, unused-argument
return {
"PlayMode": self.state_variable("CurrentPlayMode"),
"RecQualityMode": self.state_variable("CurrentRecordQualityMode"),
}
class ConnectionManagerService(UpnpServerService):
"""ConnectionManager service."""
SERVICE_DEFINITION = ServiceInfo(
service_id="urn:upnp-org:serviceId:ConnectionManager",
service_type="urn:schemas-upnp-org:service:ConnectionManager:1",
control_url="/upnp/control/ConnectionManager1",
event_sub_url="/upnp/event/ConnectionManager1",
scpd_url="/ConnectionManager_1.xml",
xml=ET.Element("server_service"),
)
STATE_VARIABLE_DEFINITIONS = {
"A_ARG_TYPE_InstanceID": StateVariableTypeInfo(
data_type="ui4",
data_type_mapping=STATE_VARIABLE_TYPE_MAPPING["ui4"],
default_value=None,
allowed_value_range={},
allowed_values=None,
xml=ET.Element("server_stateVariable"),
),
}
async def async_main() -> None:
"""Main."""
await run_server(SOURCE, HTTP_PORT, MediaRendererDevice)
if __name__ == "__main__":
asyncio.run(async_main())
| 34.220339
| 96
| 0.607019
|
81e0e1e3dbe7461e03fd3e29c7df2fca41cecc91
| 12,922
|
py
|
Python
|
diffimg/diffimg.py
|
exoplanetvetting/DAVE
|
aea19a30d987b214fb4c0cf01aa733f127c411b9
|
[
"MIT"
] | 7
|
2019-05-07T02:01:51.000Z
|
2022-03-16T08:09:39.000Z
|
diffimg/diffimg.py
|
barentsen/dave
|
45ba97b7b535ad26dd555c33c963c6224a9af23c
|
[
"MIT"
] | 18
|
2015-12-09T22:18:59.000Z
|
2017-04-26T13:11:44.000Z
|
diffimg/diffimg.py
|
barentsen/dave
|
45ba97b7b535ad26dd555c33c963c6224a9af23c
|
[
"MIT"
] | 5
|
2017-03-08T11:42:53.000Z
|
2020-05-07T00:10:37.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 18 21:08:35 2015
@author: fergal
$Id$
$URL$
"""
from __future__ import division, print_function, absolute_import
import matplotlib.pyplot as mp
import numpy as np
import dave.fileio.kplrfits as kplrfits
def constructK2DifferenceImage(cube, indexOfCadenceInTransit, rollPhase, flags):
"""Construct a difference image for a single K2 cadence
K2's roll motion makes constructing difference images harder
than it was in Classic Kepler. The star can not be assumed
to be in a similar location on the focal plane in any other
cadence. To construct difference images, this function
interpolates between cadences of similar roll angles to
construct an "out-of-transit" image
Inputs:
-------------
cube
(3d np array) A data cube created from a TPF file.
See fileio.tpf.getTargetPixelArrayFromFits()
indexOfCadenceInTransit
(int) Specify which row of cube to construct a difference
image for
rollPhase
(1d np array) An array of roll phases for each row
of cube. len(rollPhase) == len(cube). Units of this
array don't matter, so long as cadences with similar
roll angles have similar values of rollPhase
flags
(1d array) flag values indicating bad cadences.
Currently a non-zero value of flags indicates a bad
cadence.
Returns:
------------
A 3 element list, [diff, oot, diagnostics]
diff is a 2d np array corresponding to the difference image
oot is a 2d np array corresponds to the reconstructed image
subtracted from cube[indexOfCadence] in transit.
diff = cube[indexOfCadenceInTransit] - oot
diagnostics is a dictionary of details of how the image was
constructed.
Notes:
---------
In keeping with the Classic Kepler convention, a flux decrement
in a cadence (e.g a transit) appears as a positive feature in
the difference image.
Todo:
---------
I should do a better job picking which cadences are bad.
Not all flags indicate a cadence is untrustworthy.
"""
assert(cube.shape[0] == len(rollPhase))
i0 = indexOfCadenceInTransit
try:
oot, diagnostics = getInterpolatedOotImage(cube, rollPhase, flags, i0)
except ValueError as e:
msg = "WARN: While creating Diff Img for %i: %s" \
%(i0, e)
raise ValueError(msg)
diff = oot - cube[i0]
return diff, oot, diagnostics
def plotRollPhaseDiagnosticPlot(x, rollPhase, flags, indexOfEvent):
if x is None:
x = np.arange(len(rollPhase))
if flags.dtype == bool:
raise ValueError("Boolean flag array not accepted")
i0 = indexOfEvent
idx = flags == 0
mp.plot(x[idx], rollPhase[idx], 'ko-')
mp.axvline(i0, color='b')
mp.axhline(rollPhase[i0], color='grey')
dtFlag = kplrfits.SapQuality['PossibleRollTweak']
thrusterFiringIndices = np.where( flags & dtFlag )[0]
tfi = thrusterFiringIndices
tfiBefore = tfi[tfi < i0]
tfiAfter = tfi[tfi > i0]
lwr = max(0, i0-100)
upr = min(len(x)-1, i0+100)
plotLwr = x[lwr]
plotUpr = x[upr]
mp.xlim(plotLwr, plotUpr)
mp.ylim(-1,1)
for w in tfi:
if x[w] < plotLwr or x[w] > plotUpr:
continue
mp.axvline(x[w], color='r')
lwr, upr = getThrusterFiringIntervalBefore(tfiBefore)
i1, sgn, d1, d2 = getBracketingCadence(rollPhase, lwr, upr, rollPhase[i0])
mp.plot(x[i1], rollPhase[i1], 'ro')
mp.plot(x[i1+sgn], rollPhase[i1+sgn], 'ro')
lwr, upr = getThrusterFiringIntervalAfter(tfiAfter)
i1, sgn, d1, d2 = getBracketingCadence(rollPhase, lwr, upr, rollPhase[i0])
mp.plot(x[i1], rollPhase[i1], 'ro')
mp.plot(x[i1+sgn], rollPhase[i1+sgn], 'ro')
def getInterpolatedOotImage(cube, rollPhase, flags, i0):
"""Construct an out-of-transit image for a given cadence
Inputs:
-------------
cube
(3d np array) A data cube created from a TPF file.
See io.tpf.getTargetPixelArrayFromFits()
rollPhase
(1d np array) An array of roll phases for each row
of cube. len(rollPhase) == len(cube). Units of this
array don't matter, so long as cadences with similar
roll angles have similar values of rollPhase
flags
(1d array) flag values indicating bad cadences.
Currently a non-zero value of flags indicates a bad
cadence.
i0
(int) Specify which row of cube to construct a difference
image for
Returns:
-------------
A two element tuple:
* A np 2d array representing an interpolated image.
* A dictionary containing the indices of the cadences used for interpolation. The
keys of that dictionary are:
rangeBefore
(2 element tuple) The index range of between the two previous
thruster firings
rangeAfter
(2 element tuple) The index range of between the two succeeding
thruster firings
rinBefore
(2 element tuple) The bracketing cadences interpolated to produce the
OOT image before the event
rinAfter
(2 element tuple) The bracketing cadences interpolated to produce the
OOT image after the event
"""
rollPhase0 = rollPhase[i0]
oot = np.zeros_like(cube[0])
diagnostics = dict()
diagnostics['errorMsg'] = "None"
dtFlag = kplrfits.SapQuality['DefiniteRollTweak']
thrusterFiringIndices = np.where( flags & dtFlag )[0]
tfi = thrusterFiringIndices
tfiBefore = tfi[tfi < i0]
tfiAfter = tfi[tfi > i0]
# import pdb; pdb.set_trace()
lwrBefore, uprBefore = getThrusterFiringIntervalBefore(tfiBefore)
lwrAfter, uprAfter = getThrusterFiringIntervalAfter(tfiAfter)
diagnostics['rangeBefore'] = (lwrBefore, uprBefore)
diagnostics['rangeAfter'] = (lwrAfter, uprAfter)
if lwrAfter is None:
diagnostics['errorMsg'] = "No suitable range found before cadence of interest"
return oot, diagnostics
if lwrBefore is None:
diagnostics['errorMsg'] = "No suitable range found after cadence of interest"
return oot, diagnostics
try:
ootBefore, rinBefore = \
getDiffFromRange(cube, lwrBefore, uprBefore, rollPhase, rollPhase0)
diagnostics['rinBefore'] = rinBefore
except ValueError as e:
diagnostics['errorMsg'] = "Early diff img: %s" %(e)
return oot, diagnostics
#@TODO: Should I just return OOT before here?
try:
ootAfter, rinAfter = getDiffFromRange(cube, lwrAfter, uprAfter, \
rollPhase, rollPhase0)
diagnostics['rinAfter'] = rinAfter
except ValueError as e:
diagnostics['errorMsg'] = "Later diff img: %s" %(e)
return oot, diagnostics
oot = .5 * (ootBefore + ootAfter)
return oot, diagnostics
def getDiffFromRange(cube, lwr, upr, rollPhase, rollPhase0):
"""
Construct an interpolated difference image.
Construct an interpolated difference image from the data
in cube[lwr:upr] to match what would be expected if
a cadence was observed centred on rollPhase0.
Inputs:
-------------
cube
(3d np array) A data cube created from a TPF file.
See io.tpf.getTargetPixelArrayFromFits()
lwr, upr
(int) Range of cube to interpolate within.
rollPhase
(1d np array) An array of roll phases for each row
of cube. len(rollPhase) == len(cube). Units of this
array don't matter, so long as cadences with similar
roll angles have similar values of rollPhase
rollPhase0
(float) the value of rollPhase to interpolate to.
An exception is raised if rollPhase[lwr:upr] does
not bracket rollPhase0
Returns:
--------------
A two element tuple
* A np 2d array representing an interpolated image.
* A tuple containing the indices of the cadences used for interpolation
TODO:
------------
Don't use cadences that are flagged in some bad way.
"""
maxDiffBetweenAdjacentPoints = .15
i0, sgn, d1, d2 = getBracketingCadence(rollPhase, lwr, upr, rollPhase0)
#d1 and d2 must bracket rollPhase0. They should also
#have similar values
if d1*d2 < 0 and np.fabs(d2-d1) < maxDiffBetweenAdjacentPoints:
diff = interpolateImages(rollPhase[i0], rollPhase[i0+sgn], \
cube[i0], cube[i0+sgn], rollPhase0 )
return diff, (i0, i0+sgn)
raise ValueError("Can't produce difference image")
def getBracketingCadence(rollPhase, lwr, upr, rollPhase0):
"""Get bracketing cadence for a given rollphase.
Computes i0 and sgn such that:
* i0 is in the range [lwr, upr]
* sgn is either +1 or -1
* rollPhase[i0] and rollPhase[i0+sgn] bracket rollPhase0, i.e
one value is larger than rollPhase0 and the other is lower.
Inputs:
------------
rollPhase
(1d numpy array) Values for rollphase
lwr, upr
(integers), range of values in rollphase to search
rollPhase0
The roll phase to bracket
Returns:
-----------
i0
(int) First bracketing cadence
sgn
(int) Either +1 or -1. The second bracketing cadence is i0+sgn
d1, d2
(int) Rollphase difference between rp[i0], rp[i0+sng] and rollPhase0
Can be used to assess the quality of the bracket chosen.
"""
i0 = np.argmin( np.fabs(rollPhase[lwr:upr] - rollPhase0)) + lwr
d1 = rollPhase0 - rollPhase[i0]
slope = rollPhase[i0+1] - rollPhase[i0-1]
#Should I interpolate with previous or next cadence
#If the slope is positive, and rp0 is larger than rp[i0]
#I want to use the next cadence.
sgn = 2*int(d1*slope > 0) - 1
d2 = rollPhase0 - rollPhase[i0 + sgn]
return i0, sgn, d1, d2
def interpolateImages(x0, x1, img0, img1, x):
"""Private function used by getDiffFromRange()"""
f = (x-x0)/(x1-x0)
return img0 + f*(img1-img0)
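# Worked example (comment only): with x0=0.0, x1=1.0 and x=0.25 the weight f is
# 0.25, so interpolateImages(0.0, 1.0, img0, img1, 0.25) returns
# img0 + 0.25*(img1 - img0), i.e. a quarter of the way from img0 to img1.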
def getThrusterFiringIntervalBefore(tfIndices, minDuration = 10):
"""Returns range of points between last two thruster firings in input
See getThrusterFiringIntervalAfter() for more details. This
function does the same job, but returns the last two indices
in the array meeting the criteria.
"""
nTfi = len(tfIndices)
if nTfi == 0:
return None, None
if nTfi == 1:
return 0, tfIndices[0]
i = len(tfIndices)-1
while i > 1:
if tfIndices[i-1] + minDuration < tfIndices[i]:
return tfIndices[i-1], tfIndices[i]
i -= 1
return None, None
def getThrusterFiringIntervalAfter(tfIndices, minDuration=10):
"""Get range of points between first two thruster firings in input
Thruster firings tend to cluster, so we don't just want the first
pair of firings in the array. Better is the first pair of firings that
are separated by a minimum number of cadences, minDuration
Input:
--------
tfIndices (1d np array)
A list of numbers, each number represents the index where a
thruster firing occurs. This is not a boolean array
Optional Input:
---------------
minDuration
A pair of cadences must be separated by at least this many cadences
to be returned.
Returns:
----------
Values for first two numbers separated by more than minDuration.
Example:
---------
``getThrusterFiringIntervalAfter( [1,3,15,29], 10)``
returns ``[3,15]``
"""
numTf= len(tfIndices)
if numTf == 0:
return None, None
if numTf == 1:
return tfIndices[0], -1
i = 0
while i < numTf - 1:
if tfIndices[i] + minDuration < tfIndices[i+1]:
return tfIndices[i], tfIndices[i+1]
i += 1
return None, None
def plotDiffDiagnostic(diff, oot):
"""Only works on my machine"""
mp.subplot(121)
# img = np.log10(1+diff - np.min(diff[np.isfinite(diff)]))
plotTpf.plotCadence(diff, axis="relative")
mp.colorbar()
mp.subplot(122)
plotTpf.plotCadence(diff/np.sqrt(2*oot), axis="relative")
mp.colorbar()
mp.clim(-3.0, 3.0)
import dave.diffimg.arclen as arclen
import dave.fileio.mastio as mastio
import dave.fileio.tpf as tpf
def example():
ar = mastio.K2Archive()
kepid = 206103150 #A wasp planet
fits = ar.getLongCadence(kepid, 3)
flags = fits['SAP_QUALITY']
cent1 = fits['MOM_CENTR1']
cent2 = fits['MOM_CENTR2']
fits, hdr = ar.getLongTpf(kepid, 3, header=True)
cube = tpf.getTargetPixelArrayFromFits(fits, hdr)
# cube *= gain
#Compute roll phase
centColRow = np.vstack((cent1, cent2)).transpose()
rot = arclen.computeArcLength(centColRow, flags>0)
rollPhase = rot[:,0]
rollPhase[flags>0] = -9999 #A bad value
cadenceInTransit = 490
diff, oot, diagnostics = constructK2DifferenceImage(cube, cadenceInTransit, rollPhase, flags)
return diff, oot
| 29.1693
| 86
| 0.650209
|
daf91a40f7ad7935d355a287819ad1dbcdd84eb8
| 11,462
|
py
|
Python
|
python/paddle/fluid/layers/learning_rate_scheduler.py
|
skylarch/Paddle
|
d58d8df6f5f7aa6fd2f0780f87475055db57a80d
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/layers/learning_rate_scheduler.py
|
skylarch/Paddle
|
d58d8df6f5f7aa6fd2f0780f87475055db57a80d
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/layers/learning_rate_scheduler.py
|
skylarch/Paddle
|
d58d8df6f5f7aa6fd2f0780f87475055db57a80d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
When training a model, it's often useful to decay the
learning rate during training process, this is called
learning_rate_decay. There are many strategies to do
this, this module will provide some classical method.
User can also implement their own learning_rate_decay
strategy according to this module.
"""
from . import control_flow
from . import nn
from . import ops
from . import tensor
from ..initializer import init_on_cpu
from ..framework import default_main_program, Parameter
__all__ = [
'exponential_decay', 'natural_exp_decay', 'inverse_time_decay',
'polynomial_decay', 'piecewise_decay', 'noam_decay', 'append_LARS'
]
def _decay_step_counter(begin=0):
# the first global step is zero in learning rate decay
global_step = nn.autoincreased_step_counter(
counter_name='@LR_DECAY_COUNTER@', begin=begin, step=1)
global_step = tensor.cast(global_step, 'float32')
return global_step
def noam_decay(d_model, warmup_steps):
"""
Noam decay method. The numpy implementation of noam decay as follows.
>>> import numpy as np
>>> lr_value = np.power(d_model, -0.5) * np.min([
>>> np.power(current_steps, -0.5),
>>> np.power(warmup_steps, -1.5) * current_steps])
Please reference `attention is all you need
<https://arxiv.org/pdf/1706.03762.pdf>`_.
Args:
d_model(Variable): The dimensionality of input and output of model.
warmup_steps(Variable): A hyperparameter: the number of warmup steps.
Returns:
The decayed learning rate.
"""
global_step = _decay_step_counter(1)
a = global_step**-0.5
b = (warmup_steps**-1.5) * global_step
lr_value = (d_model**-0.5) * ops.elementwise_min(a, b)
return lr_value
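# Numeric sanity check (comment only), mirroring the numpy snippet in the
# docstring with illustrative values d_model=512, warmup_steps=4000:
#   step 1:    lr = 512**-0.5 * min(1**-0.5, 4000**-1.5 * 1)
#   step 4000: lr = 512**-0.5 * min(4000**-0.5, 4000**-1.5 * 4000)
# i.e. the rate ramps up linearly during warmup, then decays as step**-0.5.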
def exponential_decay(learning_rate, decay_steps, decay_rate, staircase=False):
"""
Applies exponential decay to the learning rate.
When training a model, it is often recommended to lower the learning rate as the
training progresses. By using this function, the learning rate will be decayed by
'decay_rate' every 'decay_steps' steps.
>>> if staircase == True:
>>> decayed_learning_rate = learning_rate * decay_rate ^ floor(global_step / decay_steps)
>>> else:
>>> decayed_learning_rate = learning_rate * decay_rate ^ (global_step / decay_steps)
Args:
learning_rate(Variable|float): The initial learning rate.
decay_steps(int): See the decay computation above.
decay_rate(float): The decay rate. See the decay computation above.
staircase(Boolean): If True, decay the learning rate at discrete intervals.
Default: False
Returns:
Variable: The decayed learning rate
Examples:
.. code-block:: python
base_lr = 0.1
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.layers.exponential_decay(
learning_rate=base_lr,
decay_steps=10000,
decay_rate=0.5,
staircase=True))
sgd_optimizer.minimize(avg_cost)
"""
global_step = _decay_step_counter()
div_res = global_step / decay_steps
if staircase:
div_res = ops.floor(div_res)
decayed_lr = learning_rate * (decay_rate**div_res)
return decayed_lr
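# Worked example (comment only) matching the docstring above: with
# learning_rate=0.1, decay_steps=10000, decay_rate=0.5 and staircase=True the
# rate is 0.1 for steps [0, 10000), 0.05 for [10000, 20000), 0.025 for
# [20000, 30000), and so on.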
def natural_exp_decay(learning_rate, decay_steps, decay_rate, staircase=False):
"""Applies natural exponential decay to the initial learning rate.
>>> if not staircase:
>>> decayed_learning_rate = learning_rate * exp(- decay_rate * (global_step / decay_steps))
>>> else:
>>> decayed_learning_rate = learning_rate * exp(- decay_rate * floor(global_step / decay_steps))
Args:
learning_rate: A scalar float32 value or a Variable. This
will be the initial learning rate during training
decay_steps: A Python `int32` number.
decay_rate: A Python `float` number.
staircase: Boolean. If True, decay the learning rate at discrete intervals of decay_steps. Default: False
Returns:
The decayed learning rate
"""
global_step = _decay_step_counter()
div_res = global_step / decay_steps
if staircase:
div_res = ops.floor(div_res)
decayed_lr = learning_rate * ops.exp(-1 * decay_rate * div_res)
return decayed_lr
def inverse_time_decay(learning_rate, decay_steps, decay_rate, staircase=False):
"""
Applies inverse time decay to the initial learning rate.
When training a model, it is often recommended to lower the learning rate as the
training progresses. By using this function, an inverse decay function will be
applied to the initial learning rate.
>>> if staircase == True:
>>> decayed_learning_rate = learning_rate / (1 + decay_rate * floor(global_step / decay_step))
>>> else:
>>> decayed_learning_rate = learning_rate / (1 + decay_rate * global_step / decay_step)
Args:
learning_rate(Variable|float): The initial learning rate.
decay_steps(int): See the decay computation above.
decay_rate(float): The decay rate. See the decay computation above.
staircase(Boolean): If True, decay the learning rate at discrete intervals.
Default: False
Returns:
Variable: The decayed learning rate
Examples:
.. code-block:: python
base_lr = 0.1
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.layers.inverse_time_decay(
learning_rate=base_lr,
decay_steps=10000,
decay_rate=0.5,
staircase=True))
sgd_optimizer.minimize(avg_cost)
"""
global_step = _decay_step_counter()
div_res = global_step / decay_steps
if staircase:
div_res = ops.floor(div_res)
decayed_lr = learning_rate / (1 + decay_rate * div_res)
return decayed_lr
def polynomial_decay(learning_rate,
decay_steps,
end_learning_rate=0.0001,
power=1.0,
cycle=False):
"""
Applies polynomial decay to the initial learning rate.
.. code-block:: python
if cycle:
decay_steps = decay_steps * ceil(global_step / decay_steps)
else:
global_step = min(global_step, decay_steps)
decayed_learning_rate = (learning_rate - end_learning_rate) *
(1 - global_step / decay_steps) ^ power + end_learning_rate
Args:
learning_rate(Variable|float32): A scalar float32 value or a Variable. This
will be the initial learning rate during training.
decay_steps(int32): A Python `int32` number.
end_learning_rate(float): A Python `float` number.
power(float): A Python `float` number.
cycle(bool): If True, the decay restarts each time global_step exceeds decay_steps (decay_steps is scaled up accordingly).
Returns:
Variable: The decayed learning rate
"""
global_step = _decay_step_counter()
if cycle:
div_res = ops.ceil(global_step / decay_steps)
zero_var = tensor.fill_constant(shape=[1], dtype='float32', value=0.0)
one_var = tensor.fill_constant(shape=[1], dtype='float32', value=1.0)
with control_flow.Switch() as switch:
with switch.case(global_step == zero_var):
tensor.assign(input=one_var, output=div_res)
decay_steps = decay_steps * div_res
else:
decay_steps_var = tensor.fill_constant(
shape=[1], dtype='float32', value=float(decay_steps))
global_step = ops.elementwise_min(x=global_step, y=decay_steps_var)
decayed_lr = (learning_rate - end_learning_rate) * \
((1 - global_step / decay_steps) ** power) + end_learning_rate
return decayed_lr
def piecewise_decay(boundaries, values):
"""Applies piecewise decay to the initial learning rate.
The algorithm can be described as the code below.
.. code-block:: python
boundaries = [10000, 20000]
values = [1.0, 0.5, 0.1]
if step < 10000:
learning_rate = 1.0
elif 10000 <= step < 20000:
learning_rate = 0.5
else:
learning_rate = 0.1
Args:
boundaries: A list of steps numbers.
values: A list of learning rate values that will be picked during
different step boundaries.
Returns:
The decayed learning rate.
"""
if len(values) - len(boundaries) != 1:
raise ValueError("len(values) - len(boundaries) should be 1")
global_step = _decay_step_counter()
lr = tensor.create_global_var(
shape=[1],
value=0.0,
dtype='float32',
persistable=True,
name="learning_rate")
with control_flow.Switch() as switch:
for i in range(len(boundaries)):
boundary_val = tensor.fill_constant(
shape=[1],
dtype='float32',
value=float(boundaries[i]),
force_cpu=True)
value_var = tensor.fill_constant(
shape=[1], dtype='float32', value=float(values[i]))
with switch.case(global_step < boundary_val):
tensor.assign(value_var, lr)
last_value_var = tensor.fill_constant(
shape=[1], dtype='float32', value=float(values[len(values) - 1]))
with switch.default():
tensor.assign(last_value_var, lr)
return lr
def append_LARS(params_grads, learning_rate, weight_decay):
"""Applies LARS (LAYER-WISE ADAPTIVE RATE SCALING) to learning rate for
each layer.
```python
learning_rate *= local_gw_ratio * sqrt(sumsq(param))
/ (sqrt(sumsq(gradient))+ weight_decay * sqrt(sumsq(param)))
```
Args:
learning_rate: A learning rate Variable. This
is the global learning rate for LARS.
weight_decay: A Python `float` number.
Returns:
The decayed learning rate
"""
def _balanced_weight(param_norm, grad_norm):
if weight_decay == 1.0:
return grad_norm + param_norm
else:
return grad_norm + weight_decay * param_norm
for param, grad in params_grads:
param_lr = param.optimize_attr['learning_rate']
param_norm = ops.sqrt(nn.reduce_sum(input=ops.square(param)))
grad_norm = ops.sqrt(nn.reduce_sum(input=ops.square(grad)))
if type(param_lr) == float and param_lr == 1.0:
decayed_lr = learning_rate * param_norm \
/ _balanced_weight(param_norm, grad_norm)
else:
decayed_lr = learning_rate * param_lr * param_norm \
/ _balanced_weight(param_norm, grad_norm)
# set back param local learning rate
param.optimize_attr['learning_rate'] = decayed_lr
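# Worked example (comment only, illustrative numbers): with learning_rate=0.1,
# weight_decay=0.0005, sqrt(sumsq(param))=1.0 and sqrt(sumsq(grad))=0.1, the
# per-parameter rate is 0.1 * 1.0 / (0.1 + 0.0005 * 1.0) ~= 0.995,
# i.e. the base rate scaled by param_norm / (grad_norm + weight_decay * param_norm).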
| 34.214925
| 102
| 0.646222
|
8b19c6c99034dddd70e781640ca56664a60d18e9
| 55,415
|
py
|
Python
|
active_projects/diffyq/part1/pendulum.py
|
thevivekpandey/manim
|
483dbfc232fa684e7722969221bd416fde8bd55a
|
[
"MIT"
] | null | null | null |
active_projects/diffyq/part1/pendulum.py
|
thevivekpandey/manim
|
483dbfc232fa684e7722969221bd416fde8bd55a
|
[
"MIT"
] | null | null | null |
active_projects/diffyq/part1/pendulum.py
|
thevivekpandey/manim
|
483dbfc232fa684e7722969221bd416fde8bd55a
|
[
"MIT"
] | null | null | null |
from manimlib.imports import *
from active_projects.diffyq.part1.shared_constructs import *
class Pendulum(VGroup):
CONFIG = {
"length": 3,
"gravity": 9.8,
"weight_diameter": 0.5,
"initial_theta": 0.3,
"omega": 0,
"damping": 0.1,
"top_point": 2 * UP,
"rod_style": {
"stroke_width": 3,
"stroke_color": LIGHT_GREY,
"sheen_direction": UP,
"sheen_factor": 1,
},
"weight_style": {
"stroke_width": 0,
"fill_opacity": 1,
"fill_color": GREY_BROWN,
"sheen_direction": UL,
"sheen_factor": 0.5,
"background_stroke_color": BLACK,
"background_stroke_width": 3,
"background_stroke_opacity": 0.5,
},
"dashed_line_config": {
"num_dashes": 25,
"stroke_color": WHITE,
"stroke_width": 2,
},
"angle_arc_config": {
"radius": 1,
"stroke_color": WHITE,
"stroke_width": 2,
},
"velocity_vector_config": {
"color": RED,
},
"theta_label_height": 0.25,
"set_theta_label_height_cap": False,
"n_steps_per_frame": 100,
"include_theta_label": True,
"include_velocity_vector": False,
"velocity_vector_multiple": 0.5,
"max_velocity_vector_length_to_length_ratio": 0.5,
}
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.create_fixed_point()
self.create_rod()
self.create_weight()
self.rotating_group = VGroup(self.rod, self.weight)
self.create_dashed_line()
self.create_angle_arc()
if self.include_theta_label:
self.add_theta_label()
if self.include_velocity_vector:
self.add_velocity_vector()
self.set_theta(self.initial_theta)
self.update()
def create_fixed_point(self):
self.fixed_point_tracker = VectorizedPoint(self.top_point)
self.add(self.fixed_point_tracker)
return self
def create_rod(self):
rod = self.rod = Line(UP, DOWN)
rod.set_height(self.length)
rod.set_style(**self.rod_style)
rod.move_to(self.get_fixed_point(), UP)
self.add(rod)
def create_weight(self):
weight = self.weight = Circle()
weight.set_width(self.weight_diameter)
weight.set_style(**self.weight_style)
weight.move_to(self.rod.get_end())
self.add(weight)
def create_dashed_line(self):
line = self.dashed_line = DashedLine(
self.get_fixed_point(),
self.get_fixed_point() + self.length * DOWN,
**self.dashed_line_config
)
line.add_updater(
lambda l: l.move_to(self.get_fixed_point(), UP)
)
self.add_to_back(line)
def create_angle_arc(self):
self.angle_arc = always_redraw(lambda: Arc(
arc_center=self.get_fixed_point(),
start_angle=-90 * DEGREES,
angle=self.get_arc_angle_theta(),
**self.angle_arc_config,
))
self.add(self.angle_arc)
def get_arc_angle_theta(self):
# Might be changed in certain scenes
return self.get_theta()
def add_velocity_vector(self):
def make_vector():
omega = self.get_omega()
theta = self.get_theta()
mvlr = self.max_velocity_vector_length_to_length_ratio
max_len = mvlr * self.rod.get_length()
vvm = self.velocity_vector_multiple
multiple = np.clip(
vvm * omega, -max_len, max_len
)
vector = Vector(
multiple * RIGHT,
**self.velocity_vector_config,
)
vector.rotate(theta, about_point=ORIGIN)
vector.shift(self.rod.get_end())
return vector
self.velocity_vector = always_redraw(make_vector)
self.add(self.velocity_vector)
return self
def add_theta_label(self):
self.theta_label = always_redraw(self.get_label)
self.add(self.theta_label)
def get_label(self):
label = TexMobject("\\theta")
label.set_height(self.theta_label_height)
if self.set_theta_label_height_cap:
max_height = self.angle_arc.get_width()
if label.get_height() > max_height:
label.set_height(max_height)
top = self.get_fixed_point()
arc_center = self.angle_arc.point_from_proportion(0.5)
vect = arc_center - top
norm = get_norm(vect)
vect = normalize(vect) * (norm + self.theta_label_height)
label.move_to(top + vect)
return label
#
def get_theta(self):
theta = self.rod.get_angle() - self.dashed_line.get_angle()
theta = (theta + PI) % TAU - PI
return theta
def set_theta(self, theta):
self.rotating_group.rotate(
theta - self.get_theta()
)
self.rotating_group.shift(
self.get_fixed_point() - self.rod.get_start(),
)
return self
def get_omega(self):
return self.omega
def set_omega(self, omega):
self.omega = omega
return self
def get_fixed_point(self):
return self.fixed_point_tracker.get_location()
#
def start_swinging(self):
self.add_updater(Pendulum.update_by_gravity)
def end_swinging(self):
self.remove_updater(Pendulum.update_by_gravity)
def update_by_gravity(self, dt):
theta = self.get_theta()
omega = self.get_omega()
nspf = self.n_steps_per_frame
for x in range(nspf):
d_theta = omega * dt / nspf
d_omega = op.add(
-self.damping * omega,
-(self.gravity / self.length) * np.sin(theta),
) * dt / nspf
theta += d_theta
omega += d_omega
self.set_theta(theta)
self.set_omega(omega)
return self
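# (Comment only) update_by_gravity integrates the damped pendulum ODE
#   theta'' = -damping * theta' - (gravity / length) * sin(theta)
# with simple forward-Euler sub-steps, n_steps_per_frame of them per frame dt.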
class GravityVector(Vector):
CONFIG = {
"color": YELLOW,
"length_multiple": 1 / 9.8,
# TODO, continually update the length based
# on the pendulum's gravity?
}
def __init__(self, pendulum, **kwargs):
super().__init__(DOWN, **kwargs)
self.pendulum = pendulum
self.scale(self.length_multiple * pendulum.gravity)
self.attach_to_pendulum(pendulum)
def attach_to_pendulum(self, pendulum):
self.add_updater(lambda m: m.shift(
pendulum.weight.get_center() - self.get_start(),
))
def add_component_lines(self):
self.component_lines = always_redraw(self.create_component_lines)
self.add(self.component_lines)
def create_component_lines(self):
theta = self.pendulum.get_theta()
x_new = rotate(RIGHT, theta)
base = self.get_start()
tip = self.get_end()
vect = tip - base
corner = base + x_new * np.dot(vect, x_new)
kw = {"dash_length": 0.025}
return VGroup(
DashedLine(base, corner, **kw),
DashedLine(corner, tip, **kw),
)
class ThetaValueDisplay(VGroup):
CONFIG = {
}
def __init__(self, **kwargs):
super().__init__(**kwargs)
class ThetaVsTAxes(Axes):
CONFIG = {
"x_min": 0,
"x_max": 8,
"y_min": -PI / 2,
"y_max": PI / 2,
"y_axis_config": {
"tick_frequency": PI / 8,
"unit_size": 1.5,
},
"number_line_config": {
"color": "#EEEEEE",
"stroke_width": 2,
"include_tip": False,
},
"graph_style": {
"stroke_color": GREEN,
"stroke_width": 3,
"fill_opacity": 0,
},
}
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.add_labels()
def add_axes(self):
self.axes = Axes(**self.axes_config)
self.add(self.axes)
def add_labels(self):
x_axis = self.get_x_axis()
y_axis = self.get_y_axis()
t_label = self.t_label = TexMobject("t")
t_label.next_to(x_axis.get_right(), UP, MED_SMALL_BUFF)
x_axis.label = t_label
x_axis.add(t_label)
theta_label = self.theta_label = TexMobject("\\theta(t)")
theta_label.next_to(y_axis.get_top(), UP, SMALL_BUFF)
y_axis.label = theta_label
y_axis.add(theta_label)
self.y_axis_label = theta_label
self.x_axis_label = t_label
x_axis.add_numbers()
y_axis.add(self.get_y_axis_coordinates(y_axis))
def get_y_axis_coordinates(self, y_axis):
texs = [
# "\\pi \\over 4",
# "\\pi \\over 2",
# "3 \\pi \\over 4",
# "\\pi",
"\\pi / 4",
"\\pi / 2",
"3 \\pi / 4",
"\\pi",
]
values = np.arange(1, 5) * PI / 4
labels = VGroup()
for pos_tex, pos_value in zip(texs, values):
neg_tex = "-" + pos_tex
neg_value = -1 * pos_value
for tex, value in (pos_tex, pos_value), (neg_tex, neg_value):
if value > self.y_max or value < self.y_min:
continue
symbol = TexMobject(tex)
symbol.scale(0.5)
point = y_axis.number_to_point(value)
symbol.next_to(point, LEFT, MED_SMALL_BUFF)
labels.add(symbol)
return labels
def get_live_drawn_graph(self, pendulum,
t_max=None,
t_step=1.0 / 60,
**style):
style = merge_dicts_recursively(self.graph_style, style)
if t_max is None:
t_max = self.x_max
graph = VMobject()
graph.set_style(**style)
graph.all_coords = [(0, pendulum.get_theta())]
graph.time = 0
graph.time_of_last_addition = 0
def update_graph(graph, dt):
graph.time += dt
if graph.time > t_max:
graph.remove_updater(update_graph)
return
new_coords = (graph.time, pendulum.get_theta())
if graph.time - graph.time_of_last_addition >= t_step:
graph.all_coords.append(new_coords)
graph.time_of_last_addition = graph.time
print(new_coords)
points = [
self.coords_to_point(*coords)
for coords in [*graph.all_coords, new_coords]
]
graph.set_points_smoothly(points)
graph.add_updater(update_graph)
return graph
# Scenes
class IntroducePendulum(PiCreatureScene, MovingCameraScene):
CONFIG = {
"pendulum_config": {
"length": 3,
"top_point": 4 * RIGHT,
"weight_diameter": 0.35,
"gravity": 20,
},
"theta_vs_t_axes_config": {
"y_max": PI / 4,
"y_min": -PI / 4,
"y_axis_config": {
"tick_frequency": PI / 16,
"unit_size": 2,
"tip_length": 0.3,
},
"x_max": 12,
"number_line_config": {
"stroke_width": 2,
}
},
}
def setup(self):
MovingCameraScene.setup(self)
PiCreatureScene.setup(self)
def construct(self):
self.add_pendulum()
# self.label_pi_creatures()
self.label_pendulum()
self.add_graph()
self.label_function()
self.show_graph_period()
self.show_length_and_gravity()
# self.tweak_length_and_gravity()
def create_pi_creatures(self):
randy = Randolph(color=BLUE_C)
morty = Mortimer(color=MAROON_E)
creatures = VGroup(randy, morty)
creatures.scale(0.5)
creatures.arrange(RIGHT, buff=2.5)
creatures.to_corner(DR)
return creatures
def add_pendulum(self):
pendulum = self.pendulum = Pendulum(**self.pendulum_config)
pendulum.start_swinging()
frame = self.camera_frame
frame.save_state()
frame.scale(0.5)
frame.move_to(pendulum.dashed_line)
self.add(pendulum, frame)
def label_pi_creatures(self):
randy, morty = self.pi_creatures
randy_label = TextMobject("Physics\\\\", "student")
morty_label = TextMobject("Physics\\\\", "teacher")
labels = VGroup(randy_label, morty_label)
labels.scale(0.5)
randy_label.next_to(randy, UP, LARGE_BUFF)
morty_label.next_to(morty, UP, LARGE_BUFF)
for label, pi in zip(labels, self.pi_creatures):
label.arrow = Arrow(
label.get_bottom(), pi.eyes.get_top()
)
label.arrow.set_color(WHITE)
label.arrow.set_stroke(width=5)
morty.labels = VGroup(
morty_label,
morty_label.arrow,
)
self.play(
FadeInFromDown(randy_label),
GrowArrow(randy_label.arrow),
randy.change, "hooray",
)
self.play(
Animation(self.pendulum.fixed_point_tracker),
TransformFromCopy(randy_label[0], morty_label[0]),
FadeIn(morty_label[1]),
GrowArrow(morty_label.arrow),
morty.change, "raise_right_hand",
)
self.wait(2)
def label_pendulum(self):
pendulum = self.pendulum
randy, morty = self.pi_creatures
label = pendulum.theta_label
rect = SurroundingRectangle(label, buff=0.5 * SMALL_BUFF)
rect.add_updater(lambda r: r.move_to(label))
for pi in randy, morty:
pi.add_updater(
lambda m: m.look_at(pendulum.weight)
)
self.play(randy.change, "pondering")
self.play(morty.change, "pondering")
self.wait(3)
randy.clear_updaters()
morty.clear_updaters()
self.play(
ShowCreationThenFadeOut(rect),
)
self.wait()
def add_graph(self):
axes = self.axes = ThetaVsTAxes(**self.theta_vs_t_axes_config)
axes.y_axis.label.next_to(axes.y_axis, UP, buff=0)
axes.to_corner(UL)
self.play(
Restore(
self.camera_frame,
rate_func=squish_rate_func(smooth, 0, 0.9),
),
DrawBorderThenFill(
axes,
rate_func=squish_rate_func(smooth, 0.5, 1),
lag_ratio=0.9,
),
Transform(
self.pendulum.theta_label.copy().clear_updaters(),
axes.y_axis.label.copy(),
remover=True,
rate_func=squish_rate_func(smooth, 0, 0.8),
),
run_time=3,
)
self.wait(1.5)
self.graph = axes.get_live_drawn_graph(self.pendulum)
self.add(self.graph)
def label_function(self):
hm_word = TextMobject("Simple harmonic motion")
hm_word.scale(1.25)
hm_word.to_edge(UP)
formula = TexMobject(
"=\\theta_0 \\cos(\\sqrt{g / L} t)"
)
formula.next_to(
self.axes.y_axis_label, RIGHT, SMALL_BUFF
)
formula.set_stroke(width=0, background=True)
self.play(FadeInFrom(hm_word, DOWN))
self.wait()
self.play(
Write(formula),
hm_word.to_corner, UR
)
self.wait(4)
def show_graph_period(self):
pendulum = self.pendulum
axes = self.axes
period = self.period = TAU * np.sqrt(
pendulum.length / pendulum.gravity
)
amplitude = pendulum.initial_theta
line = Line(
axes.coords_to_point(0, amplitude),
axes.coords_to_point(period, amplitude),
)
line.shift(SMALL_BUFF * RIGHT)
brace = Brace(line, UP, buff=SMALL_BUFF)
brace.add_to_back(brace.copy().set_style(BLACK, 10))
formula = get_period_formula()
formula.next_to(brace, UP, SMALL_BUFF)
self.period_formula = formula
self.period_brace = brace
self.play(
GrowFromCenter(brace),
FadeInFromDown(formula),
)
self.wait(2)
def show_length_and_gravity(self):
formula = self.period_formula
L = formula.get_part_by_tex("L")
g = formula.get_part_by_tex("g")
rod = self.pendulum.rod
new_rod = rod.copy()
new_rod.set_stroke(BLUE, 7)
new_rod.add_updater(lambda r: r.put_start_and_end_on(
*rod.get_start_and_end()
))
g_vect = GravityVector(
self.pendulum,
length_multiple=0.5 / 9.8,
)
down_vectors = self.get_down_vectors()
down_vectors.set_color(YELLOW)
down_vectors.set_opacity(0.5)
self.play(
ShowCreationThenDestructionAround(L),
ShowCreation(new_rod),
)
self.play(FadeOut(new_rod))
self.play(
ShowCreationThenDestructionAround(g),
GrowArrow(g_vect),
)
self.play(self.get_down_vectors_animation(down_vectors))
self.wait(6)
self.gravity_vector = g_vect
def tweak_length_and_gravity(self):
pendulum = self.pendulum
axes = self.axes
graph = self.graph
brace = self.period_brace
formula = self.period_formula
g_vect = self.gravity_vector
randy, morty = self.pi_creatures
graph.clear_updaters()
period2 = self.period * np.sqrt(2)
period3 = self.period / np.sqrt(2)
amplitude = pendulum.initial_theta
graph2, graph3 = [
axes.get_graph(
lambda t: amplitude * np.cos(TAU * t / p),
color=RED,
)
for p in (period2, period3)
]
formula.add_updater(lambda m: m.next_to(
brace, UP, SMALL_BUFF
))
new_pendulum_config = dict(self.pendulum_config)
new_pendulum_config["length"] *= 2
new_pendulum_config["top_point"] += 3.5 * UP
# new_pendulum_config["initial_theta"] = pendulum.get_theta()
new_pendulum = Pendulum(**new_pendulum_config)
down_vectors = self.get_down_vectors()
self.play(randy.change, "happy")
self.play(
ReplacementTransform(pendulum, new_pendulum),
morty.change, "horrified",
morty.shift, 3 * RIGHT,
morty.labels.shift, 3 * RIGHT,
)
self.remove(morty, morty.labels)
g_vect.attach_to_pendulum(new_pendulum)
new_pendulum.start_swinging()
self.play(
ReplacementTransform(graph, graph2),
brace.stretch, np.sqrt(2), 0, {"about_edge": LEFT},
)
self.add(g_vect)
self.wait(3)
new_pendulum.gravity *= 4
g_vect.scale(2)
self.play(
FadeOut(graph2),
self.get_down_vectors_animation(down_vectors)
)
self.play(
FadeIn(graph3),
brace.stretch, 0.5, 0, {"about_edge": LEFT},
)
self.wait(6)
#
def get_down_vectors(self):
down_vectors = VGroup(*[
Vector(0.5 * DOWN)
for x in range(10 * 150)
])
down_vectors.arrange_in_grid(10, 150, buff=MED_SMALL_BUFF)
down_vectors.set_color_by_gradient(BLUE, RED)
# for vect in down_vectors:
# vect.shift(0.1 * np.random.random(3))
down_vectors.to_edge(RIGHT)
return down_vectors
def get_down_vectors_animation(self, down_vectors):
return LaggedStart(
*[
GrowArrow(v, rate_func=there_and_back)
for v in down_vectors
],
lag_ratio=0.0005,
run_time=2,
remover=True
)
class MultiplePendulumsOverlayed(Scene):
CONFIG = {
"initial_thetas": [
150 * DEGREES,
90 * DEGREES,
60 * DEGREES,
30 * DEGREES,
10 * DEGREES,
],
"weight_colors": [
PINK, RED, GREEN, BLUE, GREY,
],
"pendulum_config": {
"top_point": ORIGIN,
"length": 3,
},
}
def construct(self):
pendulums = VGroup(*[
Pendulum(
initial_theta=theta,
weight_style={
"fill_color": wc,
"fill_opacity": 0.5,
},
**self.pendulum_config,
)
for theta, wc in zip(
self.initial_thetas,
self.weight_colors,
)
])
for pendulum in pendulums:
pendulum.start_swinging()
pendulum.remove(pendulum.theta_label)
randy = Randolph(color=BLUE_C)
randy.to_corner(DL)
randy.add_updater(lambda r: r.look_at(pendulums[0].weight))
axes = ThetaVsTAxes(
x_max=20,
y_axis_config={
"unit_size": 0.5,
"tip_length": 0.3,
},
)
axes.to_corner(UL)
graphs = VGroup(*[
axes.get_live_drawn_graph(
pendulum,
stroke_color=pendulum.weight.get_color(),
stroke_width=1,
)
for pendulum in pendulums
])
self.add(pendulums)
self.add(axes, *graphs)
self.play(randy.change, "sassy")
self.wait(2)
self.play(Blink(randy))
self.wait(5)
self.play(randy.change, "angry")
self.play(Blink(randy))
self.wait(10)
class LowAnglePendulum(Scene):
CONFIG = {
"pendulum_config": {
"initial_theta": 20 * DEGREES,
"length": 2.0,
"damping": 0,
"top_point": ORIGIN,
},
"axes_config": {
"y_axis_config": {"unit_size": 0.75},
"x_axis_config": {
"unit_size": 0.5,
"numbers_to_show": range(2, 25, 2),
"number_scale_val": 0.5,
},
"x_max": 25,
"number_line_config": {
"tip_length": 0.3,
"stroke_width": 2,
}
},
"axes_corner": UL,
}
def construct(self):
pendulum = Pendulum(**self.pendulum_config)
axes = ThetaVsTAxes(**self.axes_config)
axes.center()
axes.to_corner(self.axes_corner, buff=LARGE_BUFF)
graph = axes.get_live_drawn_graph(pendulum)
L = pendulum.length
g = pendulum.gravity
theta0 = pendulum.initial_theta
prediction = axes.get_graph(
lambda t: theta0 * np.cos(t * np.sqrt(g / L))
)
dashed_prediction = DashedVMobject(prediction, num_dashes=300)
dashed_prediction.set_stroke(WHITE, 1)
prediction_formula = TexMobject(
"\\theta_0", "\\cos(\\sqrt{g / L} \\cdot t)"
)
prediction_formula.scale(0.75)
prediction_formula.next_to(
dashed_prediction, UP, SMALL_BUFF,
)
theta0 = prediction_formula.get_part_by_tex("\\theta_0")
theta0_brace = Brace(theta0, UP, buff=SMALL_BUFF)
theta0_brace.stretch(0.5, 1, about_edge=DOWN)
theta0_label = Integer(
pendulum.initial_theta * 180 / PI,
unit="^\\circ"
)
theta0_label.scale(0.75)
theta0_label.next_to(theta0_brace, UP, SMALL_BUFF)
group = VGroup(theta0_brace, theta0_label, prediction_formula)
group.shift_onto_screen(buff=MED_SMALL_BUFF)
self.add(axes, dashed_prediction, pendulum)
self.play(
ShowCreation(dashed_prediction, run_time=2),
FadeInFromDown(prediction_formula),
FadeInFromDown(theta0_brace),
FadeInFromDown(theta0_label),
)
self.play(
ShowCreationThenFadeAround(theta0_label),
ShowCreationThenFadeAround(pendulum.theta_label),
)
self.wait()
pendulum.start_swinging()
self.add(graph)
self.wait(30)
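# Sketch of the comparison this scene animates: an undamped simulation stays
# close to the small-angle prediction theta_0 * cos(sqrt(g / L) * t), whose
# period is 2*pi*sqrt(L / g) (~2.84 s for L = 2, g = 9.8). The helper below is
# illustrative only; g = 9.8 and the step size are assumptions for the example.
def small_angle_check(theta0_deg=20, length=2.0, gravity=9.8, t_max=10.0, dt=1 / 600):
    """Return (worst gap vs. the small-angle prediction, predicted period)."""
    import numpy as np
    theta0 = np.radians(theta0_deg)
    theta, omega, t = theta0, 0.0, 0.0
    max_gap = 0.0
    while t < t_max:
        predicted = theta0 * np.cos(np.sqrt(gravity / length) * t)
        max_gap = max(max_gap, abs(theta - predicted))
        omega += -(gravity / length) * np.sin(theta) * dt  # semi-implicit Euler step
        theta += omega * dt
        t += dt
    period = 2 * np.pi * np.sqrt(length / gravity)
    return max_gap, period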
class ApproxWordsLowAnglePendulum(Scene):
def construct(self):
period = TexMobject(
"\\text{Period}", "\\approx",
"2\\pi \\sqrt{\\,{L} / {g}}",
**Lg_formula_config
)
checkmark = TexMobject("\\checkmark")
checkmark.set_color(GREEN)
checkmark.scale(2)
checkmark.next_to(period, RIGHT, MED_LARGE_BUFF)
self.add(period, checkmark)
class MediumAnglePendulum(LowAnglePendulum):
CONFIG = {
"pendulum_config": {
"initial_theta": 50 * DEGREES,
"n_steps_per_frame": 1000,
},
"axes_config": {
"y_axis_config": {"unit_size": 0.75},
"y_max": PI / 2,
"y_min": -PI / 2,
"number_line_config": {
"tip_length": 0.3,
"stroke_width": 2,
}
},
"pendulum_shift_vect": 1 * RIGHT,
}
class MediumHighAnglePendulum(MediumAnglePendulum):
CONFIG = {
"pendulum_config": {
"initial_theta": 90 * DEGREES,
"n_steps_per_frame": 1000,
},
}
class HighAnglePendulum(LowAnglePendulum):
CONFIG = {
"pendulum_config": {
"initial_theta": 175 * DEGREES,
"n_steps_per_frame": 1000,
"top_point": 1.5 * DOWN,
"length": 2,
},
"axes_config": {
"y_axis_config": {"unit_size": 0.5},
"y_max": PI,
"y_min": -PI,
"number_line_config": {
"tip_length": 0.3,
"stroke_width": 2,
}
},
"pendulum_shift_vect": 1 * RIGHT,
}
class VeryLowAnglePendulum(LowAnglePendulum):
CONFIG = {
"pendulum_config": {
"initial_theta": 10 * DEGREES,
"n_steps_per_frame": 1000,
"top_point": ORIGIN,
"length": 3,
},
"axes_config": {
"y_axis_config": {"unit_size": 2},
"y_max": PI / 4,
"y_min": -PI / 4,
"number_line_config": {
"tip_length": 0.3,
"stroke_width": 2,
}
},
"pendulum_shift_vect": 1 * RIGHT,
}
class WherePendulumLeads(PiCreatureScene):
def construct(self):
pendulum = Pendulum(
top_point=UP,
length=3,
gravity=20,
)
pendulum.start_swinging()
l_title = TextMobject("Linearization")
l_title.scale(1.5)
l_title.to_corner(UL)
c_title = TextMobject("Chaos")
c_title.scale(1.5)
c_title.move_to(l_title)
c_title.move_to(
c_title.get_center() * np.array([-1, 1, 1])
)
get_theta = pendulum.get_theta
spring = always_redraw(
lambda: ParametricFunction(
lambda t: np.array([
np.cos(TAU * t) + (1.4 + get_theta()) * t,
np.sin(TAU * t) - 0.5,
0,
]),
t_min=-0.5,
t_max=7,
color=GREY,
sheen_factor=1,
sheen_direction=UL,
).scale(0.2).to_edge(LEFT, buff=0)
)
spring_rect = SurroundingRectangle(
spring, buff=MED_LARGE_BUFF,
stroke_width=0,
fill_color=BLACK,
fill_opacity=0,
)
weight = Dot(radius=0.25)
weight.add_updater(lambda m: m.move_to(
spring.points[-1]
))
weight.set_color(BLUE)
weight.set_sheen(1, UL)
spring_system = VGroup(spring, weight)
linear_formula = TexMobject(
"\\frac{d \\vec{\\textbf{x}}}{dt}="
"A\\vec{\\textbf{x}}"
)
linear_formula.next_to(spring, UP, LARGE_BUFF)
linear_formula.match_x(l_title)
randy = self.pi_creature
randy.set_height(2)
randy.center()
randy.to_edge(DOWN)
randy.shift(3 * LEFT)
q_marks = TexMobject("???")
q_marks.next_to(randy, UP)
self.add(pendulum, randy)
self.play(
randy.change, "pondering", pendulum,
FadeInFromDown(q_marks, lag_ratio=0.3)
)
self.play(randy.look_at, pendulum)
self.wait(5)
self.play(
Animation(VectorizedPoint(pendulum.get_top())),
FadeOutAndShift(q_marks, UP, lag_ratio=0.3),
)
self.add(spring_system)
self.play(
FadeOut(spring_rect),
FadeInFrom(linear_formula, UP),
FadeInFromDown(l_title),
)
self.play(FadeInFromDown(c_title))
self.wait(8)
class LongDoublePendulum(ExternallyAnimatedScene):
pass
class AnalyzePendulumForce(MovingCameraScene):
CONFIG = {
"pendulum_config": {
"length": 5,
"top_point": 3.5 * UP,
"initial_theta": 60 * DEGREES,
"set_theta_label_height_cap": True,
},
"g_vect_config": {
"length_multiple": 0.25,
},
"tan_line_color": BLUE,
"perp_line_color": PINK,
}
def construct(self):
self.add_pendulum()
self.show_arc_length()
self.add_g_vect()
self.show_constraint()
self.break_g_vect_into_components()
self.show_angle_geometry()
self.show_gsin_formula()
self.show_sign()
self.show_acceleration_formula()
# self.ask_about_what_to_do()
# self.emphasize_theta()
# self.show_angular_velocity()
# self.show_angular_acceleration()
# self.circle_g_sin_formula()
def add_pendulum(self):
pendulum = Pendulum(**self.pendulum_config)
theta_tracker = ValueTracker(pendulum.get_theta())
pendulum.add_updater(lambda p: p.set_theta(
theta_tracker.get_value()
))
self.add(pendulum)
self.pendulum = pendulum
self.theta_tracker = theta_tracker
def show_arc_length(self):
pendulum = self.pendulum
angle = pendulum.get_theta()
height = pendulum.length
top = pendulum.get_fixed_point()
line = Line(UP, DOWN)
line.set_height(height)
line.move_to(top, UP)
arc = always_redraw(lambda: Arc(
start_angle=-90 * DEGREES,
angle=pendulum.get_theta(),
arc_center=pendulum.get_fixed_point(),
radius=pendulum.length,
stroke_color=GREEN,
))
brace = Brace(Line(ORIGIN, 5 * UP), RIGHT)
brace.point = VectorizedPoint(brace.get_right())
brace.add(brace.point)
brace.set_height(angle)
brace.move_to(ORIGIN, DL)
brace.apply_complex_function(np.exp)
brace.scale(height)
brace.rotate(-90 * DEGREES)
brace.move_to(arc)
brace.shift(MED_SMALL_BUFF * normalize(
arc.point_from_proportion(0.5) - top
))
x_sym = TexMobject("x")
x_sym.set_color(GREEN)
x_sym.next_to(brace.point, DR, buff=SMALL_BUFF)
rhs = TexMobject("=", "L", "\\theta")
rhs.set_color_by_tex("\\theta", BLUE)
rhs.next_to(x_sym, RIGHT)
rhs.shift(0.7 * SMALL_BUFF * UP)
line_L = TexMobject("L")
line_L.next_to(
pendulum.rod.get_center(), UR, SMALL_BUFF,
)
self.play(
ShowCreation(arc),
Rotate(line, angle, about_point=top),
UpdateFromAlphaFunc(
line, lambda m, a: m.set_stroke(
width=2 * there_and_back(a)
)
),
GrowFromPoint(
brace, line.get_bottom(),
path_arc=angle
),
)
self.play(FadeInFrom(x_sym, UP))
self.wait()
# Show equation
line.set_stroke(BLUE, 5)
self.play(
ShowCreationThenFadeOut(line),
FadeInFromDown(line_L)
)
self.play(
TransformFromCopy(
line_L, rhs.get_part_by_tex("L")
),
Write(rhs.get_part_by_tex("="))
)
self.play(
TransformFromCopy(
pendulum.theta_label,
rhs.get_parts_by_tex("\\theta"),
)
)
self.add(rhs)
x_eq = VGroup(x_sym, rhs)
self.play(
FadeOut(brace),
x_eq.rotate, angle / 2,
x_eq.next_to, arc.point_from_proportion(0.5),
UL, {"buff": -MED_SMALL_BUFF}
)
self.x_eq = x_eq
self.arc = arc
self.line_L = line_L
def add_g_vect(self):
pendulum = self.pendulum
g_vect = self.g_vect = GravityVector(
pendulum, **self.g_vect_config,
)
g_word = self.g_word = TextMobject("Gravity")
g_word.rotate(-90 * DEGREES)
g_word.scale(0.75)
g_word.add_updater(lambda m: m.next_to(
g_vect, RIGHT, buff=-SMALL_BUFF,
))
self.play(
GrowArrow(g_vect),
FadeInFrom(g_word, UP, lag_ratio=0.1),
)
self.wait()
def show_constraint(self):
pendulum = self.pendulum
arcs = VGroup()
for u in [-1, 2, -1]:
d_theta = 40 * DEGREES * u
arc = Arc(
start_angle=pendulum.get_theta() - 90 * DEGREES,
angle=d_theta,
radius=pendulum.length,
arc_center=pendulum.get_fixed_point(),
stroke_width=2,
stroke_color=YELLOW,
stroke_opacity=0.5,
)
self.play(
self.theta_tracker.increment_value, d_theta,
ShowCreation(arc)
)
arcs.add(arc)
self.play(FadeOut(arcs))
def break_g_vect_into_components(self):
g_vect = self.g_vect
g_vect.component_lines = always_redraw(
g_vect.create_component_lines
)
tan_line, perp_line = g_vect.component_lines
g_vect.tangent = always_redraw(lambda: Arrow(
tan_line.get_start(),
tan_line.get_end(),
buff=0,
color=self.tan_line_color,
))
g_vect.perp = always_redraw(lambda: Arrow(
perp_line.get_start(),
perp_line.get_end(),
buff=0,
color=self.perp_line_color,
))
self.play(ShowCreation(g_vect.component_lines))
self.play(GrowArrow(g_vect.tangent))
self.wait()
self.play(GrowArrow(g_vect.perp))
self.wait()
def show_angle_geometry(self):
g_vect = self.g_vect
arc = Arc(
start_angle=90 * DEGREES,
angle=self.pendulum.get_theta(),
radius=0.5,
arc_center=g_vect.get_end(),
)
q_mark = TexMobject("?")
q_mark.next_to(arc.get_center(), UL, SMALL_BUFF)
theta_label = TexMobject("\\theta")
theta_label.move_to(q_mark)
self.add(g_vect)
self.play(
ShowCreation(arc),
Write(q_mark)
)
self.play(ShowCreationThenFadeAround(q_mark))
self.wait()
self.play(ShowCreationThenFadeAround(
self.pendulum.theta_label
))
self.play(
TransformFromCopy(
self.pendulum.theta_label,
theta_label,
),
FadeOut(q_mark)
)
self.wait()
self.play(WiggleOutThenIn(g_vect.tangent))
self.play(WiggleOutThenIn(
Line(
*g_vect.get_start_and_end(),
buff=0,
).add_tip().match_style(g_vect),
remover=True
))
self.wait()
self.play(
FadeOut(arc),
FadeOut(theta_label),
)
def show_gsin_formula(self):
g_vect = self.g_vect
g_word = self.g_word
g_word.clear_updaters()
g_term = self.g_term = TexMobject("-g")
g_term.add_updater(lambda m: m.next_to(
g_vect,
RIGHT if self.pendulum.get_theta() >= 0 else LEFT,
SMALL_BUFF
))
def create_vect_label(vect, tex, direction):
label = TexMobject(tex)
label.set_stroke(width=0, background=True)
label.add_background_rectangle()
label.scale(0.7)
max_width = 0.9 * vect.get_length()
if label.get_width() > max_width:
label.set_width(max_width)
angle = vect.get_angle()
angle = (angle + PI / 2) % PI - PI / 2
label.next_to(ORIGIN, direction, SMALL_BUFF)
label.rotate(angle, about_point=ORIGIN)
label.shift(vect.get_center())
return label
g_sin_label = always_redraw(lambda: create_vect_label(
g_vect.tangent, "-g\\sin(\\theta)", UP,
))
g_cos_label = always_redraw(lambda: create_vect_label(
g_vect.perp, "-g\\cos(\\theta)", DOWN,
))
self.play(
ReplacementTransform(g_word[0][0], g_term[0][1]),
FadeOut(g_word[0][1:]),
Write(g_term[0][0]),
)
self.add(g_term)
self.wait()
for label in g_sin_label, g_cos_label:
self.play(
GrowFromPoint(label[0], g_term.get_center()),
TransformFromCopy(g_term, label[1][:2]),
GrowFromPoint(label[1][2:], g_term.get_center()),
remover=True
)
self.add(label)
self.wait()
self.g_sin_label = g_sin_label
self.g_cos_label = g_cos_label
def show_sign(self):
get_theta = self.pendulum.get_theta
theta_decimal = DecimalNumber(include_sign=True)
theta_decimal.add_updater(lambda d: d.set_value(
get_theta()
))
theta_decimal.add_updater(lambda m: m.next_to(
self.pendulum.theta_label, DOWN
))
theta_decimal.add_updater(lambda m: m.set_color(
GREEN if get_theta() > 0 else RED
))
self.play(
FadeInFrom(theta_decimal, UP),
FadeOut(self.x_eq),
FadeOut(self.line_L),
)
self.set_theta(-60 * DEGREES, run_time=4)
self.set_theta(60 * DEGREES, run_time=4)
self.play(
FadeOut(theta_decimal),
FadeIn(self.x_eq),
)
def show_acceleration_formula(self):
x_eq = self.x_eq
g_sin_theta = self.g_sin_label
equation = TexMobject(
"a", "=",
"\\ddot", "x",
"=",
"-", "g", "\\sin\\big(", "\\theta", "\\big)",
)
equation.to_edge(LEFT)
second_deriv = equation[2:4]
x_part = equation.get_part_by_tex("x")
x_part.set_color(GREEN)
a_eq = equation[:2]
eq2 = equation.get_parts_by_tex("=")[1]
rhs = equation[5:]
second_deriv_L_form = TexMobject(
"L", "\\ddot", "\\theta"
)
second_deriv_L_form.move_to(second_deriv, DOWN)
eq3 = TexMobject("=")
eq3.rotate(90 * DEGREES)
eq3.next_to(second_deriv_L_form, UP)
g_L_frac = TexMobject(
"-", "{g", "\\over", "L}"
)
g_L_frac.move_to(rhs[:2], LEFT)
g_L_frac.shift(SMALL_BUFF * UP / 2)
mu_term = TexMobject(
"-\\mu", "\\dot", "\\theta",
)
mu_term.next_to(g_L_frac, LEFT)
mu_term.shift(SMALL_BUFF * UP / 2)
mu_brace = Brace(mu_term, UP)
mu_word = mu_brace.get_text("Air resistance")
for mob in equation, second_deriv_L_form, mu_term:
mob.set_color_by_tex("\\theta", BLUE)
self.play(
TransformFromCopy(x_eq[0], x_part),
Write(equation[:3]),
)
self.wait()
self.play(
Write(eq2),
TransformFromCopy(g_sin_theta, rhs)
)
self.wait()
#
self.show_acceleration_at_different_angles()
#
self.play(
FadeInFromDown(second_deriv_L_form),
Write(eq3),
second_deriv.next_to, eq3, UP,
a_eq.shift, SMALL_BUFF * LEFT,
eq2.shift, SMALL_BUFF * RIGHT,
rhs.shift, SMALL_BUFF * RIGHT,
)
self.wait()
self.wait()
self.play(
FadeOut(a_eq),
FadeOut(second_deriv),
FadeOut(eq3),
ReplacementTransform(
second_deriv_L_form.get_part_by_tex("L"),
g_L_frac.get_part_by_tex("L"),
),
ReplacementTransform(
equation.get_part_by_tex("-"),
g_L_frac.get_part_by_tex("-"),
),
ReplacementTransform(
equation.get_part_by_tex("g"),
g_L_frac.get_part_by_tex("g"),
),
Write(g_L_frac.get_part_by_tex("\\over")),
rhs[2:].next_to, g_L_frac, RIGHT, {"buff": SMALL_BUFF},
)
self.wait()
self.play(
GrowFromCenter(mu_term),
VGroup(eq2, second_deriv_L_form[1:]).next_to,
mu_term, LEFT,
)
self.play(
GrowFromCenter(mu_brace),
FadeInFromDown(mu_word),
)
def show_acceleration_at_different_angles(self):
to_fade = VGroup(
self.g_cos_label,
self.g_vect.perp,
)
        new_comp_line_style = {
            "stroke_width": 0.5,
            "stroke_opacity": 0.25,
        }
        self.play(
            FadeOut(self.x_eq),
            to_fade.set_opacity, 0.25,
            self.g_vect.component_lines.set_style,
            new_comp_line_style
        )
        self.g_vect.component_lines.add_updater(
            lambda m: m.set_style(**new_comp_line_style)
        )
for mob in to_fade:
mob.add_updater(lambda m: m.set_opacity(0.25))
self.set_theta(0)
self.wait(2)
self.set_theta(89.9 * DEGREES, run_time=3)
self.wait(2)
self.set_theta(
60 * DEGREES,
FadeIn(self.x_eq),
run_time=2,
)
self.wait()
def ask_about_what_to_do(self):
g_vect = self.g_vect
g_sin_label = self.g_sin_label
angle = g_vect.tangent.get_angle()
angle = (angle - PI) % TAU
randy = You()
randy.to_corner(DL)
bubble = randy.get_bubble(
height=2,
width=3.5,
)
g_sin_copy = g_sin_label.copy()
g_sin_copy.remove(g_sin_copy[0])
g_sin_copy.generate_target()
g_sin_copy.target.scale(1 / 0.75)
g_sin_copy.target.rotate(-angle)
a_eq = TexMobject("a=")
thought_term = VGroup(a_eq, g_sin_copy.target)
thought_term.arrange(RIGHT, buff=SMALL_BUFF)
thought_term.move_to(bubble.get_bubble_center())
rect = SurroundingRectangle(g_sin_copy.target)
rect.rotate(angle)
rect.move_to(g_sin_label)
randy.save_state()
randy.fade(1)
self.play(randy.restore, randy.change, "pondering")
self.play(ShowCreationThenFadeOut(rect))
self.play(
ShowCreation(bubble),
Write(a_eq),
MoveToTarget(g_sin_copy),
randy.look_at, bubble,
)
thought_term.remove(g_sin_copy.target)
thought_term.add(g_sin_copy)
self.play(Blink(randy))
self.wait()
self.play(
ShowCreationThenDestruction(
thought_term.copy().set_style(
stroke_color=YELLOW,
stroke_width=2,
fill_opacity=0,
),
run_time=2,
lag_ratio=0.2,
),
randy.change, "confused", thought_term,
)
self.play(Blink(randy))
self.play(
FadeOut(randy),
FadeOut(bubble),
thought_term.next_to, self.pendulum, DOWN, LARGE_BUFF
)
        self.acceleration_equation = thought_term
def emphasize_theta(self):
pendulum = self.pendulum
self.play(FocusOn(pendulum.theta_label))
self.play(Indicate(pendulum.theta_label))
pendulum_copy = pendulum.deepcopy()
pendulum_copy.clear_updaters()
pendulum_copy.fade(1)
pendulum_copy.start_swinging()
def new_updater(p):
p.set_theta(pendulum_copy.get_theta())
pendulum.add_updater(new_updater)
self.add(pendulum_copy)
self.wait(5)
pendulum_copy.end_swinging()
self.remove(pendulum_copy)
pendulum.remove_updater(new_updater)
self.update_mobjects(0)
def show_angular_velocity(self):
pass
def show_angular_acceleration(self):
pass
def circle_g_sin_formula(self):
self.play(
ShowCreationThenFadeAround(
                self.acceleration_equation
)
)
#
def set_theta(self, value, *added_anims, **kwargs):
kwargs["run_time"] = kwargs.get("run_time", 2)
self.play(
self.theta_tracker.set_value, value,
*added_anims,
**kwargs,
)
class BuildUpEquation(Scene):
CONFIG = {
"tex_config": {
"tex_to_color_map": {
"{a}": YELLOW,
"{v}": RED,
"{x}": GREEN,
"\\theta": BLUE,
"{L}": WHITE,
}
}
}
def construct(self):
# self.add_center_line()
self.show_derivatives()
self.show_theta_double_dot_equation()
self.talk_about_sine_component()
self.add_air_resistance()
def add_center_line(self):
line = Line(UP, DOWN)
line.set_height(FRAME_HEIGHT)
line.set_stroke(WHITE, 1)
self.add(line)
def show_derivatives(self):
a_eq = TexMobject(
"{a}", "=", "{d{v} \\over dt}",
**self.tex_config,
)
v_eq = TexMobject(
"{v}", "=", "{d{x} \\over dt}",
**self.tex_config,
)
x_eq = TexMobject(
"{x} = {L} \\theta",
**self.tex_config,
)
eqs = VGroup(a_eq, v_eq, x_eq)
eqs.arrange(DOWN, buff=LARGE_BUFF)
eqs.to_corner(UL)
v_rhs = TexMobject(
"={L}{d\\theta \\over dt}",
"=", "{L}\\dot{\\theta}",
**self.tex_config,
)
v_rhs.next_to(v_eq, RIGHT, SMALL_BUFF)
v_rhs.shift(
UP * (v_eq[1].get_bottom()[1] - v_rhs[0].get_bottom()[1])
)
a_rhs = TexMobject(
"={L}{d", "\\dot{\\theta}", "\\over dt}",
"=", "{L}\\ddot{\\theta}",
**self.tex_config,
)
a_rhs.next_to(a_eq, RIGHT, SMALL_BUFF)
a_rhs.shift(
UP * (a_eq[1].get_bottom()[1] - a_rhs[0].get_bottom()[1])
)
# a_eq
self.play(Write(a_eq))
self.wait()
# v_eq
self.play(
TransformFromCopy(
a_eq.get_part_by_tex("{v}"),
v_eq.get_part_by_tex("{v}"),
)
)
self.play(TransformFromCopy(v_eq[:1], v_eq[1:]))
self.wait()
# x_eq
self.play(
TransformFromCopy(
v_eq.get_part_by_tex("{x}"),
x_eq.get_part_by_tex("{x}"),
)
)
self.play(Write(x_eq[1:]))
self.wait()
for tex in "L", "\\theta":
self.play(ShowCreationThenFadeAround(
x_eq.get_part_by_tex(tex)
))
self.wait()
# v_rhs
self.play(*[
TransformFromCopy(
x_eq.get_part_by_tex(tex),
v_rhs.get_part_by_tex(tex),
)
for tex in ("=", "{L}", "\\theta")
])
self.play(
TransformFromCopy(v_eq[-3], v_rhs[2]),
TransformFromCopy(v_eq[-1], v_rhs[4]),
)
self.wait()
self.play(
Write(v_rhs[-5]),
TransformFromCopy(*v_rhs.get_parts_by_tex("{L}")),
TransformFromCopy(v_rhs[3:4], v_rhs[-3:])
)
self.wait()
self.play(ShowCreationThenFadeAround(v_rhs[2:4]))
self.play(ShowCreationThenFadeAround(v_rhs[4]))
self.wait()
# a_rhs
self.play(*[
TransformFromCopy(
v_rhs.get_parts_by_tex(tex)[-1],
a_rhs.get_part_by_tex(tex),
)
for tex in ("=", "{L}", "\\theta", "\\dot")
])
self.play(
TransformFromCopy(a_eq[-3], a_rhs[2]),
TransformFromCopy(a_eq[-1], a_rhs[6]),
)
self.wait()
self.play(
Write(a_rhs[-5]),
TransformFromCopy(*a_rhs.get_parts_by_tex("{L}")),
TransformFromCopy(a_rhs[3:4], a_rhs[-3:]),
)
self.wait()
self.equations = VGroup(
a_eq, v_eq, x_eq,
v_rhs, a_rhs,
)
def show_theta_double_dot_equation(self):
equations = self.equations
a_deriv = equations[0]
a_rhs = equations[-1][-5:].copy()
shift_vect = 1.5 * DOWN
equals = TexMobject("=")
equals.rotate(90 * DEGREES)
equals.next_to(a_deriv[0], UP, MED_LARGE_BUFF)
g_sin_eq = TexMobject(
"-", "g", "\\sin", "(", "\\theta", ")",
**self.tex_config,
)
g_sin_eq.next_to(
equals, UP,
buff=MED_LARGE_BUFF,
aligned_edge=LEFT,
)
g_sin_eq.to_edge(LEFT)
g_sin_eq.shift(shift_vect)
shift_vect += (
g_sin_eq[1].get_center() -
a_deriv[0].get_center()
)[0] * RIGHT
equals.shift(shift_vect)
a_rhs.shift(shift_vect)
self.play(
equations.shift, shift_vect,
Write(equals),
GrowFromPoint(
g_sin_eq, 2 * RIGHT + 3 * DOWN
)
)
self.wait()
self.play(
a_rhs.next_to, g_sin_eq, RIGHT,
a_rhs.shift, SMALL_BUFF * UP,
)
self.wait()
# Fade equations
self.play(
FadeOut(equals),
equations.shift, DOWN,
equations.fade, 0.5,
)
# Rotate sides
equals, L, ddot, theta, junk = a_rhs
L_dd_theta = VGroup(L, ddot, theta)
minus, g, sin, lp, theta2, rp = g_sin_eq
m2, g2, over, L2 = frac = TexMobject("-", "{g", "\\over", "L}")
frac.next_to(equals, RIGHT)
self.play(
L_dd_theta.next_to, equals, LEFT,
L_dd_theta.shift, SMALL_BUFF * UP,
g_sin_eq.next_to, equals, RIGHT,
path_arc=PI / 2,
)
self.play(
ReplacementTransform(g, g2),
ReplacementTransform(minus, m2),
ReplacementTransform(L, L2),
Write(over),
g_sin_eq[2:].next_to, over, RIGHT, SMALL_BUFF,
)
self.wait()
# Surround
rect = SurroundingRectangle(VGroup(g_sin_eq, frac, ddot))
rect.stretch(1.1, 0)
dashed_rect = DashedVMobject(
rect, num_dashes=50, positive_space_ratio=1,
)
dashed_rect.shuffle()
dashed_rect.save_state()
dashed_rect.space_out_submobjects(1.1)
for piece in dashed_rect:
piece.rotate(90 * DEGREES)
dashed_rect.fade(1)
self.play(Restore(dashed_rect, lag_ratio=0.05))
dashed_rect.generate_target()
dashed_rect.target.space_out_submobjects(0.9)
dashed_rect.target.fade(1)
for piece in dashed_rect.target:
piece.rotate(90 * DEGREES)
self.play(MoveToTarget(
dashed_rect,
lag_ratio=0.05,
remover=True
))
self.wait()
self.main_equation = VGroup(
ddot, theta, equals,
m2, L2, over, g2,
sin, lp, theta2, rp,
)
def talk_about_sine_component(self):
main_equation = self.main_equation
gL_part = main_equation[4:7]
sin_part = main_equation[7:]
sin = sin_part[0]
morty = Mortimer(height=1.5)
morty.next_to(sin, DR, buff=LARGE_BUFF)
morty.add_updater(lambda m: m.look_at(sin))
self.play(ShowCreationThenFadeAround(gL_part))
self.wait()
self.play(ShowCreationThenFadeAround(sin_part))
self.wait()
self.play(FadeIn(morty))
sin.save_state()
self.play(
morty.change, "angry",
sin.next_to, morty, LEFT, {"aligned_edge": UP},
)
self.play(Blink(morty))
morty.clear_updaters()
self.play(
morty.change, "concerned_musician",
morty.look, DR,
)
self.play(Restore(sin))
self.play(FadeOut(morty))
self.wait()
# Emphasize theta as input
theta = sin_part[2]
arrow = Vector(0.5 * UP, color=WHITE)
arrow.next_to(theta, DOWN, SMALL_BUFF)
word = TextMobject("Input")
word.next_to(arrow, DOWN)
self.play(
FadeInFrom(word, UP),
GrowArrow(arrow)
)
self.play(
ShowCreationThenDestruction(
theta.copy().set_style(
fill_opacity=0,
stroke_width=2,
stroke_color=YELLOW,
),
lag_ratio=0.1,
)
)
self.play(FadeOut(arrow), FadeOut(word))
def add_air_resistance(self):
main_equation = self.main_equation
tdd_eq = main_equation[:3]
rhs = main_equation[3:]
new_term = TexMobject(
"-", "\\mu", "\\dot{", "\\theta}",
)
new_term.set_color_by_tex("\\theta", BLUE)
new_term.move_to(main_equation)
new_term.shift(0.5 * SMALL_BUFF * UP)
new_term[0].align_to(rhs[0], UP)
brace = Brace(new_term, DOWN)
words = brace.get_text("Air resistance")
self.play(
FadeInFromDown(new_term),
tdd_eq.next_to, new_term, LEFT,
tdd_eq.align_to, tdd_eq, UP,
rhs.next_to, new_term, RIGHT,
rhs.align_to, rhs, UP,
)
self.play(
GrowFromCenter(brace),
Write(words)
)
self.wait()
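# Symbolic spot-check of the chain this scene builds: x = L*theta gives
# v = L*theta-dot and a = L*theta-double-dot, and setting a = -g sin(theta)
# then yields theta'' = -(g/L) sin(theta), with -mu*theta' added for drag.
# Sketch only; it assumes sympy is available (sympy is not used elsewhere here).
def check_arc_length_derivatives():
    """Verify d(L*theta)/dt = L*theta' and d^2(L*theta)/dt^2 = L*theta''."""
    import sympy as sp
    t, L = sp.symbols("t L", positive=True)
    theta = sp.Function("theta")(t)
    x = L * theta
    v = sp.diff(x, t)       # L * theta'
    a = sp.diff(x, t, 2)    # L * theta''
    assert v == L * sp.diff(theta, t)
    assert a == L * sp.diff(theta, t, 2)
    return v, a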
class SimpleDampenedPendulum(Scene):
def construct(self):
pendulum = Pendulum(
top_point=ORIGIN,
initial_theta=150 * DEGREES,
mu=0.5,
)
self.add(pendulum)
pendulum.start_swinging()
self.wait(20)
class NewSceneName(Scene):
def construct(self):
pass
| 29.242744
| 73
| 0.531625
|
a56e2c67e428411ddde467b6e6ea0c3c8e69d1e3
| 2,580
|
py
|
Python
|
(35)Search_Insert_Position.py
|
ElnuraMusaoglu/LeetCodeQuestions
|
ce13a2a790d77d1f17f7874beebe3c78d6064cc2
|
[
"MIT"
] | null | null | null |
(35)Search_Insert_Position.py
|
ElnuraMusaoglu/LeetCodeQuestions
|
ce13a2a790d77d1f17f7874beebe3c78d6064cc2
|
[
"MIT"
] | null | null | null |
(35)Search_Insert_Position.py
|
ElnuraMusaoglu/LeetCodeQuestions
|
ce13a2a790d77d1f17f7874beebe3c78d6064cc2
|
[
"MIT"
] | null | null | null |
'''
Given a sorted array of distinct integers and a target value, return the index if the target is found. If not, return the index where it would be if it were inserted in order.
You must write an algorithm with O(log n) runtime complexity.
Example 1:
Input: nums = [1,3,5,6], target = 5
Output: 2
Example 2:
Input: nums = [1,3,5,6], target = 2
Output: 1
Example 3:
Input: nums = [1,3,5,6], target = 7
Output: 4
Example 4:
Input: nums = [1,3,5,6], target = 0
Output: 0
Example 5:
Input: nums = [1], target = 0
Output: 0
Constraints:
1 <= nums.length <= 10^4
-10^4 <= nums[i] <= 10^4
nums contains distinct values sorted in ascending order.
-10^4 <= target <= 10^4
Runtime: 52 ms, faster than 46.38% of Python3 online submissions for Search Insert Position.
Memory Usage: 15.1 MB, less than 52.45% of Python3 online submissions for Search Insert Position.
'''
from typing import List
class Solution:
def searchInsert(self, nums, target):
left = 0
right = int(len(nums)) - 1
while left <= right:
middle = (left + right) // 2
if target == nums[middle]:
return middle
elif target < nums[middle]: # Search in left side
right = middle - 1
else: # Search in right side
left = middle + 1
return left
    def searchInsert2(self, nums: List[int], target: int) -> int:
        # Alternative: handle the edge cases, narrow to one half, then scan.
        # Note this is O(n) in the worst case, unlike the binary search above.
        if len(nums) == 0:
            return 0
        if target > nums[-1]:
            return len(nums)
        if len(nums) == 1:
            return 0 if target <= nums[0] else 1
        mid = len(nums) // 2
        if target >= nums[mid]:
            start, finish = mid, len(nums)
        else:
            start, finish = 0, mid
        for i in range(start, finish):
            if nums[i] >= target:
                return i
        return finish
print(Solution().searchInsert([1, 3], 2)) # 1
print(Solution().searchInsert([1, 3], 1)) # 0
print(Solution().searchInsert([1, 3, 5, 6], 5)) # 2
print(Solution().searchInsert([1, 3, 5, 6], 2)) # 1
print(Solution().searchInsert([1, 3, 5], 4)) # 2
print(Solution().searchInsert([1, 3, 5, 6], 7)) # 4
print(Solution().searchInsert([1, 3, 5, 6], 0)) # 0
print(Solution().searchInsert([1], 0)) # 0
print(Solution().searchInsert([1], 2)) # 1
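# Cross-check (not part of the submission): Python's bisect_left computes the
# same leftmost insertion point, so it should agree with searchInsert above on
# every example from the prompt.
import bisect
for nums, target in [([1, 3, 5, 6], 5), ([1, 3, 5, 6], 2), ([1, 3, 5, 6], 7),
                     ([1, 3, 5, 6], 0), ([1], 0), ([1], 2)]:
    assert Solution().searchInsert(nums, target) == bisect.bisect_left(nums, target)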
| 28.351648
| 175
| 0.551163
|
5ea10a37e7917087a99abf9e1efa44eb9332fbaf
| 3,095
|
py
|
Python
|
tests/test_client.py
|
BruceNUAA/mqttools
|
b3c4bb9db4a42eea74f90f343ee4ee7f6f932d12
|
[
"MIT"
] | null | null | null |
tests/test_client.py
|
BruceNUAA/mqttools
|
b3c4bb9db4a42eea74f90f343ee4ee7f6f932d12
|
[
"MIT"
] | null | null | null |
tests/test_client.py
|
BruceNUAA/mqttools
|
b3c4bb9db4a42eea74f90f343ee4ee7f6f932d12
|
[
"MIT"
] | null | null | null |
import logging
import asyncio
import unittest
import mqttools
async def broker_main(listener):
async with listener:
try:
await listener.serve_forever()
except asyncio.CancelledError:
pass
class ClientTest(unittest.TestCase):
def test_connack_timeout(self):
asyncio.run(self.connack_timeout())
async def connack_timeout(self):
def on_client_connected(reader, writer):
pass
listener = await asyncio.start_server(on_client_connected, 'localhost', 0)
async def client_main():
client = mqttools.Client(*listener.sockets[0].getsockname(),
'connack',
response_timeout=0.1,
connect_delays=[],
topic_alias_maximum=0)
with self.assertRaises(mqttools.TimeoutError):
await client.start()
listener.close()
await asyncio.wait_for(
asyncio.gather(broker_main(listener), client_main()), 1)
def test_subscribe_timeout(self):
asyncio.run(self.subscribe_timeout())
async def subscribe_timeout(self):
def on_client_connected(reader, writer):
# CONNACK
writer.write(b'\x20\x03\x00\x00\x00')
listener = await asyncio.start_server(on_client_connected, 'localhost', 0)
async def client_main():
client = mqttools.Client(*listener.sockets[0].getsockname(),
'suback',
response_timeout=0.1,
topic_alias_maximum=0)
await client.start()
with self.assertRaises(mqttools.TimeoutError):
await client.subscribe('/foo')
listener.close()
await asyncio.wait_for(
asyncio.gather(broker_main(listener), client_main()), 1)
def test_unsubscribe_timeout(self):
asyncio.run(self.unsubscribe_timeout())
async def unsubscribe_timeout(self):
def on_client_connected(reader, writer):
# CONNACK
writer.write(b'\x20\x03\x00\x00\x00')
listener = await asyncio.start_server(on_client_connected, 'localhost', 0)
async def client_main():
client = mqttools.Client(*listener.sockets[0].getsockname(),
'unsuback',
response_timeout=0.1,
topic_alias_maximum=0)
await client.start()
with self.assertRaises(mqttools.TimeoutError):
await client.unsubscribe('/foo')
listener.close()
await asyncio.wait_for(
asyncio.gather(broker_main(listener), client_main()), 1)
def test_client_id(self):
client = mqttools.Client('localhost', 0)
self.assertEqual(client.client_id[:9], 'mqttools-')
logging.basicConfig(level=logging.DEBUG)
if __name__ == '__main__':
unittest.main()
| 30.048544
| 82
| 0.569305
|
fe0299a57ca97e2898525ea9c5fc1192719a183d
| 965
|
py
|
Python
|
web/prepare/dump_kv.py
|
Ahacad/YYeTsBot
|
423d7d29f248e9f181378028dafd23903816c887
|
[
"MIT"
] | 5
|
2021-03-04T03:44:50.000Z
|
2021-08-09T01:19:09.000Z
|
web/prepare/dump_kv.py
|
sekkit/YYeTsBot
|
bc7096f5c2371f0654c20188c6b0ecfb662b2e22
|
[
"MIT"
] | 17
|
2021-03-07T03:08:18.000Z
|
2021-05-31T09:14:34.000Z
|
web/prepare/dump_kv.py
|
sekkit/YYeTsBot
|
bc7096f5c2371f0654c20188c6b0ecfb662b2e22
|
[
"MIT"
] | 3
|
2021-06-19T11:03:14.000Z
|
2021-09-07T16:15:06.000Z
|
#!/usr/local/bin/python3
# coding: utf-8
# YYeTsBot - dump_kv.py
# 2/6/21 18:12
#
__author__ = "Benny <benny.think@gmail.com>"
import threading
import requests
import json
from concurrent.futures.thread import ThreadPoolExecutor
s = requests.Session()
with open("index.json", ) as f:
ids = json.load(f)
chunk = [ids[x:x + 3000] for x in range(0, len(ids), 3000)]
def download(c):
print("running batch ", c[0])
for i in c:
data = s.get("https://yyets.dmesg.app/id={}".format(i)).json()
with open(f"{i}.json", "w") as f:
json.dump(data, f)
if __name__ == '__main__':
threads = []
for part in chunk:
# Create 9 threads counting 10-19, 20-29, ... 90-99.
thread = threading.Thread(target=download, args=(part,))
threads.append(thread)
# Start them all
for thread in threads:
thread.start()
# Wait for all to complete
for thread in threads:
thread.join()
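# Note: ThreadPoolExecutor is imported above but the script spawns one raw
# Thread per 3000-id chunk. A bounded-pool variant of the same download loop
# could look like the sketch below (it reuses the session, json module and
# endpoint already defined above, and is deliberately not invoked here):
def download_with_pool(id_list, max_workers=8):
    def download_one(i):
        data = s.get("https://yyets.dmesg.app/id={}".format(i)).json()
        with open(f"{i}.json", "w") as fp:
            json.dump(data, fp)
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        list(pool.map(download_one, id_list))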
| 21.444444
| 70
| 0.613472
|
7db267904f8a5da0de76922c73db45bfdca60b1d
| 572
|
py
|
Python
|
setup.py
|
LairdCP/weblcm-python
|
ccaa554391ad4f993e3c01929bff252ca48991fb
|
[
"0BSD"
] | null | null | null |
setup.py
|
LairdCP/weblcm-python
|
ccaa554391ad4f993e3c01929bff252ca48991fb
|
[
"0BSD"
] | null | null | null |
setup.py
|
LairdCP/weblcm-python
|
ccaa554391ad4f993e3c01929bff252ca48991fb
|
[
"0BSD"
] | null | null | null |
#!/usr/bin/python
from setuptools import setup
import os
environment_variable_value = os.environ.get('WEBLCM_PYTHON_EXTRA_MODULES', '')
if len(environment_variable_value) > 0:
extra_modules = [s.strip() for s in environment_variable_value.split()]
else:
extra_modules = []
setup(
name='weblcm-python',
version='1.0',
py_modules=[
'__main__', 'weblcm_network', 'weblcm_log', 'weblcm_def', 'weblcm_swupdate',
'weblcm_users', 'weblcm_files', 'weblcm_advanced', 'weblcm_network_status',
'weblcm_settings', 'weblcm_datetime', 'weblcm_modem'
] + extra_modules
)
| 26
| 78
| 0.746503
|
5dbe4bb26e1e79f3dbab3aee373b09c484981c8e
| 3,624
|
py
|
Python
|
aii/components/design.py
|
Tranquangdai/Auto-Image-Improvements
|
a5e5b5a57f76b1c95c83fbf7650e2eb40325e6cc
|
[
"MIT"
] | 2
|
2020-08-03T02:36:14.000Z
|
2020-08-03T03:09:48.000Z
|
aii/components/design.py
|
Tranquangdai/Auto-Image-Improvements
|
a5e5b5a57f76b1c95c83fbf7650e2eb40325e6cc
|
[
"MIT"
] | null | null | null |
aii/components/design.py
|
Tranquangdai/Auto-Image-Improvements
|
a5e5b5a57f76b1c95c83fbf7650e2eb40325e6cc
|
[
"MIT"
] | null | null | null |
import json
import os
import uuid
from os.path import exists
import numpy as np
from PIL import Image
from tqdm import trange
from aii.utils import array_to_PIL
from .background import BackGround
from .collect import PatchCollect, SignCollect
from .segment import Segment
class Designer:
def __init__(self, cfg):
self.pac = PatchCollect(cfg['patch'])
self.sc = SignCollect(cfg['sign'])
self.lgc = SignCollect(cfg['logo'])
self.pl = BackGround(cfg['background'])
self.sg = Segment(cfg['product'])
self._init_container()
def _init_container(self):
self.images = []
self.annotations = []
self.dataset = {
'images': self.images,
'categories': [
{
"supercategory": "product",
"id": 1,
"name": "product"
}
],
'annotations': self.annotations,
}
@staticmethod
def convert_xyxy_to_xywh(box):
x_min, y_min, x_max, y_max = box
return [x_min, y_min, x_max - x_min, y_max - y_min]
def from_polygon(self, polygons):
tmp = dict()
tmp['segmentation'] = [(p.flatten() + 0.5).tolist()
for p in polygons.points]
tmp['iscrowd'] = 0
tmp['category_id'] = 1
tmp['id'] = 1
tmp['area'] = self.compute_area(polygons)
tmp['bbox'] = self.convert_xyxy_to_xywh(
[float(i) for i in polygons.bbox()])
tmp['bbox_mode'] = 0
return tmp
def draw(self):
try:
self.pl.place_banner(self.pac.sample_collect())
self.pl.place_sign(self.sc.sample_collect())
self.pl.place_sign(self.lgc.sample_collect())
bg = np.array(self.pl.bg)
img, polygons = self.sg.sample_collect(bg)
return img, polygons
        except Exception:
            # If any collect/segment step fails for this sample, skip it.
            return None, None
def generate(self, dirname, steps=10):
if not exists(dirname):
os.makedirs(dirname)
counter = 0
for _ in trange(steps):
img, polygon = self.draw()
if img is not None:
polygon_info = self.from_polygon(polygon)
image_info = self.gen_image_info(dirname)
polygon_info['image_id'] = image_info['id']
polygon_info['id'] = counter
counter += 1
array_to_PIL(img).convert('RGB').save(
'{}/{}'.format(dirname, image_info['file_name']))
self.images.append(image_info)
self.annotations.append(polygon_info)
@staticmethod
def gen_image_info(dirname):
id_ = str(uuid.uuid4().int)[:20]
path = '{}.jpg'.format(id_)
return {
'height': 1000,
'width': 1000,
'id': id_,
'file_name': path,
}
def save(self, dirname):
if not exists(dirname):
os.makedirs(dirname)
rand_id = str(uuid.uuid4().int)[:20]
path = '{}/{}.jpg'.format(dirname, rand_id)
self.pl.bg.convert('RGB').save(path)
self.annot.append((path, [self.pl.bbox]))
@staticmethod
def compute_area(polygons):
contour = np.array(polygons.points[0])
x = contour[:, 0]
y = contour[:, 1]
area = 0.5 * np.abs(np.dot(x, np.roll(y, 1)) -
np.dot(y, np.roll(x, 1)))
return area
def dump_annotations(self, filename):
with open(filename, 'w') as f:
json.dump(self.dataset, f)
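# compute_area above is the shoelace formula: half the absolute sum of cross
# products of consecutive vertices, which np.roll writes compactly as
# 0.5 * |x . roll(y, 1) - y . roll(x, 1)|. Standalone sanity-check sketch
# (not used by the Designer class):
def shoelace_area(points):
    """Area of a simple polygon given as an ordered list of (x, y) vertices."""
    import numpy as np
    pts = np.asarray(points, dtype=float)
    x, y = pts[:, 0], pts[:, 1]
    return 0.5 * abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
# e.g. shoelace_area([(0, 0), (4, 0), (4, 3), (0, 3)]) == 12.0 for a 4x3 rectangle.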
| 29.950413
| 69
| 0.533113
|
9124f80a8559562f017e96a59233249fcb32c253
| 13,976
|
py
|
Python
|
packages/pytea/pytest/benchmarks/transformers/missing_idx/src/transformers/commands/user.py
|
lego0901/pytea
|
8ede650def2e68f4610ba816451d8b9e28f09f76
|
[
"MIT"
] | 96
|
2021-06-16T09:06:52.000Z
|
2022-03-26T09:56:32.000Z
|
packages/pytea/pytest/benchmarks/transformers/missing_idx/src/transformers/commands/user.py
|
lego0901/pytea
|
8ede650def2e68f4610ba816451d8b9e28f09f76
|
[
"MIT"
] | 16
|
2021-07-01T05:34:48.000Z
|
2022-03-28T09:40:15.000Z
|
packages/pytea/pytest/benchmarks/transformers/missing_idx/src/transformers/commands/user.py
|
lego0901/pytea
|
8ede650def2e68f4610ba816451d8b9e28f09f76
|
[
"MIT"
] | 24
|
2021-06-19T15:58:31.000Z
|
2022-03-14T09:17:19.000Z
|
import os
import subprocess
import sys
from argparse import ArgumentParser
from getpass import getpass
from typing import List, Union
from requests.exceptions import HTTPError
from transformers.commands import BaseTransformersCLICommand
from transformers.hf_api import HfApi, HfFolder
UPLOAD_MAX_FILES = 15
class UserCommands(BaseTransformersCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
login_parser = parser.add_parser("login", help="Log in using the same credentials as on huggingface.co")
login_parser.set_defaults(func=lambda args: LoginCommand(args))
whoami_parser = parser.add_parser("whoami", help="Find out which huggingface.co account you are logged in as.")
whoami_parser.set_defaults(func=lambda args: WhoamiCommand(args))
logout_parser = parser.add_parser("logout", help="Log out")
logout_parser.set_defaults(func=lambda args: LogoutCommand(args))
# s3_datasets (s3-based system)
s3_parser = parser.add_parser(
"s3_datasets", help="{ls, rm} Commands to interact with the files you upload on S3."
)
s3_subparsers = s3_parser.add_subparsers(help="s3 related commands")
ls_parser = s3_subparsers.add_parser("ls")
ls_parser.add_argument("--organization", type=str, help="Optional: organization namespace.")
ls_parser.set_defaults(func=lambda args: ListObjsCommand(args))
rm_parser = s3_subparsers.add_parser("rm")
rm_parser.add_argument("filename", type=str, help="individual object filename to delete from S3.")
rm_parser.add_argument("--organization", type=str, help="Optional: organization namespace.")
rm_parser.set_defaults(func=lambda args: DeleteObjCommand(args))
upload_parser = s3_subparsers.add_parser("upload", help="Upload a file to S3.")
upload_parser.add_argument("path", type=str, help="Local path of the folder or individual file to upload.")
upload_parser.add_argument("--organization", type=str, help="Optional: organization namespace.")
upload_parser.add_argument(
"--filename", type=str, default=None, help="Optional: override individual object filename on S3."
)
upload_parser.add_argument("-y", "--yes", action="store_true", help="Optional: answer Yes to the prompt")
upload_parser.set_defaults(func=lambda args: UploadCommand(args))
# deprecated model upload
upload_parser = parser.add_parser(
"upload",
help=(
"Deprecated: used to be the way to upload a model to S3."
" We now use a git-based system for storing models and other artifacts."
" Use the `repo create` command instead."
),
)
upload_parser.set_defaults(func=lambda args: DeprecatedUploadCommand(args))
# new system: git-based repo system
repo_parser = parser.add_parser(
"repo", help="{create, ls-files} Commands to interact with your huggingface.co repos."
)
repo_subparsers = repo_parser.add_subparsers(help="huggingface.co repos related commands")
ls_parser = repo_subparsers.add_parser("ls-files", help="List all your files on huggingface.co")
ls_parser.add_argument("--organization", type=str, help="Optional: organization namespace.")
ls_parser.set_defaults(func=lambda args: ListReposObjsCommand(args))
repo_create_parser = repo_subparsers.add_parser("create", help="Create a new repo on huggingface.co")
repo_create_parser.add_argument(
"name",
type=str,
help="Name for your model's repo. Will be namespaced under your username to build the model id.",
)
repo_create_parser.add_argument("--organization", type=str, help="Optional: organization namespace.")
repo_create_parser.add_argument("-y", "--yes", action="store_true", help="Optional: answer Yes to the prompt")
repo_create_parser.set_defaults(func=lambda args: RepoCreateCommand(args))
class ANSI:
"""
Helper for en.wikipedia.org/wiki/ANSI_escape_code
"""
_bold = "\u001b[1m"
_red = "\u001b[31m"
_gray = "\u001b[90m"
_reset = "\u001b[0m"
@classmethod
def bold(cls, s):
return "{}{}{}".format(cls._bold, s, cls._reset)
@classmethod
def red(cls, s):
return "{}{}{}".format(cls._bold + cls._red, s, cls._reset)
@classmethod
def gray(cls, s):
return "{}{}{}".format(cls._gray, s, cls._reset)
def tabulate(rows: List[List[Union[str, int]]], headers: List[str]) -> str:
"""
Inspired by:
- stackoverflow.com/a/8356620/593036
- stackoverflow.com/questions/9535954/printing-lists-as-tabular-data
"""
col_widths = [max(len(str(x)) for x in col) for col in zip(*rows, headers)]
row_format = ("{{:{}}} " * len(headers)).format(*col_widths)
lines = []
lines.append(row_format.format(*headers))
lines.append(row_format.format(*["-" * w for w in col_widths]))
for row in rows:
lines.append(row_format.format(*row))
return "\n".join(lines)
class BaseUserCommand:
def __init__(self, args):
self.args = args
self._api = HfApi()
class LoginCommand(BaseUserCommand):
def run(self):
print( # docstyle-ignore
"""
_| _| _| _| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _|_|_|_| _|_| _|_|_| _|_|_|_|
_| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _|
_|_|_|_| _| _| _| _|_| _| _|_| _| _| _| _| _| _|_| _|_|_| _|_|_|_| _| _|_|_|
_| _| _| _| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _|
_| _| _|_| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _| _| _| _|_|_| _|_|_|_|
"""
)
username = input("Username: ")
password = getpass()
try:
token = self._api.login(username, password)
except HTTPError as e:
# probably invalid credentials, display error message.
print(e)
print(ANSI.red(e.response.text))
exit(1)
HfFolder.save_token(token)
print("Login successful")
print("Your token:", token, "\n")
print("Your token has been saved to", HfFolder.path_token)
class WhoamiCommand(BaseUserCommand):
def run(self):
token = HfFolder.get_token()
if token is None:
print("Not logged in")
exit()
try:
user, orgs = self._api.whoami(token)
print(user)
if orgs:
print(ANSI.bold("orgs: "), ",".join(orgs))
except HTTPError as e:
print(e)
print(ANSI.red(e.response.text))
exit(1)
class LogoutCommand(BaseUserCommand):
def run(self):
token = HfFolder.get_token()
if token is None:
print("Not logged in")
exit()
HfFolder.delete_token()
self._api.logout(token)
print("Successfully logged out.")
class ListObjsCommand(BaseUserCommand):
def run(self):
token = HfFolder.get_token()
if token is None:
print("Not logged in")
exit(1)
try:
objs = self._api.list_objs(token, organization=self.args.organization)
except HTTPError as e:
print(e)
print(ANSI.red(e.response.text))
exit(1)
if len(objs) == 0:
print("No shared file yet")
exit()
rows = [[obj.filename, obj.LastModified, obj.ETag, obj.Size] for obj in objs]
print(tabulate(rows, headers=["Filename", "LastModified", "ETag", "Size"]))
class DeleteObjCommand(BaseUserCommand):
def run(self):
token = HfFolder.get_token()
if token is None:
print("Not logged in")
exit(1)
try:
self._api.delete_obj(token, filename=self.args.filename, organization=self.args.organization)
except HTTPError as e:
print(e)
print(ANSI.red(e.response.text))
exit(1)
print("Done")
class ListReposObjsCommand(BaseUserCommand):
def run(self):
token = HfFolder.get_token()
if token is None:
print("Not logged in")
exit(1)
try:
objs = self._api.list_repos_objs(token, organization=self.args.organization)
except HTTPError as e:
print(e)
print(ANSI.red(e.response.text))
exit(1)
if len(objs) == 0:
print("No shared file yet")
exit()
rows = [[obj.filename, obj.lastModified, obj.commit, obj.size] for obj in objs]
print(tabulate(rows, headers=["Filename", "LastModified", "Commit-Sha", "Size"]))
class RepoCreateCommand(BaseUserCommand):
def run(self):
token = HfFolder.get_token()
if token is None:
print("Not logged in")
exit(1)
try:
stdout = subprocess.check_output(["git", "--version"]).decode("utf-8")
print(ANSI.gray(stdout.strip()))
except FileNotFoundError:
print("Looks like you do not have git installed, please install.")
try:
stdout = subprocess.check_output(["git-lfs", "--version"]).decode("utf-8")
print(ANSI.gray(stdout.strip()))
except FileNotFoundError:
print(
ANSI.red(
"Looks like you do not have git-lfs installed, please install."
" You can install from https://git-lfs.github.com/."
" Then run `git lfs install` (you only have to do this once)."
)
)
print("")
user, _ = self._api.whoami(token)
namespace = self.args.organization if self.args.organization is not None else user
print("You are about to create {}".format(ANSI.bold(namespace + "/" + self.args.name)))
if not self.args.yes:
choice = input("Proceed? [Y/n] ").lower()
if not (choice == "" or choice == "y" or choice == "yes"):
print("Abort")
exit()
try:
url = self._api.create_repo(token, name=self.args.name, organization=self.args.organization)
except HTTPError as e:
print(e)
print(ANSI.red(e.response.text))
exit(1)
print("\nYour repo now lives at:")
print(" {}".format(ANSI.bold(url)))
print("\nYou can clone it locally with the command below," " and commit/push as usual.")
print(f"\n git clone {url}")
print("")
class DeprecatedUploadCommand(BaseUserCommand):
def run(self):
print(
ANSI.red(
"Deprecated: used to be the way to upload a model to S3."
" We now use a git-based system for storing models and other artifacts."
" Use the `repo create` command instead."
)
)
exit(1)
class UploadCommand(BaseUserCommand):
def walk_dir(self, rel_path):
"""
Recursively list all files in a folder.
"""
entries: List[os.DirEntry] = list(os.scandir(rel_path))
files = [(os.path.join(os.getcwd(), f.path), f.path) for f in entries if f.is_file()] # (filepath, filename)
for f in entries:
if f.is_dir():
files += self.walk_dir(f.path)
return files
def run(self):
token = HfFolder.get_token()
if token is None:
print("Not logged in")
exit(1)
local_path = os.path.abspath(self.args.path)
if os.path.isdir(local_path):
if self.args.filename is not None:
raise ValueError("Cannot specify a filename override when uploading a folder.")
rel_path = os.path.basename(local_path)
files = self.walk_dir(rel_path)
elif os.path.isfile(local_path):
filename = self.args.filename if self.args.filename is not None else os.path.basename(local_path)
files = [(local_path, filename)]
else:
raise ValueError("Not a valid file or directory: {}".format(local_path))
if sys.platform == "win32":
files = [(filepath, filename.replace(os.sep, "/")) for filepath, filename in files]
if len(files) > UPLOAD_MAX_FILES:
print(
"About to upload {} files to S3. This is probably wrong. Please filter files before uploading.".format(
ANSI.bold(len(files))
)
)
exit(1)
user, _ = self._api.whoami(token)
namespace = self.args.organization if self.args.organization is not None else user
for filepath, filename in files:
print(
"About to upload file {} to S3 under filename {} and namespace {}".format(
ANSI.bold(filepath), ANSI.bold(filename), ANSI.bold(namespace)
)
)
if not self.args.yes:
choice = input("Proceed? [Y/n] ").lower()
if not (choice == "" or choice == "y" or choice == "yes"):
print("Abort")
exit()
print(ANSI.bold("Uploading... This might take a while if files are large"))
for filepath, filename in files:
try:
access_url = self._api.presign_and_upload(
token=token, filename=filename, filepath=filepath, organization=self.args.organization
)
except HTTPError as e:
print(e)
print(ANSI.red(e.response.text))
exit(1)
print("Your file now lives at:")
print(access_url)
| 39.258427
| 120
| 0.58028
|
5806442cfbe53423fba65e091345627971617fb5
| 42
|
py
|
Python
|
je_auto_control/utils/image/__init__.py
|
JE-Chen/Python_JEAutoControl
|
477bf9612e28e9ab6d0a8e269db2f699e50a3744
|
[
"MIT"
] | 9
|
2020-10-12T06:33:36.000Z
|
2021-09-13T07:07:36.000Z
|
je_auto_control/utils/image/__init__.py
|
JE-Chen/Python_JEAutoControl
|
477bf9612e28e9ab6d0a8e269db2f699e50a3744
|
[
"MIT"
] | null | null | null |
je_auto_control/utils/image/__init__.py
|
JE-Chen/Python_JEAutoControl
|
477bf9612e28e9ab6d0a8e269db2f699e50a3744
|
[
"MIT"
] | null | null | null |
from je_auto_control.utils.image import *
| avg_line_length: 21 | max_line_length: 41 | alphanum_fraction: 0.833333 |

| hexsha: e3509efd6eb45f4fd020c24bc56279513b6f8865 | size: 5,563 | ext: py | lang: Python |
| max_stars_repo: configs/swin/cascade_mask_rcnn_swin_tiny_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_3x_coco_retrograde.py | vampireacen/NonMotor | 8a144793ffc0211494b86aefe6201a7466a5c03f | ["Apache-2.0"] | stars: null | null | null |
| max_issues_repo: configs/swin/cascade_mask_rcnn_swin_tiny_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_3x_coco_retrograde.py | vampireacen/NonMotor | 8a144793ffc0211494b86aefe6201a7466a5c03f | ["Apache-2.0"] | issues: null | null | null |
| max_forks_repo: configs/swin/cascade_mask_rcnn_swin_tiny_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_3x_coco_retrograde.py | vampireacen/NonMotor | 8a144793ffc0211494b86aefe6201a7466a5c03f | ["Apache-2.0"] | forks: null | null | null |
_base_ = [
'../_base_/models/cascade_mask_rcnn_swin_fpn_retrograde.py',
'../_base_/datasets/coco_detection_retrograde.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime_3.py'
]
model = dict(
backbone=dict(
embed_dim=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
ape=False,
drop_path_rate=0.2,
patch_norm=True,
use_checkpoint=False
),
neck=dict(in_channels=[96, 192, 384, 768]),
roi_head=dict(
bbox_head=[
dict(
type='ConvFCBBoxHead',
num_shared_convs=4,
num_shared_fcs=1,
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=3,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
reg_decoded_bbox=True,
norm_cfg=dict(type='SyncBN', requires_grad=True),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=10.0)),
dict(
type='ConvFCBBoxHead',
num_shared_convs=4,
num_shared_fcs=1,
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=3,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=False,
reg_decoded_bbox=True,
norm_cfg=dict(type='SyncBN', requires_grad=True),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=10.0)),
dict(
type='ConvFCBBoxHead',
num_shared_convs=4,
num_shared_fcs=1,
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=3,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=False,
reg_decoded_bbox=True,
norm_cfg=dict(type='SyncBN', requires_grad=True),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=10.0))
]))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=False),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='AutoAugment',
policies=[
[
dict(type='Resize',
img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
multiscale_mode='value',
keep_ratio=True)
],
[
dict(type='Resize',
img_scale=[(400, 1333), (500, 1333), (600, 1333)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(type='Resize',
img_scale=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
multiscale_mode='value',
override=True,
keep_ratio=True)
]
]),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), # , 'gt_masks'
]
data = dict(train=dict(pipeline=train_pipeline))
optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05,
paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
'relative_position_bias_table': dict(decay_mult=0.),
'norm': dict(decay_mult=0.)}))
lr_config = dict(step=[27, 33])
runner = dict(type='EpochBasedRunnerAmp', max_epochs=36)
# do not use mmdet version fp16
fp16 = None
optimizer_config = dict(
type="DistOptimizerHook",
update_interval=1,
grad_clip=None,
coalesce=True,
bucket_size_mb=-1,
use_fp16=True,
)
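
A minimal sketch of loading this config, assuming the mmcv 1.x Config API used by MMDetection-based Swin repositories:

from mmcv import Config  # mmcv 1.x

cfg = Config.fromfile(
    'configs/swin/cascade_mask_rcnn_swin_tiny_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_3x_coco_retrograde.py')
print(cfg.model.backbone.depths)             # [2, 2, 6, 2]
print(cfg.optimizer.type, cfg.optimizer.lr)  # AdamW 0.0001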
| avg_line_length: 39.453901 | max_line_length: 101 | alphanum_fraction: 0.495236 |

| hexsha: 560c9cfd4156364d3b939f914567ea0a6c8eda31 | size: 4,819 | ext: py | lang: Python |
| max_stars_repo: gmocoin/public/api.py | makotookamura/GmoCoin | 025d3e68364bf52418dbc3445987ff21528db732 | ["Apache-2.0"] | stars: 1 | 2021-05-20T01:34:28.000Z | 2021-05-20T01:34:28.000Z |
| max_issues_repo: gmocoin/public/api.py | makotookamura/GmoCoin | 025d3e68364bf52418dbc3445987ff21528db732 | ["Apache-2.0"] | issues: 44 | 2020-11-15T01:17:38.000Z | 2021-07-20T13:45:12.000Z |
| max_forks_repo: gmocoin/public/api.py | makotookamura/GmoCoin | 025d3e68364bf52418dbc3445987ff21528db732 | ["Apache-2.0"] | forks: 1 | 2021-04-27T12:12:47.000Z | 2021-04-27T12:12:47.000Z |
#!python3
import requests
import json
from datetime import datetime, date, timedelta
import pandas as pd
from ..common.annotation import post_request
from ..common.const import GMOConst
from ..common.logging import get_logger, log
from ..common.dto import Status
from .dto import GetStatusResSchema, GetStatusRes, GetStatusData, \
    GetTickerResSchema, GetTickerRes, Symbol, \
    GetOrderBooksResSchema, GetOrderBooksRes, \
    GetTradesResSchema, GetTradesRes  # GetStatusData is needed by get_status()'s maintenance fallback
logger = get_logger()
class Client:
    '''
    Public API client class for GMO Coin.
    '''
@log(logger)
@post_request(GetStatusResSchema)
def get_status(self) -> GetStatusRes:
"""
取引所の稼動状態を取得します。
Args:
なし
Returns:
GetStatusRes
"""
ret = requests.get(GMOConst.END_POINT_PUBLIC + 'status')
res_json = ret.json()
if res_json['status'] == 5 and res_json['messages'][0]['message_code'] == 'ERR-5201':
            # If the exchange is under maintenance, return a maintenance response instead
# {'status': 5, 'messages': [{'message_code': 'ERR-5201', 'message_string': 'MAINTENANCE. Please wait for a while'}]}
return GetStatusRes(status=0, responsetime=datetime.now(),
data=GetStatusData(status=Status.MAINTENANCE))
return ret
@log(logger)
@post_request(GetTickerResSchema)
def get_ticker(self, symbol:Symbol = None) -> GetTickerRes:
"""
指定した銘柄の最新レートを取得します。
全銘柄分の最新レートを取得する場合はsymbolパラメータ指定無しでの実行をおすすめします。
Args:
symbol:
指定しない場合は全銘柄分の最新レートを返す。
BTC ETH BCH LTC XRP BTC_JPY ETH_JPY BCH_JPY LTC_JPY XRP_JPY
Returns:
GetTickerRes
"""
if symbol is None:
return requests.get(GMOConst.END_POINT_PUBLIC + f'ticker')
else:
return requests.get(GMOConst.END_POINT_PUBLIC + f'ticker?symbol={symbol.value}')
@log(logger)
@post_request(GetOrderBooksResSchema)
def get_orderbooks(self, symbol:Symbol) -> GetOrderBooksRes:
"""
指定した銘柄の板情報(snapshot)を取得します。
Args:
symbol:
BTC ETH BCH LTC XRP BTC_JPY ETH_JPY BCH_JPY LTC_JPY XRP_JPY
Returns:
GetOrderBooksRes
"""
return requests.get(GMOConst.END_POINT_PUBLIC + f'orderbooks?symbol={symbol.value}')
@log(logger)
@post_request(GetTradesResSchema)
def get_trades(self, symbol:Symbol, page:int=1, count:int=100) -> GetTradesRes:
"""
指定した銘柄の板情報(snapshot)を取得します。
Args:
symbol:
BTC ETH BCH LTC XRP BTC_JPY ETH_JPY BCH_JPY LTC_JPY XRP_JPY
page:
取得対象ページ
指定しない場合は1を指定したとして動作する。
count:
1ページ当りの取得件数
指定しない場合は100(最大値)を指定したとして動作する。
Returns:
GetTradesRes
"""
return requests.get(GMOConst.END_POINT_PUBLIC + f'trades?symbol={symbol.value}&page={page}&count={count}')
@log(logger)
    def get_historical_data(self, symbol:Symbol, page:int=1, count:int=100) -> GetTradesRes:
        """
        Get the trade history for the specified symbol.
        NOTE: this definition duplicates get_trades() and is immediately shadowed by the
        get_historical_data() defined below, so it is never reachable at runtime.
        Args:
            symbol:
                BTC ETH BCH LTC XRP BTC_JPY ETH_JPY BCH_JPY LTC_JPY XRP_JPY
            page:
                Page to fetch.
                Defaults to 1 when not specified.
            count:
                Number of items per page.
                Defaults to 100 (the maximum) when not specified.
        Returns:
            GetTradesRes
        """
        return requests.get(GMOConst.END_POINT_PUBLIC + f'trades?symbol={symbol.value}&page={page}&count={count}')
@log(logger)
def get_historical_data(self, symbol:Symbol, past_days: int, base_date:date = None) -> pd.DataFrame:
"""
指定した銘柄の過去取引情報を取得します。
Args:
symbol:
BTC ETH BCH LTC XRP BTC_JPY ETH_JPY BCH_JPY LTC_JPY XRP_JPY
past_days:
過去期間日
base_date-past_days ~ base_dateのデータを取得する。
base_date:
過去基準日
指定しない場合は現在日を指定したとして動作する。
Returns:
DataFrame
"""
        if base_date is None:
            base_date = date.today()
        start_date = base_date - timedelta(days=past_days)
        # Walk forward one day at a time from the start date up to the base date
        url_list = []
        for d in range(past_days):
            day = start_date + timedelta(days=d)
            # print(day)
            url = f'https://api.coin.z.com/data/trades/{symbol.value}/{day.year}/{day.month:02}/{day.year}{day.month:02}{day.day:02}_{symbol.value}.csv.gz'
            # NOTE: no files are published for weekends, so only keep dates whose files actually exist
            if requests.get(url).status_code == 200:
                url_list.append(url)
        return pd.concat([pd.read_csv(url) for url in url_list], axis=0, sort=True)
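
A minimal usage sketch for this client, assuming the package is installed and that Symbol is importable from gmocoin.public.dto as in the relative imports above:

from gmocoin.public.api import Client
from gmocoin.public.dto import Symbol

client = Client()
print(client.get_status())            # exchange status
print(client.get_ticker(Symbol.BTC))  # latest BTC rate
df = client.get_historical_data(Symbol.BTC, past_days=3)
print(df.head())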
| avg_line_length: 30.5 | max_line_length: 155 | alphanum_fraction: 0.591409 |

| hexsha: 9c039e3aad80f998fb7bf057071263ff29ad236a | size: 43,902 | ext: py | lang: Python |
| max_stars_repo: backend/venv/Lib/site-packages/sphinx/builders/html.py | analurandis/Tur | b4b5d1230d70659be0c3f477f0baea68fc46ba39 | ["MIT"] | stars: 1 | 2019-07-29T02:53:51.000Z | 2019-07-29T02:53:51.000Z |
| max_issues_repo: backend/venv/Lib/site-packages/sphinx/builders/html.py | analurandis/Tur | b4b5d1230d70659be0c3f477f0baea68fc46ba39 | ["MIT"] | issues: 1 | 2021-09-11T14:30:32.000Z | 2021-09-11T14:30:32.000Z |
| max_forks_repo: backend/venv/Lib/site-packages/sphinx/builders/html.py | analurandis/Tur | b4b5d1230d70659be0c3f477f0baea68fc46ba39 | ["MIT"] | forks: 2 | 2016-12-19T02:27:46.000Z | 2019-07-29T02:53:54.000Z |
# -*- coding: utf-8 -*-
"""
sphinx.builders.html
~~~~~~~~~~~~~~~~~~~~
Several HTML builders.
:copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import sys
import zlib
import codecs
import posixpath
import cPickle as pickle
from os import path
try:
from hashlib import md5
except ImportError:
# 2.4 compatibility
from md5 import md5
from docutils import nodes
from docutils.io import DocTreeInput, StringOutput
from docutils.core import Publisher
from docutils.utils import new_document
from docutils.frontend import OptionParser
from docutils.readers.doctree import Reader as DoctreeReader
from sphinx import package_dir, __version__
from sphinx.util import jsonimpl, copy_static_entry
from sphinx.util.osutil import SEP, os_path, relative_uri, ensuredir, \
movefile, ustrftime, copyfile
from sphinx.util.nodes import inline_all_toctrees
from sphinx.util.matching import patmatch, compile_matchers
from sphinx.util.pycompat import any, b
from sphinx.errors import SphinxError
from sphinx.locale import _
from sphinx.search import js_index
from sphinx.theming import Theme
from sphinx.builders import Builder
from sphinx.application import ENV_PICKLE_FILENAME
from sphinx.highlighting import PygmentsBridge
from sphinx.util.console import bold, darkgreen, brown
from sphinx.writers.html import HTMLWriter, HTMLTranslator, \
SmartyPantsHTMLTranslator
#: the filename for the inventory of objects
INVENTORY_FILENAME = 'objects.inv'
#: the filename for the "last build" file (for serializing builders)
LAST_BUILD_FILENAME = 'last_build'
def get_stable_hash(obj):
"""
Return a stable hash for a Python data structure. We can't just use
the md5 of str(obj) since for example dictionary items are enumerated
in unpredictable order due to hash randomization in newer Pythons.
"""
if isinstance(obj, dict):
return get_stable_hash(list(obj.items()))
elif isinstance(obj, (list, tuple)):
obj = sorted(get_stable_hash(o) for o in obj)
return md5(unicode(obj).encode('utf8')).hexdigest()
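# For example, str({'a': 1, 'b': 2}) and str({'b': 2, 'a': 1}) may stringify in different
# orders across runs under hash randomization, whereas get_stable_hash() gives both dicts
# the same digest because the per-item hashes are sorted before the final md5 is taken.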
class StandaloneHTMLBuilder(Builder):
"""
Builds standalone HTML docs.
"""
name = 'html'
format = 'html'
copysource = True
allow_parallel = True
out_suffix = '.html'
link_suffix = '.html' # defaults to matching out_suffix
indexer_format = js_index
indexer_dumps_unicode = True
supported_image_types = ['image/svg+xml', 'image/png',
'image/gif', 'image/jpeg']
searchindex_filename = 'searchindex.js'
add_permalinks = True
embedded = False # for things like HTML help or Qt help: suppresses sidebar
# This is a class attribute because it is mutated by Sphinx.add_javascript.
script_files = ['_static/jquery.js', '_static/underscore.js',
'_static/doctools.js']
# Dito for this one.
css_files = []
default_sidebars = ['localtoc.html', 'relations.html',
'sourcelink.html', 'searchbox.html']
# cached publisher object for snippets
_publisher = None
def init(self):
# a hash of all config values that, if changed, cause a full rebuild
self.config_hash = ''
self.tags_hash = ''
# section numbers for headings in the currently visited document
self.secnumbers = {}
# currently written docname
self.current_docname = None
self.init_templates()
self.init_highlighter()
self.init_translator_class()
if self.config.html_file_suffix is not None:
self.out_suffix = self.config.html_file_suffix
if self.config.html_link_suffix is not None:
self.link_suffix = self.config.html_link_suffix
else:
self.link_suffix = self.out_suffix
if self.config.language is not None:
if self._get_translations_js():
self.script_files.append('_static/translations.js')
def _get_translations_js(self):
candidates = [path.join(package_dir, 'locale', self.config.language,
'LC_MESSAGES', 'sphinx.js'),
path.join(sys.prefix, 'share/sphinx/locale',
self.config.language, 'sphinx.js')] + \
[path.join(dir, self.config.language,
'LC_MESSAGES', 'sphinx.js')
for dir in self.config.locale_dirs]
for jsfile in candidates:
if path.isfile(jsfile):
return jsfile
return None
def get_theme_config(self):
return self.config.html_theme, self.config.html_theme_options
def init_templates(self):
Theme.init_themes(self.confdir, self.config.html_theme_path,
warn=self.warn)
themename, themeoptions = self.get_theme_config()
self.theme = Theme(themename)
self.theme_options = themeoptions.copy()
self.create_template_bridge()
self.templates.init(self, self.theme)
def init_highlighter(self):
# determine Pygments style and create the highlighter
if self.config.pygments_style is not None:
style = self.config.pygments_style
elif self.theme:
style = self.theme.get_confstr('theme', 'pygments_style', 'none')
else:
style = 'sphinx'
self.highlighter = PygmentsBridge('html', style,
self.config.trim_doctest_flags)
def init_translator_class(self):
if self.config.html_translator_class:
self.translator_class = self.app.import_object(
self.config.html_translator_class,
'html_translator_class setting')
elif self.config.html_use_smartypants:
self.translator_class = SmartyPantsHTMLTranslator
else:
self.translator_class = HTMLTranslator
def get_outdated_docs(self):
cfgdict = dict((name, self.config[name])
for (name, desc) in self.config.values.iteritems()
if desc[1] == 'html')
self.config_hash = get_stable_hash(cfgdict)
self.tags_hash = get_stable_hash(sorted(self.tags))
old_config_hash = old_tags_hash = ''
try:
fp = open(path.join(self.outdir, '.buildinfo'))
try:
version = fp.readline()
if version.rstrip() != '# Sphinx build info version 1':
raise ValueError
fp.readline() # skip commentary
cfg, old_config_hash = fp.readline().strip().split(': ')
if cfg != 'config':
raise ValueError
tag, old_tags_hash = fp.readline().strip().split(': ')
if tag != 'tags':
raise ValueError
finally:
fp.close()
except ValueError:
self.warn('unsupported build info format in %r, building all' %
path.join(self.outdir, '.buildinfo'))
except Exception:
pass
if old_config_hash != self.config_hash or \
old_tags_hash != self.tags_hash:
for docname in self.env.found_docs:
yield docname
return
if self.templates:
template_mtime = self.templates.newest_template_mtime()
else:
template_mtime = 0
for docname in self.env.found_docs:
if docname not in self.env.all_docs:
yield docname
continue
targetname = self.get_outfilename(docname)
try:
targetmtime = path.getmtime(targetname)
except Exception:
targetmtime = 0
try:
srcmtime = max(path.getmtime(self.env.doc2path(docname)),
template_mtime)
if srcmtime > targetmtime:
yield docname
except EnvironmentError:
# source doesn't exist anymore
pass
def render_partial(self, node):
"""Utility: Render a lone doctree node."""
if node is None:
return {'fragment': ''}
doc = new_document(b('<partial node>'))
doc.append(node)
if self._publisher is None:
self._publisher = Publisher(
source_class = DocTreeInput,
destination_class=StringOutput)
self._publisher.set_components('standalone',
'restructuredtext', 'pseudoxml')
pub = self._publisher
pub.reader = DoctreeReader()
pub.writer = HTMLWriter(self)
pub.process_programmatic_settings(
None, {'output_encoding': 'unicode'}, None)
pub.set_source(doc, None)
pub.set_destination(None, None)
pub.publish()
return pub.writer.parts
def prepare_writing(self, docnames):
# create the search indexer
from sphinx.search import IndexBuilder, languages
lang = self.config.html_search_language or self.config.language
if not lang or lang not in languages:
lang = 'en'
self.indexer = IndexBuilder(self.env, lang,
self.config.html_search_options,
self.config.html_search_scorer)
self.load_indexer(docnames)
self.docwriter = HTMLWriter(self)
self.docsettings = OptionParser(
defaults=self.env.settings,
components=(self.docwriter,),
read_config_files=True).get_default_values()
self.docsettings.compact_lists = bool(self.config.html_compact_lists)
# determine the additional indices to include
self.domain_indices = []
# html_domain_indices can be False/True or a list of index names
indices_config = self.config.html_domain_indices
if indices_config:
for domain in self.env.domains.itervalues():
for indexcls in domain.indices:
indexname = '%s-%s' % (domain.name, indexcls.name)
if isinstance(indices_config, list):
if indexname not in indices_config:
continue
# deprecated config value
if indexname == 'py-modindex' and \
not self.config.html_use_modindex:
continue
content, collapse = indexcls(domain).generate()
if content:
self.domain_indices.append(
(indexname, indexcls, content, collapse))
# format the "last updated on" string, only once is enough since it
# typically doesn't include the time of day
lufmt = self.config.html_last_updated_fmt
if lufmt is not None:
self.last_updated = ustrftime(lufmt or _('%b %d, %Y'))
else:
self.last_updated = None
logo = self.config.html_logo and \
path.basename(self.config.html_logo) or ''
favicon = self.config.html_favicon and \
path.basename(self.config.html_favicon) or ''
if favicon and os.path.splitext(favicon)[1] != '.ico':
self.warn('html_favicon is not an .ico file')
if not isinstance(self.config.html_use_opensearch, basestring):
self.warn('html_use_opensearch config value must now be a string')
self.relations = self.env.collect_relations()
rellinks = []
if self.get_builder_config('use_index', 'html'):
rellinks.append(('genindex', _('General Index'), 'I', _('index')))
for indexname, indexcls, content, collapse in self.domain_indices:
# if it has a short name
if indexcls.shortname:
rellinks.append((indexname, indexcls.localname,
'', indexcls.shortname))
if self.config.html_style is not None:
stylename = self.config.html_style
elif self.theme:
stylename = self.theme.get_confstr('theme', 'stylesheet')
else:
stylename = 'default.css'
self.globalcontext = dict(
embedded = self.embedded,
project = self.config.project,
release = self.config.release,
version = self.config.version,
last_updated = self.last_updated,
copyright = self.config.copyright,
master_doc = self.config.master_doc,
use_opensearch = self.config.html_use_opensearch,
docstitle = self.config.html_title,
shorttitle = self.config.html_short_title,
show_copyright = self.config.html_show_copyright,
show_sphinx = self.config.html_show_sphinx,
has_source = self.config.html_copy_source,
show_source = self.config.html_show_sourcelink,
file_suffix = self.out_suffix,
script_files = self.script_files,
css_files = self.css_files,
sphinx_version = __version__,
style = stylename,
rellinks = rellinks,
builder = self.name,
parents = [],
logo = logo,
favicon = favicon,
)
if self.theme:
self.globalcontext.update(
('theme_' + key, val) for (key, val) in
self.theme.get_options(self.theme_options).iteritems())
self.globalcontext.update(self.config.html_context)
def get_doc_context(self, docname, body, metatags):
"""Collect items for the template context of a page."""
# find out relations
prev = next = None
parents = []
rellinks = self.globalcontext['rellinks'][:]
related = self.relations.get(docname)
titles = self.env.titles
if related and related[2]:
try:
next = {
'link': self.get_relative_uri(docname, related[2]),
'title': self.render_partial(titles[related[2]])['title']
}
rellinks.append((related[2], next['title'], 'N', _('next')))
except KeyError:
next = None
if related and related[1]:
try:
prev = {
'link': self.get_relative_uri(docname, related[1]),
'title': self.render_partial(titles[related[1]])['title']
}
rellinks.append((related[1], prev['title'], 'P', _('previous')))
except KeyError:
# the relation is (somehow) not in the TOC tree, handle
# that gracefully
prev = None
while related and related[0]:
try:
parents.append(
{'link': self.get_relative_uri(docname, related[0]),
'title': self.render_partial(titles[related[0]])['title']})
except KeyError:
pass
related = self.relations.get(related[0])
if parents:
parents.pop() # remove link to the master file; we have a generic
# "back to index" link already
parents.reverse()
# title rendered as HTML
title = self.env.longtitles.get(docname)
title = title and self.render_partial(title)['title'] or ''
# the name for the copied source
sourcename = self.config.html_copy_source and docname + '.txt' or ''
# metadata for the document
meta = self.env.metadata.get(docname)
# local TOC and global TOC tree
self_toc = self.env.get_toc_for(docname, self)
toc = self.render_partial(self_toc)['fragment']
return dict(
parents = parents,
prev = prev,
next = next,
title = title,
meta = meta,
body = body,
metatags = metatags,
rellinks = rellinks,
sourcename = sourcename,
toc = toc,
# only display a TOC if there's more than one item to show
display_toc = (self.env.toc_num_entries[docname] > 1),
)
def write_doc(self, docname, doctree):
destination = StringOutput(encoding='utf-8')
doctree.settings = self.docsettings
self.secnumbers = self.env.toc_secnumbers.get(docname, {})
self.imgpath = relative_uri(self.get_target_uri(docname), '_images')
self.dlpath = relative_uri(self.get_target_uri(docname), '_downloads')
self.current_docname = docname
self.docwriter.write(doctree, destination)
self.docwriter.assemble_parts()
body = self.docwriter.parts['fragment']
metatags = self.docwriter.clean_meta
ctx = self.get_doc_context(docname, body, metatags)
self.handle_page(docname, ctx, event_arg=doctree)
def write_doc_serialized(self, docname, doctree):
self.imgpath = relative_uri(self.get_target_uri(docname), '_images')
self.post_process_images(doctree)
title = self.env.longtitles.get(docname)
title = title and self.render_partial(title)['title'] or ''
self.index_page(docname, doctree, title)
def finish(self):
self.info(bold('writing additional files...'), nonl=1)
# pages from extensions
for pagelist in self.app.emit('html-collect-pages'):
for pagename, context, template in pagelist:
self.handle_page(pagename, context, template)
# the global general index
if self.get_builder_config('use_index', 'html'):
self.write_genindex()
# the global domain-specific indices
self.write_domain_indices()
# the search page
if self.name != 'htmlhelp':
self.info(' search', nonl=1)
self.handle_page('search', {}, 'search.html')
# additional pages from conf.py
for pagename, template in self.config.html_additional_pages.items():
self.info(' '+pagename, nonl=1)
self.handle_page(pagename, {}, template)
if self.config.html_use_opensearch and self.name != 'htmlhelp':
self.info(' opensearch', nonl=1)
fn = path.join(self.outdir, '_static', 'opensearch.xml')
self.handle_page('opensearch', {}, 'opensearch.xml', outfilename=fn)
self.info()
self.copy_image_files()
self.copy_download_files()
self.copy_static_files()
self.copy_extra_files()
self.write_buildinfo()
# dump the search index
self.handle_finish()
def write_genindex(self):
# the total count of lines for each index letter, used to distribute
# the entries into two columns
genindex = self.env.create_index(self)
indexcounts = []
for _, entries in genindex:
indexcounts.append(sum(1 + len(subitems)
for _, (_, subitems) in entries))
genindexcontext = dict(
genindexentries = genindex,
genindexcounts = indexcounts,
split_index = self.config.html_split_index,
)
self.info(' genindex', nonl=1)
if self.config.html_split_index:
self.handle_page('genindex', genindexcontext,
'genindex-split.html')
self.handle_page('genindex-all', genindexcontext,
'genindex.html')
for (key, entries), count in zip(genindex, indexcounts):
ctx = {'key': key, 'entries': entries, 'count': count,
'genindexentries': genindex}
self.handle_page('genindex-' + key, ctx,
'genindex-single.html')
else:
self.handle_page('genindex', genindexcontext, 'genindex.html')
def write_domain_indices(self):
for indexname, indexcls, content, collapse in self.domain_indices:
indexcontext = dict(
indextitle = indexcls.localname,
content = content,
collapse_index = collapse,
)
self.info(' ' + indexname, nonl=1)
self.handle_page(indexname, indexcontext, 'domainindex.html')
def copy_image_files(self):
# copy image files
if self.images:
ensuredir(path.join(self.outdir, '_images'))
for src in self.status_iterator(self.images, 'copying images... ',
brown, len(self.images)):
dest = self.images[src]
try:
copyfile(path.join(self.srcdir, src),
path.join(self.outdir, '_images', dest))
except Exception, err:
self.warn('cannot copy image file %r: %s' %
(path.join(self.srcdir, src), err))
def copy_download_files(self):
# copy downloadable files
if self.env.dlfiles:
ensuredir(path.join(self.outdir, '_downloads'))
for src in self.status_iterator(self.env.dlfiles,
'copying downloadable files... ',
brown, len(self.env.dlfiles)):
dest = self.env.dlfiles[src][1]
try:
copyfile(path.join(self.srcdir, src),
path.join(self.outdir, '_downloads', dest))
except Exception, err:
self.warn('cannot copy downloadable file %r: %s' %
(path.join(self.srcdir, src), err))
def copy_static_files(self):
# copy static files
self.info(bold('copying static files... '), nonl=True)
ensuredir(path.join(self.outdir, '_static'))
# first, create pygments style file
f = open(path.join(self.outdir, '_static', 'pygments.css'), 'w')
f.write(self.highlighter.get_stylesheet())
f.close()
# then, copy translations JavaScript file
if self.config.language is not None:
jsfile = self._get_translations_js()
if jsfile:
copyfile(jsfile, path.join(self.outdir, '_static',
'translations.js'))
# add context items for search function used in searchtools.js_t
ctx = self.globalcontext.copy()
ctx.update(self.indexer.context_for_searchtool())
# then, copy over theme-supplied static files
if self.theme:
themeentries = [path.join(themepath, 'static')
for themepath in self.theme.get_dirchain()[::-1]]
for entry in themeentries:
copy_static_entry(entry, path.join(self.outdir, '_static'),
self, ctx)
# then, copy over all user-supplied static files
staticentries = [path.join(self.confdir, spath)
for spath in self.config.html_static_path]
matchers = compile_matchers(
self.config.exclude_patterns +
['**/' + d for d in self.config.exclude_dirnames]
)
for entry in staticentries:
if not path.exists(entry):
self.warn('html_static_path entry %r does not exist' % entry)
continue
copy_static_entry(entry, path.join(self.outdir, '_static'), self,
ctx, exclude_matchers=matchers)
# copy logo and favicon files if not already in static path
if self.config.html_logo:
logobase = path.basename(self.config.html_logo)
logotarget = path.join(self.outdir, '_static', logobase)
if not path.isfile(path.join(self.confdir, self.config.html_logo)):
self.warn('logo file %r does not exist' % self.config.html_logo)
elif not path.isfile(logotarget):
copyfile(path.join(self.confdir, self.config.html_logo),
logotarget)
if self.config.html_favicon:
iconbase = path.basename(self.config.html_favicon)
icontarget = path.join(self.outdir, '_static', iconbase)
if not path.isfile(path.join(self.confdir, self.config.html_favicon)):
self.warn('favicon file %r does not exist' % self.config.html_favicon)
elif not path.isfile(icontarget):
copyfile(path.join(self.confdir, self.config.html_favicon),
icontarget)
self.info('done')
def copy_extra_files(self):
# copy html_extra_path files
self.info(bold('copying extra files... '), nonl=True)
extraentries = [path.join(self.confdir, epath)
for epath in self.config.html_extra_path]
for entry in extraentries:
if not path.exists(entry):
self.warn('html_extra_path entry %r does not exist' % entry)
continue
copy_static_entry(entry, self.outdir, self)
self.info('done')
def write_buildinfo(self):
# write build info file
fp = open(path.join(self.outdir, '.buildinfo'), 'w')
try:
fp.write('# Sphinx build info version 1\n'
'# This file hashes the configuration used when building'
' these files. When it is not found, a full rebuild will'
' be done.\nconfig: %s\ntags: %s\n' %
(self.config_hash, self.tags_hash))
finally:
fp.close()
def cleanup(self):
# clean up theme stuff
if self.theme:
self.theme.cleanup()
def post_process_images(self, doctree):
"""Pick the best candidate for an image and link down-scaled images to
their high res version.
"""
Builder.post_process_images(self, doctree)
for node in doctree.traverse(nodes.image):
scale_keys = ('scale', 'width', 'height')
if not any((key in node) for key in scale_keys) or \
isinstance(node.parent, nodes.reference):
# docutils does unfortunately not preserve the
# ``target`` attribute on images, so we need to check
# the parent node here.
continue
uri = node['uri']
reference = nodes.reference('', '', internal=True)
if uri in self.images:
reference['refuri'] = posixpath.join(self.imgpath,
self.images[uri])
else:
reference['refuri'] = uri
node.replace_self(reference)
reference.append(node)
def load_indexer(self, docnames):
keep = set(self.env.all_docs) - set(docnames)
try:
searchindexfn = path.join(self.outdir, self.searchindex_filename)
if self.indexer_dumps_unicode:
f = codecs.open(searchindexfn, 'r', encoding='utf-8')
else:
f = open(searchindexfn, 'rb')
try:
self.indexer.load(f, self.indexer_format)
finally:
f.close()
except (IOError, OSError, ValueError):
if keep:
self.warn('search index couldn\'t be loaded, but not all '
'documents will be built: the index will be '
'incomplete.')
# delete all entries for files that will be rebuilt
self.indexer.prune(keep)
def index_page(self, pagename, doctree, title):
# only index pages with title
if self.indexer is not None and title:
self.indexer.feed(pagename, title, doctree)
def _get_local_toctree(self, docname, collapse=True, **kwds):
if 'includehidden' not in kwds:
kwds['includehidden'] = False
return self.render_partial(self.env.get_toctree_for(
docname, self, collapse, **kwds))['fragment']
def get_outfilename(self, pagename):
return path.join(self.outdir, os_path(pagename) + self.out_suffix)
def add_sidebars(self, pagename, ctx):
def has_wildcard(pattern):
return any(char in pattern for char in '*?[')
sidebars = None
matched = None
customsidebar = None
for pattern, patsidebars in self.config.html_sidebars.iteritems():
if patmatch(pagename, pattern):
if matched:
if has_wildcard(pattern):
# warn if both patterns contain wildcards
if has_wildcard(matched):
self.warn('page %s matches two patterns in '
'html_sidebars: %r and %r' %
(pagename, matched, pattern))
# else the already matched pattern is more specific
# than the present one, because it contains no wildcard
continue
matched = pattern
sidebars = patsidebars
if sidebars is None:
# keep defaults
pass
elif isinstance(sidebars, basestring):
# 0.x compatible mode: insert custom sidebar before searchbox
customsidebar = sidebars
sidebars = None
ctx['sidebars'] = sidebars
ctx['customsidebar'] = customsidebar
# --------- these are overwritten by the serialization builder
def get_target_uri(self, docname, typ=None):
return docname + self.link_suffix
def handle_page(self, pagename, addctx, templatename='page.html',
outfilename=None, event_arg=None):
ctx = self.globalcontext.copy()
# current_page_name is backwards compatibility
ctx['pagename'] = ctx['current_page_name'] = pagename
default_baseuri = self.get_target_uri(pagename)
# in the singlehtml builder, default_baseuri still contains an #anchor
# part, which relative_uri doesn't really like...
default_baseuri = default_baseuri.rsplit('#', 1)[0]
def pathto(otheruri, resource=False, baseuri=default_baseuri):
if resource and '://' in otheruri:
# allow non-local resources given by scheme
return otheruri
elif not resource:
otheruri = self.get_target_uri(otheruri)
uri = relative_uri(baseuri, otheruri) or '#'
return uri
ctx['pathto'] = pathto
ctx['hasdoc'] = lambda name: name in self.env.all_docs
if self.name != 'htmlhelp':
ctx['encoding'] = encoding = self.config.html_output_encoding
else:
ctx['encoding'] = encoding = self.encoding
ctx['toctree'] = lambda **kw: self._get_local_toctree(pagename, **kw)
self.add_sidebars(pagename, ctx)
ctx.update(addctx)
self.app.emit('html-page-context', pagename, templatename,
ctx, event_arg)
try:
output = self.templates.render(templatename, ctx)
except UnicodeError:
self.warn("a Unicode error occurred when rendering the page %s. "
"Please make sure all config values that contain "
"non-ASCII content are Unicode strings." % pagename)
return
if not outfilename:
outfilename = self.get_outfilename(pagename)
# outfilename's path is in general different from self.outdir
ensuredir(path.dirname(outfilename))
try:
f = codecs.open(outfilename, 'w', encoding, 'xmlcharrefreplace')
try:
f.write(output)
finally:
f.close()
except (IOError, OSError), err:
self.warn("error writing file %s: %s" % (outfilename, err))
if self.copysource and ctx.get('sourcename'):
# copy the source file for the "show source" link
source_name = path.join(self.outdir, '_sources',
os_path(ctx['sourcename']))
ensuredir(path.dirname(source_name))
copyfile(self.env.doc2path(pagename), source_name)
def handle_finish(self):
self.dump_search_index()
self.dump_inventory()
def dump_inventory(self):
self.info(bold('dumping object inventory... '), nonl=True)
f = open(path.join(self.outdir, INVENTORY_FILENAME), 'wb')
try:
f.write((u'# Sphinx inventory version 2\n'
u'# Project: %s\n'
u'# Version: %s\n'
u'# The remainder of this file is compressed using zlib.\n'
% (self.config.project, self.config.version)
).encode('utf-8'))
compressor = zlib.compressobj(9)
for domainname, domain in self.env.domains.iteritems():
for name, dispname, type, docname, anchor, prio in \
domain.get_objects():
if anchor.endswith(name):
# this can shorten the inventory by as much as 25%
anchor = anchor[:-len(name)] + '$'
uri = self.get_target_uri(docname) + '#' + anchor
if dispname == name:
dispname = u'-'
f.write(compressor.compress(
(u'%s %s:%s %s %s %s\n' % (name, domainname, type,
prio, uri, dispname)
).encode('utf-8')))
f.write(compressor.flush())
finally:
f.close()
self.info('done')
def dump_search_index(self):
self.info(bold('dumping search index... '), nonl=True)
self.indexer.prune(self.env.all_docs)
searchindexfn = path.join(self.outdir, self.searchindex_filename)
# first write to a temporary file, so that if dumping fails,
# the existing index won't be overwritten
if self.indexer_dumps_unicode:
f = codecs.open(searchindexfn + '.tmp', 'w', encoding='utf-8')
else:
f = open(searchindexfn + '.tmp', 'wb')
try:
self.indexer.dump(f, self.indexer_format)
finally:
f.close()
movefile(searchindexfn + '.tmp', searchindexfn)
self.info('done')
class DirectoryHTMLBuilder(StandaloneHTMLBuilder):
"""
A StandaloneHTMLBuilder that creates all HTML pages as "index.html" in
a directory given by their pagename, so that generated URLs don't have
``.html`` in them.
"""
name = 'dirhtml'
def get_target_uri(self, docname, typ=None):
if docname == 'index':
return ''
if docname.endswith(SEP + 'index'):
return docname[:-5] # up to sep
return docname + SEP
def get_outfilename(self, pagename):
if pagename == 'index' or pagename.endswith(SEP + 'index'):
outfilename = path.join(self.outdir, os_path(pagename)
+ self.out_suffix)
else:
outfilename = path.join(self.outdir, os_path(pagename),
'index' + self.out_suffix)
return outfilename
def prepare_writing(self, docnames):
StandaloneHTMLBuilder.prepare_writing(self, docnames)
self.globalcontext['no_search_suffix'] = True
class SingleFileHTMLBuilder(StandaloneHTMLBuilder):
"""
A StandaloneHTMLBuilder subclass that puts the whole document tree on one
HTML page.
"""
name = 'singlehtml'
copysource = False
def get_outdated_docs(self):
return 'all documents'
def get_target_uri(self, docname, typ=None):
if docname in self.env.all_docs:
# all references are on the same page...
return self.config.master_doc + self.out_suffix + \
'#document-' + docname
else:
# chances are this is a html_additional_page
return docname + self.out_suffix
def get_relative_uri(self, from_, to, typ=None):
# ignore source
return self.get_target_uri(to, typ)
def fix_refuris(self, tree):
# fix refuris with double anchor
fname = self.config.master_doc + self.out_suffix
for refnode in tree.traverse(nodes.reference):
if 'refuri' not in refnode:
continue
refuri = refnode['refuri']
hashindex = refuri.find('#')
if hashindex < 0:
continue
hashindex = refuri.find('#', hashindex+1)
if hashindex >= 0:
refnode['refuri'] = fname + refuri[hashindex:]
def assemble_doctree(self):
master = self.config.master_doc
tree = self.env.get_doctree(master)
tree = inline_all_toctrees(self, set(), master, tree, darkgreen)
tree['docname'] = master
self.env.resolve_references(tree, master, self)
self.fix_refuris(tree)
return tree
def get_doc_context(self, docname, body, metatags):
# no relation links...
toc = self.env.get_toctree_for(self.config.master_doc, self, False)
# if there is no toctree, toc is None
if toc:
self.fix_refuris(toc)
toc = self.render_partial(toc)['fragment']
display_toc = True
else:
toc = ''
display_toc = False
return dict(
parents = [],
prev = None,
next = None,
docstitle = None,
title = self.config.html_title,
meta = None,
body = body,
metatags = metatags,
rellinks = [],
sourcename = '',
toc = toc,
display_toc = display_toc,
)
def write(self, *ignored):
docnames = self.env.all_docs
self.info(bold('preparing documents... '), nonl=True)
self.prepare_writing(docnames)
self.info('done')
self.info(bold('assembling single document... '), nonl=True)
doctree = self.assemble_doctree()
self.info()
self.info(bold('writing... '), nonl=True)
self.write_doc_serialized(self.config.master_doc, doctree)
self.write_doc(self.config.master_doc, doctree)
self.info('done')
def finish(self):
# no indices or search pages are supported
self.info(bold('writing additional files...'), nonl=1)
# additional pages from conf.py
for pagename, template in self.config.html_additional_pages.items():
self.info(' '+pagename, nonl=1)
self.handle_page(pagename, {}, template)
if self.config.html_use_opensearch:
self.info(' opensearch', nonl=1)
fn = path.join(self.outdir, '_static', 'opensearch.xml')
self.handle_page('opensearch', {}, 'opensearch.xml', outfilename=fn)
self.info()
self.copy_image_files()
self.copy_download_files()
self.copy_static_files()
self.copy_extra_files()
self.write_buildinfo()
self.dump_inventory()
class SerializingHTMLBuilder(StandaloneHTMLBuilder):
"""
An abstract builder that serializes the generated HTML.
"""
#: the serializing implementation to use. Set this to a module that
#: implements a `dump`, `load`, `dumps` and `loads` functions
#: (pickle, simplejson etc.)
implementation = None
implementation_dumps_unicode = False
#: additional arguments for dump()
additional_dump_args = ()
#: the filename for the global context file
globalcontext_filename = None
supported_image_types = ['image/svg+xml', 'image/png',
'image/gif', 'image/jpeg']
def init(self):
self.config_hash = ''
self.tags_hash = ''
self.theme = None # no theme necessary
self.templates = None # no template bridge necessary
self.init_translator_class()
self.init_highlighter()
def get_target_uri(self, docname, typ=None):
if docname == 'index':
return ''
if docname.endswith(SEP + 'index'):
return docname[:-5] # up to sep
return docname + SEP
def dump_context(self, context, filename):
if self.implementation_dumps_unicode:
f = codecs.open(filename, 'w', encoding='utf-8')
else:
f = open(filename, 'wb')
try:
self.implementation.dump(context, f, *self.additional_dump_args)
finally:
f.close()
def handle_page(self, pagename, ctx, templatename='page.html',
outfilename=None, event_arg=None):
ctx['current_page_name'] = pagename
self.add_sidebars(pagename, ctx)
if not outfilename:
outfilename = path.join(self.outdir,
os_path(pagename) + self.out_suffix)
self.app.emit('html-page-context', pagename, templatename,
ctx, event_arg)
ensuredir(path.dirname(outfilename))
self.dump_context(ctx, outfilename)
# if there is a source file, copy the source file for the
# "show source" link
if ctx.get('sourcename'):
source_name = path.join(self.outdir, '_sources',
os_path(ctx['sourcename']))
ensuredir(path.dirname(source_name))
copyfile(self.env.doc2path(pagename), source_name)
def handle_finish(self):
# dump the global context
outfilename = path.join(self.outdir, self.globalcontext_filename)
self.dump_context(self.globalcontext, outfilename)
# super here to dump the search index
StandaloneHTMLBuilder.handle_finish(self)
# copy the environment file from the doctree dir to the output dir
# as needed by the web app
copyfile(path.join(self.doctreedir, ENV_PICKLE_FILENAME),
path.join(self.outdir, ENV_PICKLE_FILENAME))
# touch 'last build' file, used by the web application to determine
# when to reload its environment and clear the cache
open(path.join(self.outdir, LAST_BUILD_FILENAME), 'w').close()
class PickleHTMLBuilder(SerializingHTMLBuilder):
"""
A Builder that dumps the generated HTML into pickle files.
"""
implementation = pickle
implementation_dumps_unicode = False
additional_dump_args = (pickle.HIGHEST_PROTOCOL,)
indexer_format = pickle
indexer_dumps_unicode = False
name = 'pickle'
out_suffix = '.fpickle'
globalcontext_filename = 'globalcontext.pickle'
searchindex_filename = 'searchindex.pickle'
# compatibility alias
WebHTMLBuilder = PickleHTMLBuilder
class JSONHTMLBuilder(SerializingHTMLBuilder):
"""
A builder that dumps the generated HTML into JSON files.
"""
implementation = jsonimpl
implementation_dumps_unicode = True
indexer_format = jsonimpl
indexer_dumps_unicode = True
name = 'json'
out_suffix = '.fjson'
globalcontext_filename = 'globalcontext.json'
searchindex_filename = 'searchindex.json'
def init(self):
if jsonimpl.json is None:
raise SphinxError(
'The module simplejson (or json in Python >= 2.6) '
'is not available. The JSONHTMLBuilder builder will not work.')
SerializingHTMLBuilder.init(self)
| avg_line_length: 39.622744 | max_line_length: 86 | alphanum_fraction: 0.580976 |

| hexsha: 6fb110d24c968e30dd0108e4b896483f301cef61 | size: 273 | ext: py | lang: Python |
| max_stars_repo: src/genres/models.py | kostinbrodorg/open-library | bbceb953b2d78d7eb0f2c64b81c6deac13d73531 | ["MIT"] | stars: null | null | null |
| max_issues_repo: src/genres/models.py | kostinbrodorg/open-library | bbceb953b2d78d7eb0f2c64b81c6deac13d73531 | ["MIT"] | issues: null | null | null |
| max_forks_repo: src/genres/models.py | kostinbrodorg/open-library | bbceb953b2d78d7eb0f2c64b81c6deac13d73531 | ["MIT"] | forks: null | null | null |
from django.db import models
from django.utils.translation import ugettext as _
class Genres(models.Model):
name = models.CharField(verbose_name=_('Name'), max_length=255)
class Meta:
verbose_name = _('Genre')
verbose_name_plural = _('Genres')
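
A minimal usage sketch, assuming the genres app is installed in a Django project and migrations have been applied:

# e.g. inside `python manage.py shell`
from genres.models import Genres

fantasy = Genres.objects.create(name="Fantasy")
print(Genres.objects.count(), fantasy.name)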
| avg_line_length: 21 | max_line_length: 67 | alphanum_fraction: 0.699634 |

| hexsha: beef5008f5219ff8f5cf4cfc934318a4ad5eada6 | size: 118 | ext: py | lang: Python |
| max_stars_repo: src/abstract_http_client/exceptions.py | QualiSystemsLab/abstract-requests-client | eb8281082cdfbd54b78451e907b9e7eef21b718a | ["MIT"] | stars: 1 | 2021-12-08T20:54:40.000Z | 2021-12-08T20:54:40.000Z |
| max_issues_repo: src/abstract_http_client/exceptions.py | QualiSystemsLab/abstract-requests-client | eb8281082cdfbd54b78451e907b9e7eef21b718a | ["MIT"] | issues: null | null | null |
| max_forks_repo: src/abstract_http_client/exceptions.py | QualiSystemsLab/abstract-requests-client | eb8281082cdfbd54b78451e907b9e7eef21b718a | ["MIT"] | forks: null | null | null |
class RestClientException(Exception):
pass
class RestClientUnauthorizedException(RestClientException):
pass
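
A short sketch of how a caller might use these exception types; the status-code mapping below is an assumption for illustration, not part of the module:

from abstract_http_client.exceptions import RestClientException, RestClientUnauthorizedException

def check_status(status_code: int) -> None:
    # Hypothetical helper mapping HTTP status codes to the client's exceptions.
    if status_code == 401:
        raise RestClientUnauthorizedException("credentials rejected")
    if status_code >= 400:
        raise RestClientException(f"request failed with status {status_code}")

try:
    check_status(401)
except RestClientUnauthorizedException as err:
    print("unauthorized:", err)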
| avg_line_length: 16.857143 | max_line_length: 59 | alphanum_fraction: 0.813559 |

| hexsha: 8904413206eff09ceb53107fe086e38d6ee069d7 | size: 740 | ext: py | lang: Python |
| max_stars_repo: newton_interpolasyonu.py | berfinsari/numerical-methods | 72ce37da1df44d5ed4b7bf32263e38db55b3a532 | ["Unlicense"] | stars: null | null | null |
| max_issues_repo: newton_interpolasyonu.py | berfinsari/numerical-methods | 72ce37da1df44d5ed4b7bf32263e38db55b3a532 | ["Unlicense"] | issues: null | null | null |
| max_forks_repo: newton_interpolasyonu.py | berfinsari/numerical-methods | 72ce37da1df44d5ed4b7bf32263e38db55b3a532 | ["Unlicense"] | forks: null | null | null |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Newton divided-difference interpolation over the points in newton.txt
# (first row: x values, second row: y values).
dosya = open("newton.txt")
d = []
for line in dosya.readlines():
    line = line.rstrip('\n').split(' ')
    d.append(line)
dosya.close()
x = float(input("Enter the value you want to interpolate at: "))
boyut = len(line)   # number of columns (taken from the last row read)
boyut2 = len(d)     # number of rows
# Convert every entry to float
for m in range(boyut2):
    for i in range(boyut):
        d[m][i] = float(d[m][i])
def b_f(xi, yi):
    # First-order divided difference between points xi and yi
    return ((d[1][yi] - d[1][xi]) / (d[0][yi] - d[0][xi]))
a = b_f(0, 1)
b = (x-d[0][1]) * (b_f(2, 1) - b_f(1, 0)) / (d[0][2] - d[0][0])
c = (x-d[0][1]) * (x-d[0][2]) * ((((b_f(3, 2) - b_f(2, 1)) / (d[0][3]-d[0][1])) - ((b_f(2, 0)-b_f(1, 0)) / (d[0][2]-d[0][0]))) / (d[0][3] - d[0][0]))
genelsonuc = d[1][0] + (x-d[0][0]) * (a + b + c)
print(genelsonuc)
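
A minimal sketch of the input this script expects, assuming newton.txt holds x values on the first line and y values on the second (at least four space-separated points):

with open("newton.txt", "w") as f:
    f.write("1.0 2.0 3.0 4.0\n")    # x values
    f.write("1.0 4.0 9.0 16.0\n")   # y values (here y = x**2)
# Running the script and entering e.g. 2.5 then prints the interpolated estimate at that point.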
| avg_line_length: 23.870968 | max_line_length: 149 | alphanum_fraction: 0.489189 |

| hexsha: eea5674ded8feb83b8abbde72f72fcbffd11e0ed | size: 10,175 | ext: py | lang: Python |
| max_stars_repo: aioquant/utils/web.py | liiklin/aioquant | 254238a031b6a1312ae6a7890fa17b3c01f89741 | ["MIT"] | stars: 1 | 2022-01-06T01:54:35.000Z | 2022-01-06T01:54:35.000Z |
| max_issues_repo: aioquant/utils/web.py | PopMa/aioquant | 8e1f6a802dc1487348b5f540a1c44220d70715ba | ["MIT"] | issues: null | null | null |
| max_forks_repo: aioquant/utils/web.py | PopMa/aioquant | 8e1f6a802dc1487348b5f540a1c44220d70715ba | ["MIT"] | forks: 2 | 2020-11-29T17:13:09.000Z | 2022-01-07T01:34:02.000Z |
# -*- coding:utf-8 -*-
"""
Web module.
Author: HuangTao
Date: 2018/08/26
Email: huangtao@ifclover.com
"""
import json
import aiohttp
from urllib.parse import urlparse
from aioquant.utils import logger
from aioquant.configure import config
from aioquant.tasks import LoopRunTask, SingleTask
from aioquant.utils.decorator import async_method_locker
__all__ = ("Websocket", "AsyncHttpRequests", )
class Websocket:
"""Websocket connection.
Attributes:
url: Websocket connection url.
        connected_callback: Asynchronous callback invoked after the connection to the Websocket server is established successfully.
        process_callback: Asynchronous callback invoked when stream data is received from the Websocket
            connection; it is only called for `text/json` messages. e.g.
                async def process_callback(json_message): pass
        process_binary_callback: Asynchronous callback invoked when stream data is received from the Websocket
            connection; it is only called for `binary` messages. e.g.
                async def process_binary_callback(binary_message): pass
        check_conn_interval: Interval (in seconds) between Websocket connection health checks, default is 10s.
"""
def __init__(self, url, connected_callback=None, process_callback=None, process_binary_callback=None,
check_conn_interval=10):
"""Initialize."""
self._url = url
self._connected_callback = connected_callback
self._process_callback = process_callback
self._process_binary_callback = process_binary_callback
self._check_conn_interval = check_conn_interval
self._ws = None # Websocket connection object.
LoopRunTask.register(self._check_connection, self._check_conn_interval)
SingleTask.run(self._connect)
@property
def ws(self):
return self._ws
async def close(self):
await self._ws.close()
async def ping(self, message: bytes = b"") -> None:
await self._ws.ping(message)
async def pong(self, message: bytes = b"") -> None:
await self._ws.pong(message)
async def _connect(self) -> None:
logger.info("url:", self._url, caller=self)
proxy = config.proxy
session = aiohttp.ClientSession()
try:
self._ws = await session.ws_connect(self._url, proxy=proxy)
except aiohttp.ClientConnectorError:
logger.error("connect to Websocket server error! url:", self._url, caller=self)
return
if self._connected_callback:
SingleTask.run(self._connected_callback)
SingleTask.run(self._receive)
@async_method_locker("Websocket.reconnect.locker", False, 30)
async def reconnect(self) -> None:
"""Re-connect to Websocket server."""
logger.warn("reconnecting to Websocket server right now!", caller=self)
await self.close()
await self._connect()
async def _receive(self):
"""Receive stream message from Websocket connection."""
async for msg in self.ws:
if msg.type == aiohttp.WSMsgType.TEXT:
if self._process_callback:
try:
data = json.loads(msg.data)
except:
data = msg.data
SingleTask.run(self._process_callback, data)
elif msg.type == aiohttp.WSMsgType.BINARY:
if self._process_binary_callback:
SingleTask.run(self._process_binary_callback, msg.data)
elif msg.type == aiohttp.WSMsgType.CLOSED:
logger.warn("receive event CLOSED:", msg, caller=self)
SingleTask.run(self.reconnect)
elif msg.type == aiohttp.WSMsgType.ERROR:
logger.error("receive event ERROR:", msg, caller=self)
else:
logger.warn("unhandled msg:", msg, caller=self)
async def _check_connection(self, *args, **kwargs) -> None:
"""Check Websocket connection, if connection closed, re-connect immediately."""
if not self.ws:
logger.warn("Websocket connection not connected yet!", caller=self)
return
if self.ws.closed:
SingleTask.run(self.reconnect)
async def send(self, data) -> bool:
""" Send message to Websocket server.
Args:
data: Message content, must be dict or string.
Returns:
If send successfully, return True, otherwise return False.
"""
if not self.ws:
logger.warn("Websocket connection not connected yet!", caller=self)
return False
if isinstance(data, dict):
await self.ws.send_json(data)
elif isinstance(data, str):
await self.ws.send_str(data)
else:
logger.error("send message failed:", data, caller=self)
return False
logger.debug("send message:", data, caller=self)
return True
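# A minimal usage sketch for the class above (illustrative only; the URL and callbacks
# are placeholders, not part of this module):
#
#     async def on_connected():
#         pass
#
#     async def on_message(data):
#         print(data)
#
#     ws = Websocket("wss://example.com/stream",
#                    connected_callback=on_connected,
#                    process_callback=on_message)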
class AsyncHttpRequests(object):
""" Asynchronous HTTP Request Client.
"""
    # Each domain name keeps its own connection session, to reduce resource usage and speed up requests.
_SESSIONS = {} # {"domain-name": session, ... }
@classmethod
async def fetch(cls, method, url, params=None, body=None, data=None, headers=None, timeout=30, **kwargs):
""" Create a HTTP request.
Args:
method: HTTP request method. `GET` / `POST` / `PUT` / `DELETE`
url: Request url.
params: HTTP query params.
body: HTTP request body, string or bytes format.
data: HTTP request body, dict format.
headers: HTTP request header.
timeout: HTTP request timeout(seconds), default is 30s.
kwargs:
proxy: HTTP proxy.
Return:
code: HTTP response code.
            success: HTTP response data. If something went wrong, this field is None.
            error: If something went wrong, this field holds the error information; otherwise it is None.
        Raises:
            HTTP request exceptions or response parsing exceptions. All exceptions are captured and returned
            as error information.
"""
session = cls._get_session(url)
if not kwargs.get("proxy"):
kwargs["proxy"] = config.proxy # If there is a `HTTP PROXY` Configuration in config file?
try:
if method == "GET":
response = await session.get(url, params=params, headers=headers, timeout=timeout, **kwargs)
elif method == "POST":
response = await session.post(url, params=params, data=body, json=data, headers=headers,
timeout=timeout, **kwargs)
elif method == "PUT":
response = await session.put(url, params=params, data=body, json=data, headers=headers,
timeout=timeout, **kwargs)
elif method == "DELETE":
response = await session.delete(url, params=params, data=body, json=data, headers=headers,
timeout=timeout, **kwargs)
else:
error = "http method error!"
return None, None, error
except Exception as e:
logger.error("method:", method, "url:", url, "headers:", headers, "params:", params, "body:", body,
"data:", data, "Error:", e, caller=cls)
return None, None, e
code = response.status
if code not in (200, 201, 202, 203, 204, 205, 206):
text = await response.text()
logger.error("method:", method, "url:", url, "headers:", headers, "params:", params, "body:", body,
"data:", data, "code:", code, "result:", text, caller=cls)
return code, None, text
try:
result = await response.json()
except:
result = await response.text()
logger.debug("response data is not json format!", "method:", method, "url:", url, "headers:", headers,
"params:", params, "body:", body, "data:", data, "code:", code, "result:", result, caller=cls)
logger.debug("method:", method, "url:", url, "headers:", headers, "params:", params, "body:", body,
"data:", data, "code:", code, "result:", json.dumps(result), caller=cls)
return code, result, None
@classmethod
async def get(cls, url, params=None, body=None, data=None, headers=None, timeout=30, **kwargs):
""" HTTP GET
"""
result = await cls.fetch("GET", url, params, body, data, headers, timeout, **kwargs)
return result
@classmethod
async def post(cls, url, params=None, body=None, data=None, headers=None, timeout=30, **kwargs):
""" HTTP POST
"""
result = await cls.fetch("POST", url, params, body, data, headers, timeout, **kwargs)
return result
@classmethod
async def delete(cls, url, params=None, body=None, data=None, headers=None, timeout=30, **kwargs):
""" HTTP DELETE
"""
result = await cls.fetch("DELETE", url, params, body, data, headers, timeout, **kwargs)
return result
@classmethod
async def put(cls, url, params=None, body=None, data=None, headers=None, timeout=30, **kwargs):
""" HTTP PUT
"""
result = await cls.fetch("PUT", url, params, body, data, headers, timeout, **kwargs)
return result
@classmethod
def _get_session(cls, url):
""" Get the connection session for url's domain, if no session, create a new.
Args:
url: HTTP request url.
Returns:
session: HTTP request session.
"""
parsed_url = urlparse(url)
key = parsed_url.netloc or parsed_url.hostname
if key not in cls._SESSIONS:
session = aiohttp.ClientSession()
cls._SESSIONS[key] = session
return cls._SESSIONS[key]
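
A minimal usage sketch for the request helper above, assuming the aioquant package is installed and using a placeholder URL:

import asyncio
from aioquant.utils.web import AsyncHttpRequests

async def main():
    # https://httpbin.org/get is only an illustrative endpoint.
    code, result, error = await AsyncHttpRequests.get("https://httpbin.org/get", params={"q": "demo"})
    print(code, error if error else result)

asyncio.get_event_loop().run_until_complete(main())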
| avg_line_length: 40.376984 | max_line_length: 123 | alphanum_fraction: 0.599902 |

| hexsha: 6ee1d92dd0786d02cbbdf576d2c5710d733a04a6 | size: 29,513 | ext: py | lang: Python |
| max_stars_repo: rllib/evaluation/sampler.py | maximsmol/ray | 9f3e9e7e9fd19fffe5ff647965ace2a0b022f899 | ["Apache-2.0"] | stars: null | null | null |
| max_issues_repo: rllib/evaluation/sampler.py | maximsmol/ray | 9f3e9e7e9fd19fffe5ff647965ace2a0b022f899 | ["Apache-2.0"] | issues: null | null | null |
| max_forks_repo: rllib/evaluation/sampler.py | maximsmol/ray | 9f3e9e7e9fd19fffe5ff647965ace2a0b022f899 | ["Apache-2.0"] | forks: null | null | null |
from collections import defaultdict, namedtuple
import logging
import numpy as np
import queue
import threading
import time
from ray.util.debug import log_once
from ray.rllib.evaluation.episode import MultiAgentEpisode, _flatten_action
from ray.rllib.evaluation.rollout_metrics import RolloutMetrics
from ray.rllib.evaluation.sample_batch_builder import \
MultiAgentSampleBatchBuilder
from ray.rllib.policy.policy import clip_action
from ray.rllib.policy.tf_policy import TFPolicy
from ray.rllib.env.base_env import BaseEnv, ASYNC_RESET_RETURN
from ray.rllib.env.atari_wrappers import get_wrapper_by_cls, MonitorEnv
from ray.rllib.offline import InputReader
from ray.rllib.utils.annotations import override
from ray.rllib.utils.debug import summarize
from ray.rllib.utils.tuple_actions import TupleActions
from ray.rllib.utils.tf_run_builder import TFRunBuilder
logger = logging.getLogger(__name__)
PolicyEvalData = namedtuple("PolicyEvalData", [
"env_id", "agent_id", "obs", "info", "rnn_state", "prev_action",
"prev_reward"
])
class PerfStats:
"""Sampler perf stats that will be included in rollout metrics."""
def __init__(self):
self.iters = 0
self.env_wait_time = 0.0
self.processing_time = 0.0
self.inference_time = 0.0
def get(self):
return {
"mean_env_wait_ms": self.env_wait_time * 1000 / self.iters,
"mean_processing_ms": self.processing_time * 1000 / self.iters,
"mean_inference_ms": self.inference_time * 1000 / self.iters
}
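# For instance, after 10 sampler iterations that spent a total of 0.5s waiting on the
# environment, PerfStats.get() reports mean_env_wait_ms = 0.5 * 1000 / 10 = 50.0.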
class SamplerInput(InputReader):
"""Reads input experiences from an existing sampler."""
@override(InputReader)
def next(self):
batches = [self.get_data()]
batches.extend(self.get_extra_batches())
if len(batches) > 1:
return batches[0].concat_samples(batches)
else:
return batches[0]
class SyncSampler(SamplerInput):
def __init__(self,
worker,
env,
policies,
policy_mapping_fn,
preprocessors,
obs_filters,
clip_rewards,
rollout_fragment_length,
callbacks,
horizon=None,
pack=False,
tf_sess=None,
clip_actions=True,
soft_horizon=False,
no_done_at_end=False):
self.base_env = BaseEnv.to_base_env(env)
self.rollout_fragment_length = rollout_fragment_length
self.horizon = horizon
self.policies = policies
self.policy_mapping_fn = policy_mapping_fn
self.preprocessors = preprocessors
self.obs_filters = obs_filters
self.extra_batches = queue.Queue()
self.perf_stats = PerfStats()
self.rollout_provider = _env_runner(
worker, self.base_env, self.extra_batches.put, self.policies,
self.policy_mapping_fn, self.rollout_fragment_length, self.horizon,
self.preprocessors, self.obs_filters, clip_rewards, clip_actions,
pack, callbacks, tf_sess, self.perf_stats, soft_horizon,
no_done_at_end)
self.metrics_queue = queue.Queue()
def get_data(self):
while True:
item = next(self.rollout_provider)
if isinstance(item, RolloutMetrics):
self.metrics_queue.put(item)
else:
return item
def get_metrics(self):
completed = []
while True:
try:
completed.append(self.metrics_queue.get_nowait()._replace(
perf_stats=self.perf_stats.get()))
except queue.Empty:
break
return completed
def get_extra_batches(self):
extra = []
while True:
try:
extra.append(self.extra_batches.get_nowait())
except queue.Empty:
break
return extra
class AsyncSampler(threading.Thread, SamplerInput):
def __init__(self,
worker,
env,
policies,
policy_mapping_fn,
preprocessors,
obs_filters,
clip_rewards,
rollout_fragment_length,
callbacks,
horizon=None,
pack=False,
tf_sess=None,
clip_actions=True,
blackhole_outputs=False,
soft_horizon=False,
no_done_at_end=False):
for _, f in obs_filters.items():
assert getattr(f, "is_concurrent", False), \
"Observation Filter must support concurrent updates."
self.worker = worker
self.base_env = BaseEnv.to_base_env(env)
threading.Thread.__init__(self)
self.queue = queue.Queue(5)
self.extra_batches = queue.Queue()
self.metrics_queue = queue.Queue()
self.rollout_fragment_length = rollout_fragment_length
self.horizon = horizon
self.policies = policies
self.policy_mapping_fn = policy_mapping_fn
self.preprocessors = preprocessors
self.obs_filters = obs_filters
self.clip_rewards = clip_rewards
self.daemon = True
self.pack = pack
self.tf_sess = tf_sess
self.callbacks = callbacks
self.clip_actions = clip_actions
self.blackhole_outputs = blackhole_outputs
self.soft_horizon = soft_horizon
self.no_done_at_end = no_done_at_end
self.perf_stats = PerfStats()
self.shutdown = False
def run(self):
try:
self._run()
except BaseException as e:
self.queue.put(e)
raise e
def _run(self):
if self.blackhole_outputs:
queue_putter = (lambda x: None)
extra_batches_putter = (lambda x: None)
else:
queue_putter = self.queue.put
extra_batches_putter = (
lambda x: self.extra_batches.put(x, timeout=600.0))
rollout_provider = _env_runner(
self.worker, self.base_env, extra_batches_putter, self.policies,
self.policy_mapping_fn, self.rollout_fragment_length, self.horizon,
self.preprocessors, self.obs_filters, self.clip_rewards,
self.clip_actions, self.pack, self.callbacks, self.tf_sess,
self.perf_stats, self.soft_horizon, self.no_done_at_end)
while not self.shutdown:
# The timeout variable exists because apparently, if one worker
# dies, the other workers won't die with it, unless the timeout is
# set to some large number. This is an empirical observation.
item = next(rollout_provider)
if isinstance(item, RolloutMetrics):
self.metrics_queue.put(item)
else:
queue_putter(item)
def get_data(self):
if not self.is_alive():
raise RuntimeError("Sampling thread has died")
rollout = self.queue.get(timeout=600.0)
# Propagate errors
if isinstance(rollout, BaseException):
raise rollout
return rollout
def get_metrics(self):
completed = []
while True:
try:
completed.append(self.metrics_queue.get_nowait()._replace(
perf_stats=self.perf_stats.get()))
except queue.Empty:
break
return completed
def get_extra_batches(self):
extra = []
while True:
try:
extra.append(self.extra_batches.get_nowait())
except queue.Empty:
break
return extra
def _env_runner(worker, base_env, extra_batch_callback, policies,
policy_mapping_fn, rollout_fragment_length, horizon,
preprocessors, obs_filters, clip_rewards, clip_actions, pack,
callbacks, tf_sess, perf_stats, soft_horizon, no_done_at_end):
"""This implements the common experience collection logic.
Args:
worker (RolloutWorker): reference to the current rollout worker.
base_env (BaseEnv): env implementing BaseEnv.
extra_batch_callback (fn): function to send extra batch data to.
policies (dict): Map of policy ids to Policy instances.
policy_mapping_fn (func): Function that maps agent ids to policy ids.
This is called when an agent first enters the environment. The
agent is then "bound" to the returned policy for the episode.
rollout_fragment_length (int): Number of episode steps before
`SampleBatch` is yielded. Set to infinity to yield complete
episodes.
horizon (int): Horizon of the episode.
preprocessors (dict): Map of policy id to preprocessor for the
observations prior to filtering.
obs_filters (dict): Map of policy id to filter used to process
observations for the policy.
clip_rewards (bool): Whether to clip rewards before postprocessing.
pack (bool): Whether to pack multiple episodes into each batch. This
guarantees batches will be exactly `rollout_fragment_length` in
size.
clip_actions (bool): Whether to clip actions to the space range.
callbacks (DefaultCallbacks): User callbacks to run on episode events.
tf_sess (Session|None): Optional tensorflow session to use for batching
TF policy evaluations.
perf_stats (PerfStats): Record perf stats into this object.
soft_horizon (bool): Calculate rewards but don't reset the
environment when the horizon is hit.
no_done_at_end (bool): Ignore the done=True at the end of the episode
and instead record done=False.
Yields:
rollout (SampleBatch): Object containing state, action, reward,
terminal condition, and other fields as dictated by `policy`.
"""
# Try to get Env's max_episode_steps prop. If it doesn't exist, catch
# error and continue.
max_episode_steps = None
try:
max_episode_steps = base_env.get_unwrapped()[0].spec.max_episode_steps
except Exception:
pass
# Trainer has a given `horizon` setting.
if horizon:
# `horizon` is larger than env's limit -> Error and explain how
# to increase Env's own episode limit.
if max_episode_steps and horizon > max_episode_steps:
raise ValueError(
"Your `horizon` setting ({}) is larger than the Env's own "
"timestep limit ({})! Try to increase the Env's limit via "
"setting its `spec.max_episode_steps` property.".format(
horizon, max_episode_steps))
# Otherwise, set Trainer's horizon to env's max-steps.
elif max_episode_steps:
horizon = max_episode_steps
logger.debug(
"No episode horizon specified, setting it to Env's limit ({}).".
format(max_episode_steps))
else:
horizon = float("inf")
logger.debug("No episode horizon specified, assuming inf.")
# Pool of batch builders, which can be shared across episodes to pack
# trajectory data.
batch_builder_pool = []
def get_batch_builder():
if batch_builder_pool:
return batch_builder_pool.pop()
else:
return MultiAgentSampleBatchBuilder(policies, clip_rewards,
callbacks)
def new_episode():
episode = MultiAgentEpisode(policies, policy_mapping_fn,
get_batch_builder, extra_batch_callback)
# Call each policy's Exploration.on_episode_start method.
for p in policies.values():
p.exploration.on_episode_start(
policy=p,
environment=base_env,
episode=episode,
tf_sess=getattr(p, "_sess", None))
callbacks.on_episode_start(
worker=worker,
base_env=base_env,
policies=policies,
episode=episode)
return episode
active_episodes = defaultdict(new_episode)
while True:
perf_stats.iters += 1
t0 = time.time()
# Get observations from all ready agents
unfiltered_obs, rewards, dones, infos, off_policy_actions = \
base_env.poll()
perf_stats.env_wait_time += time.time() - t0
if log_once("env_returns"):
logger.info("Raw obs from env: {}".format(
summarize(unfiltered_obs)))
logger.info("Info return from env: {}".format(summarize(infos)))
# Process observations and prepare for policy evaluation
t1 = time.time()
active_envs, to_eval, outputs = _process_observations(
worker, base_env, policies, batch_builder_pool, active_episodes,
unfiltered_obs, rewards, dones, infos, off_policy_actions, horizon,
preprocessors, obs_filters, rollout_fragment_length, pack,
callbacks, soft_horizon, no_done_at_end)
perf_stats.processing_time += time.time() - t1
for o in outputs:
yield o
# Do batched policy eval
t2 = time.time()
eval_results = _do_policy_eval(tf_sess, to_eval, policies,
active_episodes)
perf_stats.inference_time += time.time() - t2
# Process results and update episode state
t3 = time.time()
actions_to_send = _process_policy_eval_results(
to_eval, eval_results, active_episodes, active_envs,
off_policy_actions, policies, clip_actions)
perf_stats.processing_time += time.time() - t3
# Return computed actions to ready envs. We also send to envs that have
# taken off-policy actions; those envs are free to ignore the action.
t4 = time.time()
base_env.send_actions(actions_to_send)
perf_stats.env_wait_time += time.time() - t4
def _process_observations(worker, base_env, policies, batch_builder_pool,
active_episodes, unfiltered_obs, rewards, dones,
infos, off_policy_actions, horizon, preprocessors,
obs_filters, rollout_fragment_length, pack,
callbacks, soft_horizon, no_done_at_end):
"""Record new data from the environment and prepare for policy evaluation.
Returns:
active_envs: set of non-terminated env ids
to_eval: map of policy_id to list of agent PolicyEvalData
outputs: list of metrics and samples to return from the sampler
"""
active_envs = set()
to_eval = defaultdict(list)
outputs = []
large_batch_threshold = max(1000, rollout_fragment_length * 10) if \
rollout_fragment_length != float("inf") else 5000
# For each environment
for env_id, agent_obs in unfiltered_obs.items():
new_episode = env_id not in active_episodes
episode = active_episodes[env_id]
if not new_episode:
episode.length += 1
episode.batch_builder.count += 1
episode._add_agent_rewards(rewards[env_id])
if (episode.batch_builder.total() > large_batch_threshold
and log_once("large_batch_warning")):
logger.warning(
"More than {} observations for {} env steps ".format(
episode.batch_builder.total(),
episode.batch_builder.count) + "are buffered in "
"the sampler. If this is more than you expected, check that "
"that you set a horizon on your environment correctly and that"
" it terminates at some point. "
"Note: In multi-agent environments, `rollout_fragment_length` "
"sets the batch size based on environment steps, not the "
"steps of "
"individual agents, which can result in unexpectedly large "
"batches. Also, you may be in evaluation waiting for your Env "
"to terminate (batch_mode=`complete_episodes`). Make sure it "
"does at some point.")
# Check episode termination conditions
if dones[env_id]["__all__"] or episode.length >= horizon:
hit_horizon = (episode.length >= horizon
and not dones[env_id]["__all__"])
all_done = True
atari_metrics = _fetch_atari_metrics(base_env)
if atari_metrics is not None:
for m in atari_metrics:
outputs.append(
m._replace(custom_metrics=episode.custom_metrics))
else:
outputs.append(
RolloutMetrics(episode.length, episode.total_reward,
dict(episode.agent_rewards),
episode.custom_metrics, {},
episode.hist_data))
else:
hit_horizon = False
all_done = False
active_envs.add(env_id)
# For each agent in the environment.
for agent_id, raw_obs in agent_obs.items():
policy_id = episode.policy_for(agent_id)
prep_obs = _get_or_raise(preprocessors,
policy_id).transform(raw_obs)
if log_once("prep_obs"):
logger.info("Preprocessed obs: {}".format(summarize(prep_obs)))
filtered_obs = _get_or_raise(obs_filters, policy_id)(prep_obs)
if log_once("filtered_obs"):
logger.info("Filtered obs: {}".format(summarize(filtered_obs)))
agent_done = bool(all_done or dones[env_id].get(agent_id))
if not agent_done:
to_eval[policy_id].append(
PolicyEvalData(env_id, agent_id, filtered_obs,
infos[env_id].get(agent_id, {}),
episode.rnn_state_for(agent_id),
episode.last_action_for(agent_id),
rewards[env_id][agent_id] or 0.0))
last_observation = episode.last_observation_for(agent_id)
episode._set_last_observation(agent_id, filtered_obs)
episode._set_last_raw_obs(agent_id, raw_obs)
episode._set_last_info(agent_id, infos[env_id].get(agent_id, {}))
# Record transition info if applicable
if (last_observation is not None and infos[env_id].get(
agent_id, {}).get("training_enabled", True)):
episode.batch_builder.add_values(
agent_id,
policy_id,
t=episode.length - 1,
eps_id=episode.episode_id,
agent_index=episode._agent_index(agent_id),
obs=last_observation,
actions=episode.last_action_for(agent_id),
rewards=rewards[env_id][agent_id],
prev_actions=episode.prev_action_for(agent_id),
prev_rewards=episode.prev_reward_for(agent_id),
dones=(False if (no_done_at_end
or (hit_horizon and soft_horizon)) else
agent_done),
infos=infos[env_id].get(agent_id, {}),
new_obs=filtered_obs,
**episode.last_pi_info_for(agent_id))
# Invoke the step callback after the step is logged to the episode
callbacks.on_episode_step(
worker=worker, base_env=base_env, episode=episode)
# Cut the batch if we're not packing multiple episodes into one,
# or if we've exceeded the requested batch size.
if episode.batch_builder.has_pending_agent_data():
if dones[env_id]["__all__"] and not no_done_at_end:
episode.batch_builder.check_missing_dones()
if (all_done and not pack) or \
episode.batch_builder.count >= rollout_fragment_length:
outputs.append(episode.batch_builder.build_and_reset(episode))
elif all_done:
# Make sure postprocessor stays within one episode
episode.batch_builder.postprocess_batch_so_far(episode)
if all_done:
# Handle episode termination
batch_builder_pool.append(episode.batch_builder)
# Call each policy's Exploration.on_episode_end method.
for p in policies.values():
p.exploration.on_episode_end(
policy=p,
environment=base_env,
episode=episode,
tf_sess=getattr(p, "_sess", None))
# Call custom on_episode_end callback.
callbacks.on_episode_end(
worker=worker,
base_env=base_env,
policies=policies,
episode=episode)
if hit_horizon and soft_horizon:
episode.soft_reset()
resetted_obs = agent_obs
else:
del active_episodes[env_id]
resetted_obs = base_env.try_reset(env_id)
if resetted_obs is None:
# Reset not supported, drop this env from the ready list
if horizon != float("inf"):
raise ValueError(
"Setting episode horizon requires reset() support "
"from the environment.")
elif resetted_obs != ASYNC_RESET_RETURN:
# Creates a new episode if this is not async return
# If reset is async, we will get its result in some future poll
episode = active_episodes[env_id]
for agent_id, raw_obs in resetted_obs.items():
policy_id = episode.policy_for(agent_id)
policy = _get_or_raise(policies, policy_id)
prep_obs = _get_or_raise(preprocessors,
policy_id).transform(raw_obs)
filtered_obs = _get_or_raise(obs_filters,
policy_id)(prep_obs)
episode._set_last_observation(agent_id, filtered_obs)
to_eval[policy_id].append(
PolicyEvalData(
env_id, agent_id, filtered_obs,
episode.last_info_for(agent_id) or {},
episode.rnn_state_for(agent_id),
np.zeros_like(
_flatten_action(policy.action_space.sample())),
0.0))
return active_envs, to_eval, outputs
def _do_policy_eval(tf_sess, to_eval, policies, active_episodes):
"""Call compute actions on observation batches to get next actions.
Returns:
eval_results: dict of policy to compute_action() outputs.
"""
eval_results = {}
if tf_sess:
builder = TFRunBuilder(tf_sess, "policy_eval")
pending_fetches = {}
else:
builder = None
if log_once("compute_actions_input"):
logger.info("Inputs to compute_actions():\n\n{}\n".format(
summarize(to_eval)))
for policy_id, eval_data in to_eval.items():
rnn_in = [t.rnn_state for t in eval_data]
policy = _get_or_raise(policies, policy_id)
if builder and (policy.compute_actions.__code__ is
TFPolicy.compute_actions.__code__):
obs_batch = [t.obs for t in eval_data]
state_batches = _to_column_format(rnn_in)
# TODO(ekl): how can we make info batch available to TF code?
prev_action_batch = [t.prev_action for t in eval_data]
prev_reward_batch = [t.prev_reward for t in eval_data]
pending_fetches[policy_id] = policy._build_compute_actions(
builder,
obs_batch=obs_batch,
state_batches=state_batches,
prev_action_batch=prev_action_batch,
prev_reward_batch=prev_reward_batch,
timestep=policy.global_timestep)
else:
# TODO(sven): Does this work for LSTM torch?
rnn_in_cols = [
np.stack([row[i] for row in rnn_in])
for i in range(len(rnn_in[0]))
]
eval_results[policy_id] = policy.compute_actions(
[t.obs for t in eval_data],
state_batches=rnn_in_cols,
prev_action_batch=[t.prev_action for t in eval_data],
prev_reward_batch=[t.prev_reward for t in eval_data],
info_batch=[t.info for t in eval_data],
episodes=[active_episodes[t.env_id] for t in eval_data],
timestep=policy.global_timestep)
if builder:
for pid, v in pending_fetches.items():
eval_results[pid] = builder.get(v)
if log_once("compute_actions_result"):
logger.info("Outputs of compute_actions():\n\n{}\n".format(
summarize(eval_results)))
return eval_results
def _process_policy_eval_results(to_eval, eval_results, active_episodes,
active_envs, off_policy_actions, policies,
clip_actions):
"""Process the output of policy neural network evaluation.
Records policy evaluation results into the given episode objects and
returns replies to send back to agents in the env.
Returns:
actions_to_send: nested dict of env id -> agent id -> agent replies.
"""
actions_to_send = defaultdict(dict)
for env_id in active_envs:
actions_to_send[env_id] = {} # at minimum send empty dict
for policy_id, eval_data in to_eval.items():
rnn_in_cols = _to_column_format([t.rnn_state for t in eval_data])
actions = eval_results[policy_id][0]
rnn_out_cols = eval_results[policy_id][1]
pi_info_cols = eval_results[policy_id][2]
if len(rnn_in_cols) != len(rnn_out_cols):
raise ValueError("Length of RNN in did not match RNN out, got: "
"{} vs {}".format(rnn_in_cols, rnn_out_cols))
# Add RNN state info
for f_i, column in enumerate(rnn_in_cols):
pi_info_cols["state_in_{}".format(f_i)] = column
for f_i, column in enumerate(rnn_out_cols):
pi_info_cols["state_out_{}".format(f_i)] = column
# Save output rows
actions = _unbatch_tuple_actions(actions)
policy = _get_or_raise(policies, policy_id)
for i, action in enumerate(actions):
env_id = eval_data[i].env_id
agent_id = eval_data[i].agent_id
if clip_actions:
actions_to_send[env_id][agent_id] = clip_action(
action, policy.action_space)
else:
actions_to_send[env_id][agent_id] = action
episode = active_episodes[env_id]
episode._set_rnn_state(agent_id, [c[i] for c in rnn_out_cols])
episode._set_last_pi_info(
agent_id, {k: v[i]
for k, v in pi_info_cols.items()})
if env_id in off_policy_actions and \
agent_id in off_policy_actions[env_id]:
episode._set_last_action(agent_id,
off_policy_actions[env_id][agent_id])
else:
episode._set_last_action(agent_id, action)
return actions_to_send
def _fetch_atari_metrics(base_env):
"""Atari games have multiple logical episodes, one per life.
However, for metrics reporting we count full episodes, all lives included.
"""
unwrapped = base_env.get_unwrapped()
if not unwrapped:
return None
atari_out = []
for u in unwrapped:
monitor = get_wrapper_by_cls(u, MonitorEnv)
if not monitor:
return None
for eps_rew, eps_len in monitor.next_episode_results():
atari_out.append(RolloutMetrics(eps_len, eps_rew))
return atari_out
def _unbatch_tuple_actions(action_batch):
# convert list of batches -> batch of lists
if isinstance(action_batch, TupleActions):
out = []
for j in range(len(action_batch.batches[0])):
out.append([
action_batch.batches[i][j]
for i in range(len(action_batch.batches))
])
return out
return action_batch
def _to_column_format(rnn_state_rows):
num_cols = len(rnn_state_rows[0])
return [[row[i] for row in rnn_state_rows] for i in range(num_cols)]
def _get_or_raise(mapping, policy_id):
"""Returns a Policy object under key `policy_id` in `mapping`.
Throws an error if `policy_id` cannot be found.
Returns:
Policy: The found Policy object.
"""
if policy_id not in mapping:
raise ValueError(
"Could not find policy for agent: agent policy id `{}` not "
"in policy map keys {}.".format(policy_id, mapping.keys()))
return mapping[policy_id]
| 40.707586
| 79
| 0.599295
|
eb163fdcbdfb434ed6e9a4981ca9b7448a4af170
| 29,429
|
py
|
Python
|
tests/components/device_tracker/test_owntracks.py
|
jamescurtin/home-assistant
|
6a9968ccb9b0082f5629e50955549d432aba7d90
|
[
"Apache-2.0"
] | 1
|
2017-09-26T06:13:10.000Z
|
2017-09-26T06:13:10.000Z
|
tests/components/device_tracker/test_owntracks.py
|
moose51789/home-assistant
|
63c9d59d5455850fd4b37c2475fe6f10effb5245
|
[
"Apache-2.0"
] | null | null | null |
tests/components/device_tracker/test_owntracks.py
|
moose51789/home-assistant
|
63c9d59d5455850fd4b37c2475fe6f10effb5245
|
[
"Apache-2.0"
] | null | null | null |
"""The tests for the Owntracks device tracker."""
import asyncio
import json
import unittest
from unittest.mock import patch
from tests.common import (
assert_setup_component, fire_mqtt_message, mock_coro, mock_component,
get_test_home_assistant, mock_mqtt_component)
import homeassistant.components.device_tracker.owntracks as owntracks
from homeassistant.setup import setup_component
from homeassistant.components import device_tracker
from homeassistant.const import CONF_PLATFORM, STATE_NOT_HOME
from homeassistant.util.async import run_coroutine_threadsafe
USER = 'greg'
DEVICE = 'phone'
LOCATION_TOPIC = 'owntracks/{}/{}'.format(USER, DEVICE)
EVENT_TOPIC = 'owntracks/{}/{}/event'.format(USER, DEVICE)
WAYPOINT_TOPIC = 'owntracks/{}/{}/waypoints'.format(USER, DEVICE)
USER_BLACKLIST = 'ram'
WAYPOINT_TOPIC_BLOCKED = 'owntracks/{}/{}/waypoints'.format(
USER_BLACKLIST, DEVICE)
DEVICE_TRACKER_STATE = 'device_tracker.{}_{}'.format(USER, DEVICE)
IBEACON_DEVICE = 'keys'
REGION_TRACKER_STATE = 'device_tracker.beacon_{}'.format(IBEACON_DEVICE)
CONF_MAX_GPS_ACCURACY = 'max_gps_accuracy'
CONF_WAYPOINT_IMPORT = owntracks.CONF_WAYPOINT_IMPORT
CONF_WAYPOINT_WHITELIST = owntracks.CONF_WAYPOINT_WHITELIST
CONF_SECRET = owntracks.CONF_SECRET
LOCATION_MESSAGE = {
'batt': 92,
'cog': 248,
'tid': 'user',
'lon': 1.0,
't': 'u',
'alt': 27,
'acc': 60,
'p': 101.3977584838867,
'vac': 4,
'lat': 2.0,
'_type': 'location',
'tst': 1,
'vel': 0}
LOCATION_MESSAGE_INACCURATE = {
'batt': 92,
'cog': 248,
'tid': 'user',
'lon': 2.0,
't': 'u',
'alt': 27,
'acc': 2000,
'p': 101.3977584838867,
'vac': 4,
'lat': 6.0,
'_type': 'location',
'tst': 1,
'vel': 0}
LOCATION_MESSAGE_ZERO_ACCURACY = {
'batt': 92,
'cog': 248,
'tid': 'user',
'lon': 2.0,
't': 'u',
'alt': 27,
'acc': 0,
'p': 101.3977584838867,
'vac': 4,
'lat': 6.0,
'_type': 'location',
'tst': 1,
'vel': 0}
REGION_ENTER_MESSAGE = {
'lon': 1.0,
'event': 'enter',
'tid': 'user',
'desc': 'inner',
'wtst': 1,
't': 'b',
'acc': 60,
'tst': 2,
'lat': 2.0,
'_type': 'transition'}
REGION_LEAVE_MESSAGE = {
'lon': 1.0,
'event': 'leave',
'tid': 'user',
'desc': 'inner',
'wtst': 1,
't': 'b',
'acc': 60,
'tst': 2,
'lat': 2.0,
'_type': 'transition'}
REGION_LEAVE_INACCURATE_MESSAGE = {
'lon': 10.0,
'event': 'leave',
'tid': 'user',
'desc': 'inner',
'wtst': 1,
't': 'b',
'acc': 2000,
'tst': 2,
'lat': 20.0,
'_type': 'transition'}
WAYPOINTS_EXPORTED_MESSAGE = {
"_type": "waypoints",
"_creator": "test",
"waypoints": [
{
"_type": "waypoint",
"tst": 3,
"lat": 47,
"lon": 9,
"rad": 10,
"desc": "exp_wayp1"
},
{
"_type": "waypoint",
"tst": 4,
"lat": 3,
"lon": 9,
"rad": 500,
"desc": "exp_wayp2"
}
]
}
WAYPOINTS_UPDATED_MESSAGE = {
"_type": "waypoints",
"_creator": "test",
"waypoints": [
{
"_type": "waypoint",
"tst": 4,
"lat": 9,
"lon": 47,
"rad": 50,
"desc": "exp_wayp1"
},
]
}
WAYPOINT_ENTITY_NAMES = [
'zone.greg_phone__exp_wayp1',
'zone.greg_phone__exp_wayp2',
'zone.ram_phone__exp_wayp1',
'zone.ram_phone__exp_wayp2',
]
REGION_ENTER_ZERO_MESSAGE = {
'lon': 1.0,
'event': 'enter',
'tid': 'user',
'desc': 'inner',
'wtst': 1,
't': 'b',
'acc': 0,
'tst': 2,
'lat': 2.0,
'_type': 'transition'}
REGION_LEAVE_ZERO_MESSAGE = {
'lon': 10.0,
'event': 'leave',
'tid': 'user',
'desc': 'inner',
'wtst': 1,
't': 'b',
'acc': 0,
'tst': 2,
'lat': 20.0,
'_type': 'transition'}
BAD_JSON_PREFIX = '--$this is bad json#--'
BAD_JSON_SUFFIX = '** and it ends here ^^'
TEST_SECRET_KEY = 's3cretkey'
ENCRYPTED_LOCATION_MESSAGE = {
# Encrypted version of LOCATION_MESSAGE using libsodium and TEST_SECRET_KEY
'_type': 'encrypted',
'data': ('qm1A83I6TVFRmH5343xy+cbex8jBBxDFkHRuJhELVKVRA/DgXcyKtghw'
'9pOw75Lo4gHcyy2wV5CmkjrpKEBR7Qhye4AR0y7hOvlx6U/a3GuY1+W8'
'I4smrLkwMvGgBOzXSNdVTzbFTHDvG3gRRaNHFkt2+5MsbH2Dd6CXmpzq'
'DIfSN7QzwOevuvNIElii5MlFxI6ZnYIDYA/ZdnAXHEVsNIbyT2N0CXt3'
'fTPzgGtFzsufx40EEUkC06J7QTJl7lLG6qaLW1cCWp86Vp0eL3vtZ6xq')
}
MOCK_ENCRYPTED_LOCATION_MESSAGE = {
# Mock-encrypted version of LOCATION_MESSAGE using pickle
'_type': 'encrypted',
'data': ('gANDCXMzY3JldGtleXEAQ6p7ImxvbiI6IDEuMCwgInQiOiAidSIsICJi'
'YXR0IjogOTIsICJhY2MiOiA2MCwgInZlbCI6IDAsICJfdHlwZSI6ICJs'
'b2NhdGlvbiIsICJ2YWMiOiA0LCAicCI6IDEwMS4zOTc3NTg0ODM4ODY3'
'LCAidHN0IjogMSwgImxhdCI6IDIuMCwgImFsdCI6IDI3LCAiY29nIjog'
'MjQ4LCAidGlkIjogInVzZXIifXEBhnECLg==')
}
class BaseMQTT(unittest.TestCase):
"""Base MQTT assert functions."""
hass = None
def send_message(self, topic, message, corrupt=False):
"""Test the sending of a message."""
str_message = json.dumps(message)
if corrupt:
mod_message = BAD_JSON_PREFIX + str_message + BAD_JSON_SUFFIX
else:
mod_message = str_message
fire_mqtt_message(self.hass, topic, mod_message)
self.hass.block_till_done()
def assert_location_state(self, location):
"""Test the assertion of a location state."""
state = self.hass.states.get(DEVICE_TRACKER_STATE)
self.assertEqual(state.state, location)
def assert_location_latitude(self, latitude):
"""Test the assertion of a location latitude."""
state = self.hass.states.get(DEVICE_TRACKER_STATE)
self.assertEqual(state.attributes.get('latitude'), latitude)
def assert_location_longitude(self, longitude):
"""Test the assertion of a location longitude."""
state = self.hass.states.get(DEVICE_TRACKER_STATE)
self.assertEqual(state.attributes.get('longitude'), longitude)
def assert_location_accuracy(self, accuracy):
"""Test the assertion of a location accuracy."""
state = self.hass.states.get(DEVICE_TRACKER_STATE)
self.assertEqual(state.attributes.get('gps_accuracy'), accuracy)
class TestDeviceTrackerOwnTracks(BaseMQTT):
"""Test the OwnTrack sensor."""
# pylint: disable=invalid-name
def setup_method(self, _):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
mock_mqtt_component(self.hass)
mock_component(self.hass, 'group')
mock_component(self.hass, 'zone')
patcher = patch('homeassistant.components.device_tracker.'
'DeviceTracker.async_update_config')
patcher.start()
self.addCleanup(patcher.stop)
orig_context = owntracks.OwnTracksContext
def store_context(*args):
self.context = orig_context(*args)
return self.context
with patch('homeassistant.components.device_tracker.async_load_config',
return_value=mock_coro([])), \
patch('homeassistant.components.device_tracker.'
'load_yaml_config_file', return_value=mock_coro({})), \
patch.object(owntracks, 'OwnTracksContext', store_context), \
assert_setup_component(1, device_tracker.DOMAIN):
assert setup_component(self.hass, device_tracker.DOMAIN, {
device_tracker.DOMAIN: {
CONF_PLATFORM: 'owntracks',
CONF_MAX_GPS_ACCURACY: 200,
CONF_WAYPOINT_IMPORT: True,
CONF_WAYPOINT_WHITELIST: ['jon', 'greg']
}})
self.hass.states.set(
'zone.inner', 'zoning',
{
'name': 'zone',
'latitude': 2.1,
'longitude': 1.1,
'radius': 10
})
self.hass.states.set(
'zone.inner_2', 'zoning',
{
'name': 'zone',
'latitude': 2.1,
'longitude': 1.1,
'radius': 10
})
self.hass.states.set(
'zone.outer', 'zoning',
{
'name': 'zone',
'latitude': 2.0,
'longitude': 1.0,
'radius': 100000
})
# Clear state between tests
self.hass.states.set(DEVICE_TRACKER_STATE, None)
def teardown_method(self, _):
"""Stop everything that was started."""
self.hass.stop()
def assert_tracker_state(self, location):
"""Test the assertion of a tracker state."""
state = self.hass.states.get(REGION_TRACKER_STATE)
self.assertEqual(state.state, location)
def assert_tracker_latitude(self, latitude):
"""Test the assertion of a tracker latitude."""
state = self.hass.states.get(REGION_TRACKER_STATE)
self.assertEqual(state.attributes.get('latitude'), latitude)
def assert_tracker_accuracy(self, accuracy):
"""Test the assertion of a tracker accuracy."""
state = self.hass.states.get(REGION_TRACKER_STATE)
self.assertEqual(state.attributes.get('gps_accuracy'), accuracy)
def test_location_invalid_devid(self): # pylint: disable=invalid-name
"""Test the update of a location."""
self.send_message('owntracks/paulus/nexus-5x', LOCATION_MESSAGE)
state = self.hass.states.get('device_tracker.paulus_nexus5x')
assert state.state == 'outer'
def test_location_update(self):
"""Test the update of a location."""
self.send_message(LOCATION_TOPIC, LOCATION_MESSAGE)
self.assert_location_latitude(2.0)
self.assert_location_accuracy(60.0)
self.assert_location_state('outer')
def test_location_inaccurate_gps(self):
"""Test the location for inaccurate GPS information."""
self.send_message(LOCATION_TOPIC, LOCATION_MESSAGE)
self.send_message(LOCATION_TOPIC, LOCATION_MESSAGE_INACCURATE)
self.assert_location_latitude(2.0)
self.assert_location_longitude(1.0)
def test_location_zero_accuracy_gps(self):
"""Ignore the location for zero accuracy GPS information."""
self.send_message(LOCATION_TOPIC, LOCATION_MESSAGE)
self.send_message(LOCATION_TOPIC, LOCATION_MESSAGE_ZERO_ACCURACY)
self.assert_location_latitude(2.0)
self.assert_location_longitude(1.0)
def test_event_entry_exit(self):
"""Test the entry event."""
self.send_message(EVENT_TOPIC, REGION_ENTER_MESSAGE)
# Enter uses the zone's gps co-ords
self.assert_location_latitude(2.1)
self.assert_location_accuracy(10.0)
self.assert_location_state('inner')
self.send_message(LOCATION_TOPIC, LOCATION_MESSAGE)
# Updates ignored when in a zone
self.assert_location_latitude(2.1)
self.assert_location_accuracy(10.0)
self.assert_location_state('inner')
self.send_message(EVENT_TOPIC, REGION_LEAVE_MESSAGE)
# Exit switches back to GPS
self.assert_location_latitude(2.0)
self.assert_location_accuracy(60.0)
self.assert_location_state('outer')
# Left clean zone state
self.assertFalse(self.context.regions_entered[USER])
def test_event_with_spaces(self):
"""Test the entry event."""
message = REGION_ENTER_MESSAGE.copy()
message['desc'] = "inner 2"
self.send_message(EVENT_TOPIC, message)
self.assert_location_state('inner 2')
message = REGION_LEAVE_MESSAGE.copy()
message['desc'] = "inner 2"
self.send_message(EVENT_TOPIC, message)
# Left clean zone state
self.assertFalse(self.context.regions_entered[USER])
def test_event_entry_exit_inaccurate(self):
"""Test the event for inaccurate exit."""
self.send_message(EVENT_TOPIC, REGION_ENTER_MESSAGE)
# Enter uses the zone's gps co-ords
self.assert_location_latitude(2.1)
self.assert_location_accuracy(10.0)
self.assert_location_state('inner')
self.send_message(EVENT_TOPIC, REGION_LEAVE_INACCURATE_MESSAGE)
# Exit doesn't use inaccurate gps
self.assert_location_latitude(2.1)
self.assert_location_accuracy(10.0)
self.assert_location_state('inner')
# But does exit region correctly
self.assertFalse(self.context.regions_entered[USER])
def test_event_entry_exit_zero_accuracy(self):
"""Test entry/exit events with accuracy zero."""
self.send_message(EVENT_TOPIC, REGION_ENTER_ZERO_MESSAGE)
# Enter uses the zone's gps co-ords
self.assert_location_latitude(2.1)
self.assert_location_accuracy(10.0)
self.assert_location_state('inner')
self.send_message(EVENT_TOPIC, REGION_LEAVE_ZERO_MESSAGE)
# Exit doesn't use zero gps
self.assert_location_latitude(2.1)
self.assert_location_accuracy(10.0)
self.assert_location_state('inner')
# But does exit region correctly
self.assertFalse(self.context.regions_entered[USER])
def test_event_exit_outside_zone_sets_away(self):
"""Test the event for exit zone."""
self.send_message(EVENT_TOPIC, REGION_ENTER_MESSAGE)
self.assert_location_state('inner')
# Exit message far away GPS location
message = REGION_LEAVE_MESSAGE.copy()
message['lon'] = 90.1
message['lat'] = 90.1
self.send_message(EVENT_TOPIC, message)
# Exit forces zone change to away
self.assert_location_state(STATE_NOT_HOME)
def test_event_entry_exit_right_order(self):
"""Test the event for ordering."""
# Enter inner zone
self.send_message(EVENT_TOPIC, REGION_ENTER_MESSAGE)
self.assert_location_state('inner')
self.assert_location_latitude(2.1)
self.assert_location_accuracy(10.0)
# Enter inner2 zone
message = REGION_ENTER_MESSAGE.copy()
message['desc'] = "inner_2"
self.send_message(EVENT_TOPIC, message)
self.assert_location_state('inner_2')
self.assert_location_latitude(2.1)
self.assert_location_accuracy(10.0)
# Exit inner_2 - should be in 'inner'
message = REGION_LEAVE_MESSAGE.copy()
message['desc'] = "inner_2"
self.send_message(EVENT_TOPIC, message)
self.assert_location_state('inner')
self.assert_location_latitude(2.1)
self.assert_location_accuracy(10.0)
# Exit inner - should be in 'outer'
self.send_message(EVENT_TOPIC, REGION_LEAVE_MESSAGE)
self.assert_location_state('outer')
self.assert_location_latitude(2.0)
self.assert_location_accuracy(60.0)
def test_event_entry_exit_wrong_order(self):
"""Test the event for wrong order."""
# Enter inner zone
self.send_message(EVENT_TOPIC, REGION_ENTER_MESSAGE)
self.assert_location_state('inner')
# Enter inner2 zone
message = REGION_ENTER_MESSAGE.copy()
message['desc'] = "inner_2"
self.send_message(EVENT_TOPIC, message)
self.assert_location_state('inner_2')
# Exit inner - should still be in 'inner_2'
self.send_message(EVENT_TOPIC, REGION_LEAVE_MESSAGE)
self.assert_location_state('inner_2')
# Exit inner_2 - should be in 'outer'
message = REGION_LEAVE_MESSAGE.copy()
message['desc'] = "inner_2"
self.send_message(EVENT_TOPIC, message)
self.assert_location_state('outer')
def test_event_entry_unknown_zone(self):
"""Test the event for unknown zone."""
# Just treat as location update
message = REGION_ENTER_MESSAGE.copy()
message['desc'] = "unknown"
self.send_message(EVENT_TOPIC, message)
self.assert_location_latitude(2.0)
self.assert_location_state('outer')
def test_event_exit_unknown_zone(self):
"""Test the event for unknown zone."""
# Just treat as location update
message = REGION_LEAVE_MESSAGE.copy()
message['desc'] = "unknown"
self.send_message(EVENT_TOPIC, message)
self.assert_location_latitude(2.0)
self.assert_location_state('outer')
def test_event_entry_zone_loading_dash(self):
"""Test the event for zone landing."""
# Make sure the leading - is ignored
# Ownracks uses this to switch on hold
message = REGION_ENTER_MESSAGE.copy()
message['desc'] = "-inner"
self.send_message(EVENT_TOPIC, message)
self.assert_location_state('inner')
def test_mobile_enter_move_beacon(self):
"""Test the movement of a beacon."""
# Enter mobile beacon, should set location
message = REGION_ENTER_MESSAGE.copy()
message['desc'] = IBEACON_DEVICE
self.send_message(EVENT_TOPIC, message)
self.assert_tracker_latitude(2.0)
self.assert_tracker_state('outer')
# Move should move beacon
message = LOCATION_MESSAGE.copy()
message['lat'] = "3.0"
self.send_message(LOCATION_TOPIC, message)
self.assert_tracker_latitude(3.0)
self.assert_tracker_state(STATE_NOT_HOME)
def test_mobile_enter_exit_region_beacon(self):
"""Test the enter and the exit of a region beacon."""
# Start tracking beacon
message = REGION_ENTER_MESSAGE.copy()
message['desc'] = IBEACON_DEVICE
self.send_message(EVENT_TOPIC, message)
self.assert_tracker_latitude(2.0)
self.assert_tracker_state('outer')
# Enter location should move beacon
message = REGION_ENTER_MESSAGE.copy()
message['desc'] = "inner_2"
self.send_message(EVENT_TOPIC, message)
self.assert_tracker_latitude(2.1)
self.assert_tracker_state('inner_2')
# Exit location should switch to gps
message = REGION_LEAVE_MESSAGE.copy()
message['desc'] = "inner_2"
self.send_message(EVENT_TOPIC, message)
self.assert_tracker_latitude(2.0)
def test_mobile_exit_move_beacon(self):
"""Test the exit move of a beacon."""
# Start tracking beacon
message = REGION_ENTER_MESSAGE.copy()
message['desc'] = IBEACON_DEVICE
self.send_message(EVENT_TOPIC, message)
self.assert_tracker_latitude(2.0)
self.assert_tracker_state('outer')
# Exit mobile beacon, should set location
message = REGION_LEAVE_MESSAGE.copy()
message['desc'] = IBEACON_DEVICE
message['lat'] = "3.0"
self.send_message(EVENT_TOPIC, message)
self.assert_tracker_latitude(3.0)
# Move after exit should do nothing
message = LOCATION_MESSAGE.copy()
message['lat'] = "4.0"
self.send_message(LOCATION_TOPIC, LOCATION_MESSAGE)
self.assert_tracker_latitude(3.0)
def test_mobile_multiple_async_enter_exit(self):
"""Test the multiple entering."""
# Test race condition
enter_message = REGION_ENTER_MESSAGE.copy()
enter_message['desc'] = IBEACON_DEVICE
exit_message = REGION_LEAVE_MESSAGE.copy()
exit_message['desc'] = IBEACON_DEVICE
for _ in range(0, 20):
fire_mqtt_message(
self.hass, EVENT_TOPIC, json.dumps(enter_message))
fire_mqtt_message(
self.hass, EVENT_TOPIC, json.dumps(exit_message))
fire_mqtt_message(
self.hass, EVENT_TOPIC, json.dumps(enter_message))
self.hass.block_till_done()
self.send_message(EVENT_TOPIC, exit_message)
self.assertEqual(self.context.mobile_beacons_active['greg_phone'], [])
def test_mobile_multiple_enter_exit(self):
"""Test the multiple entering."""
# Should only happen if the iphone dies
enter_message = REGION_ENTER_MESSAGE.copy()
enter_message['desc'] = IBEACON_DEVICE
exit_message = REGION_LEAVE_MESSAGE.copy()
exit_message['desc'] = IBEACON_DEVICE
self.send_message(EVENT_TOPIC, enter_message)
self.send_message(EVENT_TOPIC, enter_message)
self.send_message(EVENT_TOPIC, exit_message)
self.assertEqual(self.context.mobile_beacons_active['greg_phone'], [])
def test_waypoint_import_simple(self):
"""Test a simple import of list of waypoints."""
waypoints_message = WAYPOINTS_EXPORTED_MESSAGE.copy()
self.send_message(WAYPOINT_TOPIC, waypoints_message)
# Check if it made it into states
wayp = self.hass.states.get(WAYPOINT_ENTITY_NAMES[0])
self.assertTrue(wayp is not None)
wayp = self.hass.states.get(WAYPOINT_ENTITY_NAMES[1])
self.assertTrue(wayp is not None)
def test_waypoint_import_blacklist(self):
"""Test import of list of waypoints for blacklisted user."""
waypoints_message = WAYPOINTS_EXPORTED_MESSAGE.copy()
self.send_message(WAYPOINT_TOPIC_BLOCKED, waypoints_message)
# Check if it made it into states
wayp = self.hass.states.get(WAYPOINT_ENTITY_NAMES[2])
self.assertTrue(wayp is None)
wayp = self.hass.states.get(WAYPOINT_ENTITY_NAMES[3])
self.assertTrue(wayp is None)
def test_waypoint_import_no_whitelist(self):
"""Test import of list of waypoints with no whitelist set."""
@asyncio.coroutine
def mock_see(**kwargs):
"""Fake see method for owntracks."""
return
test_config = {
CONF_PLATFORM: 'owntracks',
CONF_MAX_GPS_ACCURACY: 200,
CONF_WAYPOINT_IMPORT: True
}
run_coroutine_threadsafe(owntracks.async_setup_scanner(
self.hass, test_config, mock_see), self.hass.loop).result()
waypoints_message = WAYPOINTS_EXPORTED_MESSAGE.copy()
self.send_message(WAYPOINT_TOPIC_BLOCKED, waypoints_message)
# Check if it made it into states
wayp = self.hass.states.get(WAYPOINT_ENTITY_NAMES[2])
self.assertTrue(wayp is not None)
wayp = self.hass.states.get(WAYPOINT_ENTITY_NAMES[3])
self.assertTrue(wayp is not None)
def test_waypoint_import_bad_json(self):
"""Test importing a bad JSON payload."""
waypoints_message = WAYPOINTS_EXPORTED_MESSAGE.copy()
self.send_message(WAYPOINT_TOPIC, waypoints_message, True)
# Check if it made it into states
wayp = self.hass.states.get(WAYPOINT_ENTITY_NAMES[2])
self.assertTrue(wayp is None)
wayp = self.hass.states.get(WAYPOINT_ENTITY_NAMES[3])
self.assertTrue(wayp is None)
def test_waypoint_import_existing(self):
"""Test importing a zone that exists."""
waypoints_message = WAYPOINTS_EXPORTED_MESSAGE.copy()
self.send_message(WAYPOINT_TOPIC, waypoints_message)
# Get the first waypoint exported
wayp = self.hass.states.get(WAYPOINT_ENTITY_NAMES[0])
# Send an update
waypoints_message = WAYPOINTS_UPDATED_MESSAGE.copy()
self.send_message(WAYPOINT_TOPIC, waypoints_message)
new_wayp = self.hass.states.get(WAYPOINT_ENTITY_NAMES[0])
self.assertTrue(wayp == new_wayp)
def mock_cipher():
"""Return a dummy pickle-based cipher."""
def mock_decrypt(ciphertext, key):
"""Decrypt/unpickle."""
import pickle
(mkey, plaintext) = pickle.loads(ciphertext)
if key != mkey:
raise ValueError()
return plaintext
return (len(TEST_SECRET_KEY), mock_decrypt)
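# A hedged sketch of how a payload in the shape of MOCK_ENCRYPTED_LOCATION_MESSAGE
# could be produced for this pickle-based mock cipher (for illustration only --
# the tests use the pre-built constant above, and the exact key/plaintext
# encodings the component passes to mock_decrypt() are assumptions here):
#
#     import base64, json, pickle
#     blob = pickle.dumps((TEST_SECRET_KEY.encode(),
#                          json.dumps(LOCATION_MESSAGE).encode()))
#     payload = {'_type': 'encrypted',
#                'data': base64.b64encode(blob).decode('ascii')}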
class TestDeviceTrackerOwnTrackConfigs(BaseMQTT):
"""Test the OwnTrack sensor."""
# pylint: disable=invalid-name
def setup_method(self, method):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
mock_mqtt_component(self.hass)
mock_component(self.hass, 'group')
mock_component(self.hass, 'zone')
patch_load = patch(
'homeassistant.components.device_tracker.async_load_config',
return_value=mock_coro([]))
patch_load.start()
self.addCleanup(patch_load.stop)
patch_save = patch('homeassistant.components.device_tracker.'
'DeviceTracker.async_update_config')
patch_save.start()
self.addCleanup(patch_save.stop)
def teardown_method(self, method):
"""Tear down resources."""
self.hass.stop()
@patch('homeassistant.components.device_tracker.owntracks.get_cipher',
mock_cipher)
def test_encrypted_payload(self):
"""Test encrypted payload."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert setup_component(self.hass, device_tracker.DOMAIN, {
device_tracker.DOMAIN: {
CONF_PLATFORM: 'owntracks',
CONF_SECRET: TEST_SECRET_KEY,
}})
self.send_message(LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
self.assert_location_latitude(2.0)
@patch('homeassistant.components.device_tracker.owntracks.get_cipher',
mock_cipher)
def test_encrypted_payload_topic_key(self):
"""Test encrypted payload with a topic key."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert setup_component(self.hass, device_tracker.DOMAIN, {
device_tracker.DOMAIN: {
CONF_PLATFORM: 'owntracks',
CONF_SECRET: {
LOCATION_TOPIC: TEST_SECRET_KEY,
}}})
self.send_message(LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
self.assert_location_latitude(2.0)
@patch('homeassistant.components.device_tracker.owntracks.get_cipher',
mock_cipher)
def test_encrypted_payload_no_key(self):
"""Test encrypted payload with no key, ."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert setup_component(self.hass, device_tracker.DOMAIN, {
device_tracker.DOMAIN: {
CONF_PLATFORM: 'owntracks',
# key missing
}})
self.send_message(LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
assert self.hass.states.get(DEVICE_TRACKER_STATE) is None
@patch('homeassistant.components.device_tracker.owntracks.get_cipher',
mock_cipher)
def test_encrypted_payload_wrong_key(self):
"""Test encrypted payload with wrong key."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert setup_component(self.hass, device_tracker.DOMAIN, {
device_tracker.DOMAIN: {
CONF_PLATFORM: 'owntracks',
CONF_SECRET: 'wrong key',
}})
self.send_message(LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
assert self.hass.states.get(DEVICE_TRACKER_STATE) is None
@patch('homeassistant.components.device_tracker.owntracks.get_cipher',
mock_cipher)
def test_encrypted_payload_wrong_topic_key(self):
"""Test encrypted payload with wrong topic key."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert setup_component(self.hass, device_tracker.DOMAIN, {
device_tracker.DOMAIN: {
CONF_PLATFORM: 'owntracks',
CONF_SECRET: {
LOCATION_TOPIC: 'wrong key'
}}})
self.send_message(LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
assert self.hass.states.get(DEVICE_TRACKER_STATE) is None
@patch('homeassistant.components.device_tracker.owntracks.get_cipher',
mock_cipher)
def test_encrypted_payload_no_topic_key(self):
"""Test encrypted payload with no topic key."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert setup_component(self.hass, device_tracker.DOMAIN, {
device_tracker.DOMAIN: {
CONF_PLATFORM: 'owntracks',
CONF_SECRET: {
'owntracks/{}/{}'.format(USER, 'otherdevice'): 'foobar'
}}})
self.send_message(LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
assert self.hass.states.get(DEVICE_TRACKER_STATE) is None
try:
import libnacl
except (ImportError, OSError):
libnacl = None
@unittest.skipUnless(libnacl, "libnacl/libsodium is not installed")
def test_encrypted_payload_libsodium(self):
"""Test sending encrypted message payload."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert setup_component(self.hass, device_tracker.DOMAIN, {
device_tracker.DOMAIN: {
CONF_PLATFORM: 'owntracks',
CONF_SECRET: TEST_SECRET_KEY,
}})
self.send_message(LOCATION_TOPIC, ENCRYPTED_LOCATION_MESSAGE)
self.assert_location_latitude(2.0)
| 35.118138
| 79
| 0.640253
|
41072594cc550039e7dd24bfc41f900046691dbb
| 2,793
|
py
|
Python
|
Studenti/hash_generator.py
|
romeorizzi/exams_in_remote
|
5d037ecbab1998fd2eeca00cbe999872ba9031ab
|
[
"MIT"
] | null | null | null |
Studenti/hash_generator.py
|
romeorizzi/exams_in_remote
|
5d037ecbab1998fd2eeca00cbe999872ba9031ab
|
[
"MIT"
] | null | null | null |
Studenti/hash_generator.py
|
romeorizzi/exams_in_remote
|
5d037ecbab1998fd2eeca00cbe999872ba9031ab
|
[
"MIT"
] | null | null | null |
import hashlib
import os
import xml.etree.cElementTree as ET
import time
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
BLOCKSIZE = 65536
def fn_hash(input_path):
hasher = hashlib.sha1()
with open(str(input_path), "rb") as file:
buf = file.read(BLOCKSIZE)
while len(buf) > 0:
hasher.update(buf)
buf = file.read(BLOCKSIZE)
return str(hasher.hexdigest())
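# Example usage (the path is illustrative):
#     digest = fn_hash("Studenti/hash_generator.py")
#     print(digest)  # 40-character hexadecimal SHA-1 of the file contents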
def settings():
try:
tree = ET.ElementTree(file="settings.xml")
root = tree.getroot()
input_path = None
output_path = None
matricola = None
for child in root:
if child.tag == "input-files-path":
input_path = child.text
elif child.tag == "output-files-path":
output_path = child.text
elif child.tag == "matricola":
matricola = child.text
if input_path is not None and output_path is not None:
return [input_path, output_path, matricola]
else:
return None
except FileNotFoundError:
print("File settings not found!")
except Exception as exc:
print("Problem parsing settings file! " + str(exc))
class MyEventHandler(FileSystemEventHandler):
def __init__(self, observer, created):
self.observer = observer
self.created = created
def on_created(self, event):
for key in self.created.keys():
if self.created[key] == "":
self.created[key] = fn_hash(key) + ";\n"
self.created[str(event.src_path)] = ""
print("Created: " + str(event.src_path))
def main():
settings_vect = settings()
if settings_vect is None:
print("Problem parsing settings file! ")
exit(1)
PATH = settings_vect[0]
OUTPUT_PATH = settings_vect[1]
matricola = settings_vect[2]
observer = Observer()
files = {}
event_handler = MyEventHandler(observer, files)
observer.schedule(event_handler, PATH, recursive=False)
observer.start()
try:
while True:
# Set the thread sleep time
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
hash_list = []
for file_name in files.keys():
head, tail = os.path.split(file_name)
if files[file_name] == "":
files[file_name] = fn_hash(file_name) + ";\n"
hash_list.append(str(tail) + ";" + files[file_name])
else:
hash_list.append(str(tail) + ";" + files[file_name])
with open(os.path.join(OUTPUT_PATH, matricola + "_output.csv"), "w") as output_file:
for line in hash_list:
output_file.write(line)
return
if __name__ == "__main__":
main()
| 29.712766
| 88
| 0.601862
|
3cd8c0c90d98e6fd217a3606ac0aaeccb1d6fb4c
| 751
|
py
|
Python
|
LanguageModel/anna/predict.py
|
STHSF/DeepNaturallanguageprocessing
|
1fe79a961abf3e55fb3ce2b0266b26f56ade2483
|
[
"Apache-2.0"
] | 15
|
2016-09-13T05:41:53.000Z
|
2019-12-30T13:01:33.000Z
|
LanguageModel/anna/predict.py
|
STHSF/DeepNaturallanguageprocessing
|
1fe79a961abf3e55fb3ce2b0266b26f56ade2483
|
[
"Apache-2.0"
] | 20
|
2020-01-28T21:42:25.000Z
|
2022-02-10T00:44:02.000Z
|
LanguageModel/anna/predict.py
|
STHSF/DeepNaturallanguageprocessing
|
1fe79a961abf3e55fb3ce2b0266b26f56ade2483
|
[
"Apache-2.0"
] | 10
|
2016-09-28T02:56:11.000Z
|
2022-03-12T16:41:20.000Z
|
#!/usr/bin/env python
# coding=utf-8
"""
@function:
@version: ??
@author: Li Yu
@license: Apache Licence
@file: predict.py
@time: 2017/7/3 2:26 PM
"""
import time
from collections import namedtuple
import numpy as np
import tensorflow as tf
file_path = './data/anna.txt'
with open(file_path, 'r') as f:
text = f.read()
vocab = set(text)
print(vocab)
# # character-to-integer mapping
# vocab_to_int = {c: i for i, c in enumerate(vocab)}
#
# # print(vocab_to_int)
# # integer-to-character mapping
# int_to_vocab = dict(enumerate(vocab))
# # print(int_to_vocab)
#
# # encode the text
# encode = np.array([vocab_to_int[c] for c in text], dtype=np.int32)
# print(text[:100])
# print('encode\n', encode[:100])
| 18.317073
| 68
| 0.693742
|
8acb53bc8cda258e6a4d9cc736746060af28e826
| 15,068
|
py
|
Python
|
tensorflow_asr/models/encoders/conformer.py
|
Thumb-Technologies/TensorFlowASR
|
37ea12af04a8f2c13f75a617f4aa4331f95ce945
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_asr/models/encoders/conformer.py
|
Thumb-Technologies/TensorFlowASR
|
37ea12af04a8f2c13f75a617f4aa4331f95ce945
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_asr/models/encoders/conformer.py
|
Thumb-Technologies/TensorFlowASR
|
37ea12af04a8f2c13f75a617f4aa4331f95ce945
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from ...utils import shape_util
from ..activations.glu import GLU
from ..layers.multihead_attention import MultiHeadAttention, RelPositionMultiHeadAttention
from ..layers.positional_encoding import PositionalEncoding, PositionalEncodingConcat
from ..layers.subsampling import Conv2dSubsampling, VggSubsampling
L2 = tf.keras.regularizers.l2(1e-6)
class FFModule(tf.keras.layers.Layer):
def __init__(
self,
input_dim,
dropout=0.0,
fc_factor=0.5,
kernel_regularizer=L2,
bias_regularizer=L2,
name="ff_module",
**kwargs,
):
super(FFModule, self).__init__(name=name, **kwargs)
self.fc_factor = fc_factor
self.ln = tf.keras.layers.LayerNormalization(
name=f"{name}_ln",
gamma_regularizer=kernel_regularizer,
beta_regularizer=bias_regularizer,
)
self.ffn1 = tf.keras.layers.Dense(
4 * input_dim,
name=f"{name}_dense_1",
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
)
self.swish = tf.keras.layers.Activation(tf.nn.swish, name=f"{name}_swish_activation")
self.do1 = tf.keras.layers.Dropout(dropout, name=f"{name}_dropout_1")
self.ffn2 = tf.keras.layers.Dense(
input_dim,
name=f"{name}_dense_2",
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
)
self.do2 = tf.keras.layers.Dropout(dropout, name=f"{name}_dropout_2")
self.res_add = tf.keras.layers.Add(name=f"{name}_add")
def call(
self,
inputs,
training=False,
**kwargs,
):
outputs = self.ln(inputs, training=training)
outputs = self.ffn1(outputs, training=training)
outputs = self.swish(outputs)
outputs = self.do1(outputs, training=training)
outputs = self.ffn2(outputs, training=training)
outputs = self.do2(outputs, training=training)
outputs = self.res_add([inputs, self.fc_factor * outputs])
return outputs
def get_config(self):
conf = super(FFModule, self).get_config()
conf.update({"fc_factor": self.fc_factor})
conf.update(self.ln.get_config())
conf.update(self.ffn1.get_config())
conf.update(self.swish.get_config())
conf.update(self.do1.get_config())
conf.update(self.ffn2.get_config())
conf.update(self.do2.get_config())
conf.update(self.res_add.get_config())
return conf
class MHSAModule(tf.keras.layers.Layer):
def __init__(
self,
head_size,
num_heads,
dropout=0.0,
mha_type="relmha",
kernel_regularizer=L2,
bias_regularizer=L2,
name="mhsa_module",
**kwargs,
):
super(MHSAModule, self).__init__(name=name, **kwargs)
self.ln = tf.keras.layers.LayerNormalization(
name=f"{name}_ln",
gamma_regularizer=kernel_regularizer,
beta_regularizer=bias_regularizer,
)
if mha_type == "relmha":
self.mha = RelPositionMultiHeadAttention(
name=f"{name}_mhsa",
head_size=head_size,
num_heads=num_heads,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
)
elif mha_type == "mha":
self.mha = MultiHeadAttention(
name=f"{name}_mhsa",
head_size=head_size,
num_heads=num_heads,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
)
else:
raise ValueError("mha_type must be either 'mha' or 'relmha'")
self.do = tf.keras.layers.Dropout(dropout, name=f"{name}_dropout")
self.res_add = tf.keras.layers.Add(name=f"{name}_add")
self.mha_type = mha_type
def call(
self,
inputs,
training=False,
mask=None,
**kwargs,
):
inputs, pos = inputs # pos is positional encoding
outputs = self.ln(inputs, training=training)
if self.mha_type == "relmha":
outputs = self.mha([outputs, outputs, outputs, pos], training=training, mask=mask)
else:
outputs = outputs + pos
outputs = self.mha([outputs, outputs, outputs], training=training, mask=mask)
outputs = self.do(outputs, training=training)
outputs = self.res_add([inputs, outputs])
return outputs
def get_config(self):
conf = super(MHSAModule, self).get_config()
conf.update({"mha_type": self.mha_type})
conf.update(self.ln.get_config())
conf.update(self.mha.get_config())
conf.update(self.do.get_config())
conf.update(self.res_add.get_config())
return conf
class ConvModule(tf.keras.layers.Layer):
def __init__(
self,
input_dim,
kernel_size=32,
dropout=0.0,
depth_multiplier=1,
kernel_regularizer=L2,
bias_regularizer=L2,
name="conv_module",
**kwargs,
):
super(ConvModule, self).__init__(name=name, **kwargs)
self.ln = tf.keras.layers.LayerNormalization()
self.pw_conv_1 = tf.keras.layers.Conv2D(
filters=2 * input_dim,
kernel_size=1,
strides=1,
padding="valid",
name=f"{name}_pw_conv_1",
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
)
self.glu = GLU(name=f"{name}_glu")
self.dw_conv = tf.keras.layers.DepthwiseConv2D(
kernel_size=(kernel_size, 1),
strides=1,
padding="same",
name=f"{name}_dw_conv",
depth_multiplier=depth_multiplier,
depthwise_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
)
self.bn = tf.keras.layers.BatchNormalization(
name=f"{name}_bn",
gamma_regularizer=kernel_regularizer,
beta_regularizer=bias_regularizer,
)
self.swish = tf.keras.layers.Activation(
tf.nn.swish,
name=f"{name}_swish_activation",
)
self.pw_conv_2 = tf.keras.layers.Conv2D(
filters=input_dim,
kernel_size=1,
strides=1,
padding="valid",
name=f"{name}_pw_conv_2",
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
)
self.do = tf.keras.layers.Dropout(dropout, name=f"{name}_dropout")
self.res_add = tf.keras.layers.Add(name=f"{name}_add")
def call(
self,
inputs,
training=False,
**kwargs,
):
outputs = self.ln(inputs, training=training)
B, T, E = shape_util.shape_list(outputs)
outputs = tf.reshape(outputs, [B, T, 1, E])
outputs = self.pw_conv_1(outputs, training=training)
outputs = self.glu(outputs)
outputs = self.dw_conv(outputs, training=training)
outputs = self.bn(outputs, training=training)
outputs = self.swish(outputs)
outputs = self.pw_conv_2(outputs, training=training)
outputs = tf.reshape(outputs, [B, T, E])
outputs = self.do(outputs, training=training)
outputs = self.res_add([inputs, outputs])
return outputs
def get_config(self):
conf = super(ConvModule, self).get_config()
conf.update(self.ln.get_config())
conf.update(self.pw_conv_1.get_config())
conf.update(self.glu.get_config())
conf.update(self.dw_conv.get_config())
conf.update(self.bn.get_config())
conf.update(self.swish.get_config())
conf.update(self.pw_conv_2.get_config())
conf.update(self.do.get_config())
conf.update(self.res_add.get_config())
return conf
class ConformerBlock(tf.keras.layers.Layer):
def __init__(
self,
input_dim,
dropout=0.0,
fc_factor=0.5,
head_size=36,
num_heads=4,
mha_type="relmha",
kernel_size=32,
depth_multiplier=1,
kernel_regularizer=L2,
bias_regularizer=L2,
name="conformer_block",
**kwargs,
):
super(ConformerBlock, self).__init__(name=name, **kwargs)
self.ffm1 = FFModule(
input_dim=input_dim,
dropout=dropout,
fc_factor=fc_factor,
name=f"{name}_ff_module_1",
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
)
self.mhsam = MHSAModule(
mha_type=mha_type,
head_size=head_size,
num_heads=num_heads,
dropout=dropout,
name=f"{name}_mhsa_module",
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
)
self.convm = ConvModule(
input_dim=input_dim,
kernel_size=kernel_size,
dropout=dropout,
name=f"{name}_conv_module",
depth_multiplier=depth_multiplier,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
)
self.ffm2 = FFModule(
input_dim=input_dim,
dropout=dropout,
fc_factor=fc_factor,
name=f"{name}_ff_module_2",
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
)
self.ln = tf.keras.layers.LayerNormalization(
name=f"{name}_ln",
gamma_regularizer=kernel_regularizer,
beta_regularizer=bias_regularizer,
)
def call(
self,
inputs,
training=False,
mask=None,
**kwargs,
):
inputs, pos = inputs # pos is positional encoding
outputs = self.ffm1(inputs, training=training, **kwargs)
outputs = self.mhsam([outputs, pos], training=training, mask=mask, **kwargs)
outputs = self.convm(outputs, training=training, **kwargs)
outputs = self.ffm2(outputs, training=training, **kwargs)
outputs = self.ln(outputs, training=training)
return outputs
def get_config(self):
conf = super(ConformerBlock, self).get_config()
conf.update(self.ffm1.get_config())
conf.update(self.mhsam.get_config())
conf.update(self.convm.get_config())
conf.update(self.ffm2.get_config())
conf.update(self.ln.get_config())
return conf
class ConformerEncoder(tf.keras.Model):
def __init__(
self,
subsampling,
positional_encoding="sinusoid",
dmodel=144,
num_blocks=16,
mha_type="relmha",
head_size=36,
num_heads=4,
kernel_size=32,
depth_multiplier=1,
fc_factor=0.5,
dropout=0.0,
kernel_regularizer=L2,
bias_regularizer=L2,
name="conformer_encoder",
**kwargs,
):
super(ConformerEncoder, self).__init__(name=name, **kwargs)
subsampling_name = subsampling.pop("type", "conv2d")
if subsampling_name == "vgg":
subsampling_class = VggSubsampling
elif subsampling_name == "conv2d":
subsampling_class = Conv2dSubsampling
else:
raise ValueError("subsampling must be either 'conv2d' or 'vgg'")
self.conv_subsampling = subsampling_class(
**subsampling,
name=f"{name}_subsampling",
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
)
if positional_encoding == "sinusoid":
self.pe = PositionalEncoding(name=f"{name}_pe")
elif positional_encoding == "sinusoid_v2":
self.pe = PositionalEncoding(alpha=2, beta=0, name=f"{name}_pe")
elif positional_encoding == "sinusoid_concat":
self.pe = PositionalEncodingConcat(name=f"{name}_pe")
elif positional_encoding == "sinusoid_concat_v2":
self.pe = PositionalEncodingConcat(alpha=2, beta=-1, name=f"{name}_pe")
elif positional_encoding == "subsampling":
self.pe = tf.keras.layers.Activation("linear", name=f"{name}_pe")
else:
raise ValueError(
"positional_encoding must be either 'sinusoid', \
'sinusoid_concat', 'sinusoid_v2', 'sinusoid_concat_v2' or 'subsampling'"
)
self.linear = tf.keras.layers.Dense(
dmodel,
name=f"{name}_linear",
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
)
self.do = tf.keras.layers.Dropout(dropout, name=f"{name}_dropout")
self.conformer_blocks = []
for i in range(num_blocks):
conformer_block = ConformerBlock(
input_dim=dmodel,
dropout=dropout,
fc_factor=fc_factor,
head_size=head_size,
num_heads=num_heads,
mha_type=mha_type,
kernel_size=kernel_size,
depth_multiplier=depth_multiplier,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
name=f"{name}_block_{i}",
)
self.conformer_blocks.append(conformer_block)
def call(
self,
inputs,
training=False,
mask=None,
**kwargs,
):
# input with shape [B, T, V1, V2]
outputs = self.conv_subsampling(inputs, training=training)
outputs = self.linear(outputs, training=training)
pe = self.pe(outputs)
outputs = self.do(outputs, training=training)
for cblock in self.conformer_blocks:
outputs = cblock([outputs, pe], training=training, mask=mask, **kwargs)
return outputs
def get_config(self):
conf = super(ConformerEncoder, self).get_config()
conf.update(self.conv_subsampling.get_config())
conf.update(self.linear.get_config())
conf.update(self.do.get_config())
conf.update(self.pe.get_config())
for cblock in self.conformer_blocks:
conf.update(cblock.get_config())
return conf
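# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for exposition, not part of the original
# module). The encoder consumes feature maps shaped [B, T, V1, V2] (e.g. V1 =
# number of mel bins, V2 = 1) and returns [B, T', dmodel] after subsampling.
# The keys accepted inside ``subsampling`` are defined by Conv2dSubsampling /
# VggSubsampling elsewhere in this codebase; the values below are assumptions
# used purely for illustration.
#
#   encoder = ConformerEncoder(
#       subsampling={"type": "conv2d", "filters": 144, "kernel_size": 3, "strides": 2},
#       dmodel=144, num_blocks=16, head_size=36, num_heads=4,
#   )
#   features = tf.random.normal([1, 320, 80, 1])    # [B, T, n_mels, 1]
#   encoded = encoder(features, training=False)     # -> [1, T', 144]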
| 35.123543
| 94
| 0.603265
|
a75329d9d9f01c358efa767b3e9f3e2167416ea0
| 2,171
|
py
|
Python
|
lists/tests/test_forms.py
|
elyak123/obeying-the-testing-goat
|
8359febf7d28215133c5933512f8e737aacdff8f
|
[
"MIT"
] | null | null | null |
lists/tests/test_forms.py
|
elyak123/obeying-the-testing-goat
|
8359febf7d28215133c5933512f8e737aacdff8f
|
[
"MIT"
] | null | null | null |
lists/tests/test_forms.py
|
elyak123/obeying-the-testing-goat
|
8359febf7d28215133c5933512f8e737aacdff8f
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from lists.models import Item, List
from lists.forms import (ItemForm, EMPTY_ITEM_ERROR, DUPLICATE_ITEM_ERROR,
ExistingListItemForm
)
class ItemFormTest(TestCase):
def test_form_item_input_has_placeholder_and_css_classes(self):
form = ItemForm()
self.assertIn('placeholder="Enter a to-do item"', form.as_p())
def test_validation_form_for_blank_items(self):
list_ = List.objects.create()
form = ItemForm(data={'text': ''})
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['text'],
[EMPTY_ITEM_ERROR]
)
with self.assertRaises(ValueError):
            form.save(for_list=list_)
def test_form_save_handles_saving_to_a_list(self):
list_ = List.objects.create()
form = ItemForm(data={'text': 'do me'})
new_item = form.save(for_list=list_)
self.assertEqual(new_item, Item.objects.first())
self.assertEqual(new_item.text, 'do me')
        self.assertEqual(new_item.list, list_)
class ExistingListItemFormTest(TestCase):
def test_form_renders_item_text_input(self):
list_ = List.objects.create()
form = ExistingListItemForm(for_list=list_)
self.assertIn('placeholder="Enter a to-do item"', form.as_p())
def test_form_validation_for_blank_items(self):
list_ = List.objects.create()
        form = ExistingListItemForm(for_list=list_, data={'text': ''})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['text'], [EMPTY_ITEM_ERROR])
def test_validation_for_duplicate_items(self):
list_ = List.objects.create()
Item.objects.create(list=list_, text='no twins!')
form = ExistingListItemForm(for_list=list_, data={'text': 'no twins!'})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['text'], [DUPLICATE_ITEM_ERROR])
def test_form_save(self):
list_ = List.objects.create()
form = ExistingListItemForm(for_list=list_, data={'text': 'hi'})
new_item = form.save()
self.assertEqual(new_item, Item.objects.first())
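# ---------------------------------------------------------------------------
# Illustrative sketch (added for exposition, not part of this test module): one
# possible shape of lists/forms.py that would satisfy the tests above. This is
# an assumption for orientation only, not the project's actual forms file.
#
#   class ItemForm(forms.models.ModelForm):
#       class Meta:
#           model = Item
#           fields = ('text',)
#           widgets = {'text': forms.fields.TextInput(
#               attrs={'placeholder': 'Enter a to-do item'})}
#           error_messages = {'text': {'required': EMPTY_ITEM_ERROR}}
#
#       def save(self, for_list):
#           self.instance.list = for_list
#           return super().save()
#
#   class ExistingListItemForm(ItemForm):
#       def __init__(self, for_list, *args, **kwargs):
#           super().__init__(*args, **kwargs)
#           self.instance.list = for_list
#
#       def validate_unique(self):
#           try:
#               self.instance.validate_unique()
#           except ValidationError as e:
#               e.error_dict = {'text': [DUPLICATE_ITEM_ERROR]}
#               self._update_errors(e)
#
#       def save(self):
#           return forms.models.ModelForm.save(self)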
| 38.767857
| 79
| 0.667895
|
019902fd823def4e117ea65ffc273ad7678112be
| 7,817
|
py
|
Python
|
mergejs.py
|
tmcw/OpenLayerer
|
44212b0f9a8aae71f6f96f6357671e89f6ea6cc5
|
[
"Apache-2.0",
"BSD-2-Clause"
] | 1
|
2015-07-17T19:01:07.000Z
|
2015-07-17T19:01:07.000Z
|
mergejs.py
|
tmcw/OpenLayerer
|
44212b0f9a8aae71f6f96f6357671e89f6ea6cc5
|
[
"Apache-2.0",
"BSD-2-Clause"
] | null | null | null |
mergejs.py
|
tmcw/OpenLayerer
|
44212b0f9a8aae71f6f96f6357671e89f6ea6cc5
|
[
"Apache-2.0",
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
#
# Merge multiple JavaScript source code files into one.
#
# Usage:
# This script requires source files to have dependencies specified in them.
#
# Dependencies are specified with a comment of the form:
#
# // @requires <file path>
#
# e.g.
#
# // @requires Geo/DataSource.js
#
# This script should be executed like so:
#
# mergejs.py <output.js> <directory> [...]
#
# e.g.
#
# mergejs.py openlayers.js Geo/ CrossBrowser/
#
# This example will cause the script to walk the `Geo` and
# `CrossBrowser` directories--and subdirectories thereof--and import
# all `*.js` files encountered. The dependency declarations will be extracted
# and then the source code from imported files will be output to
# a file named `openlayers.js` in an order which fulfils the dependencies
# specified.
#
#
# Note: This is a very rough initial version of this code.
#
# -- Copyright 2005-2008 MetaCarta, Inc. / OpenLayers project --
#
# TODO: Allow files to be excluded. e.g. `Crossbrowser/DebugMode.js`?
# TODO: Report error when dependency can not be found rather than KeyError.
import re, os, sys
SUFFIX_JAVASCRIPT = ".js"
RE_REQUIRE = "@requires:? (.*)\n" # TODO: Ensure in comment?
class SourceFile:
"""
Represents a Javascript source code file.
"""
def __init__(self, filepath, source):
"""
"""
self.filepath = filepath
self.source = source
self.requiredBy = []
def _getRequirements(self):
"""
Extracts the dependencies specified in the source code and returns
a list of them.
"""
# TODO: Cache?
return re.findall(RE_REQUIRE, self.source)
requires = property(fget=_getRequirements, doc="")
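# ---------------------------------------------------------------------------
# Illustrative sketch (added for exposition, not part of the original script):
# dependency discovery is just RE_REQUIRE applied to the raw source, with or
# without a colon after "@requires". A tiny standalone check on a fabricated
# snippet:
def _demo_requires_regex():
    snippet = ("// @requires Geo/DataSource.js\n"
               "// @requires: CrossBrowser/Base.js\n"
               "var x = 1;\n")
    assert re.findall(RE_REQUIRE, snippet) == [
        "Geo/DataSource.js", "CrossBrowser/Base.js"]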
class Config:
"""
Represents a parsed configuration file.
A configuration file should be of the following form:
[first]
3rd/prototype.js
core/application.js
core/params.js
# A comment
[last]
core/api.js # Another comment
[exclude]
3rd/logger.js
All headings are required.
The files listed in the `first` section will be forced to load
*before* all other files (in the order listed). The files in `last`
section will be forced to load *after* all the other files (in the
order listed).
The files list in the `exclude` section will not be imported.
Any text appearing after a # symbol indicates a comment.
"""
def __init__(self, **kwargs):
self.forceFirst = kwargs.get('forceFirst', [])
self.forceLast = kwargs.get('forceLast', [])
self.include = kwargs.get('include', [])
self.exclude = kwargs.get('exclude', [])
def read(self, filename):
"""
Parses the content of the named file and stores the values.
:param filename: the path to a configuration file
:return none
"""
lines = [re.sub("#.*?$", "", line).strip() # Assumes end-of-line character is present
for line in open(filename)
if line.strip() and not line.strip().startswith("#")] # Skip blank lines and comments
self.forceFirst = lines[lines.index("[first]") + 1:lines.index("[last]")]
self.forceLast = lines[lines.index("[last]") + 1:lines.index("[include]")]
self.include = lines[lines.index("[include]") + 1:lines.index("[exclude]")]
self.exclude = lines[lines.index("[exclude]") + 1:]
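# ---------------------------------------------------------------------------
# Illustrative sketch (added for exposition, not part of the original script):
# Config.read() slices the cleaned line list on the literal section headers,
# so all four headers must appear, in [first]/[last]/[include]/[exclude]
# order. A minimal round-trip using only the standard library:
def _demo_config_read():
    import tempfile
    text = ("[first]\n" "a.js\n"
            "[last]\n" "z.js # trailing comment\n"
            "[include]\n"
            "[exclude]\n" "skip.js\n")
    fd, path = tempfile.mkstemp(suffix=".cfg")
    with os.fdopen(fd, "w") as f:
        f.write(text)
    cfg = Config()
    cfg.read(path)
    os.remove(path)
    assert cfg.forceFirst == ["a.js"] and cfg.forceLast == ["z.js"]
    assert cfg.include == [] and cfg.exclude == ["skip.js"]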
def scanjs(sourceDirectory, config = None):
""" scans scanDirectory recursively and returns a list of paths to javascript files
:param sourceDirectory: the directory root
:return list object of all file paths
"""
allFiles = []
## Find all the Javascript source files
for root, dirs, files in os.walk(sourceDirectory):
for filename in files:
if filename.endswith(SUFFIX_JAVASCRIPT) and not filename.startswith("."):
filepath = os.path.join(root, filename)[len(sourceDirectory)+1:]
filepath = filepath.replace("\\", "/")
if config and config.include:
if filepath in config.include or filepath in config.forceFirst:
allFiles.append(filepath)
elif (not config) or (filepath not in config.exclude):
allFiles.append(filepath)
return allFiles
def merge (sourceDirectory, config = None):
""" Merges source files within a given directory according to a configuration
:param sourceDirectory: a string designating the path of the OpenLayers source
:param config: a mergejs.Config object
"""
from toposort import toposort
allFiles = scanjs(sourceDirectory, config)
## Header inserted at the start of each file in the output
HEADER = "/* " + "=" * 70 + "\n %s\n" + " " + "=" * 70 + " */\n\n"
files = {}
## Import file source code
## TODO: Do import when we walk the directories above?
for filepath in allFiles:
fullpath = os.path.join(sourceDirectory, filepath).strip()
content = open(fullpath, "U").read() # TODO: Ensure end of line @ EOF?
files[filepath] = SourceFile(filepath, content) # TODO: Chop path?
complete = False
resolution_pass = 1
while not complete:
order = [] # List of filepaths to output, in a dependency satisfying order
nodes = []
routes = []
## Resolve the dependencies
resolution_pass += 1
for filepath, info in files.items():
nodes.append(filepath)
for neededFilePath in info.requires:
routes.append((neededFilePath, filepath))
for dependencyLevel in toposort(nodes, routes):
for filepath in dependencyLevel:
order.append(filepath)
                if filepath not in files:
fullpath = os.path.join(sourceDirectory, filepath).strip()
content = open(fullpath, "U").read() # TODO: Ensure end of line @ EOF?
files[filepath] = SourceFile(filepath, content) # TODO: Chop path?
# Double check all dependencies have been met
complete = True
try:
for fp in order:
if max([order.index(rfp) for rfp in files[fp].requires] +
[order.index(fp)]) != order.index(fp):
complete = False
except:
complete = False
## Move forced first and last files to the required position
if config:
order = config.forceFirst + [item
for item in order
if ((item not in config.forceFirst) and
(item not in config.forceLast))] + config.forceLast
## Output the files in the determined order
result = []
for fp in order:
f = files[fp]
result.append(HEADER % f.filepath)
source = f.source
result.append(source)
if not source.endswith("\n"):
result.append("\n")
return "".join(result)
if __name__ == "__main__":
from optparse import OptionParser
usage = "usage: mergejs.py <output.js> <source directory> [--config config filename]"
parser = OptionParser(usage=usage)
parser.add_option('-c', '--config', dest="config_filename", action="store",
help="Config file name")
(options, args) = parser.parse_args()
try:
        outputFilename = args[0]
        sourceDirectory = args[1]
except IndexError:
parser.print_help()
sys.exit()
if options.config_filename:
config = Config()
config.read(options.config_filename)
else:
config = None
output = merge(sourceDirectory, config)
    with open(outputFilename, "w") as output_file:
        output_file.write(output)
| 32.707113
| 102
| 0.612639
|
0c2e9cca1ca218441828f9e1b709c00c7ac13a3d
| 98
|
py
|
Python
|
output/models/ms_data/attribute/att_j004_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 1
|
2021-08-14T17:59:21.000Z
|
2021-08-14T17:59:21.000Z
|
output/models/ms_data/attribute/att_j004_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 4
|
2020-02-12T21:30:44.000Z
|
2020-04-15T20:06:46.000Z
|
output/models/ms_data/attribute/att_j004_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | null | null | null |
from output.models.ms_data.attribute.att_j004_xsd.att_j004 import Test
__all__ = [
"Test",
]
| 16.333333
| 70
| 0.744898
|
7fc788421d5f34f4379493ea2bd07d336465209f
| 716
|
py
|
Python
|
change_png_color.py
|
prasertcbs/HueIconPackPng
|
269476ce74c05ed26e4213fb1d3492b99c7b8ef9
|
[
"MIT"
] | 6
|
2020-06-07T14:10:54.000Z
|
2021-07-16T06:20:33.000Z
|
change_png_color.py
|
prasertcbs/HueIconPackPng
|
269476ce74c05ed26e4213fb1d3492b99c7b8ef9
|
[
"MIT"
] | null | null | null |
change_png_color.py
|
prasertcbs/HueIconPackPng
|
269476ce74c05ed26e4213fb1d3492b99c7b8ef9
|
[
"MIT"
] | 5
|
2020-10-02T14:44:16.000Z
|
2022-01-21T21:21:16.000Z
|
# source: https://stackoverflow.com/questions/3752476/python-pil-replace-a-single-rgba-color
from PIL import Image
import numpy as np
def x():
im = Image.open('test.png')
im = im.convert('RGBA')
data = np.array(im) # "data" is a height x width x 4 numpy array
red, green, blue, alpha = data.T # Temporarily unpack the bands for readability
    # Replace the icon colour with light grey (leaves alpha values alone)
    # Hue icons use RGB(16, 16, 16)
black_areas = (red == 16) & (blue == 16) & (green == 16)
data[..., :-1][black_areas.T] = (200, 200, 200) # Transpose back needed
im2 = Image.fromarray(data)
im2.save('test_white.png')
im2.show()
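# ---------------------------------------------------------------------------
# Illustrative sketch (added for exposition, not part of the original script):
# the recolouring is a plain boolean mask over the RGB channels; the alpha
# channel (last band) is left untouched. The same idea on a tiny in-memory
# RGBA array, using only numpy:
def _demo_mask_recolour():
    data = np.zeros((2, 2, 4), dtype=np.uint8)
    data[..., :3] = 16        # the icon colour, RGB(16, 16, 16)
    data[..., 3] = 255        # fully opaque alpha
    red, green, blue, alpha = data.T
    black_areas = (red == 16) & (blue == 16) & (green == 16)
    data[..., :-1][black_areas.T] = (200, 200, 200)
    assert (data[..., :3] == 200).all() and (data[..., 3] == 255).all()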
if __name__ == "__main__":
x()
| 29.833333
| 92
| 0.629888
|
d9b4ef912739d9a4c8ff489f8a38535394aedc3d
| 1,193
|
bzl
|
Python
|
dotnet/private/actions/resx_net.bzl
|
Perilynn/rules_dotnet
|
0c80abcfe023fe32f8908d6cac6dbab5c4455ee6
|
[
"Apache-2.0"
] | null | null | null |
dotnet/private/actions/resx_net.bzl
|
Perilynn/rules_dotnet
|
0c80abcfe023fe32f8908d6cac6dbab5c4455ee6
|
[
"Apache-2.0"
] | null | null | null |
dotnet/private/actions/resx_net.bzl
|
Perilynn/rules_dotnet
|
0c80abcfe023fe32f8908d6cac6dbab5c4455ee6
|
[
"Apache-2.0"
] | null | null | null |
load(
"@io_bazel_rules_dotnet//dotnet/private:common.bzl",
"as_iterable",
"sets"
)
load(
"@io_bazel_rules_dotnet//dotnet/private:providers.bzl",
"DotnetLibrary",
)
load(
"@io_bazel_rules_dotnet//dotnet/private:common.bzl",
"paths",
)
def _make_runner_arglist(dotnet, source, output):
args = dotnet.actions.args()
args.add(source.files, format = "%s")
args.add(output)
return args
def emit_resx_net(dotnet,
name = "",
src = None,
identifier = None,
out = None):
if name == "" and out == None:
fail("either name or out must be set")
if not out:
result = dotnet.declare_file(dotnet, path=name+".resources")
else:
result = dotnet.declare_file(dotnet, path=out)
args = _make_runner_arglist(dotnet, src, result)
dotnet.actions.run(
inputs = src.files,
outputs = [result],
executable = dotnet.resgen,
arguments = [args],
mnemonic = "NetResxCompile",
        progress_message = (
            "Compiling resources " + dotnet.label.package + ":" + dotnet.label.name))
return dotnet.new_resource(
dotnet = dotnet,
name = name,
result = result,
identifier = identifier)
| 20.929825
| 83
| 0.640402
|
e26c0f8864ddfcc75f2b4862f1ee5b59414a29ee
| 2,969
|
py
|
Python
|
lp2jira/user.py
|
pawelzny/launchpad2jira
|
b3724275837ae2968172d51676d01c2636489419
|
[
"MIT"
] | null | null | null |
lp2jira/user.py
|
pawelzny/launchpad2jira
|
b3724275837ae2968172d51676d01c2636489419
|
[
"MIT"
] | null | null | null |
lp2jira/user.py
|
pawelzny/launchpad2jira
|
b3724275837ae2968172d51676d01c2636489419
|
[
"MIT"
] | 2
|
2018-08-30T12:34:19.000Z
|
2018-12-14T10:55:30.000Z
|
# -*- coding: utf-8 -*-
import logging
import os
from tqdm import tqdm
from lp2jira.config import config, lp
from lp2jira.export import Export
from lp2jira.utils import clean_id, get_user_groups, json_dump, generate_mail
class User:
def __init__(self, name, display_name, email=None, user_groups=None, active=True):
self.name = name
self.display_name = display_name
self.active = active
self.user_groups = user_groups
self.email = email
@classmethod
def create(cls, username):
try:
lp_user = lp.people[username]
display_name = lp_user.display_name
if not lp_user.hide_email_addresses and lp_user.preferred_email_address:
email = lp_user.preferred_email_address.email
else:
email = generate_mail(display_name)
logging.warning(f"No access to mail for {username}. Auto-generated email: {email}")
except Exception as exc:
logging.warning(f"Failed to get user data for {username}")
logging.warning(exc, exc_info=True)
display_name = username
email = generate_mail(display_name)
return cls(name=username, display_name=display_name,
email=email, user_groups=get_user_groups())
@staticmethod
def filename(username):
return os.path.normpath(os.path.join(config["local"]["users"], f'{username}.json'))
@staticmethod
def exists(username):
return os.path.exists(User.filename(username))
def export(self):
filename = self.filename(self.name)
        if self.exists(self.name):
logging.debug(f'User {self.display_name} already exists, skipping: "{filename}"')
return True
with open(filename, 'w') as f:
json_dump(self._dump(), f)
        logging.debug(f'User {self.display_name} export success')
return True
def _dump(self):
dmp = {
'name': self.name,
'fullname': self.display_name,
'active': self.active
}
if self.user_groups:
dmp['groups'] = self.user_groups
if self.email:
dmp['email'] = self.email
return dmp
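# ---------------------------------------------------------------------------
# Illustrative sketch (added for exposition, not part of the original module):
# _dump() only emits the optional "groups" and "email" keys when they are set,
# keeping the exported JSON minimal. A standalone check with made-up values
# (no Launchpad access required):
def _demo_user_dump():
    user = User(name="jdoe", display_name="John Doe",
                email="jdoe@example.com", user_groups=["contributors"])
    assert user._dump() == {
        "name": "jdoe",
        "fullname": "John Doe",
        "active": True,
        "groups": ["contributors"],
        "email": "jdoe@example.com",
    }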
class ExportUser(Export):
def __init__(self):
super().__init__(entity=User)
class ExportSubscribers(ExportUser):
def run(self):
logging.info('===== Export: Subscribers =====')
project = lp.projects[config['launchpad']['project']]
subscriptions = project.getSubscriptions()
counter = 0
for sub in tqdm(subscriptions, desc='Export subscribers'):
username = clean_id(sub.subscriber_link)
if User.exists(username):
counter += 1
continue
if super().run(username):
counter += 1
logging.info(f'Exported subscribers: {counter}/{len(subscriptions)}')
| 29.989899
| 99
| 0.607275
|
75cc237bd6a8664e50af164f4724f5008013d381
| 369
|
py
|
Python
|
lib/external/setup.py
|
shachargluska/centerpose
|
01c2c8bfa9d3ee91807f2ffdcc48728d104265bd
|
[
"MIT"
] | 245
|
2019-11-29T02:55:25.000Z
|
2022-03-30T07:30:18.000Z
|
lib/external/setup.py
|
shachargluska/centerpose
|
01c2c8bfa9d3ee91807f2ffdcc48728d104265bd
|
[
"MIT"
] | 24
|
2019-11-29T10:05:00.000Z
|
2022-03-30T07:16:06.000Z
|
lib/external/setup.py
|
FishLiuabc/centerpose
|
555d753cd82693476f91f78c53aa4147f5a83015
|
[
"MIT"
] | 45
|
2019-11-29T05:12:02.000Z
|
2022-03-21T02:20:36.000Z
|
from distutils.core import setup
from distutils.extension import Extension
import numpy
from Cython.Build import cythonize
extensions = [
Extension(
"nms",
["nms.pyx"],
extra_compile_args=["-Wno-cpp", "-Wno-unused-function"]
)
]
setup(
name="coco",
ext_modules=cythonize(extensions),
include_dirs=[numpy.get_include()]
)
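# Usage note (added for exposition, not part of the original file): the
# extension is typically built in place so the compiled module lands next to
# nms.pyx, e.g.
#
#   python setup.py build_ext --inplace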
| 18.45
| 63
| 0.666667
|
43762d4a93a68a87619a65a64d8427722aaef450
| 58,947
|
py
|
Python
|
pyNastran/bdf/bdf_interface/write_mesh.py
|
numenic/pyNastran
|
fd5d3f0bf18db6595d85b9ac152f611e23122a68
|
[
"BSD-3-Clause"
] | null | null | null |
pyNastran/bdf/bdf_interface/write_mesh.py
|
numenic/pyNastran
|
fd5d3f0bf18db6595d85b9ac152f611e23122a68
|
[
"BSD-3-Clause"
] | null | null | null |
pyNastran/bdf/bdf_interface/write_mesh.py
|
numenic/pyNastran
|
fd5d3f0bf18db6595d85b9ac152f611e23122a68
|
[
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
"""
This file defines:
- WriteMesh
"""
import sys
from io import StringIO, IOBase
from typing import List, Dict, Union, Optional, Tuple, Any, cast
from pyNastran.bdf.field_writer_8 import print_card_8
from pyNastran.bdf.field_writer_16 import print_card_16
from pyNastran.bdf.bdf_interface.attributes import BDFAttributes
from pyNastran.bdf.cards.nodes import write_xpoints
class WriteMesh(BDFAttributes):
"""
Defines methods for writing cards
Major methods:
- model.write_bdf(...)
- model.echo_bdf(...)
- model.auto_reject_bdf(...)
"""
def __init__(self):
"""creates methods for writing cards"""
BDFAttributes.__init__(self)
self._auto_reject = True
self.cards_to_read = set()
def get_encoding(self, encoding: Optional[str]=None) -> str:
"""gets the file encoding"""
        if encoding is None:
            encoding = self._encoding
if encoding is None:
encoding = sys.getdefaultencoding()
encoding = cast(str, encoding)
return encoding
def _output_helper(self, out_filename: Optional[str], interspersed: bool,
size: int, is_double: bool) -> str:
"""
Performs type checking on the write_bdf inputs
"""
if out_filename is None:
from pyNastran.utils.gui_io import save_file_dialog
wildcard_wx = "Nastran BDF (*.bdf; *.dat; *.nas; *.pch)|" \
"*.bdf;*.dat;*.nas;*.pch|" \
"All files (*.*)|*.*"
wildcard_qt = "Nastran BDF (*.bdf *.dat *.nas *.pch);;All files (*)"
title = 'Save BDF/DAT/PCH'
out_filename = save_file_dialog(title, wildcard_wx, wildcard_qt)
assert out_filename is not None, out_filename
has_read_write = hasattr(out_filename, 'read') and hasattr(out_filename, 'write')
if has_read_write or isinstance(out_filename, IOBase):
return out_filename
elif not isinstance(out_filename, str):
msg = 'out_filename=%r must be a string; type=%s' % (
out_filename, type(out_filename))
raise TypeError(msg)
if size == 8:
assert is_double is False, 'is_double=%r' % is_double
elif size == 16:
assert is_double in [True, False], 'is_double=%r' % is_double
else:
assert size in [8, 16], size
assert isinstance(interspersed, bool)
#fname = print_filename(out_filename)
#self.log.debug("***writing %s" % fname)
return out_filename
def write_bdf(self, out_filename=None, encoding=None,
size=8, is_double=False,
interspersed=False, enddata=None, write_header=True, close=True):
# type: (Optional[Union[str, StringIO]], Optional[str], int, bool, bool, Optional[bool], bool, bool) -> None
"""
Writes the BDF.
Parameters
----------
out_filename : varies; default=None
str - the name to call the output bdf
file - a file object
StringIO() - a StringIO object
None - pops a dialog
encoding : str; default=None -> system specified encoding
the unicode encoding
latin1, and utf8 are generally good options
size : int; {8, 16}
the field size
is_double : bool; default=False
False : small field
True : large field
interspersed : bool; default=True
Writes a bdf with properties & elements
interspersed like how Patran writes the bdf. This takes
slightly longer than if interspersed=False, but makes it
much easier to compare to a Patran-formatted bdf and is
more clear.
enddata : bool; default=None
bool - enable/disable writing ENDDATA
None - depends on input BDF
write_header : bool; default=True
flag for writing the pyNastran header
close : bool; default=True
should the output file be closed
"""
is_long_ids = False
if self.is_bdf_vectorized:
pass
else:
# required for MasterModelTaxi
is_long_ids = (
self.nodes and max(self.nodes) > 100000000 or
self.coords and max(self.coords) > 100000000 or
self.elements and max(self.elements) > 100000000 or
self.properties and max(self.properties) > 100000000 or
self.materials and max(self.materials) > 100000000 or
self.thermal_materials and max(self.thermal_materials) > 100000000 or
self.nsms and max(self.nsms) > 100000000 or
self.nsmadds and max(self.nsmadds) > 100000000)
if is_long_ids:
size = 16
out_filename = self._output_helper(out_filename,
interspersed, size, is_double)
encoding = self.get_encoding(encoding)
#assert encoding.lower() in ['ascii', 'latin1', 'utf8'], encoding
has_read_write = hasattr(out_filename, 'read') and hasattr(out_filename, 'write')
if has_read_write:
bdf_file = out_filename
else:
self.log.debug('---starting BDF.write_bdf of %s---' % out_filename)
bdf_file = open(out_filename, 'w', encoding=encoding)
self._write_header(bdf_file, encoding, write_header=write_header)
if self.superelement_models:
bdf_file.write('$' + '*'*80+'\n')
for superelement_id, superelement in sorted(self.superelement_models.items()):
bdf_file.write('BEGIN SUPER=%s\n' % superelement_id)
superelement.write_bdf(out_filename=bdf_file, encoding=encoding,
size=size, is_double=is_double,
interspersed=interspersed, enddata=False,
write_header=False, close=False)
bdf_file.write('$' + '*'*80+'\n')
bdf_file.write('BEGIN BULK\n')
self._write_params(bdf_file, size, is_double, is_long_ids=is_long_ids)
self._write_nodes(bdf_file, size, is_double, is_long_ids=is_long_ids)
if interspersed:
self._write_elements_interspersed(bdf_file, size, is_double, is_long_ids=is_long_ids)
else:
self._write_elements(bdf_file, size, is_double, is_long_ids=is_long_ids)
self._write_properties(bdf_file, size, is_double, is_long_ids=is_long_ids)
#self._write_properties_by_element_type(bdf_file, size, is_double, is_long_ids)
self._write_materials(bdf_file, size, is_double, is_long_ids=is_long_ids)
self._write_masses(bdf_file, size, is_double, is_long_ids=is_long_ids)
# split out for write_bdf_symmetric
self._write_rigid_elements(bdf_file, size, is_double, is_long_ids=is_long_ids)
self._write_aero(bdf_file, size, is_double, is_long_ids=is_long_ids)
self._write_common(bdf_file, size, is_double, is_long_ids=is_long_ids)
if (enddata is None and 'ENDDATA' in self.card_count) or enddata:
bdf_file.write('ENDDATA\n')
if close:
bdf_file.close()
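    # Illustrative usage sketch (added for exposition, not part of the original
    # source). WriteMesh is mixed into the BDF class, so write_bdf() is normally
    # reached through a BDF model; the file names below are placeholders:
    #
    #   from pyNastran.bdf.bdf import BDF
    #   model = BDF()
    #   model.read_bdf('model.bdf')
    #   model.write_bdf('model_out.bdf', size=16, is_double=False, interspersed=True)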
def _write_header(self, bdf_file: Any, encoding: str, write_header: bool=True) -> None:
"""Writes the executive and case control decks."""
if self.punch is None:
# writing a mesh without using read_bdf
if self.system_command_lines or self.executive_control_lines or self.case_control_deck:
self.punch = False
else:
self.punch = True
if self.nastran_format and write_header:
bdf_file.write('$pyNastran: version=%s\n' % self.nastran_format)
bdf_file.write('$pyNastran: punch=%s\n' % self.punch)
bdf_file.write('$pyNastran: encoding=%s\n' % encoding)
bdf_file.write('$pyNastran: nnodes=%s\n' % len(self.nodes))
bdf_file.write('$pyNastran: nelements=%s\n' % len(self.elements))
if not self.punch:
self._write_executive_control_deck(bdf_file)
self._write_case_control_deck(bdf_file)
def _write_executive_control_deck(self, bdf_file: Any) -> None:
"""
Writes the executive control deck.
"""
msg = ''
for line in self.system_command_lines:
msg += line + '\n'
if self.executive_control_lines:
msg += '$EXECUTIVE CONTROL DECK\n'
if self.sol == 600:
new_sol = 'SOL 600,%s' % self.sol_method
else:
new_sol = 'SOL %s' % self.sol
if self.sol_iline is not None:
self.executive_control_lines[self.sol_iline] = new_sol
for line in self.executive_control_lines:
msg += line + '\n'
bdf_file.write(msg)
def _write_case_control_deck(self, bdf_file: Any) -> None:
"""Writes the Case Control Deck."""
if self.case_control_deck:
msg = '$CASE CONTROL DECK\n'
if self.superelement_models:
msg += self.case_control_deck.write(write_begin_bulk=False)
else:
msg += str(self.case_control_deck)
assert 'BEGIN BULK' in msg, msg
bdf_file.write(''.join(msg))
def _write_elements(self, bdf_file: Any, size: int=8, is_double: bool=False,
is_long_ids: Optional[bool]=None) -> None:
"""Writes the elements in a sorted order"""
size, is_long_ids = self._write_mesh_long_ids_size(size, is_long_ids)
if self.elements:
bdf_file.write('$ELEMENTS\n')
if is_long_ids:
for (eid, element) in sorted(self.elements.items()):
bdf_file.write(element.write_card_16(is_double))
else:
for (eid, element) in sorted(self.elements.items()):
try:
bdf_file.write(element.write_card(size, is_double))
except:
print('failed printing element...'
'type=%s eid=%s' % (element.type, eid))
raise
if self.ao_element_flags:
for (eid, element) in sorted(self.ao_element_flags.items()):
bdf_file.write(element.write_card(size, is_double))
if self.normals:
for (unused_nid, snorm) in sorted(self.normals.items()):
bdf_file.write(snorm.write_card(size, is_double))
self._write_nsm(bdf_file, size, is_double)
def _write_nsm(self, bdf_file: Any, size: int=8, is_double: bool=False,
is_long_ids: Optional[bool]=None) -> None:
"""Writes the nsm in a sorted order"""
if self.nsms or self.nsmadds:
bdf_file.write('$NSM\n')
for (unused_id, nsmadds) in sorted(self.nsmadds.items()):
for nsmadd in nsmadds:
bdf_file.write(str(nsmadd))
for (key, nsms) in sorted(self.nsms.items()):
for nsm in nsms:
try:
bdf_file.write(nsm.write_card(size, is_double))
except:
print('failed printing nsm...type=%s key=%r'
% (nsm.type, key))
raise
def _write_elements_interspersed(self, bdf_file: Any, size: int=8, is_double: bool=False,
is_long_ids: Optional[bool]=None) -> None:
"""Writes the elements and properties in and interspersed order"""
missing_properties = []
if self.properties:
bdf_file.write('$ELEMENTS_WITH_PROPERTIES\n')
eids_written = [] # type: List[int]
pids = sorted(self.properties.keys())
pid_eids = self.get_element_ids_dict_with_pids(pids, stop_if_no_eids=False)
#failed_element_types = set()
for (pid, eids) in sorted(pid_eids.items()):
prop = self.properties[pid]
if eids:
bdf_file.write(prop.write_card(size, is_double))
eids.sort()
for eid in eids:
element = self.elements[eid]
try:
bdf_file.write(element.write_card(size, is_double))
except:
print('failed printing element...' 'type=%r eid=%s'
% (element.type, eid))
raise
eids_written += eids
else:
missing_properties.append(prop.write_card(size, is_double))
eids_missing = set(self.elements.keys()).difference(set(eids_written))
if eids_missing:
bdf_file.write('$ELEMENTS_WITH_NO_PROPERTIES '
'(PID=0 and unanalyzed properties)\n')
for eid in sorted(eids_missing):
element = self.elements[eid]
try:
bdf_file.write(element.write_card(size, is_double))
except:
print('failed printing element...'
'type=%s eid=%s' % (element.type, eid))
raise
if missing_properties or self.pdampt or self.pbusht or self.pelast:
bdf_file.write('$UNASSOCIATED_PROPERTIES\n')
for card in sorted(self.pbusht.values()):
bdf_file.write(card.write_card(size, is_double))
for card in sorted(self.pdampt.values()):
bdf_file.write(card.write_card(size, is_double))
for card in sorted(self.pelast.values()):
bdf_file.write(card.write_card(size, is_double))
for card in missing_properties:
# this is a string...
#print("missing_property = ", card
bdf_file.write(card)
if self.ao_element_flags:
for (eid, element) in sorted(self.ao_element_flags.items()):
bdf_file.write(element.write_card(size, is_double))
if self.normals:
for (unused_nid, snorm) in sorted(self.normals.items()):
bdf_file.write(snorm.write_card(size, is_double))
self._write_nsm(bdf_file, size, is_double)
def _write_aero(self, bdf_file: Any, size: int=8, is_double: bool=False,
is_long_ids: Optional[bool]=None) -> None:
"""Writes the aero cards"""
if self.caeros or self.paeros or self.monitor_points or self.splines:
bdf_file.write('$AERO\n')
for (unused_id, caero) in sorted(self.caeros.items()):
bdf_file.write(caero.write_card(size, is_double))
for (unused_id, paero) in sorted(self.paeros.items()):
bdf_file.write(paero.write_card(size, is_double))
for (unused_id, spline) in sorted(self.splines.items()):
bdf_file.write(spline.write_card(size, is_double))
for monitor_point in self.monitor_points:
bdf_file.write(monitor_point.write_card(size, is_double))
self.zona.write_bdf(bdf_file, size=8, is_double=False)
def _write_aero_control(self, bdf_file: Any, size: int=8, is_double: bool=False,
is_long_ids: Optional[bool]=None) -> None:
"""Writes the aero control surface cards"""
if(self.aecomps or self.aefacts or self.aeparams or self.aelinks or
self.aelists or self.aestats or self.aesurf or self.aesurfs):
bdf_file.write('$AERO CONTROL SURFACES\n')
for (unused_id, aelinks) in sorted(self.aelinks.items()):
for aelink in aelinks:
bdf_file.write(aelink.write_card(size, is_double))
for (unused_id, aecomp) in sorted(self.aecomps.items()):
bdf_file.write(aecomp.write_card(size, is_double))
for (unused_id, aeparam) in sorted(self.aeparams.items()):
bdf_file.write(aeparam.write_card(size, is_double))
for (unused_id, aestat) in sorted(self.aestats.items()):
bdf_file.write(aestat.write_card(size, is_double))
for (unused_id, aelist) in sorted(self.aelists.items()):
bdf_file.write(aelist.write_card(size, is_double))
for (unused_id, aesurf) in sorted(self.aesurf.items()):
bdf_file.write(aesurf.write_card(size, is_double))
for (unused_id, aesurfs) in sorted(self.aesurfs.items()):
bdf_file.write(aesurfs.write_card(size, is_double))
for (unused_id, aefact) in sorted(self.aefacts.items()):
bdf_file.write(aefact.write_card(size, is_double))
def _write_static_aero(self, bdf_file: Any, size: int=8, is_double: bool=False,
is_long_ids: Optional[bool]=None) -> None:
"""Writes the static aero cards"""
if self.aeros or self.trims or self.divergs:
bdf_file.write('$STATIC AERO\n')
# static aero
if self.aeros:
bdf_file.write(self.aeros.write_card(size, is_double))
for (unused_id, trim) in sorted(self.trims.items()):
bdf_file.write(trim.write_card(size, is_double))
for (unused_id, diverg) in sorted(self.divergs.items()):
bdf_file.write(diverg.write_card(size, is_double))
def _find_aero_location(self) -> Tuple[bool, bool]:
"""Determines where the AERO card should be written"""
write_aero_in_flutter = False
write_aero_in_gust = False
if self.aero:
if self.flfacts or self.flutters or self.mkaeros:
write_aero_in_flutter = True
elif self.gusts:
write_aero_in_gust = True
else:
# an AERO card exists, but no FLUTTER, FLFACT, MKAEROx or GUST card
write_aero_in_flutter = True
return write_aero_in_flutter, write_aero_in_gust
def _write_flutter(self, bdf_file: Any, size: int=8, is_double: bool=False,
write_aero_in_flutter: bool=True,
is_long_ids: Optional[bool]=None) -> None:
"""Writes the flutter cards"""
if (write_aero_in_flutter and self.aero) or self.flfacts or self.flutters or self.mkaeros:
bdf_file.write('$FLUTTER\n')
if write_aero_in_flutter:
bdf_file.write(self.aero.write_card(size, is_double))
for (unused_id, flutter) in sorted(self.flutters.items()):
bdf_file.write(flutter.write_card(size, is_double))
for (unused_id, flfact) in sorted(self.flfacts.items()):
bdf_file.write(flfact.write_card(size, is_double))
for mkaero in self.mkaeros:
bdf_file.write(mkaero.write_card(size, is_double))
def _write_gust(self, bdf_file: Any, size: int=8, is_double: bool=False,
write_aero_in_gust: bool=True,
is_long_ids: Optional[bool]=None) -> None:
"""Writes the gust cards"""
if (write_aero_in_gust and self.aero) or self.gusts:
bdf_file.write('$GUST\n')
if write_aero_in_gust:
for (unused_id, aero) in sorted(self.aero.items()):
bdf_file.write(aero.write_card(size, is_double))
for (unused_id, gust) in sorted(self.gusts.items()):
bdf_file.write(gust.write_card(size, is_double))
def _write_common(self, bdf_file: Any, size: int=8, is_double: bool=False,
is_long_ids: Optional[bool]=None) -> None:
"""
Write the common outputs so none get missed...
Parameters
----------
bdf_file : file
the file object
size : int (default=8)
the field width
is_double : bool (default=False)
is this double precision
"""
self._write_dmigs(bdf_file, size, is_double, is_long_ids=is_long_ids)
self._write_loads(bdf_file, size, is_double, is_long_ids=is_long_ids)
self._write_dynamic(bdf_file, size, is_double, is_long_ids=is_long_ids)
self._write_aero_control(bdf_file, size, is_double, is_long_ids=is_long_ids)
self._write_static_aero(bdf_file, size, is_double, is_long_ids=is_long_ids)
write_aero_in_flutter, write_aero_in_gust = self._find_aero_location()
self._write_flutter(bdf_file, size, is_double, write_aero_in_flutter,
is_long_ids=is_long_ids)
self._write_gust(bdf_file, size, is_double, write_aero_in_gust, is_long_ids=is_long_ids)
self._write_thermal(bdf_file, size, is_double, is_long_ids=is_long_ids)
self._write_thermal_materials(bdf_file, size, is_double, is_long_ids=is_long_ids)
self._write_constraints(bdf_file, size, is_double, is_long_ids=is_long_ids)
self._write_optimization(bdf_file, size, is_double, is_long_ids=is_long_ids)
self._write_tables(bdf_file, size, is_double, is_long_ids=is_long_ids)
self._write_sets(bdf_file, size, is_double, is_long_ids=is_long_ids)
self._write_superelements(bdf_file, size, is_double, is_long_ids=is_long_ids)
self._write_contact(bdf_file, size, is_double, is_long_ids=is_long_ids)
self._write_parametric(bdf_file, size, is_double, is_long_ids=is_long_ids)
self._write_rejects(bdf_file, size, is_double, is_long_ids=is_long_ids)
self._write_coords(bdf_file, size, is_double, is_long_ids=is_long_ids)
def _write_constraints(self, bdf_file: Any, size: int=8, is_double: bool=False,
is_long_ids: Optional[bool]=None) -> None:
"""Writes the constraint cards sorted by ID"""
size, is_long_ids = self._write_mesh_long_ids_size(size, is_long_ids)
if self.suport or self.suport1:
bdf_file.write('$CONSTRAINTS\n')
for suport in self.suport:
bdf_file.write(suport.write_card(size, is_double))
for unused_suport_id, suport in sorted(self.suport1.items()):
bdf_file.write(suport.write_card(size, is_double))
if self.spcs or self.spcadds or self.spcoffs:
#bdf_file.write('$SPCs\n')
#str_spc = str(self.spcObject) # old
#if str_spc:
#bdf_file.write(str_spc)
#else:
bdf_file.write('$SPCs\n')
for (unused_id, spcadds) in sorted(self.spcadds.items()):
for spcadd in spcadds:
bdf_file.write(str(spcadd))
for (unused_id, spcs) in sorted(self.spcs.items()):
for spc in spcs:
bdf_file.write(str(spc))
for (unused_id, spcoffs) in sorted(self.spcoffs.items()):
for spc in spcoffs:
bdf_file.write(str(spc))
if self.mpcs or self.mpcadds:
bdf_file.write('$MPCs\n')
for (unused_id, mpcadds) in sorted(self.mpcadds.items()):
for mpcadd in mpcadds:
bdf_file.write(str(mpcadd))
for (unused_id, mpcs) in sorted(self.mpcs.items()):
for mpc in mpcs:
bdf_file.write(mpc.write_card(size, is_double))
def _write_contact(self, bdf_file: Any, size: int=8, is_double: bool=False,
is_long_ids: Optional[bool]=None) -> None:
"""Writes the contact cards sorted by ID"""
is_contact = (self.bcrparas or self.bctadds or self.bctparas
or self.bctsets or self.bsurf or self.bsurfs
or self.bconp or self.blseg)
if is_contact:
bdf_file.write('$CONTACT\n')
for (unused_id, bcrpara) in sorted(self.bcrparas.items()):
bdf_file.write(bcrpara.write_card(size, is_double))
for (unused_id, bctadds) in sorted(self.bctadds.items()):
bdf_file.write(bctadds.write_card(size, is_double))
for (unused_id, bctpara) in sorted(self.bctparas.items()):
bdf_file.write(bctpara.write_card(size, is_double))
for (unused_id, bctset) in sorted(self.bctsets.items()):
bdf_file.write(bctset.write_card(size, is_double))
for (unused_id, bsurfi) in sorted(self.bsurf.items()):
bdf_file.write(bsurfi.write_card(size, is_double))
for (unused_id, bsurfsi) in sorted(self.bsurfs.items()):
bdf_file.write(bsurfsi.write_card(size, is_double))
for (unused_id, bconp) in sorted(self.bconp.items()):
bdf_file.write(bconp.write_card(size, is_double))
for (unused_id, blseg) in sorted(self.blseg.items()):
bdf_file.write(blseg.write_card(size, is_double))
def _write_coords(self, bdf_file: Any, size: int=8, is_double: bool=False,
is_long_ids: Optional[bool]=None) -> None:
"""Writes the coordinate cards in a sorted order"""
size, is_long_ids = self._write_mesh_long_ids_size(size, is_long_ids)
if len(self.coords) > 1:
bdf_file.write('$COORDS\n')
for (unused_id, coord) in sorted(self.coords.items()):
if unused_id != 0:
try:
bdf_file.write(coord.write_card(size, is_double))
except RuntimeError:
bdf_file.write(coord.write_card(16, is_double))
def _write_dmigs(self, bdf_file: Any, size: int=8, is_double: bool=False,
is_long_ids: Optional[bool]=None) -> None:
"""
Writes the DMIG cards
Parameters
----------
size : int
large field (16) or small field (8)
"""
for (unused_name, dmig) in sorted(self.dmigs.items()):
bdf_file.write(dmig.write_card(size, is_double))
for (unused_name, dmi) in sorted(self.dmis.items()):
bdf_file.write(dmi.write_card(size, is_double))
for (unused_name, dmij) in sorted(self.dmijs.items()):
bdf_file.write(dmij.write_card(size, is_double))
for (unused_name, dmiji) in sorted(self.dmijis.items()):
bdf_file.write(dmiji.write_card(size, is_double))
for (unused_name, dmik) in sorted(self.dmiks.items()):
bdf_file.write(dmik.write_card(size, is_double))
for (unused_name, dmiax) in sorted(self.dmiax.items()):
bdf_file.write(dmiax.write_card(size, is_double))
def _write_dynamic(self, bdf_file: Any, size: int=8, is_double: bool=False,
is_long_ids: Optional[bool]=None) -> None:
"""Writes the dynamic cards sorted by ID"""
is_dynamic = (self.dareas or self.dphases or self.nlparms or self.frequencies or
self.methods or self.cMethods or self.tsteps or self.tstepnls or
self.transfer_functions or self.delays or self.rotors or self.tics or
self.nlpcis)
if is_dynamic:
bdf_file.write('$DYNAMIC\n')
for (unused_id, method) in sorted(self.methods.items()):
bdf_file.write(method.write_card(size, is_double))
for (unused_id, cmethod) in sorted(self.cMethods.items()):
bdf_file.write(cmethod.write_card(size, is_double))
for (unused_id, darea) in sorted(self.dareas.items()):
bdf_file.write(darea.write_card(size, is_double))
for (unused_id, dphase) in sorted(self.dphases.items()):
bdf_file.write(dphase.write_card(size, is_double))
for (unused_id, nlparm) in sorted(self.nlparms.items()):
bdf_file.write(nlparm.write_card(size, is_double))
for (unused_id, nlpci) in sorted(self.nlpcis.items()):
bdf_file.write(nlpci.write_card(size, is_double))
for (unused_id, tstep) in sorted(self.tsteps.items()):
bdf_file.write(tstep.write_card(size, is_double))
for (unused_id, tstepnl) in sorted(self.tstepnls.items()):
bdf_file.write(tstepnl.write_card(size, is_double))
for (unused_id, freqs) in sorted(self.frequencies.items()):
for freq in freqs:
bdf_file.write(freq.write_card(size, is_double))
for (unused_id, delay) in sorted(self.delays.items()):
bdf_file.write(delay.write_card(size, is_double))
for (unused_id, rotor) in sorted(self.rotors.items()):
bdf_file.write(rotor.write_card(size, is_double))
for (unused_id, tic) in sorted(self.tics.items()):
bdf_file.write(tic.write_card(size, is_double))
for (unused_id, tfs) in sorted(self.transfer_functions.items()):
for transfer_function in tfs:
bdf_file.write(transfer_function.write_card(size, is_double))
def _write_mesh_long_ids_size(self, size: bool, is_long_ids: bool) -> Tuple[int, bool]:
"""helper method"""
if is_long_ids and size == 16 or is_long_ids is False:
return size, is_long_ids
if size == 16 and is_long_ids is None or self.is_long_ids:
size = 16
is_long_ids = True
else:
is_long_ids = False
return size, is_long_ids
def _write_loads(self, bdf_file: Any, size: int=8, is_double: bool=False,
is_long_ids: Optional[bool]=None) -> None:
"""Writes the load cards sorted by ID"""
size, is_long_ids = self._write_mesh_long_ids_size(size, is_long_ids)
if self.load_combinations or self.loads or self.tempds:
bdf_file.write('$LOADS\n')
for (key, load_combinations) in sorted(self.load_combinations.items()):
for load_combination in load_combinations:
try:
bdf_file.write(load_combination.write_card(size, is_double))
except:
print('failed printing load...type=%s key=%r'
% (load_combination.type, key))
raise
for (key, loadcase) in sorted(self.loads.items()):
for load in loadcase:
try:
bdf_file.write(load.write_card(size, is_double))
except:
print('failed printing load...type=%s key=%r'
% (load.type, key))
raise
for unused_key, tempd in sorted(self.tempds.items()):
bdf_file.write(tempd.write_card(size, is_double))
self._write_dloads(bdf_file, size=size, is_double=is_double, is_long_ids=is_long_ids)
def _write_dloads(self, bdf_file: Any, size: int=8, is_double: bool=False,
is_long_ids: Optional[bool]=None) -> None:
"""Writes the dload cards sorted by ID"""
size, is_long_ids = self._write_mesh_long_ids_size(size, is_long_ids)
if self.dloads or self.dload_entries:
bdf_file.write('$DLOADS\n')
for (key, loadcase) in sorted(self.dloads.items()):
for load in loadcase:
try:
bdf_file.write(load.write_card(size, is_double))
except:
print('failed printing load...type=%s key=%r'
% (load.type, key))
raise
for (key, loadcase) in sorted(self.dload_entries.items()):
for load in loadcase:
try:
bdf_file.write(load.write_card(size, is_double))
except:
print('failed printing load...type=%s key=%r'
% (load.type, key))
raise
def _write_masses(self, bdf_file: Any, size: int=8, is_double: bool=False,
is_long_ids: Optional[bool]=None) -> None:
"""Writes the mass cards sorted by ID"""
size, is_long_ids = self._write_mesh_long_ids_size(size, is_long_ids)
if self.properties_mass:
bdf_file.write('$PROPERTIES_MASS\n')
for (pid, mass) in sorted(self.properties_mass.items()):
try:
bdf_file.write(mass.write_card(size, is_double))
except:
print('failed printing mass property...'
'type=%s eid=%s' % (mass.type, pid))
raise
if self.masses:
bdf_file.write('$MASSES\n')
for (eid, mass) in sorted(self.masses.items()):
try:
bdf_file.write(mass.write_card(size, is_double))
except:
print('failed printing masses...'
'type=%s eid=%s' % (mass.type, eid))
raise
def _write_materials(self, bdf_file: Any, size: int=8, is_double: bool=False,
is_long_ids: Optional[bool]=None) -> None:
"""Writes the materials in a sorted order"""
size, is_long_ids = self._write_mesh_long_ids_size(size, is_long_ids)
is_big_materials = hasattr(self, 'big_materials') and self.big_materials
is_materials = (self.materials or self.hyperelastic_materials or self.creep_materials or
self.MATS1 or self.MATS3 or self.MATS8 or self.MATT1 or
self.MATT2 or self.MATT3 or self.MATT4 or self.MATT5 or
self.MATT8 or self.MATT9 or self.nxstrats or is_big_materials)
if is_materials:
bdf_file.write('$MATERIALS\n')
for (unused_mid, material) in sorted(self.materials.items()):
bdf_file.write(material.write_card(size, is_double))
for (unused_mid, material) in sorted(self.hyperelastic_materials.items()):
bdf_file.write(material.write_card(size, is_double))
for (unused_mid, material) in sorted(self.creep_materials.items()):
bdf_file.write(material.write_card(size, is_double))
for (unused_mid, material) in sorted(self.MATS1.items()):
bdf_file.write(material.write_card(size, is_double))
for (unused_mid, material) in sorted(self.MATS3.items()):
bdf_file.write(material.write_card(size, is_double))
for (unused_mid, material) in sorted(self.MATS8.items()):
bdf_file.write(material.write_card(size, is_double))
for (unused_mid, material) in sorted(self.MATT1.items()):
bdf_file.write(material.write_card(size, is_double))
for (unused_mid, material) in sorted(self.MATT2.items()):
bdf_file.write(material.write_card(size, is_double))
for (unused_mid, material) in sorted(self.MATT3.items()):
bdf_file.write(material.write_card(size, is_double))
for (unused_mid, material) in sorted(self.MATT4.items()):
bdf_file.write(material.write_card(size, is_double))
for (unused_mid, material) in sorted(self.MATT5.items()):
bdf_file.write(material.write_card(size, is_double))
for (unused_mid, material) in sorted(self.MATT8.items()):
bdf_file.write(material.write_card(size, is_double))
for (unused_mid, material) in sorted(self.MATT9.items()):
bdf_file.write(material.write_card(size, is_double))
for (unused_sid, nxstrat) in sorted(self.nxstrats.items()):
bdf_file.write(nxstrat.write_card(size, is_double))
if is_big_materials:
for unused_mid, mat in sorted(self.big_materials.items()):
bdf_file.write(mat.write_card_16(is_double))
def _write_nodes(self, bdf_file: Any, size: int=8, is_double: bool=False,
is_long_ids: Optional[bool]=None) -> None:
"""Writes the NODE-type cards"""
if self.spoints:
bdf_file.write('$SPOINTS\n')
bdf_file.write(write_xpoints('SPOINT', self.spoints))
if self.epoints:
bdf_file.write('$EPOINTS\n')
bdf_file.write(write_xpoints('EPOINT', self.epoints))
if self.points:
bdf_file.write('$POINTS\n')
for unused_point_id, point in sorted(self.points.items()):
bdf_file.write(point.write_card(size, is_double))
if self._is_axis_symmetric:
if self.axic:
bdf_file.write(self.axic.write_card(size, is_double))
if self.axif:
bdf_file.write(self.axif.write_card(size, is_double))
for unused_nid, ringax_pointax in sorted(self.ringaxs.items()):
bdf_file.write(ringax_pointax.write_card(size, is_double))
for unused_ringfl, ringfl in sorted(self.ringfl.items()):
bdf_file.write(ringfl.write_card(size, is_double))
for unused_nid, gridb in sorted(self.gridb.items()):
bdf_file.write(gridb.write_card(size, is_double))
self._write_grids(bdf_file, size=size, is_double=is_double)
if self.seqgp:
bdf_file.write(self.seqgp.write_card(size, is_double))
#if 0: # not finished
#self._write_nodes_associated(bdf_file, size, is_double)
def _write_grids(self, bdf_file: Any, size: int=8, is_double: bool=False,
is_long_ids: Optional[bool]=None) -> None:
"""Writes the GRID-type cards"""
size, is_long_ids = self._write_mesh_long_ids_size(size, is_long_ids)
if self.nodes:
bdf_file.write('$NODES\n')
if self.grdset:
bdf_file.write(self.grdset.write_card(size))
if is_long_ids:
for (unused_nid, node) in sorted(self.nodes.items()):
bdf_file.write(node.write_card_16(is_double))
else:
for (unused_nid, node) in sorted(self.nodes.items()):
bdf_file.write(node.write_card(size, is_double))
#def _write_nodes_associated(self, bdf_file, size=8, is_double=False):
#"""
#Writes the NODE-type in associated and unassociated groups.
#.. warning:: Sometimes crashes, probably on invalid BDFs.
#"""
#associated_nodes = set()
#for (eid, element) in self.elements.items():
#associated_nodes = associated_nodes.union(set(element.node_ids))
#all_nodes = set(self.nodes.keys())
#unassociated_nodes = list(all_nodes.difference(associated_nodes))
##missing_nodes = all_nodes.difference(
## TODO: this really shouldn't be a list...???
#associated_nodes = list(associated_nodes)
#if associated_nodes:
#bdf_file.write('$ASSOCIATED NODES\n')
#if self.grdset:
#bdf_file.write(self.grdset.write_card(size, is_double))
## TODO: this really shouldn't be a dictionary...???
#for key, node in sorted(associated_nodes.items()):
#bdf_file.write(node.write_card(size, is_double))
#if unassociated_nodes:
#bdf_file.write('$UNASSOCIATED NODES\n')
#if self.grdset and not associated_nodes:
#v(self.grdset.write_card(size, is_double))
#for key, node in sorted(unassociated_nodes.items()):
#if key in self.nodes:
#bdf_file.write(node.write_card(size, is_double))
#else:
#bdf_file.write('$ Missing NodeID=%s' % key)
def _write_optimization(self, bdf_file: Any, size: int=8, is_double: bool=False,
is_long_ids: Optional[bool]=None) -> None:
"""Writes the optimization cards sorted by ID"""
is_optimization = (self.dconadds or self.dconstrs or self.desvars or self.ddvals or
self.dresps or
self.dvprels or self.dvmrels or self.dvcrels or self.doptprm or
self.dlinks or self.dequations or self.dtable is not None or
self.dvgrids or self.dscreen)
if is_optimization:
bdf_file.write('$OPTIMIZATION\n')
for (unused_id, dconadd) in sorted(self.dconadds.items()):
bdf_file.write(dconadd.write_card(size, is_double))
for (unused_id, dconstrs) in sorted(self.dconstrs.items()):
for dconstr in dconstrs:
bdf_file.write(dconstr.write_card(size, is_double))
for (unused_id, desvar) in sorted(self.desvars.items()):
bdf_file.write(desvar.write_card(size, is_double))
for (unused_id, ddval) in sorted(self.ddvals.items()):
bdf_file.write(ddval.write_card(size, is_double))
for (unused_id, dlink) in sorted(self.dlinks.items()):
bdf_file.write(dlink.write_card(size, is_double))
for (unused_id, dresp) in sorted(self.dresps.items()):
bdf_file.write(dresp.write_card(size, is_double))
for (unused_id, dvcrel) in sorted(self.dvcrels.items()):
bdf_file.write(dvcrel.write_card(size, is_double))
for (unused_id, dvmrel) in sorted(self.dvmrels.items()):
bdf_file.write(dvmrel.write_card(size, is_double))
for (unused_id, dvprel) in sorted(self.dvprels.items()):
bdf_file.write(dvprel.write_card(size, is_double))
for (unused_id, dvgrids) in sorted(self.dvgrids.items()):
for dvgrid in dvgrids:
bdf_file.write(dvgrid.write_card(size, is_double))
for (unused_id, dscreen) in sorted(self.dscreen.items()):
bdf_file.write(str(dscreen))
for (unused_id, equation) in sorted(self.dequations.items()):
bdf_file.write(str(equation))
if self.dtable is not None:
bdf_file.write(self.dtable.write_card(size, is_double))
if self.doptprm is not None:
bdf_file.write(self.doptprm.write_card(size, is_double))
def _write_parametric(self, bdf_file: Any, size: int=8, is_double: bool=False,
is_long_ids: Optional[bool]=None) -> None:
"""Writes the optimization cards sorted by ID"""
        is_parametric = (self.pset or self.pval or self.gmcurv or self.gmsurf or
                         self.feedge or self.feface)
if is_parametric:
for (unused_id, pset) in sorted(self.pset.items()):
bdf_file.write(pset.write_card(size, is_double))
for (unused_adapt_id, pvals) in sorted(self.pval.items()):
for pval in pvals:
bdf_file.write(pval.write_card(size, is_double))
for (unused_id, gmcurv) in sorted(self.gmcurv.items()):
bdf_file.write(gmcurv.write_card(size, is_double))
for (unused_id, gmsurf) in sorted(self.gmsurf.items()):
bdf_file.write(gmsurf.write_card(size, is_double))
for (unused_id, feedge) in sorted(self.feedge.items()):
bdf_file.write(feedge.write_card(size, is_double))
for (unused_id, feface) in sorted(self.feface.items()):
bdf_file.write(feface.write_card(size, is_double))
def _write_params(self, bdf_file: Any, size: int=8, is_double: bool=False,
is_long_ids: Optional[bool]=None) -> None:
"""Writes the PARAM cards"""
size, is_long_ids = self._write_mesh_long_ids_size(size, is_long_ids)
if self.params or self.dti:
            bdf_file.write('$PARAMS\n')
for unused_name, dti in sorted(self.dti.items()):
bdf_file.write(dti.write_card(size=size, is_double=is_double))
for (unused_key, param) in sorted(self.params.items()):
bdf_file.write(param.write_card(size, is_double))
def _write_properties(self, bdf_file: Any, size: int=8, is_double: bool=False,
is_long_ids: Optional[bool]=None) -> None:
"""Writes the properties in a sorted order"""
size, is_long_ids = self._write_mesh_long_ids_size(size, is_long_ids)
is_big_properties = hasattr(self, 'big_properties') and self.big_properties
is_properties = (self.properties or self.pelast or
self.pdampt or self.pbusht or is_big_properties)
if is_properties:
bdf_file.write('$PROPERTIES\n')
prop_groups = (self.properties, self.pelast, self.pdampt, self.pbusht)
if is_long_ids:
for prop_group in prop_groups:
for unused_pid, prop in sorted(prop_group.items()):
bdf_file.write(prop.write_card_16(is_double))
#except:
#print('failed printing property type=%s' % prop.type)
#raise
else:
for prop_group in prop_groups:
for unused_pid, prop in sorted(prop_group.items()):
bdf_file.write(prop.write_card(size, is_double))
if is_big_properties:
for unused_pid, prop in sorted(self.big_properties.items()):
bdf_file.write(prop.write_card_16(is_double))
def _write_properties_by_element_type(self, bdf_file: Any, size: int=8, is_double: bool=False,
is_long_ids: Optional[bool]=None) -> None:
"""
Writes the properties in a sorted order by property type grouping
TODO: Missing some property types.
"""
size, is_long_ids = self._write_mesh_long_ids_size(size, is_long_ids)
is_properties = self.properties or self.pelast or self.pdampt or self.pbusht
if not is_properties:
return
from collections import defaultdict, OrderedDict
propertys_class_to_property_types = OrderedDict()
# prop_class -> property types
propertys_class_to_property_types['spring'] = ['PELAS', 'PELAST']
propertys_class_to_property_types['damper'] = ['PDAMP', 'PDAMPT']
propertys_class_to_property_types['rod'] = ['PROD', 'PTUBE']
propertys_class_to_property_types['bar'] = ['PBAR', 'PBARL', 'PBRSECT']
propertys_class_to_property_types['beam'] = ['PBEAM', 'PBEAML', 'PBMSECT']
propertys_class_to_property_types['bush'] = ['PBUSH', 'PBUSH1D', 'PBUSH2D']
propertys_class_to_property_types['shell'] = ['PSHEAR', 'PSHELL', 'PCOMP', 'PCOMPG']
propertys_class_to_property_types['solid'] = ['PSOLID']
property_type_to_property_class = {
#'other' : [],
}
# the inverse of propertys_class_to_property_types
for prop_class, prop_types in propertys_class_to_property_types.items():
for prop_type in prop_types:
property_type_to_property_class[prop_type] = prop_class
#if is_properties:
# put each property object into a class (e.g., CQUAD4 -> PCOMP)
properties_by_class = defaultdict(list)
prop_groups = (self.properties, self.pelast, self.pdampt, self.pbusht)
for properties in prop_groups:
for unused_pid, prop in properties.items():
prop_class = property_type_to_property_class[prop.type]
print(prop.type, '->', prop_class)
properties_by_class[prop_class].append(prop)
bdf_file.write('$PROPERTIES\n')
for prop_class, prop_types in propertys_class_to_property_types.items():
print(prop_class, prop_types)
#for prop_type in prop_types:
#if prop_type not in properties_by_class:
#continue
#print(' ', prop_type)
props = properties_by_class[prop_class]
if not props:
continue
bdf_file.write('$' + '-' * 80 + '\n')
bdf_file.write('$ %s\n' % prop_class)
for prop in props:
bdf_file.write(prop.write_card(size, is_double))
bdf_file.write('$' + '-' * 80 + '\n')
def _write_rejects(self, bdf_file: Any, size: int=8, is_double: bool=False,
is_long_ids: Optional[bool]=None) -> None:
"""
        Writes the rejected (processed) cards and the rejected unprocessed
        card lines.
"""
if size == 8:
print_func = print_card_8
else:
print_func = print_card_16
if self.reject_cards:
bdf_file.write('$REJECT_CARDS\n')
for reject_card in self.reject_cards:
try:
bdf_file.write(print_func(reject_card))
except RuntimeError:
for field in reject_card:
if field is not None and '=' in field:
raise SyntaxError('cannot reject equal signed '
'cards\ncard=%s\n' % reject_card)
raise
if self.reject_lines:
bdf_file.write('$REJECT_LINES\n')
for reject_lines in self.reject_lines:
if isinstance(reject_lines, (list, tuple)):
for reject in reject_lines:
reject2 = reject.rstrip()
if reject2:
bdf_file.write('%s\n' % reject2)
elif isinstance(reject_lines, str):
reject2 = reject_lines.rstrip()
if reject2:
bdf_file.write('%s\n' % reject2)
else:
raise TypeError(reject_lines)
def _write_rigid_elements(self, bdf_file: Any, size: int=8, is_double: bool=False,
is_long_ids: Optional[bool]=None) -> None:
"""Writes the rigid elements in a sorted order"""
size, is_long_ids = self._write_mesh_long_ids_size(size, is_long_ids)
if self.rigid_elements:
bdf_file.write('$RIGID ELEMENTS\n')
if is_long_ids:
for (eid, element) in sorted(self.rigid_elements.items()):
try:
bdf_file.write(element.write_card_16(is_double))
except:
print('failed printing element...'
'type=%s eid=%s' % (element.type, eid))
raise
else:
for (eid, element) in sorted(self.rigid_elements.items()):
try:
bdf_file.write(element.write_card(size, is_double))
except:
print('failed printing element...'
'type=%s eid=%s' % (element.type, eid))
raise
if self.plotels:
bdf_file.write('$PLOT ELEMENTS\n')
for (eid, element) in sorted(self.plotels.items()):
bdf_file.write(element.write_card(size, is_double))
def _write_sets(self, bdf_file: Any, size: int=8, is_double: bool=False,
is_long_ids: Optional[bool]=None) -> None:
"""Writes the SETx cards sorted by ID"""
is_sets = (self.sets or self.asets or self.omits or self.bsets or self.csets or self.qsets
or self.usets)
if is_sets:
bdf_file.write('$SETS\n') # type: List[str]
for (unused_id, set_obj) in sorted(self.sets.items()): # dict
bdf_file.write(set_obj.write_card(size, is_double))
for set_obj in self.asets: # list
bdf_file.write(set_obj.write_card(size, is_double))
for set_obj in self.omits: # list
bdf_file.write(set_obj.write_card(size, is_double))
for set_obj in self.bsets: # list
bdf_file.write(set_obj.write_card(size, is_double))
for set_obj in self.csets: # list
bdf_file.write(set_obj.write_card(size, is_double))
for set_obj in self.qsets: # list
bdf_file.write(set_obj.write_card(size, is_double))
for unused_name, usets in sorted(self.usets.items()): # dict
for set_obj in usets: # list
bdf_file.write(set_obj.write_card(size, is_double))
def _write_superelements(self, bdf_file: Any, size: int=8, is_double: bool=False,
is_long_ids: Optional[bool]=None) -> None:
"""
Writes the Superelement cards
Parameters
----------
size : int
large field (16) or small field (8)
"""
is_sets = (self.se_sets or self.se_bsets or self.se_csets or self.se_qsets
or self.se_usets)
if is_sets:
bdf_file.write('$SUPERELEMENTS\n') # type: List[str]
for set_obj in self.se_bsets: # list
bdf_file.write(set_obj.write_card(size, is_double))
for set_obj in self.se_csets: # list
bdf_file.write(set_obj.write_card(size, is_double))
for set_obj in self.se_qsets: # list
bdf_file.write(set_obj.write_card(size, is_double))
for (unused_set_id, set_obj) in sorted(self.se_sets.items()): # dict
bdf_file.write(set_obj.write_card(size, is_double))
for unused_name, usets in sorted(self.se_usets.items()): # dict
for set_obj in usets: # list
bdf_file.write(set_obj.write_card(size, is_double))
for suport in self.se_suport: # list
bdf_file.write(suport.write_card(size, is_double))
for unused_seid, csuper in sorted(self.csuper.items()):
bdf_file.write(csuper.write_card(size, is_double))
for unused_seid, csupext in sorted(self.csupext.items()):
bdf_file.write(csupext.write_card(size, is_double))
for unused_seid, sebulk in sorted(self.sebulk.items()):
bdf_file.write(sebulk.write_card(size, is_double))
for unused_seid, seconct in sorted(self.seconct.items()):
bdf_file.write(seconct.write_card(size, is_double))
for unused_seid, sebndry in sorted(self.sebndry.items()):
bdf_file.write(sebndry.write_card(size, is_double))
for unused_seid, seelt in sorted(self.seelt.items()):
bdf_file.write(seelt.write_card(size, is_double))
for unused_seid, seexcld in sorted(self.seexcld.items()):
bdf_file.write(seexcld.write_card(size, is_double))
for unused_seid, selabel in sorted(self.selabel.items()):
bdf_file.write(selabel.write_card(size, is_double))
for unused_seid, seloc in sorted(self.seloc.items()):
bdf_file.write(seloc.write_card(size, is_double))
for unused_seid, seload in sorted(self.seload.items()):
bdf_file.write(seload.write_card(size, is_double))
for unused_seid, sempln in sorted(self.sempln.items()):
bdf_file.write(sempln.write_card(size, is_double))
for unused_setid, senqset in sorted(self.senqset.items()):
bdf_file.write(senqset.write_card(size, is_double))
for unused_seid, setree in sorted(self.setree.items()):
bdf_file.write(setree.write_card(size, is_double))
def _write_tables(self, bdf_file: Any, size: int=8, is_double: bool=False,
is_long_ids: Optional[bool]=None) -> None:
"""Writes the TABLEx cards sorted by ID"""
if self.tables or self.tables_d or self.tables_m or self.tables_sdamping:
bdf_file.write('$TABLES\n') # type: List[str]
for (unused_id, table) in sorted(self.tables.items()):
bdf_file.write(table.write_card(size, is_double))
for (unused_id, table) in sorted(self.tables_d.items()):
bdf_file.write(table.write_card(size, is_double))
for (unused_id, table) in sorted(self.tables_m.items()):
bdf_file.write(table.write_card(size, is_double))
for (unused_id, table) in sorted(self.tables_sdamping.items()):
bdf_file.write(table.write_card(size, is_double))
if self.random_tables:
bdf_file.write('$RANDOM TABLES\n')
for (unused_id, table) in sorted(self.random_tables.items()):
bdf_file.write(table.write_card(size, is_double))
def _write_thermal(self, bdf_file: Any, size: int=8, is_double: bool=False,
is_long_ids: Optional[bool]=None) -> None:
"""Writes the thermal cards"""
# PHBDY
is_thermal = (self.phbdys or self.convection_properties or self.bcs or
self.views or self.view3ds or self.radset or self.radcavs)
if is_thermal:
bdf_file.write('$THERMAL\n')
for (unused_key, phbdy) in sorted(self.phbdys.items()):
bdf_file.write(phbdy.write_card(size, is_double))
#for unused_key, prop in sorted(self.thermal_properties.items()):
# bdf_file.write(str(prop))
for (unused_key, prop) in sorted(self.convection_properties.items()):
bdf_file.write(prop.write_card(size, is_double))
# BCs
for (unused_key, bcs) in sorted(self.bcs.items()):
for boundary_condition in bcs: # list
bdf_file.write(boundary_condition.write_card(size, is_double))
for (unused_key, view) in sorted(self.views.items()):
bdf_file.write(view.write_card(size, is_double))
for (unused_key, view3d) in sorted(self.view3ds.items()):
bdf_file.write(view3d.write_card(size, is_double))
if self.radset:
bdf_file.write(self.radset.write_card(size, is_double))
for unused_icavity, radcav in self.radcavs.items():
bdf_file.write(radcav.write_card(size, is_double))
def _write_thermal_materials(self, bdf_file: Any, size: int=8, is_double: bool=False,
is_long_ids: Optional[bool]=None) -> None:
"""Writes the thermal materials in a sorted order"""
if self.thermal_materials:
bdf_file.write('$THERMAL MATERIALS\n')
for (unused_mid, material) in sorted(self.thermal_materials.items()):
bdf_file.write(material.write_card(size, is_double))
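# --- Editor's sketch (not part of pyNastran): a minimal, self-contained
# illustration of the grouping idiom used by _write_properties_by_element_type
# above: declare a class -> card-types mapping, invert it into a card-type ->
# class lookup, then bucket objects by class before writing them out.
# The FakeProp class and the card names chosen here are placeholders.
def _demo_group_properties_by_class():
    from collections import OrderedDict, defaultdict
    class FakeProp:  # stand-in for a property card object with a .type attribute
        def __init__(self, prop_type):
            self.type = prop_type
    class_to_types = OrderedDict([
        ('rod', ['PROD', 'PTUBE']),
        ('shell', ['PSHEAR', 'PSHELL', 'PCOMP', 'PCOMPG']),
    ])
    # invert the mapping: each card type points back at its group
    type_to_class = {card_type: group
                     for group, card_types in class_to_types.items()
                     for card_type in card_types}
    props = [FakeProp('PROD'), FakeProp('PCOMP'), FakeProp('PSHELL')]
    by_class = defaultdict(list)
    for prop in props:
        by_class[type_to_class[prop.type]].append(prop)
    # iterate in the declared group order, skipping empty groups
    for group in class_to_types:
        if by_class[group]:
            print(group, [prop.type for prop in by_class[group]])
    # -> rod ['PROD']
    # -> shell ['PCOMP', 'PSHELL']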
| 49.081599
| 116
| 0.590055
|
4c0d3fa49e5ece2ecf9e464bb9bad46ebf95c4ac
| 11,355
|
py
|
Python
|
sunpy/net/dataretriever/client.py
|
drewleonard42/sunpy
|
79ca90a032213d82d42a3657a693b20b99b22464
|
[
"BSD-2-Clause"
] | null | null | null |
sunpy/net/dataretriever/client.py
|
drewleonard42/sunpy
|
79ca90a032213d82d42a3657a693b20b99b22464
|
[
"BSD-2-Clause"
] | null | null | null |
sunpy/net/dataretriever/client.py
|
drewleonard42/sunpy
|
79ca90a032213d82d42a3657a693b20b99b22464
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import copy
import os
from collections import OrderedDict, namedtuple
from functools import partial
import pathlib
import numpy as np
import astropy.table
import astropy.units as u
import sunpy
from sunpy.time import TimeRange
from sunpy.util import replacement_filename
from sunpy import config
from sunpy.net.base_client import BaseClient
from sunpy.net.download import Downloader, Results
from sunpy.net.vso.attrs import Time, Wavelength, _Range
TIME_FORMAT = config.get("general", "time_format")
__all__ = ['QueryResponse', 'GenericClient']
def simple_path(path, sock, url):
return path
class QueryResponseBlock(object):
"""
    Represents a URL and its source, along with other associated information
"""
def __init__(self, map0, url, time=None):
"""
Parameters
----------
        map0 : dict
            Dictionary with the relevant metadata for this block.
        url : str
            Uniform Resource Locator (URL) of the file.
"""
self._map = map0
self.source = map0.get('source', "Data not Available")
self.provider = map0.get('provider', "Data not Available")
self.physobs = map0.get('physobs', "Data not Available")
self.instrument = map0.get('instrument', "Data not Available")
self.url = url
self.time = TimeRange(map0.get('Time_start'),
map0.get('Time_end')) if time is None else time
self.wave = map0.get('wavelength', np.NaN)
def iter_urls(amap, url_list, time):
"""Helper Function"""
for aurl, t in zip(url_list, time):
tmp = QueryResponseBlock(amap, aurl, t)
yield tmp
class QueryResponse(list):
"""
Container of QueryResponseBlocks
"""
def __init__(self, lst):
super(QueryResponse, self).__init__(lst)
@classmethod
def create(cls, amap, lst, time=None):
if time is None:
time = [None] * len(lst)
return cls(iter_urls(amap, lst, time))
def time_range(self):
"""
Returns the time-span for which records are available
"""
return TimeRange(min(qrblock.time.start for qrblock in self),
max(qrblock.time.end for qrblock in self))
def response_block_properties(self):
"""
        Returns the set of public attribute names common to all the response blocks.
"""
s = {a if not a.startswith('_') else None for a in dir(self[0])}
for resp in self[1:]:
s = s.intersection({a if not a.startswith('_') else None for a in dir(resp)})
s.remove(None)
return s
def __repr__(self):
return repr(type(self)) + repr(self._build_table())
def __str__(self):
return str(self._build_table())
def _repr_html_(self):
return self._build_table()._repr_html_()
def _build_table(self):
columns = OrderedDict((('Start Time', []), ('End Time', []),
('Source', []), ('Instrument', []),
('Wavelength', [])))
for i, qrblock in enumerate(self):
columns['Start Time'].append(
(qrblock.time.start).strftime(TIME_FORMAT))
columns['End Time'].append(
(qrblock.time.end).strftime(TIME_FORMAT))
columns['Source'].append(qrblock.source)
columns['Instrument'].append(qrblock.instrument)
columns['Wavelength'].append(str(u.Quantity(qrblock.wave)))
return astropy.table.Table(columns)
class GenericClient(BaseClient):
"""
    Base class for simple web clients for the data retriever module. This class
    is mainly designed for downloading data from FTP and HTTP type data
    sources, although it should in theory be general enough to get data from any
    web service.
    This class has two user-facing methods,
    `~sunpy.net.dataretriever.client.GenericClient.search` and
    `~sunpy.net.dataretriever.client.GenericClient.fetch`: the former generates a
    set of results for files available through the service the client is
    querying, and the latter downloads that data.
The `~sunpy.net.dataretriever.client.GenericClient.search` method takes a
set of `sunpy.net.attrs` objects and then converts these into a call to
`~sunpy.net.dataretriever.client.GenericClient._get_url_for_timerange`. It
does this through the `map\\_` dictionary which represents the
`~sunpy.net.attrs` objects as a dictionary.
"""
def __init__(self):
self.map_ = {}
def _makeargs(self, *args):
"""
Construct the `map\\_` internal representation of the query.
This `map\\_` dictionary is passed through to the
`_get_url_for_timerange` method to get the URL results.
Parameters
----------
\\*args: `tuple`
The query attributes.
"""
for elem in args:
if isinstance(elem, Time):
self.map_['TimeRange'] = TimeRange(elem.start, elem.end)
self.map_['Time_start'] = elem.start
self.map_['Time_end'] = elem.end
elif isinstance(elem, _Range):
a_min = elem.min
a_max = elem.max
if a_min == a_max:
self.map_[elem.__class__.__name__.lower()] = a_min
else:
if isinstance(elem, Wavelength):
prefix = 'wave'
else:
prefix = ''
minmax = namedtuple("minmax", "{0}min {0}max".format(prefix))
self.map_[elem.__class__.__name__.lower()] = minmax(a_min, a_max)
else:
if hasattr(elem, 'value'):
self.map_[elem.__class__.__name__.lower()] = elem.value
else:
# This will only get hit if the attr is something like
# Extent, which is a unique subclass of Attr. Currently no
# unidown Clients support this, so we skip this line.
# Anything that hits this will require special code to
# convert it into the map_ dict.
raise ValueError(
"GenericClient can not add {} to the map_ dictionary to pass "
"to the Client.".format(elem.__class__.__name__)) # pragma: no cover
self._makeimap()
@classmethod
def _get_url_for_timerange(cls, timerange, **kwargs):
"""
Method which generates URL results from a timerange and the `map\\_`
dictionary.
Parameters
----------
timerange: `sunpy.time.TimeRange`
The timerange to extract the URLs for.
\\*\\*kwargs: `dict`
Any extra keywords to refine the search. Generated from the
attributes passed to
`~sunpy.net.dataretriever.client.GenericClient.search`.
"""
raise NotImplementedError
def _makeimap(self):
"""
        Add client-specific information to the ``map_`` dict.
        Normally this is extra metadata which is not downloaded, but known
        a priori.
"""
raise NotImplementedError
@classmethod
def _can_handle_query(cls, *query):
"""
Method the
`sunpy.net.fido_factory.UnifiedDownloaderFactory`
class uses to dispatch queries to this Client.
"""
raise NotImplementedError
def _get_full_filenames(self, qres, filenames, path):
"""
Download a set of results.
Parameters
----------
qres : `~sunpy.net.dataretriever.QueryResponse`
Results to download.
        filenames : list
            List of base filenames (e.g. "xyz.txt").
        path : str
            Path to the directory to download the files to.
Returns
-------
List of full pathnames for each file (download_directory + filename)
"""
# Create function to compute the filepath to download to if not set
default_dir = sunpy.config.get("downloads", "download_dir")
paths = []
for i, filename in enumerate(filenames):
if path is None:
fname = os.path.join(default_dir, '{file}')
elif isinstance(path, str) and '{file}' not in path:
fname = os.path.join(path, '{file}')
temp_dict = qres[i]._map.copy()
temp_dict['file'] = filename
fname = fname.format(**temp_dict)
fname = os.path.expanduser(fname)
if os.path.exists(fname):
fname = replacement_filename(fname)
fname = partial(simple_path, fname)
paths.append(fname)
return paths
def _get_time_for_url(self, urls):
"""
This method allows clients to customise the timerange displayed for
each URL.
It should return a sunpy.time.TimeRange object per URL.
"""
return NotImplemented
def search(self, *args, **kwargs):
"""
Query this client for a list of results.
Parameters
----------
\\*args: `tuple`
`sunpy.net.attrs` objects representing the query.
"""
GenericClient._makeargs(self, *args, **kwargs)
kwergs = copy.copy(self.map_)
kwergs.update(kwargs)
urls = self._get_url_for_timerange(
self.map_.get('TimeRange'), **kwergs)
if urls:
times = self._get_time_for_url(urls)
if times and times is not NotImplemented:
return QueryResponse.create(self.map_, urls, times)
return QueryResponse.create(self.map_, urls)
def fetch(self, qres, path=None, error_callback=None, **kwargs):
"""
Download a set of results.
Parameters
----------
qres : `~sunpy.net.dataretriever.QueryResponse`
Results to download.
path : string or pathlib.Path
Path to the download directory
error_callback : Function
Callback function for error during downloads
Returns
-------
Results Object
"""
# Check for type of path
if path is not None:
if isinstance(path, pathlib.Path):
path = str(path.absolute())
elif not isinstance(path, str):
err = "path should be either 'pathlib.Path' or 'str'. "\
"Got '{}'.".format(type(path))
raise TypeError(err)
urls = [qrblock.url for qrblock in qres]
filenames = [url.split('/')[-1] for url in urls]
paths = self._get_full_filenames(qres, filenames, path)
res = Results(lambda x: None, 0, lambda map_: self._link(map_))
dobj = Downloader(max_conn=len(urls), max_total=len(urls))
# We cast to list here in list(zip... to force execution of
# res.require([x]) at the start of the loop.
for aurl, ncall, fname in list(zip(urls, map(lambda x: res.require([x]),
urls), paths)):
dobj.download(aurl, fname, ncall, error_callback)
return res
def _link(self, map_):
"""Helper Function"""
paths = []
for k, v in map_.items():
paths.append(map_[k]['path'])
return paths
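# --- Editor's sketch (not part of sunpy): a minimal, hypothetical subclass showing
# how the GenericClient pattern documented above is usually filled in. A concrete
# client supplies _get_url_for_timerange(), _makeimap() and _can_handle_query();
# the inherited search()/fetch() machinery does the rest. The instrument name and
# URL below are invented placeholders, not a real data service.
class ExampleClient(GenericClient):
    """Hypothetical client used only to illustrate the required hooks."""
    @classmethod
    def _get_url_for_timerange(cls, timerange, **kwargs):
        # A real client would build one URL per file from `timerange` and the
        # refining keywords; a single placeholder URL stands in for that logic.
        return ['http://data.example.org/example_20170101.fits']
    def _makeimap(self):
        # Static metadata known a priori, merged into the map_ dict that
        # search() uses to build QueryResponseBlocks.
        self.map_['source'] = 'EXAMPLE'
        self.map_['provider'] = 'EXAMPLE'
        self.map_['instrument'] = 'example'
        self.map_['physobs'] = 'intensity'
    @classmethod
    def _can_handle_query(cls, *query):
        # Claim the query only if it asks for the made-up 'example' instrument.
        for attr in query:
            if attr.__class__.__name__ == 'Instrument' and attr.value == 'example':
                return True
        return False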
| 32.913043
| 93
| 0.579568
|
985684c0b72bed6996bbe60c27150d194a82a223
| 4,321
|
py
|
Python
|
reinforcement/valueIterationAgents.py
|
yuhang-lin/Reinforcement-Learning-Pacman
|
c178ce5f06f4132992c254c75c1e43e97348da47
|
[
"MIT"
] | null | null | null |
reinforcement/valueIterationAgents.py
|
yuhang-lin/Reinforcement-Learning-Pacman
|
c178ce5f06f4132992c254c75c1e43e97348da47
|
[
"MIT"
] | null | null | null |
reinforcement/valueIterationAgents.py
|
yuhang-lin/Reinforcement-Learning-Pacman
|
c178ce5f06f4132992c254c75c1e43e97348da47
|
[
"MIT"
] | null | null | null |
# valueIterationAgents.py
# -----------------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
import mdp, util
from learningAgents import ValueEstimationAgent
class ValueIterationAgent(ValueEstimationAgent):
"""
* Please read learningAgents.py before reading this.*
A ValueIterationAgent takes a Markov decision process
(see mdp.py) on initialization and runs value iteration
for a given number of iterations using the supplied
discount factor.
"""
def __init__(self, mdp, discount = 0.9, iterations = 100):
"""
Your value iteration agent should take an mdp on
construction, run the indicated number of iterations
and then act according to the resulting policy.
Some useful mdp methods you will use:
mdp.getStates()
mdp.getPossibleActions(state)
mdp.getTransitionStatesAndProbs(state, action)
mdp.getReward(state, action, nextState)
mdp.isTerminal(state)
"""
self.mdp = mdp
self.discount = discount
self.iterations = iterations
self.values = util.Counter() # A Counter is a dict with default 0
# Write value iteration code here
"*** YOUR CODE HERE ***"
for i in range(self.iterations):
new_values = util.Counter()
for state in self.mdp.getStates():
if self.mdp.isTerminal(state):
continue
action_values = []
for action in self.mdp.getPossibleActions(state):
                    q_value = 0
                    for next_state, prob in self.mdp.getTransitionStatesAndProbs(state, action):
                        q_value += prob * (self.mdp.getReward(state, action, next_state) + self.discount * self.values[next_state])
                    action_values.append(q_value)
new_values[state] = max(action_values)
self.values = new_values # Update values in batch
def getValue(self, state):
"""
Return the value of the state (computed in __init__).
"""
return self.values[state]
def computeQValueFromValues(self, state, action):
"""
Compute the Q-value of action in state from the
value function stored in self.values.
"""
"*** YOUR CODE HERE ***"
        q_value = 0
        for next_state, prob in self.mdp.getTransitionStatesAndProbs(state, action):
            q_value += prob * (self.mdp.getReward(state, action, next_state) + self.discount * self.values[next_state])
        return q_value
def computeActionFromValues(self, state):
"""
The policy is the best action in the given state
according to the values currently stored in self.values.
You may break ties any way you see fit. Note that if
there are no legal actions, which is the case at the
terminal state, you should return None.
"""
"*** YOUR CODE HERE ***"
actions = self.mdp.getPossibleActions(state)
best_action = None
action_values = util.Counter()
if len(actions) > 0:
for action in self.mdp.getPossibleActions(state):
action_values[action] = self.computeQValueFromValues(state, action)
best_action = action_values.argMax()
return best_action
def getPolicy(self, state):
return self.computeActionFromValues(state)
def getAction(self, state):
"Returns the policy at the state (no exploration)."
return self.computeActionFromValues(state)
def getQValue(self, state, action):
return self.computeQValueFromValues(state, action)
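# --- Editor's sketch (not part of the project code): a self-contained numerical
# illustration of the batch Bellman backup that ValueIterationAgent performs,
# written without the Berkeley mdp/util modules. The two-state MDP below
# (states, rewards, transition probabilities) is made up for illustration.
def _demo_value_iteration(discount=0.9, iterations=100):
    # transitions[(state, action)] -> list of (next_state, probability, reward)
    transitions = {
        ('A', 'go'):   [('B', 0.8, 5.0), ('A', 0.2, 0.0)],
        ('A', 'stay'): [('A', 1.0, 1.0)],
        ('B', 'go'):   [('A', 0.8, 0.0), ('B', 0.2, 0.0)],
        ('B', 'stay'): [('B', 1.0, 2.0)],
    }
    states = ['A', 'B']
    actions = ['go', 'stay']
    values = {s: 0.0 for s in states}
    for _ in range(iterations):
        new_values = {}
        for s in states:
            q_values = []
            for a in actions:
                q = 0.0
                for next_s, prob, reward in transitions[(s, a)]:
                    q += prob * (reward + discount * values[next_s])
                q_values.append(q)
            new_values[s] = max(q_values)  # V(s) <- max_a Q(s, a)
        values = new_values  # batch update, mirroring __init__ above
    return values
# With these made-up numbers the backups converge to roughly
# {'A': 22.4, 'B': 20.0}: from B it is best to 'stay' (V = 2 / (1 - 0.9) = 20),
# and from A it is best to 'go' and collect the reward of 5 on the way to B.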
| 39.642202
| 127
| 0.629715
|
472fb9cb14aac89eea64bcbddca688d1afe7ecca
| 1,803
|
py
|
Python
|
ipproxytool/spiders/proxy/usproxy.py
|
Victorchi/proxy
|
caabc2bfc6ee7fd2eaed76f5630028e4e3117de3
|
[
"MIT"
] | null | null | null |
ipproxytool/spiders/proxy/usproxy.py
|
Victorchi/proxy
|
caabc2bfc6ee7fd2eaed76f5630028e4e3117de3
|
[
"MIT"
] | 1
|
2022-03-02T14:53:50.000Z
|
2022-03-02T14:53:50.000Z
|
ipproxytool/spiders/proxy/usproxy.py
|
Victorchi/proxy
|
caabc2bfc6ee7fd2eaed76f5630028e4e3117de3
|
[
"MIT"
] | 1
|
2017-12-05T06:00:09.000Z
|
2017-12-05T06:00:09.000Z
|
# coding=utf-8
import re
from proxy import Proxy
from .basespider import BaseSpider
class UsProxySpider(BaseSpider):
name = 'usproxy'
def __init__(self, *a, **kwargs):
super(UsProxySpider, self).__init__(*a, **kwargs)
self.urls = [
'http://www.us-proxy.org/',
'http://free-proxy-list.net/uk-proxy.html',
'http://www.socks-proxy.net/',
]
self.headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-US,en;q=0.5',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'Host': 'www.us-proxy.org',
'If-Modified-Since': 'Tue, 24 Jan 2017 03:32:01 GMT',
'Referer': 'http://www.sslproxies.org/',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:50.0) Gecko/20100101 Firefox/50.0',
}
self.init()
def parse_page(self, response):
datas = response.xpath('//tbody/tr')
for data in datas:
ip = data.xpath('td[1]/text()').extract_first()
port = data.xpath('td[2]/text()').extract_first()
country = data.xpath('td[3]/text()').extract_first()
anonymity = data.xpath('td[5]/text()').extract_first()
https = data.xpath('td[7]/text()').extract_first()
proxy = Proxy()
proxy.set_value(
ip = ip,
port = port,
country = country,
anonymity = anonymity,
https = https,
source = self.name,
)
self.add_proxy(proxy)
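# --- Editor's sketch (not part of this spider): a small standalone check of the
# XPath logic in parse_page above, run against a hard-coded HTML snippet with
# parsel (the selector library Scrapy uses). Assumes parsel is installed; the
# table content is invented for illustration.
def _demo_parse_row():
    from parsel import Selector
    html = """
    <table><tbody>
      <tr><td>1.2.3.4</td><td>8080</td><td>US</td><td>x</td>
          <td>anonymous</td><td>x</td><td>yes</td></tr>
    </tbody></table>
    """
    for row in Selector(text=html).xpath('//tbody/tr'):
        ip = row.xpath('td[1]/text()').extract_first()
        port = row.xpath('td[2]/text()').extract_first()
        anonymity = row.xpath('td[5]/text()').extract_first()
        https = row.xpath('td[7]/text()').extract_first()
        print(ip, port, anonymity, https)  # -> 1.2.3.4 8080 anonymous yes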
| 33.388889
| 111
| 0.510261
|
6a34afd951e5ffae5cab2768911cbe3fe66af99d
| 28,995
|
py
|
Python
|
flexx/app/_component2.py
|
tbnorth/flexx
|
458823ccd554e57b1db709e97dce287db6b06c80
|
[
"BSD-2-Clause"
] | null | null | null |
flexx/app/_component2.py
|
tbnorth/flexx
|
458823ccd554e57b1db709e97dce287db6b06c80
|
[
"BSD-2-Clause"
] | null | null | null |
flexx/app/_component2.py
|
tbnorth/flexx
|
458823ccd554e57b1db709e97dce287db6b06c80
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Implementation of the app Component classes (LocalComponent,
ProxyComponent, StubComponent), which form the basis for the
PyComponent and JsComponent classes (and their proxies).
"""
import sys
from pscript import window, JSString, this_is_js
from .. import event
from ..event import Component, loop, Dict
from ..event._component import (with_metaclass, ComponentMeta)
from ..event._property import Property
from ..event._emitter import EmitterDescriptor
from ..event._action import ActionDescriptor
from ..event._js import create_js_component_class
from ._asset import get_mod_name
from . import logger
# The clientcore module is a PScript module that forms the core of the
# client-side of Flexx. We import the serializer instance, and can use
# that name in both Python and JS. Of course, in JS it's just the
# corresponding instance from the module that's being used.
# By using something from clientcore in JS here, we make clientcore a
# dependency of the current module.
from ._clientcore import serializer, bsdf
manager = None # Set by __init__ to prevent circular dependencies
def make_proxy_action(action):
# Note: the flx_prefixes are picked up by the code in flexx.event that
    # compiles component classes, so it can fix/insert the name for JS.
flx_name = action._name
def flx_proxy_action(self, *args):
self._proxy_action(flx_name, *args)
return self
flx_proxy_action.__doc__ = action.__doc__
flx_proxy_action.__qualname__ = 'flx_proxy_action'
return flx_proxy_action # ActionDescriptor(flx_proxy_action, flx_name, '')
def make_proxy_emitter(emitter):
# Note: the flx_prefixes are picked up by the code in flexx.event that
    # compiles component classes, so it can fix/insert the name for JS.
flx_name = emitter._name
def flx_proxy_emitter(self, *args):
self._proxy_emitter(flx_name, *args)
flx_proxy_emitter.__doc__ = emitter.__doc__
flx_proxy_emitter.__qualname__ = 'flx_proxy_emitter'
return flx_proxy_emitter # EmitterDescriptor(flx_proxy_emitter, flx_name, '')
def get_component_classes():
""" Get a list of all known PyComponent and JsComponent subclasses.
"""
return [c for c in AppComponentMeta.CLASSES]
def meta_repr(cls):
""" A repr function to provide some context on the purpose of a class.
"""
if issubclass(cls, PyComponent):
prefix = 'PyComponent class'
elif issubclass(cls, PyComponent.JS):
prefix = 'proxy PyComponent class for JS '
elif issubclass(cls, JsComponent):
prefix = 'proxy JsComponent class'
elif issubclass(cls, JsComponent.JS):
prefix = 'JsComponent class for JS'
else:
prefix = 'class'
return "<%s '%s.%s'>" % (prefix, cls.__module__, cls.__name__)
class LocalProperty(Property):
""" A generic property that is only present at the local side of
the component, i.e. not at the proxy. Intended for properties that
    the other side should not care about, and/or for which syncing would be
problematic, e.g. for performance or because it contains components
that we want to keep local.
"""
class ComponentMetaJS(ComponentMeta):
""" Meta class for autogenerated classes intended for JavaScript:
Proxy PyComponent and local JsComponents.
"""
__repr__ = meta_repr
def __init__(cls, name, *args):
name = name.encode() if sys.version_info[0] == 2 else name
return super().__init__(name, *args)
class AppComponentMeta(ComponentMeta):
""" Meta class for PyComponent and JsComponent
that generate a matching class for JS.
"""
# Keep track of all subclasses
CLASSES = []
__repr__ = meta_repr
def _init_hook1(cls, cls_name, bases, dct):
# cls is the class to be
# cls.__dict__ is its current dict, which may contain inherited items
# dct is the dict represented by exactly this class (no inheritance)
# Get CSS from the class now
CSS = dct.get('CSS', '')
# Create corresponding class for JS
if issubclass(cls, LocalComponent):
cls._make_js_proxy_class(cls_name, bases, dct)
elif issubclass(cls, ProxyComponent):
cls._make_js_local_class(cls_name, bases, dct)
else: # pragma: no cover
raise TypeError('Expected class to inherit from '
'LocalComponent or ProxyComponent.')
# Write __jsmodule__; an optimization for our module/asset system
cls.__jsmodule__ = get_mod_name(sys.modules[cls.__module__])
cls.JS.__jsmodule__ = cls.__jsmodule__ # need it in JS too
cls.JS.__module__ = cls.__module__
# Set CSS
cls.CSS = CSS
try:
delattr(cls.JS, 'CSS')
except AttributeError:
pass
def _init_hook2(cls, cls_name, bases, dct):
# Set __proxy_properties__ and __emitters__
if issubclass(cls, LocalComponent):
cls.__proxy_properties__ = cls.JS.__properties__
cls.JS.__emitters__ = cls.__emitters__
else:
cls.JS.__proxy_properties__ = cls.__properties__
cls.__emitters__ = cls.JS.__emitters__
# Set JS on the JS class
cls.JS.CODE = cls._get_js()
# Register this class. The classes in this list will be automatically
# "pushed to JS" in a JIT fashion. We have to make sure that we include
# the code for base classes not in this list, which we do in _get_js().
AppComponentMeta.CLASSES.append(cls)
def _make_js_proxy_class(cls, cls_name, bases, dct):
for c in bases:
assert not issubclass(cls, ProxyComponent)
# Fix inheritance for JS variant
jsbases = [getattr(b, 'JS') for b in cls.__bases__ if hasattr(b, 'JS')]
if not jsbases:
jsbases.append(ProxyComponent)
jsdict = {}
# Copy properties from this class to the JS proxy class.
# in Python 3.6 we iterate in the order in which the items are defined,
for name, val in dct.items():
if name.startswith('__') and name.endswith('__'):
continue
elif isinstance(val, LocalProperty):
pass # do not copy over
elif isinstance(val, Property):
jsdict[name] = val # properties are the same
elif isinstance(val, EmitterDescriptor):
jsdict[name] = make_proxy_emitter(val) # proxy emitter
elif isinstance(val, ActionDescriptor):
jsdict[name] = make_proxy_action(val) # proxy actions
else:
pass # no reactions/functions/class attributes on the proxy side
# Create JS class
cls.JS = ComponentMetaJS(cls_name, tuple(jsbases), jsdict)
def _make_js_local_class(cls, cls_name, bases, dct):
for c in bases:
assert not issubclass(cls, LocalComponent)
# Fix inheritance for JS variant
jsbases = [getattr(b, 'JS') for b in cls.__bases__ if hasattr(b, 'JS')]
if not jsbases:
jsbases.append(LocalComponent)
jsdict = {}
# Names that should stay in Python in addition to magic methods
py_only = ['_repr_html_']
# Copy properties from this class to the JS proxy class.
# in Python 3.6 we iterate in the order in which the items are defined,
for name, val in list(dct.items()):
# Skip?
if isinstance(val, classmethod):
continue
elif name in py_only or name.startswith('__') and name.endswith('__'):
if name not in ('__init__', '__linenr__'):
continue
# Move over to JS
if (isinstance(val, Property) or (callable(val) and
name.endswith('_validate'))):
jsdict[name] = val # properties are the same
if isinstance(val, LocalProperty):
delattr(cls, name)
dct.pop(name, None)
elif isinstance(val, EmitterDescriptor):
# JS part gets the proper emitter, Py side gets a proxy
jsdict[name] = val
setattr(cls, name, make_proxy_emitter(val))
elif isinstance(val, ActionDescriptor):
# JS part gets the proper action, Py side gets a proxy
jsdict[name] = val
setattr(cls, name, make_proxy_action(val))
else:
# Move attribute from the Py class to the JS class
jsdict[name] = val
delattr(cls, name)
dct.pop(name, None) # is this necessary?
# Create JS class
cls.JS = ComponentMetaJS(cls_name, tuple(jsbases), jsdict)
def _get_js(cls):
""" Get source code for this class plus the meta info about the code.
"""
# Since classes are defined in a module, we can safely name the classes
# by their plain name.
cls_name = cls.__name__
base_class = cls.JS.mro()[1]
base_class_name = '%s.prototype' % base_class.__name__
code = []
# Add this class
c = create_js_component_class(cls.JS, cls_name, base_class_name)
meta = c.meta
code.append(c)
# code.append(c.replace('var %s =' % cls_name,
# 'var %s = flexx.classes.%s =' % (cls_name, cls_name), 1))
# Add JS version of the base classes - but only once
if cls.__name__ == 'JsComponent':
c = cls._get_js_of_base_classes()
for k in ['vars_unknown', 'vars_global', 'std_functions', 'std_methods']:
meta[k].update(c.meta[k])
code.insert(0, c)
# Return with meta info
js = JSString('\n'.join(code))
js.meta = meta
return js
def _get_js_of_base_classes(cls):
""" Get JS for BaseAppComponent, LocalComponent, and ProxyComponent.
"""
c1 = create_js_component_class(BaseAppComponent, 'BaseAppComponent',
'Component.prototype')
c2 = create_js_component_class(LocalComponent, 'LocalComponent',
'BaseAppComponent.prototype')
c3 = create_js_component_class(ProxyComponent, 'ProxyComponent',
'BaseAppComponent.prototype')
c4 = create_js_component_class(StubComponent, 'StubComponent',
'BaseAppComponent.prototype')
meta = c1.meta
for k in ['vars_unknown', 'vars_global', 'std_functions', 'std_methods']:
for c in (c2, c3, c4):
meta[k].update(c.meta[k])
js = JSString('\n'.join([c1, c2, c3, c4]))
js.meta = meta
return js
class BaseAppComponent(Component):
""" Abstract class for Component classes that can be "shared" between
Python and JavaScript. The concrete implementations are:
* The ``PyComponent`` class, which operates in Python, but has a proxy
object in JavaSript to which properties are synced and from which actions
can be invoked.
* The ``JsComponent`` class, which operates in JavaScript, but can have a proxy
object in Python to which properties are synced and from which actions
can be invoked.
* The ``StubComponent`` class, which represents a component class that is
somewhere else, perhaps in another session. It does not have any
properties, nor actions. But it can be "moved around".
"""
session = event.Attribute(doc="""
The session to which this component belongs. The component id
is unique within its session.
""")
root = event.Attribute(doc="""
The component that represents the root of the application. Alias for
session.app.
""")
uid = event.Attribute(doc="""
A unique identifier for this component; a combination of the
session and component id's.
""")
def _comp_init_app_component(self, property_values):
# Pop special attribute
property_values.pop('flx_is_app', None)
# Pop and apply id if given
custom_id = property_values.pop('flx_id', None)
# Pop session or derive from active component
self._session = None
session = property_values.pop('flx_session', None)
if session is not None:
self._session = session
else:
active = loop.get_active_components() # Note that self is active too
active = active[-2] if len(active) > 1 else None
if active is not None:
self._session = active._session
else:
if not this_is_js():
self._session = manager.get_default_session()
# Register this component with the session (sets _id and _uid)
if self._session is None:
raise RuntimeError('%s needs a session!' % (custom_id or self._id))
self._session._register_component(self, custom_id)
self._root = self._session.app
# Return whether this instance was instantiated locally
return custom_id is None
class LocalComponent(BaseAppComponent):
"""
Base class for PyComponent in Python and JsComponent in JavaScript.
"""
def _comp_init_property_values(self, property_values):
# This is a good time to register with the session, and
# instantiate the proxy class. Property values have been set at this
# point, but init() has not yet been called.
# Keep track of what events are registered at the proxy
self.__event_types_at_proxy = []
# Init more
self._comp_init_app_component(property_values) # pops items
# Pop whether this local instance has a proxy at the other side
self._has_proxy = property_values.pop('flx_has_proxy', False)
# Call original method
super()._comp_init_property_values(property_values)
if this_is_js():
# This is a local JsComponent in JavaScript
self._event_listeners = []
else:
# This is a local PyComponent in Python
# A PyComponent always has a corresponding proxy in JS
self._ensure_proxy_instance(False)
def _ensure_proxy_instance(self, include_props=True):
""" Make the other end instantiate a proxy if necessary. This is e.g.
called by the BSDF serializer when a LocalComponent gets serialized.
A PyComponent always has a Proxy component, and we should not
dispose or delete it until the local component is disposed.
A JsComponent may be instantiated (as its proxy) from Python, in which
case we receive the flx_has_proxy kwarg. Still, Python can "loose" the
proxy class. To ensure that it exists in Python when needed, the BSDF
serializer will ensure it (by calling this method) when it gets
serialized.
In certain cases, it might be that the other end *does* have a proxy
while this end's _has_proxy is False. In that case the INSTANTIATE
command is send, but when handled, will be a no-op.
In certain cases, it might be that the other end just lost its
reference; this end's _has_proxy is True, and a new reference to this
component will fail to resolve. This is countered by keeping hold
of JsComponent proxy classes for at least one roundtrip (upon
initialization as well as disposal).
"""
if self._has_proxy is False and self._disposed is False:
if self._session.status > 0:
props = {}
if include_props:
for name in self.__proxy_properties__:
props[name] = getattr(self, name)
self._session.send_command('INSTANTIATE', self.__jsmodule__,
self.__class__.__name__,
self._id, [], props)
self._has_proxy = True
def emit(self, type, info=None):
# Overload emit() to send events to the proxy object at the other end
ev = super().emit(type, info)
isprop = type in self.__proxy_properties__
if self._has_proxy is True and self._session.status > 0:
# implicit: and self._disposed is False:
if isprop or type in self.__event_types_at_proxy:
self._session.send_command('INVOKE', self._id,
'_emit_at_proxy', [ev])
def _dispose(self):
# Let proxy side know that we no longer exist, and that it should
# dispose too. Send regardless of whether we have a proxy!
was_disposed = self._disposed
super()._dispose()
self._has_proxy = False # because we will tell it to dispose
if was_disposed is False and self._session is not None:
self._session._unregister_component(self)
if self._session.status > 0:
self._session.send_command('DISPOSE', self._id)
def _flx_set_has_proxy(self, has_proxy):
self._has_proxy = has_proxy
def _flx_set_event_types_at_proxy(self, event_types):
self.__event_types_at_proxy = event_types
class ProxyComponent(BaseAppComponent):
"""
Base class for JSComponent in Python and PyComponent in JavaScript.
"""
def __init__(self, *init_args, **kwargs):
# Need to overload this to handle init_args
if this_is_js():
# This is a proxy PyComponent in JavaScript.
# Always instantiated via an INSTANTIATE command from Python.
assert len(init_args) == 0
if 'flx_id' not in kwargs:
raise RuntimeError('Cannot instantiate a PyComponent from JS.')
super().__init__(**kwargs)
else:
# This is a proxy JsComponent in Python.
# Can be instantiated in Python,
self._flx_init_args = init_args
super().__init__(**kwargs)
def _comp_init_property_values(self, property_values):
# Init more
local_inst = self._comp_init_app_component(property_values) # pops items
# Call original method, only set props if this is instantiated "by the local"
props2set = {} if local_inst else property_values
super()._comp_init_property_values(props2set)
if this_is_js():
# This is a proxy PyComponent in JavaScript
assert len(property_values.keys()) == 0
else:
# This is a proxy JsComponent in Python
# Instantiate JavaScript version of this class
if local_inst is True: # i.e. only if Python "instantiated" it
property_values['flx_has_proxy'] = True
active_components = [c for c in loop.get_active_components()[:-1]
if isinstance(c, (PyComponent, JsComponent))]
self._session.send_command('INSTANTIATE', self.__jsmodule__,
self.__class__.__name__, self._id,
self._flx_init_args, property_values,
active_components)
del self._flx_init_args
def _comp_apply_property_values(self, values):
# Apply props in silence
for name, value in values:
setattr(self, '_' + name + '_value', value)
def _proxy_action(self, name, *args, **kwargs):
""" To invoke actions on the real object.
"""
assert not kwargs
# if self._session.status > 0, mmm, or rather error?
self._session.send_command('INVOKE', self._id, name, args)
def _proxy_emitter(self, name, *args, **kwargs):
""" To handle use of placeholder emitters.
"""
# todo: I am not sure yet whether to allow or disallow it. We disallow now;
# we can always INVOKE the emitter at the other side if that proves needed
if this_is_js():
logger.error('Cannot use emitters of a PyComponent in JS.')
else:
logger.error('Cannot use emitters of a JsComponent in Py.')
def _mutate(self, *args, **kwargs): # pragma: no cover
""" Disable mutations on the proxy class.
"""
raise RuntimeError('Cannot mutate properties from a proxy class.')
# Reference objects to get them collected into the JS variant of this
# module. Do it here, in a place where it wont hurt.
serializer # to bring in _clientcore as a way of bootstrapping
BsdfComponentExtension
def _registered_reactions_hook(self):
""" Keep the local component informed about what event types this proxy
        is interested in. This way, the traffic can be minimized, e.g. not send
mouse move events if they're not used anyway.
"""
event_types = super()._registered_reactions_hook()
try:
if self._disposed is False and self._session.status > 0:
self._session.send_command('INVOKE', self._id,
'_flx_set_event_types_at_proxy',
[event_types])
finally:
return event_types
@event.action
def _emit_at_proxy(self, ev):
""" Action used by the local component to push an event to the proxy
component. If the event represents a property-update, the mutation
is applied, otherwise the event is emitted here.
"""
if not this_is_js():
ev = Dict(ev)
if ev.type in self.__properties__ and hasattr(ev, 'mutation'):
# Mutate the property - this will cause an emit
if ev.mutation == 'set':
super()._mutate(ev.type, ev.new_value)
else:
super()._mutate(ev.type, ev.objects, ev.mutation, ev.index)
else:
self.emit(ev.type, ev)
def dispose(self):
if this_is_js():
# The server is leading ...
raise RuntimeError('Cannot dispose a PyComponent from JS.')
else:
# Disposing a JsComponent from JS is like invoking an action;
# we don't actually dispose ourselves just yet.
if self._session.status > 0:
self._session.send_command('INVOKE', self._id, 'dispose', [])
else:
super().dispose()
def _dispose(self):
# This gets called by the session upon a DISPOSE command,
# or on Python from __delete__ (via call_soon).
was_disposed = self._disposed
super()._dispose()
if was_disposed is False and self._session is not None:
self._session._unregister_component(self)
if self._session.status > 0:
# Let other side know that we no longer exist.
self._session.send_command('INVOKE', self._id,
'_flx_set_has_proxy', [False])
class StubComponent(BaseAppComponent):
"""
Class to represent stub proxy components to take the place of components
that do not belong to the current session, or that do not exist
for whatever reason. These objects cannot really be used, but they can
be moved around.
"""
def __init__(self, session, id):
super().__init__()
self._session = session
self._id = id
self._uid = session.id + '_' + id
def __repr__(self):
return ("<StubComponent for '%s' in session '%s' at 0x%x>" %
(self._id, self._session.id, id(self)))
# LocalComponent and ProxyComponent need __jsmodule__, but they do not
# participate in the AppComponentMeta class, so we add it here.
LocalComponent.__jsmodule__ = __name__
ProxyComponent.__jsmodule__ = __name__
StubComponent.__jsmodule__ = __name__
class JsComponent(with_metaclass(AppComponentMeta, ProxyComponent)):
""" Base component class that operates in JavaScript, but is accessible
in Python, where its properties and events can be observed,
and actions can be invoked.
JsComponents can be instantiated from both JavaScript and Python. A
corresponding proxy component is not necessarily present in Python. It
is created automatically when needed (e.g. when referenced by a property).
A JsComponent can be explicitly disposed from both Python and JavaScript.
When the Python garbage collector collects a JsComponent (or really, the
proxy thereof), only the Python side proxy is disposed; the JsComponent
in JS itself will be unaffected. Make sure to call ``dispose()`` when
needed!
"""
# The meta class will generate a JsComponent local class for JS
# and move all props, actions, etc. to it.
def __repr__(self):
d = ' (disposed)' if self._disposed else ''
return "<JsComponent '%s'%s at 0x%x>" % (self._id, d, id(self))
def _addEventListener(self, node, type, callback, capture=False):
""" Register events with DOM nodes, to be automatically cleaned up
when this object is disposed.
"""
node.addEventListener(type, callback, capture)
self._event_listeners.append((node, type, callback, capture))
def _dispose(self):
super()._dispose()
while len(self._event_listeners) > 0:
try:
node, type, callback, capture = self._event_listeners.pop()
node.removeEventListener(type, callback, capture)
except Exception as err:
print(err)
# Note: positioned below JSComponent, because linenr is used to sort JS defs,
# and the JS for the base component classes is attached to JSComponent.
class PyComponent(with_metaclass(AppComponentMeta, LocalComponent)):
""" Base component class that operates in Python, but is accessible
in JavaScript, where its properties and events can be observed,
and actions can be invoked.
PyComponents can only be instantiated in Python, and always have
a corresponding proxy object in JS. PyComponents can be disposed only
from Python. Disposal also happens if the Python garbage collector
collects a PyComponent.
"""
# The meta class generates a PyComponent proxy class for JS.
def __repr__(self):
d = ' (disposed)' if self._disposed else ''
return "<PyComponent '%s'%s at 0x%x>" % (self._id, d, id(self))
class BsdfComponentExtension(bsdf.Extension):
""" A BSDF extension to encode flexx.app Component objects based on their
session id and component id.
"""
name = 'flexx.app.component'
cls = BaseAppComponent # PyComponent, JsComponent, StubComponent
def match(self, s, c):
        # This is actually the default behavior, but added for completeness
return isinstance(c, self.cls)
def encode(self, s, c):
if isinstance(c, PyComponent): # i.e. LocalComponent in Python
c._ensure_proxy_instance()
return dict(session_id=c._session.id, id=c._id)
def decode(self, s, d):
c = None
session = manager.get_session_by_id(d['session_id'])
if session is None:
# object from other session
session = object()
session.id = d['session_id']
c = StubComponent(session, d['id'])
else:
c = session.get_component_instance(d['id'])
if c is None: # This should probably not happen
logger.warn('Using stub component for %s.' % d['id'])
c = StubComponent(session, d['id'])
else:
# Keep it alive for a bit
session.keep_alive(c)
return c
# The name and below methods get collected to produce a JS BSDF extension
def match_js(self, s, c): # pragma: no cover
return isinstance(c, BaseAppComponent)
def encode_js(self, s, c): # pragma: no cover
if isinstance(c, JsComponent): # i.e. LocalComponent in JS
c._ensure_proxy_instance()
return dict(session_id=c._session.id, id=c._id)
def decode_js(self, s, d): # pragma: no cover
c = None
session = window.flexx.sessions.get(d['session_id'], None)
if session is None:
session = dict(id=d['session_id'])
c = StubComponent(session, d['id'])
else:
c = session.get_component_instance(d['id'])
if c is None:
logger.warn('Using stub component for %s.' % d['id'])
c = StubComponent(session, d['id'])
return c
# todo: can the mechanism for defining BSDF extensions be simplified? (issue #429)
# Add BSDF extension for serializing components. The JS variant of the
# serializer is added by referencing the extension is JS code.
serializer.add_extension(BsdfComponentExtension)
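# --- Editor's sketch (not part of flexx): a minimal, hypothetical pair of
# component classes illustrating the split documented above. PyClock lives in
# Python and gets an auto-generated proxy in JS; JsLabel lives in JS and Python
# only ever talks to its proxy. The names and properties are invented, and the
# classes would normally be instantiated inside a running app/session (see
# _comp_init_app_component above), not at import time.
class PyClock(PyComponent):
    """Hypothetical Python-side component; `time` is mirrored to its JS proxy."""
    time = event.FloatProp(0, settable=True)
    @event.action
    def tick(self):
        # set_time() is the setter action generated for the settable property
        self.set_time(self.time + 1)
class JsLabel(JsComponent):
    """Hypothetical JS-side component; Python only ever sees a proxy with this property."""
    text = event.StringProp('', settable=True)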
| 40.214979
| 85
| 0.62866
|
abf27d180991fc068917f476f9f25200f7461429
| 8,086
|
py
|
Python
|
6-VGG16-CIFAR10-xavier_init.py
|
SunnyHaze/CIFAR10-VGG-Pytorch
|
c2794f7c709b85059ee06779ac2b8e1cc672e2c5
|
[
"Apache-2.0"
] | null | null | null |
6-VGG16-CIFAR10-xavier_init.py
|
SunnyHaze/CIFAR10-VGG-Pytorch
|
c2794f7c709b85059ee06779ac2b8e1cc672e2c5
|
[
"Apache-2.0"
] | null | null | null |
6-VGG16-CIFAR10-xavier_init.py
|
SunnyHaze/CIFAR10-VGG-Pytorch
|
c2794f7c709b85059ee06779ac2b8e1cc672e2c5
|
[
"Apache-2.0"
] | null | null | null |
import os
from matplotlib.pyplot import imshow
import torch
from torch.utils.data import Dataset, DataLoader, TensorDataset
from torch import conv2d, dropout, nn, sigmoid, tensor
import numpy as np
from imports.ParametersManager import *
from imports.utlis import *
from matplotlib import pyplot as plt
import torchvision.transforms as transforms
# Hyperparameters
MODELNAME='VGG16-xavier_init'
MODELFILEDIR = 'PretrainedModels' # directory where model parameters are stored
BatchSize = 128
LEARNINGRATE = 0.0005
epochNums = 0
SaveModelEveryNEpoch = 2 # save a model checkpoint every N epochs
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Build the path used to load/save the model parameter file
if not os.path.exists(MODELFILEDIR):
os.mkdir(MODELFILEDIR)
MODELFILEPATH = os.path.join(MODELFILEDIR, MODELNAME+'_model.pt')
# Wrap the data in a Dataset so it can be sampled through a DataLoader
class MyDataset(Dataset):
def __init__(self,SetType) -> None:
with open(SetType + 'Images.npy','rb') as f:
self.images =torch.tensor(np.load(f), dtype=torch.float32)
self.images = (self.images - 0.5) / 0.5
with open(SetType + 'Labels.npy','rb') as f:
tmp = np.load(f)
print(tmp)
self.labels=[]
for num in tmp:
self.labels.append([1 if x == num else 0 for x in range(10)])
self.labels = torch.tensor(self.labels, dtype=torch.float32)
def __getitem__(self, index):
return self.images[index], self.labels[index]
def __len__(self):
return len(self.labels)
def blockVGG(covLayerNum,inputChannel, outputChannel, kernelSize, withFinalCov1:bool):
layer = nn.Sequential()
layer.add_module('conv2D-1',nn.Conv2d(inputChannel, outputChannel,kernelSize,padding=1))
layer.add_module('relu-1',nn.ReLU())
for i in range(covLayerNum - 1):
layer.add_module('conv2D{}'.format(i),nn.Conv2d(outputChannel, outputChannel,kernelSize,padding=1))
layer.add_module('relu{}'.format(i),nn.ReLU())
if withFinalCov1:
layer.add_module('Conv2dOne',nn.Conv2d(outputChannel,outputChannel, 1))
layer.add_module('FinalRelu',nn.ReLU())
layer.add_module('max-pool',nn.MaxPool2d(2,2))
return layer
# Define the network architecture
class VGG11(nn.Module):
def __init__(self):
super().__init__()
self.layer1 = blockVGG(2,3,64,3,False)
self.layer2 = blockVGG(2,64,128,3,False)
self.layer3 = blockVGG(2,128,256,3,True)
self.layer4 = blockVGG(2,256,512,3,True)
self.layer5 = blockVGG(2,512,512,3,True)
self.layer6 = nn.Sequential(
nn.Linear(512, 512),
nn.ReLU(),
nn.Dropout(0.5),
nn.Linear(512,100),
nn.ReLU(),
nn.Dropout(0.5),
nn.Linear(100,10),
# nn.ReLU(),
# nn.Softmax(1)
)
for m in self.modules():
if isinstance(m, nn.Linear):
pass
                # Conv2d layers can likewise be detected and given their own initialization
elif isinstance(m, nn.Conv2d):
nn.init.xavier_normal_(m.weight)
print('init_xavier')
def forward(self,x:torch.Tensor):
        x = self.layer1(x) # run the convolutional blocks
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.layer5(x)
        x = x.view(x.shape[0], -1) # flatten for the classifier
        x = self.layer6(x) # run the fully connected classifier
return x
# Define the accuracy function
def accuracy(output , label):
rightNum = torch.sum(torch.max(label,1)[1].eq(torch.max(output,1)[1]))
return rightNum / len(label)
if __name__ == "__main__":
    # Instantiate the model
model = VGG11()
print(model)
    # # Load parameters from a partially trained ("half-finished") model if one exists
parManager = ParametersManager(device)
if os.path.exists(MODELFILEPATH):
parManager.loadFromFile(MODELFILEPATH)
parManager.setModelParameters(model)
else:
print('===No pre-trained model found!===')
model.cuda()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=LEARNINGRATE)
dirOfDataset = 'Cifar-10_Unpacked/'
    # Build the training set
TrainDataset = MyDataset(dirOfDataset + 'Train')
    # Build the test set
TestDataset = MyDataset(dirOfDataset + 'Test')
    # Build the training-set DataLoader
TrainLoader = DataLoader(TrainDataset,num_workers=8, pin_memory=True, batch_size=BatchSize, sampler= torch.utils.data.sampler.SubsetRandomSampler(range(len(TrainDataset))))
    # Build the test-set DataLoader:
TestLoader = DataLoader(TestDataset,num_workers=8, pin_memory=True, batch_size=BatchSize, sampler= torch.utils.data.sampler.SubsetRandomSampler(range(len(TestDataset))))
#
print('len(TrainLoader):{}'.format(len(TrainLoader)))
    # Helper to check that the split is correct: shows two rows of samples whose order matches the printed labels one-to-one
def testLoader():
inputs, classes = next(iter(TrainLoader))
inputs = inputs[:10]
classes = classes[:10]
print(inputs.shape)
print(classes.shape)
        print(classes) # inspect the labels
for i in range(len(inputs)):
plt.subplot(2,5,i+1)
img = torch.transpose(inputs[i], 0,2)
img = torch.transpose(img,1,0)
plt.imshow(img)
plt.title(label2name(oneHot2label(classes[i])))
plt.show()
# testLoader()
TrainACC = []
TestACC = []
GlobalLoss = []
for epoch in range(epochNums):
print("===开始本轮的Epoch {} == 总计是Epoch {}===".format(epoch, parManager.EpochDone))
        # Collect training statistics
epochAccuracy = []
epochLoss = []
model.train()
        #============= actual training loop =================
for batch_id, (inputs,label) in enumerate(TrainLoader):
# torch.train()
            # Zero the gradients first
optimizer.zero_grad()
output = model(inputs.cuda())
loss = criterion(output,label.cuda())
loss.backward()
optimizer.step()
epochAccuracy.append(accuracy(output,label.cuda()).cpu())
            epochLoss.append(loss.item()) # .item() extracts the numeric value
if batch_id % (int(len(TrainLoader) / 20)) == 0:
print(" 当前运行到[{}/{}], 目前Epoch准确率为:{:.2f}%,Loss:{:.6f}".format(batch_id,len(TrainLoader), np.mean(epochAccuracy) * 100, loss))
        #============== end of this epoch's training ==============
        # Record the training-set accuracy
TrainACC.append(np.mean(epochAccuracy))
GlobalLoss.append(np.mean(epochLoss))
        # ========== run one pass over the test set ==========
        localTestACC = []
        model.eval() # switch to evaluation mode to save overhead
        with torch.no_grad(): # context manager: gradients are not tracked inside this block
            for inputs, label in TestLoader:
                output = model(inputs.cuda())
                localTestACC.append(accuracy(output,label.cuda()).cpu())
        # ========== end of the test-set evaluation ==========
        # Record the test-set accuracy
TestACC.append(np.mean(localTestACC))
print("当前Epoch结束,训练集准确率为:{:3f}%,测试集准确率为:{:3f}%".format(TrainACC[-1] * 100, TestACC[-1] * 100))
        # Stash the results in the parameter manager
parManager.oneEpochDone(LEARNINGRATE,TrainACC[-1],TestACC[-1],GlobalLoss[-1])
        # Periodically save the results to file
if epoch == epochNums - 1 or epoch % SaveModelEveryNEpoch == 0:
parManager.loadModelParameters(model)
parManager.saveToFile(MODELFILEPATH)
    # Review the results after this training run
parManager.show()
    # Plotting
plt.figure(figsize=(10,7))
plt.subplots_adjust(left=0.1,bottom=0.1,top=0.9,right=0.9,wspace=0.1,hspace=0.3)
plt.subplot(2,1,1)
plt.plot(range(parManager.EpochDone),parManager.TrainACC,marker='*' ,color='r',label='Train')
plt.plot(range(parManager.EpochDone),parManager.TestACC,marker='*' ,color='b',label='Test')
plt.xlabel('Epochs')
plt.ylabel('ACC')
plt.legend()
plt.title("{} on Cifar-10".format(MODELNAME))
plt.text(int(parManager.EpochDone *0.8),0.5,'Train ACC: {:.6f}\nTest ACC: {:.6f}\nEpoch:{}'.format(parManager.TrainACC[-1],parManager.TestACC[-1], parManager.EpochDone))
plt.subplot(2,1,2)
plt.title('Learning Rates')
plt.xlabel('Epoch')
plt.ylabel('$log_{10}$(Learning Rates)')
plt.ylim(0,-5)
plt.plot([x for x in range(parManager.EpochDone)], np.log(parManager.LearningRate) / np.log(10))
plt.savefig('Train-{}-{}Epoch.jpg'.format(MODELNAME,parManager.EpochDone))
plt.show()
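# --- Editor's sketch (not part of the original script): a quick shape check of
# what blockVGG builds. Each block ends in a 2x2 max-pool, so the 32x32 CIFAR-10
# resolution is halved five times (32 -> 1) while the channels grow to 512, and
# layer6 then flattens that 512x1x1 tensor into class scores.
def _demo_shapes():
    dummy = torch.zeros(2, 3, 32, 32)     # a fake batch of two CIFAR-10 images
    block = blockVGG(2, 3, 64, 3, False)  # first block: 3 -> 64 channels
    print(block(dummy).shape)             # -> torch.Size([2, 64, 16, 16])
    print(VGG11()(dummy).shape)           # -> torch.Size([2, 10]) raw class scores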
| 37.262673
| 176
| 0.607593
|
5751013ff187b2d2d07594aff3ecb037faa7e656
| 81,210
|
py
|
Python
|
elf/src/python/elf.py
|
kaitai-io/formats-kaitai-io.github.io
|
2700514a2a8f67c5351fe93962c70abea02fd3d3
|
[
"0BSD"
] | 4
|
2018-12-10T09:21:19.000Z
|
2021-11-03T16:43:22.000Z
|
elf/src/python/elf.py
|
kaitai-io/formats-kaitai-io.github.io
|
2700514a2a8f67c5351fe93962c70abea02fd3d3
|
[
"0BSD"
] | null | null | null |
elf/src/python/elf.py
|
kaitai-io/formats-kaitai-io.github.io
|
2700514a2a8f67c5351fe93962c70abea02fd3d3
|
[
"0BSD"
] | 3
|
2019-04-08T08:22:22.000Z
|
2021-10-10T19:11:51.000Z
|
# This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
from pkg_resources import parse_version
import kaitaistruct
from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
from enum import Enum
if parse_version(kaitaistruct.__version__) < parse_version('0.9'):
raise Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s" % (kaitaistruct.__version__))
class Elf(KaitaiStruct):
"""
.. seealso::
Source - https://sourceware.org/git/?p=glibc.git;a=blob;f=elf/elf.h;hb=0f62fe0532
.. seealso::
Source - https://refspecs.linuxfoundation.org/elf/gabi4+/contents.html
.. seealso::
Source - https://docs.oracle.com/cd/E37838_01/html/E36783/glcfv.html
"""
class SymbolVisibility(Enum):
default = 0
internal = 1
hidden = 2
protected = 3
exported = 4
singleton = 5
eliminate = 6
class SymbolBinding(Enum):
local = 0
global_symbol = 1
weak = 2
os10 = 10
os11 = 11
os12 = 12
proc13 = 13
proc14 = 14
proc15 = 15
class Endian(Enum):
le = 1
be = 2
class ShType(Enum):
null_type = 0
progbits = 1
symtab = 2
strtab = 3
rela = 4
hash = 5
dynamic = 6
note = 7
nobits = 8
rel = 9
shlib = 10
dynsym = 11
init_array = 14
fini_array = 15
preinit_array = 16
group = 17
symtab_shndx = 18
sunw_symnsort = 1879048172
sunw_phname = 1879048173
sunw_ancillary = 1879048174
sunw_capchain = 1879048175
sunw_capinfo = 1879048176
sunw_symsort = 1879048177
sunw_tlssort = 1879048178
sunw_ldynsym = 1879048179
sunw_dof = 1879048180
sunw_cap = 1879048181
sunw_signature = 1879048182
sunw_annotate = 1879048183
sunw_debugstr = 1879048184
sunw_debug = 1879048185
sunw_move = 1879048186
sunw_comdat = 1879048187
sunw_syminfo = 1879048188
sunw_verdef = 1879048189
sunw_verneed = 1879048190
sunw_versym = 1879048191
sparc_gotdata = 1879048192
amd64_unwind = 1879048193
arm_preemptmap = 1879048194
arm_attributes = 1879048195
arm_debugoverlay = 1879048196
arm_overlaysection = 1879048197
class OsAbi(Enum):
system_v = 0
hp_ux = 1
netbsd = 2
gnu = 3
solaris = 6
aix = 7
irix = 8
freebsd = 9
tru64 = 10
modesto = 11
openbsd = 12
openvms = 13
nsk = 14
aros = 15
fenixos = 16
cloudabi = 17
openvos = 18
class Machine(Enum):
no_machine = 0
m32 = 1
sparc = 2
x86 = 3
m68k = 4
m88k = 5
iamcu = 6
i860 = 7
mips = 8
s370 = 9
mips_rs3_le = 10
parisc = 15
vpp500 = 17
sparc32plus = 18
i960 = 19
powerpc = 20
powerpc64 = 21
s390 = 22
spu = 23
v800 = 36
fr20 = 37
rh32 = 38
rce = 39
arm = 40
alpha = 41
superh = 42
sparc_v9 = 43
tricore = 44
arc = 45
h8_300 = 46
h8_300h = 47
h8s = 48
h8_500 = 49
ia_64 = 50
mips_x = 51
coldfire = 52
m68hc12 = 53
mma = 54
pcp = 55
ncpu = 56
ndr1 = 57
starcore = 58
me16 = 59
st100 = 60
tinyj = 61
x86_64 = 62
pdsp = 63
pdp10 = 64
pdp11 = 65
fx66 = 66
st9plus = 67
st7 = 68
mc68hc16 = 69
mc68hc11 = 70
mc68hc08 = 71
mc68hc05 = 72
svx = 73
st19 = 74
vax = 75
cris = 76
javelin = 77
firepath = 78
zsp = 79
mmix = 80
huany = 81
prism = 82
avr = 83
fr30 = 84
d10v = 85
d30v = 86
v850 = 87
m32r = 88
mn10300 = 89
mn10200 = 90
picojava = 91
openrisc = 92
arc_compact = 93
xtensa = 94
videocore = 95
tmm_gpp = 96
ns32k = 97
tpc = 98
snp1k = 99
st200 = 100
ip2k = 101
max = 102
compact_risc = 103
f2mc16 = 104
msp430 = 105
blackfin = 106
se_c33 = 107
sep = 108
arca = 109
unicore = 110
excess = 111
dxp = 112
altera_nios2 = 113
crx = 114
xgate = 115
c166 = 116
m16c = 117
dspic30f = 118
freescale_ce = 119
m32c = 120
tsk3000 = 131
rs08 = 132
sharc = 133
ecog2 = 134
score7 = 135
dsp24 = 136
videocore3 = 137
latticemico32 = 138
se_c17 = 139
ti_c6000 = 140
ti_c2000 = 141
ti_c5500 = 142
ti_arp32 = 143
ti_pru = 144
mmdsp_plus = 160
cypress_m8c = 161
r32c = 162
trimedia = 163
qdsp6 = 164
i8051 = 165
stxp7x = 166
nds32 = 167
ecog1x = 168
maxq30 = 169
ximo16 = 170
manik = 171
craynv2 = 172
rx = 173
metag = 174
mcst_elbrus = 175
ecog16 = 176
cr16 = 177
etpu = 178
sle9x = 179
l10m = 180
k10m = 181
aarch64 = 183
avr32 = 185
stm8 = 186
tile64 = 187
tilepro = 188
microblaze = 189
cuda = 190
tilegx = 191
cloudshield = 192
corea_1st = 193
corea_2nd = 194
arcv2 = 195
open8 = 196
rl78 = 197
videocore5 = 198
renesas_78kor = 199
freescale_56800ex = 200
ba1 = 201
ba2 = 202
xcore = 203
mchp_pic = 204
intelgt = 205
intel206 = 206
intel207 = 207
intel208 = 208
intel209 = 209
km32 = 210
kmx32 = 211
kmx16 = 212
kmx8 = 213
kvarc = 214
cdp = 215
coge = 216
cool = 217
norc = 218
csr_kalimba = 219
z80 = 220
visium = 221
ft32 = 222
moxie = 223
amd_gpu = 224
riscv = 243
lanai = 244
ceva = 245
ceva_x2 = 246
bpf = 247
graphcore_ipu = 248
img1 = 249
nfp = 250
ve = 251
csky = 252
arc_compact3_64 = 253
mcs6502 = 254
arc_compact3 = 255
kvx = 256
wdc65816 = 257
loongarch = 258
kf32 = 259
class SymbolType(Enum):
no_type = 0
object = 1
func = 2
section = 3
file = 4
common = 5
tls = 6
relc = 8
srelc = 9
gnu_ifunc = 10
os11 = 11
os12 = 12
proc13 = 13
proc14 = 14
proc15 = 15
class DynamicArrayTags(Enum):
null = 0
needed = 1
pltrelsz = 2
pltgot = 3
hash = 4
strtab = 5
symtab = 6
rela = 7
relasz = 8
relaent = 9
strsz = 10
syment = 11
init = 12
fini = 13
soname = 14
rpath = 15
symbolic = 16
rel = 17
relsz = 18
relent = 19
pltrel = 20
debug = 21
textrel = 22
jmprel = 23
bind_now = 24
init_array = 25
fini_array = 26
init_arraysz = 27
fini_arraysz = 28
runpath = 29
flags = 30
preinit_array = 32
preinit_arraysz = 33
symtab_shndx = 34
deprecated_sparc_register = 117440513
sunw_auxiliary = 1610612749
sunw_rtldinf = 1610612750
sunw_filter = 1610612751
sunw_cap = 1610612752
sunw_symtab = 1610612753
sunw_symsz = 1610612754
sunw_sortent = 1610612755
sunw_symsort = 1610612756
sunw_symsortsz = 1610612757
sunw_tlssort = 1610612758
sunw_tlssortsz = 1610612759
sunw_capinfo = 1610612760
sunw_strpad = 1610612761
sunw_capchain = 1610612762
sunw_ldmach = 1610612763
sunw_symtab_shndx = 1610612764
sunw_capchainent = 1610612765
sunw_deferred = 1610612766
sunw_capchainsz = 1610612767
sunw_phname = 1610612768
sunw_parent = 1610612769
sunw_sx_aslr = 1610612771
sunw_relax = 1610612773
sunw_kmod = 1610612775
sunw_sx_nxheap = 1610612777
sunw_sx_nxstack = 1610612779
sunw_sx_adiheap = 1610612781
sunw_sx_adistack = 1610612783
sunw_sx_ssbd = 1610612785
sunw_symnsort = 1610612786
sunw_symnsortsz = 1610612787
gnu_flags_1 = 1879047668
gnu_prelinked = 1879047669
gnu_conflictsz = 1879047670
gnu_liblistsz = 1879047671
checksum = 1879047672
pltpadsz = 1879047673
moveent = 1879047674
movesz = 1879047675
feature_1 = 1879047676
posflag_1 = 1879047677
syminsz = 1879047678
syminent = 1879047679
gnu_hash = 1879047925
tlsdesc_plt = 1879047926
tlsdesc_got = 1879047927
gnu_conflict = 1879047928
gnu_liblist = 1879047929
config = 1879047930
depaudit = 1879047931
audit = 1879047932
pltpad = 1879047933
movetab = 1879047934
syminfo = 1879047935
versym = 1879048176
relacount = 1879048185
relcount = 1879048186
flags_1 = 1879048187
verdef = 1879048188
verdefnum = 1879048189
verneed = 1879048190
verneednum = 1879048191
sparc_register = 1879048193
auxiliary = 2147483645
used = 2147483646
filter = 2147483647
class Bits(Enum):
b32 = 1
b64 = 2
class PhType(Enum):
null_type = 0
load = 1
dynamic = 2
interp = 3
note = 4
shlib = 5
phdr = 6
tls = 7
gnu_eh_frame = 1685382480
gnu_stack = 1685382481
gnu_relro = 1685382482
gnu_property = 1685382483
pax_flags = 1694766464
arm_exidx = 1879048193
class ObjType(Enum):
no_file_type = 0
relocatable = 1
executable = 2
shared = 3
core = 4
class SectionHeaderIdxSpecial(Enum):
undefined = 0
before = 65280
after = 65281
amd64_lcommon = 65282
sunw_ignore = 65343
abs = 65521
common = 65522
xindex = 65535
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.magic = self._io.read_bytes(4)
if not self.magic == b"\x7F\x45\x4C\x46":
raise kaitaistruct.ValidationNotEqualError(b"\x7F\x45\x4C\x46", self.magic, self._io, u"/seq/0")
self.bits = KaitaiStream.resolve_enum(Elf.Bits, self._io.read_u1())
self.endian = KaitaiStream.resolve_enum(Elf.Endian, self._io.read_u1())
self.ei_version = self._io.read_u1()
if not self.ei_version == 1:
raise kaitaistruct.ValidationNotEqualError(1, self.ei_version, self._io, u"/seq/3")
self.abi = KaitaiStream.resolve_enum(Elf.OsAbi, self._io.read_u1())
self.abi_version = self._io.read_u1()
self.pad = self._io.read_bytes(7)
self.header = Elf.EndianElf(self._io, self, self._root)
class EndianElf(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
_on = self._root.endian
if _on == Elf.Endian.le:
self._is_le = True
elif _on == Elf.Endian.be:
self._is_le = False
if not hasattr(self, '_is_le'):
raise kaitaistruct.UndecidedEndiannessError("/types/endian_elf")
elif self._is_le == True:
self._read_le()
elif self._is_le == False:
self._read_be()
def _read_le(self):
self.e_type = KaitaiStream.resolve_enum(Elf.ObjType, self._io.read_u2le())
self.machine = KaitaiStream.resolve_enum(Elf.Machine, self._io.read_u2le())
self.e_version = self._io.read_u4le()
_on = self._root.bits
if _on == Elf.Bits.b32:
self.entry_point = self._io.read_u4le()
elif _on == Elf.Bits.b64:
self.entry_point = self._io.read_u8le()
_on = self._root.bits
if _on == Elf.Bits.b32:
self.program_header_offset = self._io.read_u4le()
elif _on == Elf.Bits.b64:
self.program_header_offset = self._io.read_u8le()
_on = self._root.bits
if _on == Elf.Bits.b32:
self.section_header_offset = self._io.read_u4le()
elif _on == Elf.Bits.b64:
self.section_header_offset = self._io.read_u8le()
self.flags = self._io.read_bytes(4)
self.e_ehsize = self._io.read_u2le()
self.program_header_entry_size = self._io.read_u2le()
self.qty_program_header = self._io.read_u2le()
self.section_header_entry_size = self._io.read_u2le()
self.qty_section_header = self._io.read_u2le()
self.section_names_idx = self._io.read_u2le()
def _read_be(self):
self.e_type = KaitaiStream.resolve_enum(Elf.ObjType, self._io.read_u2be())
self.machine = KaitaiStream.resolve_enum(Elf.Machine, self._io.read_u2be())
self.e_version = self._io.read_u4be()
_on = self._root.bits
if _on == Elf.Bits.b32:
self.entry_point = self._io.read_u4be()
elif _on == Elf.Bits.b64:
self.entry_point = self._io.read_u8be()
_on = self._root.bits
if _on == Elf.Bits.b32:
self.program_header_offset = self._io.read_u4be()
elif _on == Elf.Bits.b64:
self.program_header_offset = self._io.read_u8be()
_on = self._root.bits
if _on == Elf.Bits.b32:
self.section_header_offset = self._io.read_u4be()
elif _on == Elf.Bits.b64:
self.section_header_offset = self._io.read_u8be()
self.flags = self._io.read_bytes(4)
self.e_ehsize = self._io.read_u2be()
self.program_header_entry_size = self._io.read_u2be()
self.qty_program_header = self._io.read_u2be()
self.section_header_entry_size = self._io.read_u2be()
self.qty_section_header = self._io.read_u2be()
self.section_names_idx = self._io.read_u2be()
class NoteSection(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None, _is_le=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._is_le = _is_le
self._read()
def _read(self):
if not hasattr(self, '_is_le'):
raise kaitaistruct.UndecidedEndiannessError("/types/endian_elf/types/note_section")
elif self._is_le == True:
self._read_le()
elif self._is_le == False:
self._read_be()
def _read_le(self):
self.entries = []
i = 0
while not self._io.is_eof():
self.entries.append(Elf.EndianElf.NoteSectionEntry(self._io, self, self._root, self._is_le))
i += 1
def _read_be(self):
self.entries = []
i = 0
while not self._io.is_eof():
self.entries.append(Elf.EndianElf.NoteSectionEntry(self._io, self, self._root, self._is_le))
i += 1
class ProgramHeader(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None, _is_le=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._is_le = _is_le
self._read()
def _read(self):
if not hasattr(self, '_is_le'):
raise kaitaistruct.UndecidedEndiannessError("/types/endian_elf/types/program_header")
elif self._is_le == True:
self._read_le()
elif self._is_le == False:
self._read_be()
def _read_le(self):
self.type = KaitaiStream.resolve_enum(Elf.PhType, self._io.read_u4le())
if self._root.bits == Elf.Bits.b64:
self.flags64 = self._io.read_u4le()
_on = self._root.bits
if _on == Elf.Bits.b32:
self.offset = self._io.read_u4le()
elif _on == Elf.Bits.b64:
self.offset = self._io.read_u8le()
_on = self._root.bits
if _on == Elf.Bits.b32:
self.vaddr = self._io.read_u4le()
elif _on == Elf.Bits.b64:
self.vaddr = self._io.read_u8le()
_on = self._root.bits
if _on == Elf.Bits.b32:
self.paddr = self._io.read_u4le()
elif _on == Elf.Bits.b64:
self.paddr = self._io.read_u8le()
_on = self._root.bits
if _on == Elf.Bits.b32:
self.filesz = self._io.read_u4le()
elif _on == Elf.Bits.b64:
self.filesz = self._io.read_u8le()
_on = self._root.bits
if _on == Elf.Bits.b32:
self.memsz = self._io.read_u4le()
elif _on == Elf.Bits.b64:
self.memsz = self._io.read_u8le()
if self._root.bits == Elf.Bits.b32:
self.flags32 = self._io.read_u4le()
_on = self._root.bits
if _on == Elf.Bits.b32:
self.align = self._io.read_u4le()
elif _on == Elf.Bits.b64:
self.align = self._io.read_u8le()
def _read_be(self):
self.type = KaitaiStream.resolve_enum(Elf.PhType, self._io.read_u4be())
if self._root.bits == Elf.Bits.b64:
self.flags64 = self._io.read_u4be()
_on = self._root.bits
if _on == Elf.Bits.b32:
self.offset = self._io.read_u4be()
elif _on == Elf.Bits.b64:
self.offset = self._io.read_u8be()
_on = self._root.bits
if _on == Elf.Bits.b32:
self.vaddr = self._io.read_u4be()
elif _on == Elf.Bits.b64:
self.vaddr = self._io.read_u8be()
_on = self._root.bits
if _on == Elf.Bits.b32:
self.paddr = self._io.read_u4be()
elif _on == Elf.Bits.b64:
self.paddr = self._io.read_u8be()
_on = self._root.bits
if _on == Elf.Bits.b32:
self.filesz = self._io.read_u4be()
elif _on == Elf.Bits.b64:
self.filesz = self._io.read_u8be()
_on = self._root.bits
if _on == Elf.Bits.b32:
self.memsz = self._io.read_u4be()
elif _on == Elf.Bits.b64:
self.memsz = self._io.read_u8be()
if self._root.bits == Elf.Bits.b32:
self.flags32 = self._io.read_u4be()
_on = self._root.bits
if _on == Elf.Bits.b32:
self.align = self._io.read_u4be()
elif _on == Elf.Bits.b64:
self.align = self._io.read_u8be()
@property
def flags_obj(self):
if hasattr(self, '_m_flags_obj'):
return self._m_flags_obj if hasattr(self, '_m_flags_obj') else None
if self._is_le:
_on = self._root.bits
if _on == Elf.Bits.b32:
self._m_flags_obj = Elf.PhdrTypeFlags(self.flags32, self._io, self, self._root)
elif _on == Elf.Bits.b64:
self._m_flags_obj = Elf.PhdrTypeFlags(self.flags64, self._io, self, self._root)
else:
_on = self._root.bits
if _on == Elf.Bits.b32:
self._m_flags_obj = Elf.PhdrTypeFlags(self.flags32, self._io, self, self._root)
elif _on == Elf.Bits.b64:
self._m_flags_obj = Elf.PhdrTypeFlags(self.flags64, self._io, self, self._root)
return self._m_flags_obj if hasattr(self, '_m_flags_obj') else None
class DynamicSectionEntry(KaitaiStruct):
"""
.. seealso::
Source - https://docs.oracle.com/cd/E37838_01/html/E36783/chapter6-42444.html
.. seealso::
Source - https://refspecs.linuxfoundation.org/elf/gabi4+/ch5.dynamic.html#dynamic_section
"""
def __init__(self, _io, _parent=None, _root=None, _is_le=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._is_le = _is_le
self._read()
def _read(self):
if not hasattr(self, '_is_le'):
raise kaitaistruct.UndecidedEndiannessError("/types/endian_elf/types/dynamic_section_entry")
elif self._is_le == True:
self._read_le()
elif self._is_le == False:
self._read_be()
def _read_le(self):
_on = self._root.bits
if _on == Elf.Bits.b32:
self.tag = self._io.read_u4le()
elif _on == Elf.Bits.b64:
self.tag = self._io.read_u8le()
_on = self._root.bits
if _on == Elf.Bits.b32:
self.value_or_ptr = self._io.read_u4le()
elif _on == Elf.Bits.b64:
self.value_or_ptr = self._io.read_u8le()
def _read_be(self):
_on = self._root.bits
if _on == Elf.Bits.b32:
self.tag = self._io.read_u4be()
elif _on == Elf.Bits.b64:
self.tag = self._io.read_u8be()
_on = self._root.bits
if _on == Elf.Bits.b32:
self.value_or_ptr = self._io.read_u4be()
elif _on == Elf.Bits.b64:
self.value_or_ptr = self._io.read_u8be()
@property
def flag_1_values(self):
if hasattr(self, '_m_flag_1_values'):
return self._m_flag_1_values if hasattr(self, '_m_flag_1_values') else None
if self.tag_enum == Elf.DynamicArrayTags.flags_1:
if self._is_le:
self._m_flag_1_values = Elf.DtFlag1Values(self.value_or_ptr, self._io, self, self._root)
else:
self._m_flag_1_values = Elf.DtFlag1Values(self.value_or_ptr, self._io, self, self._root)
return self._m_flag_1_values if hasattr(self, '_m_flag_1_values') else None
@property
def value_str(self):
if hasattr(self, '_m_value_str'):
return self._m_value_str if hasattr(self, '_m_value_str') else None
if ((self.is_value_str) and (self._parent.is_string_table_linked)) :
io = self._parent._parent.linked_section.body._io
_pos = io.pos()
io.seek(self.value_or_ptr)
if self._is_le:
self._m_value_str = (io.read_bytes_term(0, False, True, True)).decode(u"ASCII")
else:
self._m_value_str = (io.read_bytes_term(0, False, True, True)).decode(u"ASCII")
io.seek(_pos)
return self._m_value_str if hasattr(self, '_m_value_str') else None
@property
def tag_enum(self):
if hasattr(self, '_m_tag_enum'):
return self._m_tag_enum if hasattr(self, '_m_tag_enum') else None
self._m_tag_enum = KaitaiStream.resolve_enum(Elf.DynamicArrayTags, self.tag)
return self._m_tag_enum if hasattr(self, '_m_tag_enum') else None
@property
def flag_values(self):
if hasattr(self, '_m_flag_values'):
return self._m_flag_values if hasattr(self, '_m_flag_values') else None
if self.tag_enum == Elf.DynamicArrayTags.flags:
if self._is_le:
self._m_flag_values = Elf.DtFlagValues(self.value_or_ptr, self._io, self, self._root)
else:
self._m_flag_values = Elf.DtFlagValues(self.value_or_ptr, self._io, self, self._root)
return self._m_flag_values if hasattr(self, '_m_flag_values') else None
@property
def is_value_str(self):
if hasattr(self, '_m_is_value_str'):
return self._m_is_value_str if hasattr(self, '_m_is_value_str') else None
self._m_is_value_str = ((self.value_or_ptr != 0) and ( ((self.tag_enum == Elf.DynamicArrayTags.needed) or (self.tag_enum == Elf.DynamicArrayTags.soname) or (self.tag_enum == Elf.DynamicArrayTags.rpath) or (self.tag_enum == Elf.DynamicArrayTags.runpath) or (self.tag_enum == Elf.DynamicArrayTags.sunw_auxiliary) or (self.tag_enum == Elf.DynamicArrayTags.sunw_filter) or (self.tag_enum == Elf.DynamicArrayTags.auxiliary) or (self.tag_enum == Elf.DynamicArrayTags.filter) or (self.tag_enum == Elf.DynamicArrayTags.config) or (self.tag_enum == Elf.DynamicArrayTags.depaudit) or (self.tag_enum == Elf.DynamicArrayTags.audit)) ))
return self._m_is_value_str if hasattr(self, '_m_is_value_str') else None
class SectionHeader(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None, _is_le=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._is_le = _is_le
self._read()
def _read(self):
if not hasattr(self, '_is_le'):
raise kaitaistruct.UndecidedEndiannessError("/types/endian_elf/types/section_header")
elif self._is_le == True:
self._read_le()
elif self._is_le == False:
self._read_be()
def _read_le(self):
self.ofs_name = self._io.read_u4le()
self.type = KaitaiStream.resolve_enum(Elf.ShType, self._io.read_u4le())
_on = self._root.bits
if _on == Elf.Bits.b32:
self.flags = self._io.read_u4le()
elif _on == Elf.Bits.b64:
self.flags = self._io.read_u8le()
_on = self._root.bits
if _on == Elf.Bits.b32:
self.addr = self._io.read_u4le()
elif _on == Elf.Bits.b64:
self.addr = self._io.read_u8le()
_on = self._root.bits
if _on == Elf.Bits.b32:
self.ofs_body = self._io.read_u4le()
elif _on == Elf.Bits.b64:
self.ofs_body = self._io.read_u8le()
_on = self._root.bits
if _on == Elf.Bits.b32:
self.len_body = self._io.read_u4le()
elif _on == Elf.Bits.b64:
self.len_body = self._io.read_u8le()
self.linked_section_idx = self._io.read_u4le()
self.info = self._io.read_bytes(4)
_on = self._root.bits
if _on == Elf.Bits.b32:
self.align = self._io.read_u4le()
elif _on == Elf.Bits.b64:
self.align = self._io.read_u8le()
_on = self._root.bits
if _on == Elf.Bits.b32:
self.entry_size = self._io.read_u4le()
elif _on == Elf.Bits.b64:
self.entry_size = self._io.read_u8le()
def _read_be(self):
self.ofs_name = self._io.read_u4be()
self.type = KaitaiStream.resolve_enum(Elf.ShType, self._io.read_u4be())
_on = self._root.bits
if _on == Elf.Bits.b32:
self.flags = self._io.read_u4be()
elif _on == Elf.Bits.b64:
self.flags = self._io.read_u8be()
_on = self._root.bits
if _on == Elf.Bits.b32:
self.addr = self._io.read_u4be()
elif _on == Elf.Bits.b64:
self.addr = self._io.read_u8be()
_on = self._root.bits
if _on == Elf.Bits.b32:
self.ofs_body = self._io.read_u4be()
elif _on == Elf.Bits.b64:
self.ofs_body = self._io.read_u8be()
_on = self._root.bits
if _on == Elf.Bits.b32:
self.len_body = self._io.read_u4be()
elif _on == Elf.Bits.b64:
self.len_body = self._io.read_u8be()
self.linked_section_idx = self._io.read_u4be()
self.info = self._io.read_bytes(4)
_on = self._root.bits
if _on == Elf.Bits.b32:
self.align = self._io.read_u4be()
elif _on == Elf.Bits.b64:
self.align = self._io.read_u8be()
_on = self._root.bits
if _on == Elf.Bits.b32:
self.entry_size = self._io.read_u4be()
elif _on == Elf.Bits.b64:
self.entry_size = self._io.read_u8be()
@property
def body(self):
if hasattr(self, '_m_body'):
return self._m_body if hasattr(self, '_m_body') else None
if self.type != Elf.ShType.nobits:
io = self._root._io
_pos = io.pos()
io.seek(self.ofs_body)
if self._is_le:
_on = self.type
if _on == Elf.ShType.rel:
self._raw__m_body = io.read_bytes(self.len_body)
_io__raw__m_body = KaitaiStream(BytesIO(self._raw__m_body))
self._m_body = Elf.EndianElf.RelocationSection(False, _io__raw__m_body, self, self._root, self._is_le)
elif _on == Elf.ShType.note:
self._raw__m_body = io.read_bytes(self.len_body)
_io__raw__m_body = KaitaiStream(BytesIO(self._raw__m_body))
self._m_body = Elf.EndianElf.NoteSection(_io__raw__m_body, self, self._root, self._is_le)
elif _on == Elf.ShType.symtab:
self._raw__m_body = io.read_bytes(self.len_body)
_io__raw__m_body = KaitaiStream(BytesIO(self._raw__m_body))
self._m_body = Elf.EndianElf.DynsymSection(_io__raw__m_body, self, self._root, self._is_le)
elif _on == Elf.ShType.strtab:
self._raw__m_body = io.read_bytes(self.len_body)
_io__raw__m_body = KaitaiStream(BytesIO(self._raw__m_body))
self._m_body = Elf.EndianElf.StringsStruct(_io__raw__m_body, self, self._root, self._is_le)
elif _on == Elf.ShType.dynamic:
self._raw__m_body = io.read_bytes(self.len_body)
_io__raw__m_body = KaitaiStream(BytesIO(self._raw__m_body))
self._m_body = Elf.EndianElf.DynamicSection(_io__raw__m_body, self, self._root, self._is_le)
elif _on == Elf.ShType.dynsym:
self._raw__m_body = io.read_bytes(self.len_body)
_io__raw__m_body = KaitaiStream(BytesIO(self._raw__m_body))
self._m_body = Elf.EndianElf.DynsymSection(_io__raw__m_body, self, self._root, self._is_le)
elif _on == Elf.ShType.rela:
self._raw__m_body = io.read_bytes(self.len_body)
_io__raw__m_body = KaitaiStream(BytesIO(self._raw__m_body))
self._m_body = Elf.EndianElf.RelocationSection(True, _io__raw__m_body, self, self._root, self._is_le)
else:
self._m_body = io.read_bytes(self.len_body)
else:
_on = self.type
if _on == Elf.ShType.rel:
self._raw__m_body = io.read_bytes(self.len_body)
_io__raw__m_body = KaitaiStream(BytesIO(self._raw__m_body))
self._m_body = Elf.EndianElf.RelocationSection(False, _io__raw__m_body, self, self._root, self._is_le)
elif _on == Elf.ShType.note:
self._raw__m_body = io.read_bytes(self.len_body)
_io__raw__m_body = KaitaiStream(BytesIO(self._raw__m_body))
self._m_body = Elf.EndianElf.NoteSection(_io__raw__m_body, self, self._root, self._is_le)
elif _on == Elf.ShType.symtab:
self._raw__m_body = io.read_bytes(self.len_body)
_io__raw__m_body = KaitaiStream(BytesIO(self._raw__m_body))
self._m_body = Elf.EndianElf.DynsymSection(_io__raw__m_body, self, self._root, self._is_le)
elif _on == Elf.ShType.strtab:
self._raw__m_body = io.read_bytes(self.len_body)
_io__raw__m_body = KaitaiStream(BytesIO(self._raw__m_body))
self._m_body = Elf.EndianElf.StringsStruct(_io__raw__m_body, self, self._root, self._is_le)
elif _on == Elf.ShType.dynamic:
self._raw__m_body = io.read_bytes(self.len_body)
_io__raw__m_body = KaitaiStream(BytesIO(self._raw__m_body))
self._m_body = Elf.EndianElf.DynamicSection(_io__raw__m_body, self, self._root, self._is_le)
elif _on == Elf.ShType.dynsym:
self._raw__m_body = io.read_bytes(self.len_body)
_io__raw__m_body = KaitaiStream(BytesIO(self._raw__m_body))
self._m_body = Elf.EndianElf.DynsymSection(_io__raw__m_body, self, self._root, self._is_le)
elif _on == Elf.ShType.rela:
self._raw__m_body = io.read_bytes(self.len_body)
_io__raw__m_body = KaitaiStream(BytesIO(self._raw__m_body))
self._m_body = Elf.EndianElf.RelocationSection(True, _io__raw__m_body, self, self._root, self._is_le)
else:
self._m_body = io.read_bytes(self.len_body)
io.seek(_pos)
return self._m_body if hasattr(self, '_m_body') else None
@property
def linked_section(self):
"""may reference a later section header, so don't try to access too early (use only lazy `instances`).
.. seealso::
Source - https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.sheader.html#sh_link
"""
if hasattr(self, '_m_linked_section'):
return self._m_linked_section if hasattr(self, '_m_linked_section') else None
if ((self.linked_section_idx != Elf.SectionHeaderIdxSpecial.undefined.value) and (self.linked_section_idx < self._root.header.qty_section_header)) :
self._m_linked_section = self._root.header.section_headers[self.linked_section_idx]
return self._m_linked_section if hasattr(self, '_m_linked_section') else None
@property
def name(self):
if hasattr(self, '_m_name'):
return self._m_name if hasattr(self, '_m_name') else None
io = self._root.header.section_names._io
_pos = io.pos()
io.seek(self.ofs_name)
if self._is_le:
self._m_name = (io.read_bytes_term(0, False, True, True)).decode(u"ASCII")
else:
self._m_name = (io.read_bytes_term(0, False, True, True)).decode(u"ASCII")
io.seek(_pos)
return self._m_name if hasattr(self, '_m_name') else None
@property
def flags_obj(self):
if hasattr(self, '_m_flags_obj'):
return self._m_flags_obj if hasattr(self, '_m_flags_obj') else None
if self._is_le:
self._m_flags_obj = Elf.SectionHeaderFlags(self.flags, self._io, self, self._root)
else:
self._m_flags_obj = Elf.SectionHeaderFlags(self.flags, self._io, self, self._root)
return self._m_flags_obj if hasattr(self, '_m_flags_obj') else None
class RelocationSection(KaitaiStruct):
"""
.. seealso::
Source - https://docs.oracle.com/cd/E37838_01/html/E36783/chapter6-54839.html
.. seealso::
Source - https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.reloc.html
"""
def __init__(self, has_addend, _io, _parent=None, _root=None, _is_le=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._is_le = _is_le
self.has_addend = has_addend
self._read()
def _read(self):
if not hasattr(self, '_is_le'):
raise kaitaistruct.UndecidedEndiannessError("/types/endian_elf/types/relocation_section")
elif self._is_le == True:
self._read_le()
elif self._is_le == False:
self._read_be()
def _read_le(self):
self.entries = []
i = 0
while not self._io.is_eof():
self.entries.append(Elf.EndianElf.RelocationSectionEntry(self._io, self, self._root, self._is_le))
i += 1
def _read_be(self):
self.entries = []
i = 0
while not self._io.is_eof():
self.entries.append(Elf.EndianElf.RelocationSectionEntry(self._io, self, self._root, self._is_le))
i += 1
class DynamicSection(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None, _is_le=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._is_le = _is_le
self._read()
def _read(self):
if not hasattr(self, '_is_le'):
raise kaitaistruct.UndecidedEndiannessError("/types/endian_elf/types/dynamic_section")
elif self._is_le == True:
self._read_le()
elif self._is_le == False:
self._read_be()
def _read_le(self):
self.entries = []
i = 0
while not self._io.is_eof():
self.entries.append(Elf.EndianElf.DynamicSectionEntry(self._io, self, self._root, self._is_le))
i += 1
def _read_be(self):
self.entries = []
i = 0
while not self._io.is_eof():
self.entries.append(Elf.EndianElf.DynamicSectionEntry(self._io, self, self._root, self._is_le))
i += 1
@property
def is_string_table_linked(self):
if hasattr(self, '_m_is_string_table_linked'):
return self._m_is_string_table_linked if hasattr(self, '_m_is_string_table_linked') else None
self._m_is_string_table_linked = self._parent.linked_section.type == Elf.ShType.strtab
return self._m_is_string_table_linked if hasattr(self, '_m_is_string_table_linked') else None
class DynsymSection(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None, _is_le=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._is_le = _is_le
self._read()
def _read(self):
if not hasattr(self, '_is_le'):
raise kaitaistruct.UndecidedEndiannessError("/types/endian_elf/types/dynsym_section")
elif self._is_le == True:
self._read_le()
elif self._is_le == False:
self._read_be()
def _read_le(self):
self.entries = []
i = 0
while not self._io.is_eof():
self.entries.append(Elf.EndianElf.DynsymSectionEntry(self._io, self, self._root, self._is_le))
i += 1
def _read_be(self):
self.entries = []
i = 0
while not self._io.is_eof():
self.entries.append(Elf.EndianElf.DynsymSectionEntry(self._io, self, self._root, self._is_le))
i += 1
@property
def is_string_table_linked(self):
if hasattr(self, '_m_is_string_table_linked'):
return self._m_is_string_table_linked if hasattr(self, '_m_is_string_table_linked') else None
self._m_is_string_table_linked = self._parent.linked_section.type == Elf.ShType.strtab
return self._m_is_string_table_linked if hasattr(self, '_m_is_string_table_linked') else None
class RelocationSectionEntry(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None, _is_le=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._is_le = _is_le
self._read()
def _read(self):
if not hasattr(self, '_is_le'):
raise kaitaistruct.UndecidedEndiannessError("/types/endian_elf/types/relocation_section_entry")
elif self._is_le == True:
self._read_le()
elif self._is_le == False:
self._read_be()
def _read_le(self):
_on = self._root.bits
if _on == Elf.Bits.b32:
self.offset = self._io.read_u4le()
elif _on == Elf.Bits.b64:
self.offset = self._io.read_u8le()
_on = self._root.bits
if _on == Elf.Bits.b32:
self.info = self._io.read_u4le()
elif _on == Elf.Bits.b64:
self.info = self._io.read_u8le()
if self._parent.has_addend:
_on = self._root.bits
if _on == Elf.Bits.b32:
self.addend = self._io.read_s4le()
elif _on == Elf.Bits.b64:
self.addend = self._io.read_s8le()
def _read_be(self):
_on = self._root.bits
if _on == Elf.Bits.b32:
self.offset = self._io.read_u4be()
elif _on == Elf.Bits.b64:
self.offset = self._io.read_u8be()
_on = self._root.bits
if _on == Elf.Bits.b32:
self.info = self._io.read_u4be()
elif _on == Elf.Bits.b64:
self.info = self._io.read_u8be()
if self._parent.has_addend:
_on = self._root.bits
if _on == Elf.Bits.b32:
self.addend = self._io.read_s4be()
elif _on == Elf.Bits.b64:
self.addend = self._io.read_s8be()
class DynsymSectionEntry(KaitaiStruct):
"""
.. seealso::
Source - https://docs.oracle.com/cd/E37838_01/html/E36783/man-sts.html
.. seealso::
Source - https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.symtab.html
"""
def __init__(self, _io, _parent=None, _root=None, _is_le=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._is_le = _is_le
self._read()
def _read(self):
if not hasattr(self, '_is_le'):
raise kaitaistruct.UndecidedEndiannessError("/types/endian_elf/types/dynsym_section_entry")
elif self._is_le == True:
self._read_le()
elif self._is_le == False:
self._read_be()
def _read_le(self):
self.ofs_name = self._io.read_u4le()
if self._root.bits == Elf.Bits.b32:
self.value_b32 = self._io.read_u4le()
if self._root.bits == Elf.Bits.b32:
self.size_b32 = self._io.read_u4le()
self.bind = KaitaiStream.resolve_enum(Elf.SymbolBinding, self._io.read_bits_int_be(4))
self.type = KaitaiStream.resolve_enum(Elf.SymbolType, self._io.read_bits_int_be(4))
self._io.align_to_byte()
self.other = self._io.read_u1()
self.sh_idx = self._io.read_u2le()
if self._root.bits == Elf.Bits.b64:
self.value_b64 = self._io.read_u8le()
if self._root.bits == Elf.Bits.b64:
self.size_b64 = self._io.read_u8le()
def _read_be(self):
self.ofs_name = self._io.read_u4be()
if self._root.bits == Elf.Bits.b32:
self.value_b32 = self._io.read_u4be()
if self._root.bits == Elf.Bits.b32:
self.size_b32 = self._io.read_u4be()
self.bind = KaitaiStream.resolve_enum(Elf.SymbolBinding, self._io.read_bits_int_be(4))
self.type = KaitaiStream.resolve_enum(Elf.SymbolType, self._io.read_bits_int_be(4))
self._io.align_to_byte()
self.other = self._io.read_u1()
self.sh_idx = self._io.read_u2be()
if self._root.bits == Elf.Bits.b64:
self.value_b64 = self._io.read_u8be()
if self._root.bits == Elf.Bits.b64:
self.size_b64 = self._io.read_u8be()
@property
def is_sh_idx_reserved(self):
if hasattr(self, '_m_is_sh_idx_reserved'):
return self._m_is_sh_idx_reserved if hasattr(self, '_m_is_sh_idx_reserved') else None
self._m_is_sh_idx_reserved = ((self.sh_idx >= self._root.sh_idx_lo_reserved) and (self.sh_idx <= self._root.sh_idx_hi_reserved))
return self._m_is_sh_idx_reserved if hasattr(self, '_m_is_sh_idx_reserved') else None
@property
def is_sh_idx_os(self):
if hasattr(self, '_m_is_sh_idx_os'):
return self._m_is_sh_idx_os if hasattr(self, '_m_is_sh_idx_os') else None
self._m_is_sh_idx_os = ((self.sh_idx >= self._root.sh_idx_lo_os) and (self.sh_idx <= self._root.sh_idx_hi_os))
return self._m_is_sh_idx_os if hasattr(self, '_m_is_sh_idx_os') else None
@property
def is_sh_idx_proc(self):
if hasattr(self, '_m_is_sh_idx_proc'):
return self._m_is_sh_idx_proc if hasattr(self, '_m_is_sh_idx_proc') else None
self._m_is_sh_idx_proc = ((self.sh_idx >= self._root.sh_idx_lo_proc) and (self.sh_idx <= self._root.sh_idx_hi_proc))
return self._m_is_sh_idx_proc if hasattr(self, '_m_is_sh_idx_proc') else None
@property
def size(self):
if hasattr(self, '_m_size'):
return self._m_size if hasattr(self, '_m_size') else None
self._m_size = (self.size_b32 if self._root.bits == Elf.Bits.b32 else (self.size_b64 if self._root.bits == Elf.Bits.b64 else 0))
return self._m_size if hasattr(self, '_m_size') else None
@property
def visibility(self):
if hasattr(self, '_m_visibility'):
return self._m_visibility if hasattr(self, '_m_visibility') else None
self._m_visibility = KaitaiStream.resolve_enum(Elf.SymbolVisibility, (self.other & 3))
return self._m_visibility if hasattr(self, '_m_visibility') else None
@property
def value(self):
if hasattr(self, '_m_value'):
return self._m_value if hasattr(self, '_m_value') else None
self._m_value = (self.value_b32 if self._root.bits == Elf.Bits.b32 else (self.value_b64 if self._root.bits == Elf.Bits.b64 else 0))
return self._m_value if hasattr(self, '_m_value') else None
@property
def name(self):
if hasattr(self, '_m_name'):
return self._m_name if hasattr(self, '_m_name') else None
if ((self.ofs_name != 0) and (self._parent.is_string_table_linked)) :
io = self._parent._parent.linked_section.body._io
_pos = io.pos()
io.seek(self.ofs_name)
if self._is_le:
self._m_name = (io.read_bytes_term(0, False, True, True)).decode(u"ASCII")
else:
self._m_name = (io.read_bytes_term(0, False, True, True)).decode(u"ASCII")
io.seek(_pos)
return self._m_name if hasattr(self, '_m_name') else None
@property
def sh_idx_special(self):
if hasattr(self, '_m_sh_idx_special'):
return self._m_sh_idx_special if hasattr(self, '_m_sh_idx_special') else None
self._m_sh_idx_special = KaitaiStream.resolve_enum(Elf.SectionHeaderIdxSpecial, self.sh_idx)
return self._m_sh_idx_special if hasattr(self, '_m_sh_idx_special') else None
class NoteSectionEntry(KaitaiStruct):
"""
.. seealso::
Source - https://docs.oracle.com/cd/E37838_01/html/E36783/chapter6-18048.html
.. seealso::
Source - https://refspecs.linuxfoundation.org/elf/gabi4+/ch5.pheader.html#note_section
"""
def __init__(self, _io, _parent=None, _root=None, _is_le=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._is_le = _is_le
self._read()
def _read(self):
if not hasattr(self, '_is_le'):
raise kaitaistruct.UndecidedEndiannessError("/types/endian_elf/types/note_section_entry")
elif self._is_le == True:
self._read_le()
elif self._is_le == False:
self._read_be()
def _read_le(self):
self.len_name = self._io.read_u4le()
self.len_descriptor = self._io.read_u4le()
self.type = self._io.read_u4le()
self.name = KaitaiStream.bytes_terminate(self._io.read_bytes(self.len_name), 0, False)
self.name_padding = self._io.read_bytes((-(self.len_name) % 4))
self.descriptor = self._io.read_bytes(self.len_descriptor)
self.descriptor_padding = self._io.read_bytes((-(self.len_descriptor) % 4))
def _read_be(self):
self.len_name = self._io.read_u4be()
self.len_descriptor = self._io.read_u4be()
self.type = self._io.read_u4be()
self.name = KaitaiStream.bytes_terminate(self._io.read_bytes(self.len_name), 0, False)
self.name_padding = self._io.read_bytes((-(self.len_name) % 4))
self.descriptor = self._io.read_bytes(self.len_descriptor)
self.descriptor_padding = self._io.read_bytes((-(self.len_descriptor) % 4))
class StringsStruct(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None, _is_le=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._is_le = _is_le
self._read()
def _read(self):
if not hasattr(self, '_is_le'):
raise kaitaistruct.UndecidedEndiannessError("/types/endian_elf/types/strings_struct")
elif self._is_le == True:
self._read_le()
elif self._is_le == False:
self._read_be()
def _read_le(self):
self.entries = []
i = 0
while not self._io.is_eof():
self.entries.append((self._io.read_bytes_term(0, False, True, True)).decode(u"ASCII"))
i += 1
def _read_be(self):
self.entries = []
i = 0
while not self._io.is_eof():
self.entries.append((self._io.read_bytes_term(0, False, True, True)).decode(u"ASCII"))
i += 1
@property
def program_headers(self):
if hasattr(self, '_m_program_headers'):
return self._m_program_headers if hasattr(self, '_m_program_headers') else None
_pos = self._io.pos()
self._io.seek(self.program_header_offset)
if self._is_le:
self._raw__m_program_headers = [None] * (self.qty_program_header)
self._m_program_headers = [None] * (self.qty_program_header)
for i in range(self.qty_program_header):
self._raw__m_program_headers[i] = self._io.read_bytes(self.program_header_entry_size)
_io__raw__m_program_headers = KaitaiStream(BytesIO(self._raw__m_program_headers[i]))
self._m_program_headers[i] = Elf.EndianElf.ProgramHeader(_io__raw__m_program_headers, self, self._root, self._is_le)
else:
self._raw__m_program_headers = [None] * (self.qty_program_header)
self._m_program_headers = [None] * (self.qty_program_header)
for i in range(self.qty_program_header):
self._raw__m_program_headers[i] = self._io.read_bytes(self.program_header_entry_size)
_io__raw__m_program_headers = KaitaiStream(BytesIO(self._raw__m_program_headers[i]))
self._m_program_headers[i] = Elf.EndianElf.ProgramHeader(_io__raw__m_program_headers, self, self._root, self._is_le)
self._io.seek(_pos)
return self._m_program_headers if hasattr(self, '_m_program_headers') else None
@property
def section_headers(self):
if hasattr(self, '_m_section_headers'):
return self._m_section_headers if hasattr(self, '_m_section_headers') else None
_pos = self._io.pos()
self._io.seek(self.section_header_offset)
if self._is_le:
self._raw__m_section_headers = [None] * (self.qty_section_header)
self._m_section_headers = [None] * (self.qty_section_header)
for i in range(self.qty_section_header):
self._raw__m_section_headers[i] = self._io.read_bytes(self.section_header_entry_size)
_io__raw__m_section_headers = KaitaiStream(BytesIO(self._raw__m_section_headers[i]))
self._m_section_headers[i] = Elf.EndianElf.SectionHeader(_io__raw__m_section_headers, self, self._root, self._is_le)
else:
self._raw__m_section_headers = [None] * (self.qty_section_header)
self._m_section_headers = [None] * (self.qty_section_header)
for i in range(self.qty_section_header):
self._raw__m_section_headers[i] = self._io.read_bytes(self.section_header_entry_size)
_io__raw__m_section_headers = KaitaiStream(BytesIO(self._raw__m_section_headers[i]))
self._m_section_headers[i] = Elf.EndianElf.SectionHeader(_io__raw__m_section_headers, self, self._root, self._is_le)
self._io.seek(_pos)
return self._m_section_headers if hasattr(self, '_m_section_headers') else None
@property
def section_names(self):
if hasattr(self, '_m_section_names'):
return self._m_section_names if hasattr(self, '_m_section_names') else None
if ((self.section_names_idx != Elf.SectionHeaderIdxSpecial.undefined.value) and (self.section_names_idx < self._root.header.qty_section_header)) :
_pos = self._io.pos()
self._io.seek(self.section_headers[self.section_names_idx].ofs_body)
if self._is_le:
self._raw__m_section_names = self._io.read_bytes(self.section_headers[self.section_names_idx].len_body)
_io__raw__m_section_names = KaitaiStream(BytesIO(self._raw__m_section_names))
self._m_section_names = Elf.EndianElf.StringsStruct(_io__raw__m_section_names, self, self._root, self._is_le)
else:
self._raw__m_section_names = self._io.read_bytes(self.section_headers[self.section_names_idx].len_body)
_io__raw__m_section_names = KaitaiStream(BytesIO(self._raw__m_section_names))
self._m_section_names = Elf.EndianElf.StringsStruct(_io__raw__m_section_names, self, self._root, self._is_le)
self._io.seek(_pos)
return self._m_section_names if hasattr(self, '_m_section_names') else None
class DtFlag1Values(KaitaiStruct):
def __init__(self, value, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.value = value
self._read()
def _read(self):
pass
@property
def singleton(self):
"""Singleton symbols are used."""
if hasattr(self, '_m_singleton'):
return self._m_singleton if hasattr(self, '_m_singleton') else None
self._m_singleton = (self.value & 33554432) != 0
return self._m_singleton if hasattr(self, '_m_singleton') else None
@property
def ignmuldef(self):
if hasattr(self, '_m_ignmuldef'):
return self._m_ignmuldef if hasattr(self, '_m_ignmuldef') else None
self._m_ignmuldef = (self.value & 262144) != 0
return self._m_ignmuldef if hasattr(self, '_m_ignmuldef') else None
@property
def loadfltr(self):
"""Trigger filtee loading at runtime."""
if hasattr(self, '_m_loadfltr'):
return self._m_loadfltr if hasattr(self, '_m_loadfltr') else None
self._m_loadfltr = (self.value & 16) != 0
return self._m_loadfltr if hasattr(self, '_m_loadfltr') else None
@property
def initfirst(self):
"""Set RTLD_INITFIRST for this object."""
if hasattr(self, '_m_initfirst'):
return self._m_initfirst if hasattr(self, '_m_initfirst') else None
self._m_initfirst = (self.value & 32) != 0
return self._m_initfirst if hasattr(self, '_m_initfirst') else None
@property
def symintpose(self):
"""Object has individual interposers."""
if hasattr(self, '_m_symintpose'):
return self._m_symintpose if hasattr(self, '_m_symintpose') else None
self._m_symintpose = (self.value & 8388608) != 0
return self._m_symintpose if hasattr(self, '_m_symintpose') else None
@property
def noreloc(self):
if hasattr(self, '_m_noreloc'):
return self._m_noreloc if hasattr(self, '_m_noreloc') else None
self._m_noreloc = (self.value & 4194304) != 0
return self._m_noreloc if hasattr(self, '_m_noreloc') else None
@property
def confalt(self):
"""Configuration alternative created."""
if hasattr(self, '_m_confalt'):
return self._m_confalt if hasattr(self, '_m_confalt') else None
self._m_confalt = (self.value & 8192) != 0
return self._m_confalt if hasattr(self, '_m_confalt') else None
@property
def dispreldne(self):
"""Disp reloc applied at build time."""
if hasattr(self, '_m_dispreldne'):
return self._m_dispreldne if hasattr(self, '_m_dispreldne') else None
self._m_dispreldne = (self.value & 32768) != 0
return self._m_dispreldne if hasattr(self, '_m_dispreldne') else None
@property
def rtld_global(self):
"""Set RTLD_GLOBAL for this object."""
if hasattr(self, '_m_rtld_global'):
return self._m_rtld_global if hasattr(self, '_m_rtld_global') else None
self._m_rtld_global = (self.value & 2) != 0
return self._m_rtld_global if hasattr(self, '_m_rtld_global') else None
@property
def nodelete(self):
"""Set RTLD_NODELETE for this object."""
if hasattr(self, '_m_nodelete'):
return self._m_nodelete if hasattr(self, '_m_nodelete') else None
self._m_nodelete = (self.value & 8) != 0
return self._m_nodelete if hasattr(self, '_m_nodelete') else None
@property
def trans(self):
if hasattr(self, '_m_trans'):
return self._m_trans if hasattr(self, '_m_trans') else None
self._m_trans = (self.value & 512) != 0
return self._m_trans if hasattr(self, '_m_trans') else None
@property
def origin(self):
"""$ORIGIN must be handled."""
if hasattr(self, '_m_origin'):
return self._m_origin if hasattr(self, '_m_origin') else None
self._m_origin = (self.value & 128) != 0
return self._m_origin if hasattr(self, '_m_origin') else None
@property
def now(self):
"""Set RTLD_NOW for this object."""
if hasattr(self, '_m_now'):
return self._m_now if hasattr(self, '_m_now') else None
self._m_now = (self.value & 1) != 0
return self._m_now if hasattr(self, '_m_now') else None
@property
def nohdr(self):
if hasattr(self, '_m_nohdr'):
return self._m_nohdr if hasattr(self, '_m_nohdr') else None
self._m_nohdr = (self.value & 1048576) != 0
return self._m_nohdr if hasattr(self, '_m_nohdr') else None
@property
def endfiltee(self):
"""Filtee terminates filters search."""
if hasattr(self, '_m_endfiltee'):
return self._m_endfiltee if hasattr(self, '_m_endfiltee') else None
self._m_endfiltee = (self.value & 16384) != 0
return self._m_endfiltee if hasattr(self, '_m_endfiltee') else None
@property
def nodirect(self):
"""Object has no-direct binding."""
if hasattr(self, '_m_nodirect'):
return self._m_nodirect if hasattr(self, '_m_nodirect') else None
self._m_nodirect = (self.value & 131072) != 0
return self._m_nodirect if hasattr(self, '_m_nodirect') else None
@property
def globaudit(self):
"""Global auditing required."""
if hasattr(self, '_m_globaudit'):
return self._m_globaudit if hasattr(self, '_m_globaudit') else None
self._m_globaudit = (self.value & 16777216) != 0
return self._m_globaudit if hasattr(self, '_m_globaudit') else None
@property
def noksyms(self):
if hasattr(self, '_m_noksyms'):
return self._m_noksyms if hasattr(self, '_m_noksyms') else None
self._m_noksyms = (self.value & 524288) != 0
return self._m_noksyms if hasattr(self, '_m_noksyms') else None
@property
def interpose(self):
"""Object is used to interpose."""
if hasattr(self, '_m_interpose'):
return self._m_interpose if hasattr(self, '_m_interpose') else None
self._m_interpose = (self.value & 1024) != 0
return self._m_interpose if hasattr(self, '_m_interpose') else None
@property
def nodump(self):
"""Object can't be dldump'ed."""
if hasattr(self, '_m_nodump'):
return self._m_nodump if hasattr(self, '_m_nodump') else None
self._m_nodump = (self.value & 4096) != 0
return self._m_nodump if hasattr(self, '_m_nodump') else None
@property
def disprelpnd(self):
"""Disp reloc applied at run-time."""
if hasattr(self, '_m_disprelpnd'):
return self._m_disprelpnd if hasattr(self, '_m_disprelpnd') else None
self._m_disprelpnd = (self.value & 65536) != 0
return self._m_disprelpnd if hasattr(self, '_m_disprelpnd') else None
@property
def noopen(self):
"""Set RTLD_NOOPEN for this object."""
if hasattr(self, '_m_noopen'):
return self._m_noopen if hasattr(self, '_m_noopen') else None
self._m_noopen = (self.value & 64) != 0
return self._m_noopen if hasattr(self, '_m_noopen') else None
@property
def stub(self):
if hasattr(self, '_m_stub'):
return self._m_stub if hasattr(self, '_m_stub') else None
self._m_stub = (self.value & 67108864) != 0
return self._m_stub if hasattr(self, '_m_stub') else None
@property
def direct(self):
"""Direct binding enabled."""
if hasattr(self, '_m_direct'):
return self._m_direct if hasattr(self, '_m_direct') else None
self._m_direct = (self.value & 256) != 0
return self._m_direct if hasattr(self, '_m_direct') else None
@property
def edited(self):
"""Object is modified after built."""
if hasattr(self, '_m_edited'):
return self._m_edited if hasattr(self, '_m_edited') else None
self._m_edited = (self.value & 2097152) != 0
return self._m_edited if hasattr(self, '_m_edited') else None
@property
def group(self):
"""Set RTLD_GROUP for this object."""
if hasattr(self, '_m_group'):
return self._m_group if hasattr(self, '_m_group') else None
self._m_group = (self.value & 4) != 0
return self._m_group if hasattr(self, '_m_group') else None
@property
def pie(self):
if hasattr(self, '_m_pie'):
return self._m_pie if hasattr(self, '_m_pie') else None
self._m_pie = (self.value & 134217728) != 0
return self._m_pie if hasattr(self, '_m_pie') else None
@property
def nodeflib(self):
"""Ignore default lib search path."""
if hasattr(self, '_m_nodeflib'):
return self._m_nodeflib if hasattr(self, '_m_nodeflib') else None
self._m_nodeflib = (self.value & 2048) != 0
return self._m_nodeflib if hasattr(self, '_m_nodeflib') else None
class SectionHeaderFlags(KaitaiStruct):
def __init__(self, value, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.value = value
self._read()
def _read(self):
pass
@property
def merge(self):
"""might be merged."""
if hasattr(self, '_m_merge'):
return self._m_merge if hasattr(self, '_m_merge') else None
self._m_merge = (self.value & 16) != 0
return self._m_merge if hasattr(self, '_m_merge') else None
@property
def mask_os(self):
"""OS-specific."""
if hasattr(self, '_m_mask_os'):
return self._m_mask_os if hasattr(self, '_m_mask_os') else None
self._m_mask_os = (self.value & 267386880) != 0
return self._m_mask_os if hasattr(self, '_m_mask_os') else None
@property
def exclude(self):
"""section is excluded unless referenced or allocated (Solaris)."""
if hasattr(self, '_m_exclude'):
return self._m_exclude if hasattr(self, '_m_exclude') else None
self._m_exclude = (self.value & 134217728) != 0
return self._m_exclude if hasattr(self, '_m_exclude') else None
@property
def mask_proc(self):
"""Processor-specific."""
if hasattr(self, '_m_mask_proc'):
return self._m_mask_proc if hasattr(self, '_m_mask_proc') else None
self._m_mask_proc = (self.value & 4026531840) != 0
return self._m_mask_proc if hasattr(self, '_m_mask_proc') else None
@property
def strings(self):
"""contains nul-terminated strings."""
if hasattr(self, '_m_strings'):
return self._m_strings if hasattr(self, '_m_strings') else None
self._m_strings = (self.value & 32) != 0
return self._m_strings if hasattr(self, '_m_strings') else None
@property
def os_non_conforming(self):
"""non-standard OS specific handling required."""
if hasattr(self, '_m_os_non_conforming'):
return self._m_os_non_conforming if hasattr(self, '_m_os_non_conforming') else None
self._m_os_non_conforming = (self.value & 256) != 0
return self._m_os_non_conforming if hasattr(self, '_m_os_non_conforming') else None
@property
def alloc(self):
"""occupies memory during execution."""
if hasattr(self, '_m_alloc'):
return self._m_alloc if hasattr(self, '_m_alloc') else None
self._m_alloc = (self.value & 2) != 0
return self._m_alloc if hasattr(self, '_m_alloc') else None
@property
def exec_instr(self):
"""executable."""
if hasattr(self, '_m_exec_instr'):
return self._m_exec_instr if hasattr(self, '_m_exec_instr') else None
self._m_exec_instr = (self.value & 4) != 0
return self._m_exec_instr if hasattr(self, '_m_exec_instr') else None
@property
def info_link(self):
"""'sh_info' contains SHT index."""
if hasattr(self, '_m_info_link'):
return self._m_info_link if hasattr(self, '_m_info_link') else None
self._m_info_link = (self.value & 64) != 0
return self._m_info_link if hasattr(self, '_m_info_link') else None
@property
def write(self):
"""writable."""
if hasattr(self, '_m_write'):
return self._m_write if hasattr(self, '_m_write') else None
self._m_write = (self.value & 1) != 0
return self._m_write if hasattr(self, '_m_write') else None
@property
def link_order(self):
"""preserve order after combining."""
if hasattr(self, '_m_link_order'):
return self._m_link_order if hasattr(self, '_m_link_order') else None
self._m_link_order = (self.value & 128) != 0
return self._m_link_order if hasattr(self, '_m_link_order') else None
@property
def ordered(self):
"""special ordering requirement (Solaris)."""
if hasattr(self, '_m_ordered'):
return self._m_ordered if hasattr(self, '_m_ordered') else None
self._m_ordered = (self.value & 67108864) != 0
return self._m_ordered if hasattr(self, '_m_ordered') else None
@property
def tls(self):
"""section hold thread-local data."""
if hasattr(self, '_m_tls'):
return self._m_tls if hasattr(self, '_m_tls') else None
self._m_tls = (self.value & 1024) != 0
return self._m_tls if hasattr(self, '_m_tls') else None
@property
def group(self):
"""section is member of a group."""
if hasattr(self, '_m_group'):
return self._m_group if hasattr(self, '_m_group') else None
self._m_group = (self.value & 512) != 0
return self._m_group if hasattr(self, '_m_group') else None
class PhdrTypeFlags(KaitaiStruct):
def __init__(self, value, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.value = value
self._read()
def _read(self):
pass
@property
def read(self):
if hasattr(self, '_m_read'):
return self._m_read if hasattr(self, '_m_read') else None
self._m_read = (self.value & 4) != 0
return self._m_read if hasattr(self, '_m_read') else None
@property
def write(self):
if hasattr(self, '_m_write'):
return self._m_write if hasattr(self, '_m_write') else None
self._m_write = (self.value & 2) != 0
return self._m_write if hasattr(self, '_m_write') else None
@property
def execute(self):
if hasattr(self, '_m_execute'):
return self._m_execute if hasattr(self, '_m_execute') else None
self._m_execute = (self.value & 1) != 0
return self._m_execute if hasattr(self, '_m_execute') else None
@property
def mask_proc(self):
if hasattr(self, '_m_mask_proc'):
return self._m_mask_proc if hasattr(self, '_m_mask_proc') else None
self._m_mask_proc = (self.value & 4026531840) != 0
return self._m_mask_proc if hasattr(self, '_m_mask_proc') else None
class DtFlagValues(KaitaiStruct):
"""
.. seealso::
Figure 5-11: DT_FLAGS values - https://refspecs.linuxbase.org/elf/gabi4+/ch5.dynamic.html
.. seealso::
Source - https://github.com/golang/go/blob/48dfddbab3/src/debug/elf/elf.go#L1079-L1095
.. seealso::
Source - https://docs.oracle.com/cd/E37838_01/html/E36783/chapter6-42444.html#OSLLGchapter7-tbl-5
"""
def __init__(self, value, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.value = value
self._read()
def _read(self):
pass
@property
def bind_now(self):
"""all relocations for this object must be processed before returning
control to the program
"""
if hasattr(self, '_m_bind_now'):
return self._m_bind_now if hasattr(self, '_m_bind_now') else None
self._m_bind_now = (self.value & 8) != 0
return self._m_bind_now if hasattr(self, '_m_bind_now') else None
@property
def origin(self):
"""object may reference the $ORIGIN substitution string."""
if hasattr(self, '_m_origin'):
return self._m_origin if hasattr(self, '_m_origin') else None
self._m_origin = (self.value & 1) != 0
return self._m_origin if hasattr(self, '_m_origin') else None
@property
def textrel(self):
"""relocation entries might request modifications to a non-writable segment."""
if hasattr(self, '_m_textrel'):
return self._m_textrel if hasattr(self, '_m_textrel') else None
self._m_textrel = (self.value & 4) != 0
return self._m_textrel if hasattr(self, '_m_textrel') else None
@property
def static_tls(self):
"""object uses static thread-local storage scheme."""
if hasattr(self, '_m_static_tls'):
return self._m_static_tls if hasattr(self, '_m_static_tls') else None
self._m_static_tls = (self.value & 16) != 0
return self._m_static_tls if hasattr(self, '_m_static_tls') else None
@property
def symbolic(self):
"""symbolic linking."""
if hasattr(self, '_m_symbolic'):
return self._m_symbolic if hasattr(self, '_m_symbolic') else None
self._m_symbolic = (self.value & 2) != 0
return self._m_symbolic if hasattr(self, '_m_symbolic') else None
@property
def sh_idx_lo_os(self):
if hasattr(self, '_m_sh_idx_lo_os'):
return self._m_sh_idx_lo_os if hasattr(self, '_m_sh_idx_lo_os') else None
self._m_sh_idx_lo_os = 65312
return self._m_sh_idx_lo_os if hasattr(self, '_m_sh_idx_lo_os') else None
@property
def sh_idx_lo_reserved(self):
if hasattr(self, '_m_sh_idx_lo_reserved'):
return self._m_sh_idx_lo_reserved if hasattr(self, '_m_sh_idx_lo_reserved') else None
self._m_sh_idx_lo_reserved = 65280
return self._m_sh_idx_lo_reserved if hasattr(self, '_m_sh_idx_lo_reserved') else None
@property
def sh_idx_hi_proc(self):
if hasattr(self, '_m_sh_idx_hi_proc'):
return self._m_sh_idx_hi_proc if hasattr(self, '_m_sh_idx_hi_proc') else None
self._m_sh_idx_hi_proc = 65311
return self._m_sh_idx_hi_proc if hasattr(self, '_m_sh_idx_hi_proc') else None
@property
def sh_idx_lo_proc(self):
if hasattr(self, '_m_sh_idx_lo_proc'):
return self._m_sh_idx_lo_proc if hasattr(self, '_m_sh_idx_lo_proc') else None
self._m_sh_idx_lo_proc = 65280
return self._m_sh_idx_lo_proc if hasattr(self, '_m_sh_idx_lo_proc') else None
@property
def sh_idx_hi_os(self):
if hasattr(self, '_m_sh_idx_hi_os'):
return self._m_sh_idx_hi_os if hasattr(self, '_m_sh_idx_hi_os') else None
self._m_sh_idx_hi_os = 65343
return self._m_sh_idx_hi_os if hasattr(self, '_m_sh_idx_hi_os') else None
@property
def sh_idx_hi_reserved(self):
if hasattr(self, '_m_sh_idx_hi_reserved'):
return self._m_sh_idx_hi_reserved if hasattr(self, '_m_sh_idx_hi_reserved') else None
self._m_sh_idx_hi_reserved = 65535
return self._m_sh_idx_hi_reserved if hasattr(self, '_m_sh_idx_hi_reserved') else None
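# --- Usage sketch (not part of the generated parser) -------------------------
# A minimal, illustrative way to drive the generated class; from_file() comes
# from the KaitaiStruct runtime base class, and the fallback path below is
# only an example.
if __name__ == "__main__":
    import sys
    elf = Elf.from_file(sys.argv[1] if len(sys.argv) > 1 else "/bin/ls")
    print("class:", elf.bits, "endian:", elf.endian, "machine:", elf.header.machine)
    for sh in elf.header.section_headers:
        # section names are resolved lazily through the section-names string table
        print(sh.name, sh.type)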
| 39.595319 | 641 | 0.545807 |
2c9bb8d1b8173c07629008dc36e41b77154f4a7d | 120 | py | Python
lg_activity/src/lg_activity/__init__.py | FuriousJulius/lg_ros_nodes | 15a84c5022ab2f5b038d11a5589cd4a34010b1d6 | ["Apache-2.0"] | 16 | 2015-10-10T11:55:37.000Z | 2022-02-24T22:47:48.000Z
lg_activity/src/lg_activity/__init__.py | FuriousJulius/lg_ros_nodes | 15a84c5022ab2f5b038d11a5589cd4a34010b1d6 | ["Apache-2.0"] | 292 | 2015-09-29T21:59:53.000Z | 2022-03-31T15:59:31.000Z
lg_activity/src/lg_activity/__init__.py | constantegonzalez/lg_ros_nodes | 1c7b08c42e90205922602c86805285508d1b7971 | ["Apache-2.0"] | 5 | 2017-05-03T06:22:43.000Z | 2021-08-19T16:54:14.000Z |
from .activity import ActivitySource
from .activity import ActivityTracker
from .activity import ActivitySourceDetector
| 30 | 44 | 0.875 |
ff5dd41c4e64a2eebf1e3235d404e18b243d67cf | 14,598 | py | Python
arelle/DialogOpenArchive.py | theredpea/Arelle | e53097f142a69b2fefc18298a72f1f1b219b973d | ["Apache-2.0"] | null | null | null
arelle/DialogOpenArchive.py | theredpea/Arelle | e53097f142a69b2fefc18298a72f1f1b219b973d | ["Apache-2.0"] | null | null | null
arelle/DialogOpenArchive.py | theredpea/Arelle | e53097f142a69b2fefc18298a72f1f1b219b973d | ["Apache-2.0"] | null | null | null |
'''
Created on Oct 20, 2010
@author: Mark V Systems Limited
(c) Copyright 2010 Mark V Systems Limited, All rights reserved.
'''
from tkinter import Toplevel, StringVar, VERTICAL, HORIZONTAL, N, S, E, W, messagebox
try:
from tkinter.ttk import Frame, Button, Treeview, Scrollbar
except ImportError:
from ttk import Frame, Button, Treeview, Scrollbar
import os, sys
try:
import regex as re
except ImportError:
import re
from arelle.CntlrWinTooltip import ToolTip
from arelle.UrlUtil import isHttpUrl
from arelle.PackageManager import parsePackage
'''
caller checks accepted, if True, caller retrieves url
'''
ARCHIVE = 1
ENTRY_POINTS = 2
DISCLOSURE_SYSTEM = 3
def askArchiveFile(mainWin, filesource):
filenames = filesource.dir
if filenames is not None: # an IO or other error can return None
if filesource.isTaxonomyPackage:
dialog = DialogOpenArchive(mainWin,
ENTRY_POINTS,
filesource,
filenames,
_("Select Entry Point"),
_("File"),
showAltViewButton=True)
else:
dialog = DialogOpenArchive(mainWin,
ARCHIVE,
filesource,
filenames,
_("Select Archive File"),
_("File"))
if dialog.accepted:
return filesource.url
return None
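# Hedged usage sketch (not from the Arelle code base): per the contract noted
# above, the dialog sets `accepted` and the caller then retrieves the chosen
# URL from the filesource. A hypothetical caller:
#
#     url = askArchiveFile(mainWin, filesource)   # filesource.url, or None if cancelled
#     if url is not None:
#         mainWin.fileOpenFile(url)               # hypothetical follow-up call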
def selectDisclosureSystem(mainWin, disclosureSystem):
dialog = DialogOpenArchive(mainWin,
DISCLOSURE_SYSTEM,
disclosureSystem,
disclosureSystem.dir,
_("Select Disclosure System"),
_("Disclosure System"))
if dialog and dialog.accepted:
return disclosureSystem.selection
return None
class DialogOpenArchive(Toplevel):
def __init__(self, mainWin, openType, filesource, filenames, title, colHeader, showAltViewButton=False):
parent = mainWin.parent
super(DialogOpenArchive, self).__init__(parent)
self.parent = parent
self.showAltViewButton = showAltViewButton
parentGeometry = re.match(r"(\d+)x(\d+)[+]?([-]?\d+)[+]?([-]?\d+)", parent.geometry())
dialogX = int(parentGeometry.group(3))
dialogY = int(parentGeometry.group(4))
self.accepted = False
self.transient(self.parent)
frame = Frame(self)
treeFrame = Frame(frame, width=500)
vScrollbar = Scrollbar(treeFrame, orient=VERTICAL)
hScrollbar = Scrollbar(treeFrame, orient=HORIZONTAL)
self.treeView = Treeview(treeFrame, xscrollcommand=hScrollbar.set, yscrollcommand=vScrollbar.set)
self.treeView.grid(row=0, column=0, sticky=(N, S, E, W))
hScrollbar["command"] = self.treeView.xview
hScrollbar.grid(row=1, column=0, sticky=(E,W))
vScrollbar["command"] = self.treeView.yview
vScrollbar.grid(row=0, column=1, sticky=(N,S))
treeFrame.columnconfigure(0, weight=1)
treeFrame.rowconfigure(0, weight=1)
treeFrame.grid(row=0, column=0, columnspan=4, sticky=(N, S, E, W), padx=3, pady=3)
self.treeView.focus_set()
mainWin.showStatus(_("loading archive {0}").format(filesource.url))
self.filesource = filesource
self.filenames = filenames
self.selection = filesource.selection
self.hasToolTip = False
selectedNode = None
if openType == ENTRY_POINTS:
try:
metadataFiles = filesource.taxonomyPackageMetadataFiles
''' take first for now
if len(metadataFiles) != 1:
raise IOError(_("Taxonomy package contained more than one metadata file: {0}.")
.format(', '.join(metadataFiles)))
'''
metadataFile = metadataFiles[0]
metadata = filesource.url + os.sep + metadataFile
self.metadataFilePrefix = os.sep.join(os.path.split(metadataFile)[:-1])
if self.metadataFilePrefix:
self.metadataFilePrefix += "/" # zip contents have /, never \ file seps
self.taxonomyPkgMetaInf = '{}/META-INF/'.format(
os.path.splitext(os.path.basename(filesource.url))[0])
self.taxonomyPackage = parsePackage(mainWin, filesource, metadata,
os.sep.join(os.path.split(metadata)[:-1]) + os.sep)
# may be a catalog file with no entry point names
if not self.taxonomyPackage["nameToUrls"]:
openType = ARCHIVE # no entry points to show, just archive
self.showAltViewButton = False
except Exception as e:
self.close()
err = _("Failed to parse metadata; the underlying error was: {0}").format(e)
messagebox.showerror(_("Malformed taxonomy package"), err)
mainWin.addToLog(err)
return
mainWin.showStatus(None)
if openType == DISCLOSURE_SYSTEM:
y = 3
else:
y = 1
okButton = Button(frame, text=_("OK"), command=self.ok)
cancelButton = Button(frame, text=_("Cancel"), command=self.close)
okButton.grid(row=y, column=2, sticky=(S,E,W), pady=3)
cancelButton.grid(row=y, column=3, sticky=(S,E,W), pady=3, padx=3)
if self.showAltViewButton:
self.altViewButton = Button(frame, command=self.showAltView)
self.altViewButton.grid(row=y, column=0, sticky=(S,W), pady=3, padx=3)
self.loadTreeView(openType, title, colHeader)
self.geometry("+{0}+{1}".format(dialogX+50,dialogY+100))
frame.grid(row=0, column=0, sticky=(N,S,E,W))
frame.columnconfigure(0, weight=1)
frame.rowconfigure(0, weight=1)
window = self.winfo_toplevel()
window.columnconfigure(0, weight=1)
window.rowconfigure(0, weight=1)
self.bind("<Return>", self.ok)
self.bind("<Escape>", self.close)
self.toolTipText = StringVar()
if self.hasToolTip:
self.treeView.bind("<Motion>", self.motion, '+')
self.treeView.bind("<Leave>", self.leave, '+')
self.toolTipText = StringVar()
self.toolTip = ToolTip(self.treeView,
textvariable=self.toolTipText,
wraplength=640,
follow_mouse=True,
state="disabled")
self.toolTipRowId = None
self.protocol("WM_DELETE_WINDOW", self.close)
self.grab_set()
self.wait_window(self)
def loadTreeView(self, openType, title, colHeader):
self.title(title)
self.openType = openType
selectedNode = None
# clear previous treeview entries
for previousNode in self.treeView.get_children(""):
self.treeView.delete(previousNode)
# set up treeView widget and tabbed pane
if openType in (ARCHIVE, DISCLOSURE_SYSTEM):
self.treeView.column("#0", width=500, anchor="w")
self.treeView.heading("#0", text=colHeader)
try:
self.isRss = self.filesource.isRss
if self.isRss:
self.treeView.column("#0", width=350, anchor="w")
self.treeView["columns"] = ("descr", "date", "instDoc")
self.treeView.column("descr", width=50, anchor="center", stretch=False)
self.treeView.heading("descr", text="Form")
self.treeView.column("date", width=170, anchor="w", stretch=False)
self.treeView.heading("date", text="Pub Date")
self.treeView.column("instDoc", width=200, anchor="w", stretch=False)
self.treeView.heading("instDoc", text="Instance Document")
except AttributeError:
self.isRss = False
self.treeView["columns"] = tuple()
loadedPaths = []
for i, filename in enumerate(self.filenames):
if isinstance(filename,tuple):
if self.isRss:
form, date, instDoc = filename[2:5]
filename = filename[0] # ignore tooltip
self.hasToolTip = True
if filename.endswith("/"):
filename = filename[:-1]
path = filename.split("/")
if not self.isRss and len(path) > 1 and path[:-1] in loadedPaths:
parent = "file{0}".format(loadedPaths.index(path[:-1]))
else:
parent = ""
node = self.treeView.insert(parent, "end", "file{0}".format(i), text=path[-1])
if self.isRss:
self.treeView.set(node, "descr", form)
self.treeView.set(node, "date", date)
self.treeView.set(node, "instDoc", os.path.basename(instDoc))
if self.selection == filename:
selectedNode = node
loadedPaths.append(path)
elif openType == ENTRY_POINTS:
self.treeView.column("#0", width=150, anchor="w")
self.treeView.heading("#0", text="Name")
self.treeView["columns"] = ("url",)
self.treeView.column("url", width=350, anchor="w")
self.treeView.heading("url", text="URL")
for name, urls in self.taxonomyPackage["nameToUrls"].items():
displayUrl = urls[1] # display the canonical URL
self.treeView.insert("", "end", name, values=[displayUrl], text=name)
self.hasToolTip = True
else: # unknown openType
return None
if selectedNode:
self.treeView.see(selectedNode)
self.treeView.selection_set(selectedNode)
if self.showAltViewButton:
self.altViewButton.config(text=_("Show Files") if openType == ENTRY_POINTS else _("Show Entries"))
def ok(self, event=None):
selection = self.treeView.selection()
if len(selection) > 0:
if hasattr(self, "taxonomyPackage"):
# load file source remappings
self.filesource.mappedPaths = self.taxonomyPackage["remappings"]
filename = None
if self.openType in (ARCHIVE, DISCLOSURE_SYSTEM):
filename = self.filenames[int(selection[0][4:])]
if isinstance(filename,tuple):
if self.isRss:
filename = filename[4]
else:
filename = filename[0]
elif self.openType == ENTRY_POINTS:
epName = selection[0]
#index 0 is the remapped Url, as opposed to the canonical one used for display
# Greg Acsone reports [0] does not work for Corep 1.6 pkgs, need [1], old style packages
filename = self.taxonomyPackage["nameToUrls"][epName][0]
if not filename.endswith("/"):
# check if it's an absolute URL rather than a path into the archive
if not isHttpUrl(filename) and self.metadataFilePrefix != self.taxonomyPkgMetaInf:
# assume it's a path inside the archive:
filename = self.metadataFilePrefix + filename
if filename is not None and not filename.endswith("/"):
if hasattr(self, "taxonomyPackage"):
# attempt to unmap the filename to original file
# will be mapped again in loading, but this allows schemaLocation to be unmapped
for prefix, remapping in self.taxonomyPackage["remappings"].items():
if isHttpUrl(remapping):
remapStart = remapping
else:
remapStart = self.metadataFilePrefix + remapping
if filename.startswith(remapStart):
# set unmapped file
filename = prefix + filename[len(remapStart):]
break
self.filesource.select(filename)
self.accepted = True
self.close()
def close(self, event=None):
self.parent.focus_set()
self.destroy()
def showAltView(self, event=None):
if self.openType == ENTRY_POINTS:
self.loadTreeView(ARCHIVE, _("Select Entry Point"), _("File"))
else:
self.loadTreeView(ENTRY_POINTS, _("Select Archive File"), _("File"))
def leave(self, *args):
self.toolTipRowId = None
def motion(self, *args):
tvRowId = self.treeView.identify_row(args[0].y)
if tvRowId != self.toolTipRowId:
text = None
if self.openType in (ARCHIVE, DISCLOSURE_SYSTEM):
self.toolTipRowId = tvRowId
if tvRowId and len(tvRowId) > 4:
try:
text = self.filenames[ int(tvRowId[4:]) ]
if isinstance(text, tuple):
text = text[1].replace("\\n","\n")
except (KeyError, ValueError):
pass
elif self.openType == ENTRY_POINTS:
try:
epUrl = self.taxonomyPackage["nameToUrls"][tvRowId][1]
text = "{0}\n{1}".format(tvRowId, epUrl)
except KeyError:
pass
self.setToolTip(text)
def setToolTip(self, text):
self.toolTip._hide()
if text:
self.toolTipText.set(text)
self.toolTip.configure(state="normal")
self.toolTip._schedule()
else:
self.toolTipText.set("")
self.toolTip.configure(state="disabled")
| 43.446429 | 110 | 0.534388 |
d4fb15afd9f2101ee993194855c7399b2ce1e7a9 | 1,477 | py | Python
setup.py | webji/pyops | df3403c8900ea76bf62d68610ae9d024fa046fc3 | ["Apache-2.0"] | 1 | 2018-01-11T06:27:19.000Z | 2018-01-11T06:27:19.000Z
setup.py | webji/pyops | df3403c8900ea76bf62d68610ae9d024fa046fc3 | ["Apache-2.0"] | 3 | 2018-01-10T05:51:18.000Z | 2018-01-11T06:34:37.000Z
setup.py | webji/pyops | df3403c8900ea76bf62d68610ae9d024fa046fc3 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Author: Will<willji@outlook.com>
# Created on 2018-01-10 12:45:12
import sys
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf8') as f:
long_description = f.read()
import pyops
install_requires = [
'click>=3.3',
'unittest2>=0.5.1'
]
setup(
name='pyops',
version=pyops.__version__,
description='IT maintenance and operation libs in Python.',
long_description=long_description,
url='https://github.com/webji/pyops',
author='Will Ji',
author_email='willji@outlook.com',
license='Apache License, Version 2.0',
classifiers=[
'Programming Language :: Python :: 2.7',
'License :: OSI Approved :: Apache Software License',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Environment :: Web Environment',
'Topic :: Software Development :: Libraries :: Python Modules',
],
keywords='maintencance operations it',
packages=find_packages(exclude=['data', 'tests*']),
install_requires=install_requires,
extras_require={},
package_data={
'pyops': [
'logging.conf',
],
},
entry_points={
'console_scripts': [
'pyops=pyops.run:main'
]
},
test_suite='tests.all_suite',
)
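# Hedged usage note (not part of the original file): with a setup.py like this,
# the package is typically built and installed from the project root with
# `pip install .`, which also wires up the `pyops` console script declared in
# entry_points above.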
| 20.802817 | 71 | 0.628978 |
372c8aca962b9fab23f7a5c1aea1a66332581d24 | 3,340 | py | Python
lib/surface/compute/routers/add_bgp_peer.py | bopopescu/SDK | e6d9aaee2456f706d1d86e8ec2a41d146e33550d | ["Apache-2.0"] | null | null | null
lib/surface/compute/routers/add_bgp_peer.py | bopopescu/SDK | e6d9aaee2456f706d1d86e8ec2a41d146e33550d | ["Apache-2.0"] | null | null | null
lib/surface/compute/routers/add_bgp_peer.py | bopopescu/SDK | e6d9aaee2456f706d1d86e8ec2a41d146e33550d | ["Apache-2.0"] | 2 | 2020-11-04T03:08:21.000Z | 2020-11-05T08:14:41.000Z |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for adding a BGP peer to a router."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.command_lib.compute import flags
from googlecloudsdk.third_party.py27 import py27_copy as copy
class AddBgpPeer(base_classes.ReadWriteCommand):
"""Add a BGP peer to a router."""
@staticmethod
def Args(parser):
parser.add_argument(
'--peer-name',
required=True,
help='The name of the peer being added.')
parser.add_argument(
'--interface',
required=True,
help='The interface of the peer being added.')
parser.add_argument(
'--peer-asn',
required=True,
type=int,
help='The asn of the peer being added.')
parser.add_argument(
'--peer-ip-address',
help='The link local address of the peer.')
parser.add_argument(
# TODO(user): document the default priority if any
'--advertised-route-priority',
type=int,
help='The priority of routes advertised to this BGP peer. In the case '
'where there is more than one matching route of maximum length, '
'the routes with lowest priority value win. 0 <= priority <= '
'65535.')
flags.AddRegionFlag(
parser,
resource_type='router',
operation_type='update')
parser.add_argument(
'name',
help='The name of the router.')
@property
def service(self):
return self.compute.routers
@property
def resource_type(self):
return 'routers'
def CreateReference(self, args):
return self.CreateRegionalReference(args.name, args.region)
def GetGetRequest(self, args):
return (self.service,
'Get',
self.messages.ComputeRoutersGetRequest(
router=self.ref.Name(),
region=self.ref.region,
project=self.project))
def GetSetRequest(self, args, replacement, existing):
return (self.service,
'Update',
self.messages.ComputeRoutersUpdateRequest(
router=self.ref.Name(),
routerResource=replacement,
region=self.ref.region,
project=self.project))
def Modify(self, args, existing):
replacement = copy.deepcopy(existing)
peer = self.messages.RouterBgpPeer(
name=args.peer_name,
interfaceName=args.interface,
peerIpAddress=args.peer_ip_address,
peerAsn=args.peer_asn,
advertisedRoutePriority=args.advertised_route_priority)
replacement.bgpPeers.append(peer)
return replacement
AddBgpPeer.detailed_help = {
'DESCRIPTION': """
*{command}* is used to add a BGP peer to a router.
""",
}
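# Hedged, self-contained sketch (not part of the gcloud surface above) of what
# Modify() does: deep-copy the existing peer list and append one BGP peer built
# from the parsed flags. The namedtuple stands in for the generated
# RouterBgpPeer message class; names and values are illustrative only.
import copy
from collections import namedtuple

FakeRouterBgpPeer = namedtuple(
    'FakeRouterBgpPeer',
    ['name', 'interfaceName', 'peerIpAddress', 'peerAsn', 'advertisedRoutePriority'])

def add_peer(bgp_peers, args):
    """Return a new peer list with one peer appended, mirroring Modify()."""
    peers = copy.deepcopy(list(bgp_peers))
    peers.append(FakeRouterBgpPeer(
        name=args['peer_name'],                             # --peer-name (required)
        interfaceName=args['interface'],                    # --interface (required)
        peerIpAddress=args.get('peer_ip_address'),          # --peer-ip-address
        peerAsn=args['peer_asn'],                           # --peer-asn (required)
        advertisedRoutePriority=args.get('advertised_route_priority')))
    return peers

# Example: the three required flags are enough to build a peer entry.
print(add_peer([], {'peer_name': 'peer-1', 'interface': 'if-1', 'peer_asn': 65001}))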
| 29.557522 | 79 | 0.653293 |
cb9ed7cb6cf673b479d2a7c9960e04b277d9123f | 11,350 | py | Python
sdk/python/pulumi_azure_nextgen/network/express_route_port.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | ["Apache-2.0"] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z
sdk/python/pulumi_azure_nextgen/network/express_route_port.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | ["Apache-2.0"] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z
sdk/python/pulumi_azure_nextgen/network/express_route_port.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | ["Apache-2.0"] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['ExpressRoutePort']
class ExpressRoutePort(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
bandwidth_in_gbps: Optional[pulumi.Input[int]] = None,
encapsulation: Optional[pulumi.Input[Union[str, 'ExpressRoutePortsEncapsulation']]] = None,
express_route_port_name: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']]] = None,
links: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ExpressRouteLinkArgs']]]]] = None,
location: Optional[pulumi.Input[str]] = None,
peering_location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
ExpressRoutePort resource definition.
API Version: 2020-08-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[int] bandwidth_in_gbps: Bandwidth of procured ports in Gbps.
:param pulumi.Input[Union[str, 'ExpressRoutePortsEncapsulation']] encapsulation: Encapsulation method on physical ports.
:param pulumi.Input[str] express_route_port_name: The name of the ExpressRoutePort resource.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']] identity: The identity of ExpressRoutePort, if configured.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ExpressRouteLinkArgs']]]] links: The set of physical links of the ExpressRoutePort resource.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] peering_location: The name of the peering location that the ExpressRoutePort is mapped to physically.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['bandwidth_in_gbps'] = bandwidth_in_gbps
__props__['encapsulation'] = encapsulation
__props__['express_route_port_name'] = express_route_port_name
__props__['id'] = id
__props__['identity'] = identity
__props__['links'] = links
__props__['location'] = location
__props__['peering_location'] = peering_location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['tags'] = tags
__props__['allocation_date'] = None
__props__['circuits'] = None
__props__['etag'] = None
__props__['ether_type'] = None
__props__['mtu'] = None
__props__['name'] = None
__props__['provisioned_bandwidth_in_gbps'] = None
__props__['provisioning_state'] = None
__props__['resource_guid'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/latest:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20180801:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20181001:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20181101:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20181201:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20190201:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20190401:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20190601:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20190701:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20190801:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20190901:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20191101:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20191201:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20200301:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20200401:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20200501:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20200601:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20200701:ExpressRoutePort"), pulumi.Alias(type_="azure-nextgen:network/v20200801:ExpressRoutePort")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ExpressRoutePort, __self__).__init__(
'azure-nextgen:network:ExpressRoutePort',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ExpressRoutePort':
"""
Get an existing ExpressRoutePort resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return ExpressRoutePort(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="allocationDate")
def allocation_date(self) -> pulumi.Output[str]:
"""
Date of the physical port allocation to be used in Letter of Authorization.
"""
return pulumi.get(self, "allocation_date")
@property
@pulumi.getter(name="bandwidthInGbps")
def bandwidth_in_gbps(self) -> pulumi.Output[Optional[int]]:
"""
Bandwidth of procured ports in Gbps.
"""
return pulumi.get(self, "bandwidth_in_gbps")
@property
@pulumi.getter
def circuits(self) -> pulumi.Output[Sequence['outputs.SubResourceResponse']]:
"""
Reference the ExpressRoute circuit(s) that are provisioned on this ExpressRoutePort resource.
"""
return pulumi.get(self, "circuits")
@property
@pulumi.getter
def encapsulation(self) -> pulumi.Output[Optional[str]]:
"""
Encapsulation method on physical ports.
"""
return pulumi.get(self, "encapsulation")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="etherType")
def ether_type(self) -> pulumi.Output[str]:
"""
Ether type of the physical port.
"""
return pulumi.get(self, "ether_type")
@property
@pulumi.getter
def identity(self) -> pulumi.Output[Optional['outputs.ManagedServiceIdentityResponse']]:
"""
The identity of ExpressRoutePort, if configured.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def links(self) -> pulumi.Output[Optional[Sequence['outputs.ExpressRouteLinkResponse']]]:
"""
The set of physical links of the ExpressRoutePort resource.
"""
return pulumi.get(self, "links")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def mtu(self) -> pulumi.Output[str]:
"""
Maximum transmission unit of the physical port pair(s).
"""
return pulumi.get(self, "mtu")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="peeringLocation")
def peering_location(self) -> pulumi.Output[Optional[str]]:
"""
The name of the peering location that the ExpressRoutePort is mapped to physically.
"""
return pulumi.get(self, "peering_location")
@property
@pulumi.getter(name="provisionedBandwidthInGbps")
def provisioned_bandwidth_in_gbps(self) -> pulumi.Output[float]:
"""
Aggregate Gbps of associated circuit bandwidths.
"""
return pulumi.get(self, "provisioned_bandwidth_in_gbps")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the express route port resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> pulumi.Output[str]:
"""
The resource GUID property of the express route port resource.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
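# Hedged usage sketch (not part of this generated SDK file): creating an
# ExpressRoutePort in a Pulumi program with the constructor arguments
# documented in __init__ above. Resource names and values are illustrative.
#
#     import pulumi_azure_nextgen.network as network
#
#     port = network.ExpressRoutePort(
#         "examplePort",                          # Pulumi resource name (hypothetical)
#         resource_group_name="example-rg",       # required property
#         express_route_port_name="example-port",
#         peering_location="Example-Peering-Location",
#         bandwidth_in_gbps=10,
#         encapsulation="QinQ",                   # an ExpressRoutePortsEncapsulation value
#         location="westus2")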
| 43.992248 | 1,418 | 0.660088 |
a53bc4555ea6052f05f65f5bde9d03cb452d7896 | 426 | py | Python
Parte 2/classificando-atletas.py | ohniluap/python-exercises | 7a7af8eced2811791bc4dfebcfab84100b46224c | ["MIT"] | null | null | null
Parte 2/classificando-atletas.py | ohniluap/python-exercises | 7a7af8eced2811791bc4dfebcfab84100b46224c | ["MIT"] | null | null | null
Parte 2/classificando-atletas.py | ohniluap/python-exercises | 7a7af8eced2811791bc4dfebcfab84100b46224c | ["MIT"] | null | null | null |
from datetime import date
atual = date.today().year
nascimento = int(input('Ano de nascimento: '))
idade = atual - nascimento
print('O atleta tem {} anos.'.format(idade))
if idade <= 9:
print('CLASSIFICAÇÃO: MIRIM')
elif idade <= 14:
print('CLASSIFICAÇÃO: INFANTIL')
elif idade <= 19:
print('CLASSIFICAÇÃO: JUNIOR')
elif idade <= 25:
print('CLASSIFICAÇÃO: SÊNIOR')
else:
print('CLASSIFICAÇÃO: MASTER')
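# Age bands used above (labels kept in Portuguese): MIRIM = up to 9,
# INFANTIL = 10-14, JUNIOR = 15-19, SÊNIOR = 20-25, MASTER = 26 and over.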
| 26.625 | 46 | 0.678404 |
27264a8c6440aa048dea24d80482f2feb070a924 | 132,608 | py | Python
neutron/tests/unit/agent/l3/test_agent.py | prap4source/Neutron | bd7c3a7708de2aaeaa374a9ee563da1e1a0955c8 | ["Apache-2.0"] | null | null | null
neutron/tests/unit/agent/l3/test_agent.py | prap4source/Neutron | bd7c3a7708de2aaeaa374a9ee563da1e1a0955c8 | ["Apache-2.0"] | null | null | null
neutron/tests/unit/agent/l3/test_agent.py | prap4source/Neutron | bd7c3a7708de2aaeaa374a9ee563da1e1a0955c8 | ["Apache-2.0"] | null | null | null |
# Copyright 2012 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from itertools import chain as iter_chain
from itertools import combinations as iter_combinations
import eventlet
import mock
import netaddr
from oslo_log import log
import oslo_messaging
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from testtools import matchers
from neutron.agent.common import config as agent_config
from neutron.agent.l3 import agent as l3_agent
from neutron.agent.l3 import config as l3_config
from neutron.agent.l3 import dvr_edge_router as dvr_router
from neutron.agent.l3 import dvr_snat_ns
from neutron.agent.l3 import ha
from neutron.agent.l3 import legacy_router
from neutron.agent.l3 import link_local_allocator as lla
from neutron.agent.l3 import namespaces
from neutron.agent.l3 import router_info as l3router
from neutron.agent.l3 import router_processing_queue
from neutron.agent.linux import dibbler
from neutron.agent.linux import external_process
from neutron.agent.linux import interface
from neutron.agent.linux import iptables_manager
from neutron.agent.linux import pd
from neutron.agent.linux import ra
from neutron.agent.metadata import driver as metadata_driver
from neutron.agent import rpc as agent_rpc
from neutron.common import config as base_config
from neutron.common import constants as l3_constants
from neutron.common import exceptions as n_exc
from neutron.extensions import portbindings
from neutron.plugins.common import constants as p_const
from neutron.tests import base
from neutron.tests.common import l3_test_common
_uuid = uuidutils.generate_uuid
HOSTNAME = 'myhost'
FAKE_ID = _uuid()
FAKE_ID_2 = _uuid()
FIP_PRI = 32768
class BasicRouterOperationsFramework(base.BaseTestCase):
def setUp(self):
super(BasicRouterOperationsFramework, self).setUp()
mock.patch('eventlet.spawn').start()
self.conf = agent_config.setup_conf()
self.conf.register_opts(base_config.core_opts)
log.register_options(self.conf)
self.conf.register_opts(agent_config.AGENT_STATE_OPTS, 'AGENT')
self.conf.register_opts(l3_config.OPTS)
self.conf.register_opts(ha.OPTS)
agent_config.register_interface_driver_opts_helper(self.conf)
agent_config.register_process_monitor_opts(self.conf)
agent_config.register_availability_zone_opts_helper(self.conf)
self.conf.register_opts(interface.OPTS)
self.conf.register_opts(external_process.OPTS)
self.conf.register_opts(pd.OPTS)
self.conf.register_opts(ra.OPTS)
self.conf.set_override('interface_driver',
'neutron.agent.linux.interface.NullDriver')
self.conf.set_override('send_arp_for_ha', 1)
self.conf.set_override('state_path', '/tmp')
self.conf.set_override('ra_confs', '/tmp')
self.conf.set_override('pd_dhcp_driver', '')
self.device_exists_p = mock.patch(
'neutron.agent.linux.ip_lib.device_exists')
self.device_exists = self.device_exists_p.start()
self.ensure_dir = mock.patch('neutron.common.utils.ensure_dir').start()
mock.patch('neutron.agent.linux.keepalived.KeepalivedManager'
'.get_full_config_file_path').start()
self.utils_exec_p = mock.patch(
'neutron.agent.linux.utils.execute')
self.utils_exec = self.utils_exec_p.start()
self.utils_replace_file_p = mock.patch(
'neutron.common.utils.replace_file')
self.utils_replace_file = self.utils_replace_file_p.start()
self.external_process_p = mock.patch(
'neutron.agent.linux.external_process.ProcessManager')
self.external_process = self.external_process_p.start()
self.process_monitor = mock.patch(
'neutron.agent.linux.external_process.ProcessMonitor').start()
self.send_adv_notif_p = mock.patch(
'neutron.agent.linux.ip_lib.send_ip_addr_adv_notif')
self.send_adv_notif = self.send_adv_notif_p.start()
self.dvr_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver')
driver_cls = self.dvr_cls_p.start()
self.mock_driver = mock.MagicMock()
self.mock_driver.DEV_NAME_LEN = (
interface.LinuxInterfaceDriver.DEV_NAME_LEN)
driver_cls.return_value = self.mock_driver
self.ip_cls_p = mock.patch('neutron.agent.linux.ip_lib.IPWrapper')
ip_cls = self.ip_cls_p.start()
self.mock_ip = mock.MagicMock()
ip_cls.return_value = self.mock_ip
ip_rule = mock.patch('neutron.agent.linux.ip_lib.IPRule').start()
self.mock_rule = mock.MagicMock()
ip_rule.return_value = self.mock_rule
ip_dev = mock.patch('neutron.agent.linux.ip_lib.IPDevice').start()
self.mock_ip_dev = mock.MagicMock()
ip_dev.return_value = self.mock_ip_dev
self.l3pluginApi_cls_p = mock.patch(
'neutron.agent.l3.agent.L3PluginApi')
l3pluginApi_cls = self.l3pluginApi_cls_p.start()
self.plugin_api = mock.MagicMock()
l3pluginApi_cls.return_value = self.plugin_api
self.looping_call_p = mock.patch(
'oslo_service.loopingcall.FixedIntervalLoopingCall')
self.looping_call_p.start()
subnet_id_1 = _uuid()
subnet_id_2 = _uuid()
self.snat_ports = [{'subnets': [{'cidr': '152.2.0.0/16',
'gateway_ip': '152.2.0.1',
'id': subnet_id_1}],
'network_id': _uuid(),
'device_owner':
l3_constants.DEVICE_OWNER_ROUTER_SNAT,
'mac_address': 'fa:16:3e:80:8d:80',
'fixed_ips': [{'subnet_id': subnet_id_1,
'ip_address': '152.2.0.13',
'prefixlen': 16}],
'id': _uuid(), 'device_id': _uuid()},
{'subnets': [{'cidr': '152.10.0.0/16',
'gateway_ip': '152.10.0.1',
'id': subnet_id_2}],
'network_id': _uuid(),
'device_owner':
l3_constants.DEVICE_OWNER_ROUTER_SNAT,
'mac_address': 'fa:16:3e:80:8d:80',
'fixed_ips': [{'subnet_id': subnet_id_2,
'ip_address': '152.10.0.13',
'prefixlen': 16}],
'id': _uuid(), 'device_id': _uuid()}]
self.ri_kwargs = {'agent_conf': self.conf,
'interface_driver': self.mock_driver}
def _process_router_instance_for_agent(self, agent, ri, router):
ri.router = router
if not ri.radvd:
ri.radvd = ra.DaemonMonitor(router['id'],
ri.ns_name,
agent.process_monitor,
ri.get_internal_device_name,
self.conf)
ri.process(agent)
class TestBasicRouterOperations(BasicRouterOperationsFramework):
def test_init_ha_conf(self):
with mock.patch('os.path.dirname', return_value='/etc/ha/'):
l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.ensure_dir.assert_called_once_with('/etc/ha/')
def test_enqueue_state_change_router_not_found(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
non_existent_router = 42
# Make sure the exceptional code path has coverage
agent.enqueue_state_change(non_existent_router, 'master')
def test_enqueue_state_change_metadata_disable(self):
self.conf.set_override('enable_metadata_proxy', False)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = mock.Mock()
router_info = mock.MagicMock()
agent.router_info[router.id] = router_info
agent._update_metadata_proxy = mock.Mock()
agent.enqueue_state_change(router.id, 'master')
self.assertFalse(agent._update_metadata_proxy.call_count)
def test_check_ha_state_for_router_master_standby(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = mock.Mock()
router.id = '1234'
router_info = mock.MagicMock()
agent.router_info[router.id] = router_info
router_info.ha_state = 'master'
with mock.patch.object(agent.state_change_notifier,
'queue_event') as queue_event:
agent.check_ha_state_for_router(
router.id, l3_constants.HA_ROUTER_STATE_STANDBY)
queue_event.assert_called_once_with((router.id, 'master'))
def test_check_ha_state_for_router_standby_standby(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = mock.Mock()
router.id = '1234'
router_info = mock.MagicMock()
agent.router_info[router.id] = router_info
router_info.ha_state = 'backup'
with mock.patch.object(agent.state_change_notifier,
'queue_event') as queue_event:
agent.check_ha_state_for_router(
router.id, l3_constants.HA_ROUTER_STATE_STANDBY)
queue_event.assert_not_called()
def test_periodic_sync_routers_task_call_check_ha_state_for_router(self):
agent = l3_agent.L3NATAgentWithStateReport(HOSTNAME, self.conf)
ha_id = _uuid()
active_routers = [
{'id': ha_id,
l3_constants.HA_ROUTER_STATE_KEY:
l3_constants.HA_ROUTER_STATE_STANDBY,
'ha': True},
{'id': _uuid()}]
self.plugin_api.get_router_ids.return_value = [r['id'] for r
in active_routers]
self.plugin_api.get_routers.return_value = active_routers
with mock.patch.object(agent, 'check_ha_state_for_router') as check:
agent.periodic_sync_routers_task(agent.context)
check.assert_called_once_with(ha_id,
l3_constants.HA_ROUTER_STATE_STANDBY)
def test_periodic_sync_routers_task_raise_exception(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.plugin_api.get_router_ids.return_value = ['fake_id']
self.plugin_api.get_routers.side_effect = ValueError
self.assertRaises(ValueError,
agent.periodic_sync_routers_task,
agent.context)
self.assertTrue(agent.fullsync)
def test_l3_initial_report_state_done(self):
with mock.patch.object(l3_agent.L3NATAgentWithStateReport,
'periodic_sync_routers_task'),\
mock.patch.object(agent_rpc.PluginReportStateAPI,
'report_state') as report_state,\
mock.patch.object(eventlet, 'spawn_n'):
agent = l3_agent.L3NATAgentWithStateReport(host=HOSTNAME,
conf=self.conf)
self.assertTrue(agent.agent_state['start_flag'])
agent.after_start()
report_state.assert_called_once_with(agent.context,
agent.agent_state,
True)
self.assertIsNone(agent.agent_state.get('start_flag'))
def test_report_state_revival_logic(self):
with mock.patch.object(agent_rpc.PluginReportStateAPI,
'report_state') as report_state:
agent = l3_agent.L3NATAgentWithStateReport(host=HOSTNAME,
conf=self.conf)
report_state.return_value = l3_constants.AGENT_REVIVED
agent._report_state()
self.assertTrue(agent.fullsync)
agent.fullsync = False
report_state.return_value = l3_constants.AGENT_ALIVE
agent._report_state()
self.assertFalse(agent.fullsync)
def test_periodic_sync_routers_task_call_clean_stale_namespaces(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.plugin_api.get_routers.return_value = []
agent.periodic_sync_routers_task(agent.context)
self.assertFalse(agent.namespaces_manager._clean_stale)
def test_periodic_sync_routers_task_call_clean_stale_meta_proxies(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
stale_router_ids = [_uuid(), _uuid()]
active_routers = [{'id': _uuid()}, {'id': _uuid()}]
self.plugin_api.get_router_ids.return_value = [r['id'] for r
in active_routers]
self.plugin_api.get_routers.return_value = active_routers
namespace_list = [namespaces.NS_PREFIX + r_id
for r_id in stale_router_ids]
namespace_list += [namespaces.NS_PREFIX + r['id']
for r in active_routers]
self.mock_ip.get_namespaces.return_value = namespace_list
driver = metadata_driver.MetadataDriver
with mock.patch.object(
driver, 'destroy_monitored_metadata_proxy') as destroy_proxy:
agent.periodic_sync_routers_task(agent.context)
expected_calls = [mock.call(mock.ANY, r_id, agent.conf)
for r_id in stale_router_ids]
self.assertEqual(len(stale_router_ids), destroy_proxy.call_count)
destroy_proxy.assert_has_calls(expected_calls, any_order=True)
def test_router_info_create(self):
id = _uuid()
ri = l3router.RouterInfo(id, {}, **self.ri_kwargs)
self.assertTrue(ri.ns_name.endswith(id))
def test_router_info_create_with_router(self):
ns_id = _uuid()
subnet_id = _uuid()
ex_gw_port = {'id': _uuid(),
'network_id': _uuid(),
'fixed_ips': [{'ip_address': '19.4.4.4',
'prefixlen': 24,
'subnet_id': subnet_id}],
'subnets': [{'id': subnet_id,
'cidr': '19.4.4.0/24',
'gateway_ip': '19.4.4.1'}]}
router = {
'id': _uuid(),
'enable_snat': True,
'routes': [],
'gw_port': ex_gw_port}
ri = l3router.RouterInfo(ns_id, router, **self.ri_kwargs)
self.assertTrue(ri.ns_name.endswith(ns_id))
self.assertEqual(router, ri.router)
def test_agent_create(self):
l3_agent.L3NATAgent(HOSTNAME, self.conf)
def _test_internal_network_action(self, action):
router = l3_test_common.prepare_router_data(num_internal_ports=2)
router_id = router['id']
ri = l3router.RouterInfo(router_id, router, **self.ri_kwargs)
port = {'network_id': _uuid(),
'id': _uuid(),
'mac_address': 'ca:fe:de:ad:be:ef',
'fixed_ips': [{'subnet_id': _uuid(),
'ip_address': '99.0.1.9',
'prefixlen': 24}]}
interface_name = ri.get_internal_device_name(port['id'])
if action == 'add':
self.device_exists.return_value = False
ri.internal_network_added(port)
self.assertEqual(1, self.mock_driver.plug.call_count)
self.assertEqual(1, self.mock_driver.init_router_port.call_count)
self.send_adv_notif.assert_called_once_with(ri.ns_name,
interface_name,
'99.0.1.9', mock.ANY)
elif action == 'remove':
self.device_exists.return_value = True
ri.internal_network_removed(port)
self.assertEqual(1, self.mock_driver.unplug.call_count)
else:
raise Exception("Invalid action %s" % action)
@staticmethod
def _fixed_ip_cidr(fixed_ip):
return '%s/%s' % (fixed_ip['ip_address'], fixed_ip['prefixlen'])
def _test_internal_network_action_dist(self, action):
router = l3_test_common.prepare_router_data(num_internal_ports=2)
router_id = router['id']
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = dvr_router.DvrEdgeRouter(
agent, HOSTNAME, router_id, router, **self.ri_kwargs)
subnet_id = _uuid()
port = {'network_id': _uuid(),
'id': _uuid(),
'mac_address': 'ca:fe:de:ad:be:ef',
'fixed_ips': [{'subnet_id': subnet_id,
'ip_address': '99.0.1.9',
'prefixlen': 24}],
'subnets': [{'id': subnet_id}]}
ri.router['gw_port_host'] = HOSTNAME
agent.host = HOSTNAME
agent.conf.agent_mode = 'dvr_snat'
sn_port = {'fixed_ips': [{'ip_address': '20.0.0.31',
'subnet_id': _uuid()}],
'subnets': [{'gateway_ip': '20.0.0.1'}],
'extra_subnets': [{'cidr': '172.16.0.0/24'}],
'id': _uuid(),
'network_id': _uuid(),
'mac_address': 'ca:fe:de:ad:be:ef'}
ex_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30',
'prefixlen': 24,
'subnet_id': _uuid()}],
'subnets': [{'gateway_ip': '20.0.0.1'}],
'extra_subnets': [{'cidr': '172.16.0.0/24'}],
'id': _uuid(),
portbindings.HOST_ID: HOSTNAME,
'network_id': _uuid(),
'mac_address': 'ca:fe:de:ad:be:ef'}
ri.snat_ports = sn_port
ri.ex_gw_port = ex_gw_port
ri.snat_namespace = mock.Mock()
if action == 'add':
self.device_exists.return_value = False
ri.get_snat_port_for_internal_port = mock.Mock(
return_value=sn_port)
ri._snat_redirect_add = mock.Mock()
ri._set_subnet_arp_info = mock.Mock()
ri._internal_network_added = mock.Mock()
ri._set_subnet_arp_info = mock.Mock()
ri.internal_network_added(port)
self.assertEqual(1, ri._snat_redirect_add.call_count)
self.assertEqual(2, ri._internal_network_added.call_count)
ri._set_subnet_arp_info.assert_called_once_with(subnet_id)
ri._internal_network_added.assert_called_with(
dvr_snat_ns.SnatNamespace.get_snat_ns_name(ri.router['id']),
sn_port['network_id'],
sn_port['id'],
sn_port['fixed_ips'],
sn_port['mac_address'],
ri._get_snat_int_device_name(sn_port['id']),
dvr_snat_ns.SNAT_INT_DEV_PREFIX,
mtu=None)
elif action == 'remove':
self.device_exists.return_value = False
ri.get_snat_port_for_internal_port = mock.Mock(
return_value=sn_port)
ri._delete_arp_cache_for_internal_port = mock.Mock()
ri._snat_redirect_modify = mock.Mock()
ri.internal_network_removed(port)
self.assertEqual(
1, ri._delete_arp_cache_for_internal_port.call_count)
ri._snat_redirect_modify.assert_called_with(
sn_port, port,
ri.get_internal_device_name(port['id']),
is_add=False)
def test_agent_add_internal_network(self):
self._test_internal_network_action('add')
def test_agent_add_internal_network_dist(self):
self._test_internal_network_action_dist('add')
def test_agent_remove_internal_network(self):
self._test_internal_network_action('remove')
def test_agent_remove_internal_network_dist(self):
self._test_internal_network_action_dist('remove')
def _add_external_gateway(self, ri, router, ex_gw_port, interface_name,
use_fake_fip=False,
no_subnet=False, no_sub_gw=None,
dual_stack=False):
self.device_exists.return_value = False
if no_sub_gw is None:
no_sub_gw = []
if use_fake_fip:
fake_fip = {'floatingips': [{'id': _uuid(),
'floating_ip_address': '192.168.1.34',
'fixed_ip_address': '192.168.0.1',
'port_id': _uuid()}]}
router[l3_constants.FLOATINGIP_KEY] = fake_fip['floatingips']
ri.external_gateway_added(ex_gw_port, interface_name)
if not router.get('distributed'):
self.assertEqual(1, self.mock_driver.plug.call_count)
self.assertEqual(1, self.mock_driver.init_router_port.call_count)
if no_subnet and not dual_stack:
self.assertEqual(0, self.send_adv_notif.call_count)
ip_cidrs = []
kwargs = {'preserve_ips': [],
'namespace': 'qrouter-' + router['id'],
'extra_subnets': [],
'clean_connections': True}
else:
exp_arp_calls = [mock.call(ri.ns_name, interface_name,
'20.0.0.30', mock.ANY)]
if dual_stack and not no_sub_gw:
exp_arp_calls += [mock.call(ri.ns_name, interface_name,
'2001:192:168:100::2',
mock.ANY)]
self.send_adv_notif.assert_has_calls(exp_arp_calls)
ip_cidrs = ['20.0.0.30/24']
if dual_stack:
if not no_sub_gw:
ip_cidrs.append('2001:192:168:100::2/64')
kwargs = {'preserve_ips': ['192.168.1.34/32'],
'namespace': 'qrouter-' + router['id'],
'extra_subnets': [{'cidr': '172.16.0.0/24'}],
'clean_connections': True}
self.mock_driver.init_router_port.assert_called_with(
interface_name, ip_cidrs, **kwargs)
else:
ri._create_dvr_gateway.assert_called_once_with(
ex_gw_port, interface_name)
def _test_external_gateway_action(self, action, router, dual_stack=False):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ex_net_id = _uuid()
sn_port = self.snat_ports[1]
# Special setup for dvr routers
if router.get('distributed'):
agent.conf.agent_mode = 'dvr_snat'
agent.host = HOSTNAME
ri = dvr_router.DvrEdgeRouter(agent,
HOSTNAME,
router['id'],
router,
**self.ri_kwargs)
ri._create_dvr_gateway = mock.Mock()
ri.get_snat_interfaces = mock.Mock(return_value=self.snat_ports)
ri.snat_ports = self.snat_ports
ri._create_snat_namespace()
ri.fip_ns = agent.get_fip_ns(ex_net_id)
ri.internal_ports = self.snat_ports
else:
ri = l3router.RouterInfo(
router['id'], router,
**self.ri_kwargs)
ri.use_ipv6 = False
subnet_id = _uuid()
fixed_ips = [{'subnet_id': subnet_id,
'ip_address': '20.0.0.30',
'prefixlen': 24}]
subnets = [{'id': subnet_id,
'cidr': '20.0.0.0/24',
'gateway_ip': '20.0.0.1'}]
if dual_stack:
ri.use_ipv6 = True
subnet_id_v6 = _uuid()
fixed_ips.append({'subnet_id': subnet_id_v6,
'ip_address': '2001:192:168:100::2',
'prefixlen': 64})
subnets.append({'id': subnet_id_v6,
'cidr': '2001:192:168:100::/64',
'gateway_ip': '2001:192:168:100::1'})
ex_gw_port = {'fixed_ips': fixed_ips,
'subnets': subnets,
'extra_subnets': [{'cidr': '172.16.0.0/24'}],
'id': _uuid(),
'network_id': ex_net_id,
'mac_address': 'ca:fe:de:ad:be:ef'}
ex_gw_port_no_sub = {'fixed_ips': [],
'id': _uuid(),
'network_id': ex_net_id,
'mac_address': 'ca:fe:de:ad:be:ef'}
interface_name = ri.get_external_device_name(ex_gw_port['id'])
if action == 'add':
self._add_external_gateway(ri, router, ex_gw_port, interface_name,
use_fake_fip=True,
dual_stack=dual_stack)
elif action == 'add_no_sub':
ri.use_ipv6 = True
self._add_external_gateway(ri, router, ex_gw_port_no_sub,
interface_name,
no_subnet=True)
elif action == 'add_no_sub_v6_gw':
ri.use_ipv6 = True
self.conf.set_override('ipv6_gateway',
'fe80::f816:3eff:fe2e:1')
if dual_stack:
use_fake_fip = True
# Remove v6 entries
del ex_gw_port['fixed_ips'][-1]
del ex_gw_port['subnets'][-1]
else:
use_fake_fip = False
ex_gw_port = ex_gw_port_no_sub
self._add_external_gateway(ri, router, ex_gw_port,
interface_name, no_subnet=True,
no_sub_gw='fe80::f816:3eff:fe2e:1',
use_fake_fip=use_fake_fip,
dual_stack=dual_stack)
elif action == 'remove':
self.device_exists.return_value = True
ri.get_snat_port_for_internal_port = mock.Mock(
return_value=sn_port)
ri._snat_redirect_remove = mock.Mock()
ri.router['gw_port'] = ""
ri.external_gateway_removed(ex_gw_port, interface_name)
if not router.get('distributed'):
self.mock_driver.unplug.assert_called_once_with(
interface_name,
bridge=agent.conf.external_network_bridge,
namespace=mock.ANY,
prefix=mock.ANY)
else:
ri._snat_redirect_remove.assert_called_with(
sn_port, sn_port,
ri.get_internal_device_name(sn_port['id']))
ri.get_snat_port_for_internal_port.assert_called_with(
mock.ANY, ri.snat_ports)
else:
raise Exception("Invalid action %s" % action)
def _test_external_gateway_updated(self, dual_stack=False):
router = l3_test_common.prepare_router_data(num_internal_ports=2)
ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
ri.use_ipv6 = False
interface_name, ex_gw_port = l3_test_common.prepare_ext_gw_test(
self, ri, dual_stack=dual_stack)
fake_fip = {'floatingips': [{'id': _uuid(),
'floating_ip_address': '192.168.1.34',
'fixed_ip_address': '192.168.0.1',
'port_id': _uuid()}]}
router[l3_constants.FLOATINGIP_KEY] = fake_fip['floatingips']
ri.external_gateway_updated(ex_gw_port, interface_name)
self.assertEqual(1, self.mock_driver.plug.call_count)
self.assertEqual(1, self.mock_driver.init_router_port.call_count)
exp_arp_calls = [mock.call(ri.ns_name, interface_name,
'20.0.0.30', mock.ANY)]
if dual_stack:
ri.use_ipv6 = True
exp_arp_calls += [mock.call(ri.ns_name, interface_name,
'2001:192:168:100::2', mock.ANY)]
self.send_adv_notif.assert_has_calls(exp_arp_calls)
ip_cidrs = ['20.0.0.30/24']
gateway_ips = ['20.0.0.1']
if dual_stack:
ip_cidrs.append('2001:192:168:100::2/64')
gateway_ips.append('2001:192:168:100::1')
kwargs = {'preserve_ips': ['192.168.1.34/32'],
'namespace': 'qrouter-' + router['id'],
'extra_subnets': [{'cidr': '172.16.0.0/24'}],
'clean_connections': True}
self.mock_driver.init_router_port.assert_called_with(interface_name,
ip_cidrs,
**kwargs)
def test_external_gateway_updated(self):
self._test_external_gateway_updated()
def test_external_gateway_updated_dual_stack(self):
self._test_external_gateway_updated(dual_stack=True)
def test_dvr_edge_router_init_for_snat_namespace_object(self):
router = {'id': _uuid()}
ri = dvr_router.DvrEdgeRouter(mock.Mock(),
HOSTNAME,
router['id'],
router,
**self.ri_kwargs)
# Make sure that ri.snat_namespace object is created when the
# router is initialized
self.assertIsNotNone(ri.snat_namespace)
def test_ext_gw_updated_calling_snat_ns_delete_if_gw_port_host_none(
self):
"""Test to check the impact of snat_namespace object.
This function specifically checks the impact of the snat
namespace object value on external_gateway_removed for deleting
snat_namespace when the gw_port_host mismatches or none.
"""
router = l3_test_common.prepare_router_data(num_internal_ports=2)
ri = dvr_router.DvrEdgeRouter(mock.Mock(),
HOSTNAME,
router['id'],
router,
**self.ri_kwargs)
with mock.patch.object(dvr_snat_ns.SnatNamespace,
'delete') as snat_ns_delete:
interface_name, ex_gw_port = l3_test_common.prepare_ext_gw_test(
self, ri)
router['gw_port_host'] = ''
ri._snat_redirect_remove = mock.Mock()
ri.external_gateway_updated(ex_gw_port, interface_name)
if router['gw_port_host'] != ri.host:
self.assertFalse(ri._snat_redirect_remove.called)
self.assertEqual(1, snat_ns_delete.call_count)
@mock.patch.object(namespaces.Namespace, 'delete')
def test_snat_ns_delete_not_called_when_snat_namespace_does_not_exist(
self, mock_ns_del):
"""Test to check the impact of snat_namespace object.
This function specifically checks the impact of the snat
namespace object initialization without the actual creation
of snat_namespace. When deletes are issued to the snat
namespace based on the snat namespace object existence, it
should be checking for the valid namespace existence before
it tries to delete.
"""
router = l3_test_common.prepare_router_data(num_internal_ports=2)
ri = dvr_router.DvrEdgeRouter(mock.Mock(),
HOSTNAME,
router['id'],
router,
**self.ri_kwargs)
# Make sure we set a return value to emulate the non-existence
# of the namespace.
self.mock_ip.netns.exists.return_value = False
self.assertIsNotNone(ri.snat_namespace)
interface_name, ex_gw_port = l3_test_common.prepare_ext_gw_test(self,
ri)
ri._external_gateway_removed = mock.Mock()
ri.external_gateway_removed(ex_gw_port, interface_name)
self.assertFalse(mock_ns_del.called)
def _test_ext_gw_updated_dvr_edge_router(self, host_match,
snat_hosted_before=True):
"""
Helper to test external gw update for edge router on dvr_snat agent
:param host_match: True if new gw host should be the same as agent host
:param snat_hosted_before: True if agent has already been hosting
snat for the router
"""
router = l3_test_common.prepare_router_data(num_internal_ports=2)
ri = dvr_router.DvrEdgeRouter(mock.Mock(),
HOSTNAME,
router['id'],
router,
**self.ri_kwargs)
if snat_hosted_before:
ri._create_snat_namespace()
snat_ns_name = ri.snat_namespace.name
interface_name, ex_gw_port = l3_test_common.prepare_ext_gw_test(self,
ri)
ri._external_gateway_added = mock.Mock()
router['gw_port_host'] = ri.host if host_match else (ri.host + 'foo')
ri.external_gateway_updated(ex_gw_port, interface_name)
if not host_match:
self.assertFalse(ri._external_gateway_added.called)
if snat_hosted_before:
# host mismatch means that snat was rescheduled to another
# agent, hence need to verify that gw port was unplugged and
# snat namespace was deleted
self.mock_driver.unplug.assert_called_with(
interface_name,
bridge=self.conf.external_network_bridge,
namespace=snat_ns_name,
prefix=l3_agent.EXTERNAL_DEV_PREFIX)
else:
if not snat_hosted_before:
self.assertIsNotNone(ri.snat_namespace)
self.assertTrue(ri._external_gateway_added.called)
def test_ext_gw_updated_dvr_edge_router(self):
self._test_ext_gw_updated_dvr_edge_router(host_match=True)
def test_ext_gw_updated_dvr_edge_router_host_mismatch(self):
self._test_ext_gw_updated_dvr_edge_router(host_match=False)
def test_ext_gw_updated_dvr_dvr_edge_router_snat_rescheduled(self):
self._test_ext_gw_updated_dvr_edge_router(host_match=True,
snat_hosted_before=False)
def test_agent_add_external_gateway(self):
router = l3_test_common.prepare_router_data(num_internal_ports=2)
self._test_external_gateway_action('add', router)
def test_agent_add_external_gateway_dual_stack(self):
router = l3_test_common.prepare_router_data(num_internal_ports=2)
self._test_external_gateway_action('add', router, dual_stack=True)
def test_agent_add_external_gateway_dist(self):
router = l3_test_common.prepare_router_data(num_internal_ports=2)
router['distributed'] = True
router['gw_port_host'] = HOSTNAME
self._test_external_gateway_action('add', router)
def test_agent_add_external_gateway_dist_dual_stack(self):
router = l3_test_common.prepare_router_data(num_internal_ports=2)
router['distributed'] = True
router['gw_port_host'] = HOSTNAME
self._test_external_gateway_action('add', router, dual_stack=True)
def test_agent_add_external_gateway_no_subnet(self):
router = l3_test_common.prepare_router_data(num_internal_ports=2,
v6_ext_gw_with_sub=False)
self._test_external_gateway_action('add_no_sub', router)
def test_agent_add_external_gateway_no_subnet_with_ipv6_gw(self):
router = l3_test_common.prepare_router_data(num_internal_ports=2,
v6_ext_gw_with_sub=False)
self._test_external_gateway_action('add_no_sub_v6_gw', router)
def test_agent_add_external_gateway_dual_stack_no_subnet_w_ipv6_gw(self):
router = l3_test_common.prepare_router_data(num_internal_ports=2,
v6_ext_gw_with_sub=False)
self._test_external_gateway_action('add_no_sub_v6_gw',
router, dual_stack=True)
def test_agent_remove_external_gateway(self):
router = l3_test_common.prepare_router_data(num_internal_ports=2)
self._test_external_gateway_action('remove', router)
def test_agent_remove_external_gateway_dual_stack(self):
router = l3_test_common.prepare_router_data(num_internal_ports=2)
self._test_external_gateway_action('remove', router, dual_stack=True)
def test_agent_remove_external_gateway_dist(self):
router = l3_test_common.prepare_router_data(num_internal_ports=2)
router['distributed'] = True
router['gw_port_host'] = HOSTNAME
self._test_external_gateway_action('remove', router)
def test_agent_remove_external_gateway_dist_dual_stack(self):
router = l3_test_common.prepare_router_data(num_internal_ports=2)
router['distributed'] = True
router['gw_port_host'] = HOSTNAME
self._test_external_gateway_action('remove', router, dual_stack=True)
def _verify_snat_mangle_rules(self, nat_rules, mangle_rules, router,
negate=False):
interfaces = router[l3_constants.INTERFACE_KEY]
source_cidrs = []
for iface in interfaces:
for subnet in iface['subnets']:
prefix = subnet['cidr'].split('/')[1]
source_cidr = "%s/%s" % (iface['fixed_ips'][0]['ip_address'],
prefix)
source_cidrs.append(source_cidr)
source_nat_ip = router['gw_port']['fixed_ips'][0]['ip_address']
interface_name = ('qg-%s' % router['gw_port']['id'])[:14]
expected_rules = [
'! -i %s ! -o %s -m conntrack ! --ctstate DNAT -j ACCEPT' %
(interface_name, interface_name),
'-o %s -j SNAT --to-source %s' % (interface_name, source_nat_ip),
'-m mark ! --mark 0x2/%s -m conntrack --ctstate DNAT '
'-j SNAT --to-source %s' %
(l3_constants.ROUTER_MARK_MASK, source_nat_ip)]
for r in nat_rules:
if negate:
self.assertNotIn(r.rule, expected_rules)
else:
self.assertIn(r.rule, expected_rules)
expected_rules = [
'-i %s -j MARK --set-xmark 0x2/%s' %
(interface_name, l3_constants.ROUTER_MARK_MASK),
'-o %s -m connmark --mark 0x0/%s -j CONNMARK '
'--save-mark --nfmask %s --ctmask %s' %
(interface_name,
l3router.ADDRESS_SCOPE_MARK_MASK,
l3router.ADDRESS_SCOPE_MARK_MASK,
l3router.ADDRESS_SCOPE_MARK_MASK)]
for r in mangle_rules:
if negate:
self.assertNotIn(r.rule, expected_rules)
else:
self.assertIn(r.rule, expected_rules)
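# get_snat_port_for_internal_port() should return the SNAT port whose
# subnet matches the internal port, and None when no subnet matches.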
def test_get_snat_port_for_internal_port(self):
router = l3_test_common.prepare_router_data(num_internal_ports=4)
ri = dvr_router.DvrEdgeRouter(mock.sentinel.agent,
HOSTNAME,
router['id'],
router,
**self.ri_kwargs)
test_port = {
'mac_address': '00:12:23:34:45:56',
'fixed_ips': [{'subnet_id': l3_test_common.get_subnet_id(
router[l3_constants.INTERFACE_KEY][0]),
'ip_address': '101.12.13.14'}]}
internal_ports = ri.router.get(l3_constants.INTERFACE_KEY, [])
# test valid case
with mock.patch.object(ri, 'get_snat_interfaces') as get_interfaces:
get_interfaces.return_value = [test_port]
res_port = ri.get_snat_port_for_internal_port(internal_ports[0])
self.assertEqual(test_port, res_port)
# test invalid case
test_port['fixed_ips'][0]['subnet_id'] = 1234
res_ip = ri.get_snat_port_for_internal_port(internal_ports[0])
self.assertNotEqual(test_port, res_ip)
self.assertIsNone(res_ip)
def test_process_cent_router(self):
router = l3_test_common.prepare_router_data()
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
self._test_process_router(ri, agent)
def test_process_dist_router(self):
router = l3_test_common.prepare_router_data()
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = dvr_router.DvrEdgeRouter(agent,
HOSTNAME,
router['id'],
router,
**self.ri_kwargs)
subnet_id = l3_test_common.get_subnet_id(
router[l3_constants.INTERFACE_KEY][0])
ri.router['distributed'] = True
ri.router['_snat_router_interfaces'] = [{
'fixed_ips': [{'subnet_id': subnet_id,
'ip_address': '1.2.3.4'}]}]
ri.router['gw_port_host'] = None
self._test_process_router(ri, agent)
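# Shared helper: drives ri.process() through a sequence of scenarios
# (initial processing, floating IP remap, gateway IP change, floating IP
# removal and full teardown) and checks which gateway and floating-IP
# hooks are invoked at each step.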
def _test_process_router(self, ri, agent):
router = ri.router
agent.host = HOSTNAME
fake_fip_id = 'fake_fip_id'
ri.create_dvr_fip_interfaces = mock.Mock()
ri.process_floating_ip_addresses = mock.Mock()
ri.process_floating_ip_nat_rules = mock.Mock()
ri.process_floating_ip_addresses.return_value = {
fake_fip_id: 'ACTIVE'}
ri.external_gateway_added = mock.Mock()
ri.external_gateway_updated = mock.Mock()
ri.process_address_scope = mock.Mock()
fake_floatingips1 = {'floatingips': [
{'id': fake_fip_id,
'floating_ip_address': '8.8.8.8',
'fixed_ip_address': '7.7.7.7',
'port_id': _uuid(),
'host': HOSTNAME}]}
ri.process(agent)
ri.process_floating_ip_addresses.assert_called_with(mock.ANY)
ri.process_floating_ip_addresses.reset_mock()
ri.process_floating_ip_nat_rules.assert_called_with()
ri.process_floating_ip_nat_rules.reset_mock()
ri.external_gateway_added.reset_mock()
# remap floating IP to a new fixed ip
fake_floatingips2 = copy.deepcopy(fake_floatingips1)
fake_floatingips2['floatingips'][0]['fixed_ip_address'] = '7.7.7.8'
router[l3_constants.FLOATINGIP_KEY] = fake_floatingips2['floatingips']
ri.process(agent)
ri.process_floating_ip_addresses.assert_called_with(mock.ANY)
ri.process_floating_ip_addresses.reset_mock()
ri.process_floating_ip_nat_rules.assert_called_with()
ri.process_floating_ip_nat_rules.reset_mock()
self.assertEqual(0, ri.external_gateway_added.call_count)
self.assertEqual(0, ri.external_gateway_updated.call_count)
ri.external_gateway_added.reset_mock()
ri.external_gateway_updated.reset_mock()
# change the ex_gw_port a bit to test gateway update
new_gw_port = copy.deepcopy(ri.router['gw_port'])
ri.router['gw_port'] = new_gw_port
old_ip = (netaddr.IPAddress(ri.router['gw_port']
['fixed_ips'][0]['ip_address']))
ri.router['gw_port']['fixed_ips'][0]['ip_address'] = str(old_ip + 1)
ri.process(agent)
ri.process_floating_ip_addresses.reset_mock()
ri.process_floating_ip_nat_rules.reset_mock()
self.assertEqual(0, ri.external_gateway_added.call_count)
self.assertEqual(1, ri.external_gateway_updated.call_count)
# remove just the floating ips
del router[l3_constants.FLOATINGIP_KEY]
ri.process(agent)
ri.process_floating_ip_addresses.assert_called_with(mock.ANY)
ri.process_floating_ip_addresses.reset_mock()
ri.process_floating_ip_nat_rules.assert_called_with()
ri.process_floating_ip_nat_rules.reset_mock()
# now there are no ports, so the router state is torn down
del router[l3_constants.INTERFACE_KEY]
del router['gw_port']
ri.process(agent)
self.assertEqual(1, self.send_adv_notif.call_count)
distributed = ri.router.get('distributed', False)
self.assertEqual(distributed, ri.process_floating_ip_addresses.called)
self.assertEqual(distributed, ri.process_floating_ip_nat_rules.called)
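# Helper: with IPDevice and add_floating_ip mocked out,
# process_floating_ip_addresses() should report the floating IP as
# ACTIVE and add it on the expected interface and device.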
@mock.patch('neutron.agent.linux.ip_lib.IPDevice')
def _test_process_floating_ip_addresses_add(self, ri, agent, IPDevice):
floating_ips = ri.get_floating_ips()
fip_id = floating_ips[0]['id']
IPDevice.return_value = device = mock.Mock()
device.addr.list.return_value = []
ri.iptables_manager.ipv4['nat'] = mock.MagicMock()
ex_gw_port = {'id': _uuid(), 'network_id': mock.sentinel.ext_net_id}
ri.add_floating_ip = mock.Mock(
return_value=l3_constants.FLOATINGIP_STATUS_ACTIVE)
with mock.patch.object(lla.LinkLocalAllocator, '_write'):
if ri.router['distributed']:
ri.fip_ns = agent.get_fip_ns(ex_gw_port['network_id'])
ri.create_dvr_fip_interfaces(ex_gw_port)
fip_statuses = ri.process_floating_ip_addresses(
mock.sentinel.interface_name)
self.assertEqual({fip_id: l3_constants.FLOATINGIP_STATUS_ACTIVE},
fip_statuses)
ri.add_floating_ip.assert_called_once_with(
floating_ips[0], mock.sentinel.interface_name, device)
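# If the FIP namespace already exists, a second call to
# create_dvr_fip_interfaces() must not create it again (which would
# duplicate its rules), while the gateway port lookups still run.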
@mock.patch.object(lla.LinkLocalAllocator, '_write')
def test_create_dvr_fip_interfaces_if_fipnamespace_exist(self, lla_write):
fake_network_id = _uuid()
subnet_id = _uuid()
fake_floatingips = {'floatingips': [
{'id': _uuid(),
'floating_ip_address': '20.0.0.3',
'fixed_ip_address': '192.168.0.1',
'floating_network_id': _uuid(),
'port_id': _uuid(),
'host': HOSTNAME}]}
agent_gateway_port = (
[{'fixed_ips': [
{'ip_address': '20.0.0.30',
'prefixlen': 24,
'subnet_id': subnet_id}],
'subnets': [
{'id': subnet_id,
'cidr': '20.0.0.0/24',
'gateway_ip': '20.0.0.1'}],
'id': _uuid(),
'network_id': fake_network_id,
'mac_address': 'ca:fe:de:ad:be:ef'}]
)
router = l3_test_common.prepare_router_data(enable_snat=True)
router[l3_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips']
router[l3_constants.FLOATINGIP_AGENT_INTF_KEY] = agent_gateway_port
router['distributed'] = True
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = dvr_router.DvrEdgeRouter(
agent, HOSTNAME, router['id'], router, **self.ri_kwargs)
ext_gw_port = ri.router.get('gw_port')
ri.fip_ns = agent.get_fip_ns(ext_gw_port['network_id'])
ri.dist_fip_count = 0
agent.process_router_add = mock.Mock()
ri.fip_ns.create_rtr_2_fip_link = mock.Mock()
with mock.patch.object(ri, 'get_floating_ips') as fips, \
mock.patch.object(ri.fip_ns,
'create') as create_fip, \
mock.patch.object(ri, 'get_floating_agent_gw_interface'
) as fip_gw_port:
fips.return_value = fake_floatingips
fip_gw_port.return_value = agent_gateway_port[0]
ri.create_dvr_fip_interfaces(ext_gw_port)
self.assertTrue(fip_gw_port.called)
self.assertTrue(fips.called)
self.assertTrue(create_fip.called)
self.assertEqual(agent_gateway_port[0],
ri.fip_ns.agent_gateway_port)
# Now let us associate the fip to the router
ri.floating_ip_added_dist(fips, "192.168.0.1/32")
self.assertEqual(1, ri.dist_fip_count)
# Now let us disassociate the fip from the router
ri.floating_ip_removed_dist("192.168.0.1/32")
self.assertEqual(0, ri.dist_fip_count)
# Call create_dvr_fip_interfaces again to make sure that the
# fip namespace create is not called again. If create were
# called again, it would configure duplicate rules in the
# fip namespace.
ri.create_dvr_fip_interfaces(ext_gw_port)
self.assertTrue(fip_gw_port.called)
self.assertTrue(fips.called)
create_fip.assert_called_once_with()
self.assertEqual(2, agent.process_router_add.call_count)
self.assertEqual(2, ri.fip_ns.create_rtr_2_fip_link.call_count)
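# When the router payload carries no agent gateway port, the port is
# fetched from the plugin over RPC (get_agent_gateway_port) and stored
# on the FIP namespace.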
@mock.patch.object(lla.LinkLocalAllocator, '_write')
def test_create_dvr_fip_interfaces_for_late_binding(self, lla_write):
fake_network_id = _uuid()
fake_subnet_id = _uuid()
fake_floatingips = {'floatingips': [
{'id': _uuid(),
'floating_ip_address': '20.0.0.3',
'fixed_ip_address': '192.168.0.1',
'floating_network_id': _uuid(),
'port_id': _uuid(),
'host': HOSTNAME}]}
agent_gateway_port = (
{'fixed_ips': [
{'ip_address': '20.0.0.30',
'prefixlen': 24,
'subnet_id': fake_subnet_id}],
'subnets': [
{'id': fake_subnet_id,
'cidr': '20.0.0.0/24',
'gateway_ip': '20.0.0.1'}],
'id': _uuid(),
'network_id': fake_network_id,
'mac_address': 'ca:fe:de:ad:be:ef'}
)
router = l3_test_common.prepare_router_data(enable_snat=True)
router[l3_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips']
router[l3_constants.FLOATINGIP_AGENT_INTF_KEY] = []
router['distributed'] = True
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = dvr_router.DvrEdgeRouter(
agent, HOSTNAME, router['id'], router, **self.ri_kwargs)
ext_gw_port = ri.router.get('gw_port')
ri.fip_ns = agent.get_fip_ns(ext_gw_port['network_id'])
ri.dist_fip_count = 0
ri.fip_ns.subscribe = mock.Mock()
with mock.patch.object(agent.plugin_rpc,
'get_agent_gateway_port') as fip_gw_port:
fip_gw_port.return_value = agent_gateway_port
ri.create_dvr_fip_interfaces(ext_gw_port)
self.assertTrue(fip_gw_port.called)
self.assertEqual(agent_gateway_port,
ri.fip_ns.agent_gateway_port)
@mock.patch.object(lla.LinkLocalAllocator, '_write')
def test_create_dvr_fip_interfaces(self, lla_write):
fake_network_id = _uuid()
subnet_id = _uuid()
fake_floatingips = {'floatingips': [
{'id': _uuid(),
'floating_ip_address': '20.0.0.3',
'fixed_ip_address': '192.168.0.1',
'floating_network_id': _uuid(),
'port_id': _uuid(),
'host': HOSTNAME}]}
agent_gateway_port = (
[{'fixed_ips': [
{'ip_address': '20.0.0.30',
'prefixlen': 24,
'subnet_id': subnet_id}],
'subnets': [
{'id': subnet_id,
'cidr': '20.0.0.0/24',
'gateway_ip': '20.0.0.1'}],
'id': _uuid(),
'network_id': fake_network_id,
'mac_address': 'ca:fe:de:ad:be:ef'}]
)
router = l3_test_common.prepare_router_data(enable_snat=True)
router[l3_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips']
router[l3_constants.FLOATINGIP_AGENT_INTF_KEY] = agent_gateway_port
router['distributed'] = True
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = dvr_router.DvrEdgeRouter(
agent, HOSTNAME, router['id'], router, **self.ri_kwargs)
ext_gw_port = ri.router.get('gw_port')
ri.fip_ns = agent.get_fip_ns(ext_gw_port['network_id'])
ri.dist_fip_count = 0
ri.fip_ns.subscribe = mock.Mock()
ri.fip_ns.agent_router_gateway = mock.Mock()
agent.process_router_add = mock.Mock()
with mock.patch.object(ri, 'get_floating_ips') as fips, \
mock.patch.object(ri, 'get_floating_agent_gw_interface'
) as fip_gw_port:
fips.return_value = fake_floatingips
fip_gw_port.return_value = agent_gateway_port[0]
ri.create_dvr_fip_interfaces(ext_gw_port)
self.assertTrue(fip_gw_port.called)
self.assertTrue(fips.called)
self.assertEqual(agent_gateway_port[0],
ri.fip_ns.agent_gateway_port)
self.assertTrue(ri.rtr_fip_subnet)
self.assertEqual(1, agent.process_router_add.call_count)
@mock.patch.object(lla.LinkLocalAllocator, '_write')
def test_create_dvr_fip_interfaces_for_restart_l3agent_case(self,
lla_write):
fake_floatingips = {'floatingips': [
{'id': _uuid(),
'floating_ip_address': '20.0.0.3',
'fixed_ip_address': '192.168.0.1',
'floating_network_id': _uuid(),
'port_id': _uuid(),
'host': HOSTNAME}]}
agent_gateway_port = (
[{'fixed_ips': [
{'ip_address': '20.0.0.30',
'prefixlen': 24,
'subnet_id': 'subnet_id'}],
'subnets': [
{'id': 'subnet_id',
'cidr': '20.0.0.0/24',
'gateway_ip': '20.0.0.1'}],
'id': _uuid(),
'network_id': 'fake_network_id',
'mac_address': 'ca:fe:de:ad:be:ef'}]
)
router = l3_test_common.prepare_router_data(enable_snat=True)
router[l3_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips']
router[l3_constants.FLOATINGIP_AGENT_INTF_KEY] = agent_gateway_port
router['distributed'] = True
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = dvr_router.DvrEdgeRouter(
agent, HOSTNAME, router['id'], router, **self.ri_kwargs)
ext_gw_port = ri.router.get('gw_port')
ri.fip_ns = agent.get_fip_ns(ext_gw_port['network_id'])
ri.fip_ns.subscribe = mock.Mock(return_value=True)
ri.fip_ns.agent_router_gateway = mock.Mock()
ri.rtr_fip_subnet = None
ri.dist_fip_count = 0
with mock.patch.object(ri, 'get_floating_ips') as fips,\
mock.patch.object(ri, 'get_floating_agent_gw_interface'
) as fip_gw_port:
fips.return_value = fake_floatingips
fip_gw_port.return_value = agent_gateway_port[0]
ri.create_dvr_fip_interfaces(ext_gw_port)
self.assertTrue(fip_gw_port.called)
self.assertTrue(fips.called)
self.assertEqual(agent_gateway_port[0],
ri.fip_ns.agent_gateway_port)
self.assertTrue(ri.rtr_fip_subnet)
def test_process_router_cent_floating_ip_add(self):
fake_floatingips = {'floatingips': [
{'id': _uuid(),
'floating_ip_address': '15.1.2.3',
'fixed_ip_address': '192.168.0.1',
'status': 'DOWN',
'floating_network_id': _uuid(),
'port_id': _uuid(),
'host': HOSTNAME}]}
router = l3_test_common.prepare_router_data(enable_snat=True)
router[l3_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips']
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
ri.iptables_manager.ipv4['nat'] = mock.MagicMock()
ri.get_external_device_name = mock.Mock(return_value='exgw')
self._test_process_floating_ip_addresses_add(ri, agent)
def test_process_router_snat_disabled(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data(enable_snat=True)
ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
ri.external_gateway_added = mock.Mock()
# Process with NAT
ri.process(agent)
orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:]
orig_mangle_rules = ri.iptables_manager.ipv4['mangle'].rules[:]
# Reprocess without NAT
router['enable_snat'] = False
# Reassign the router object to RouterInfo
ri.router = router
ri.process(agent)
# Set operations do not work reliably with IpTablesRule
# instances, so compute the delta with a list comprehension
nat_rules_delta = [r for r in orig_nat_rules
if r not in ri.iptables_manager.ipv4['nat'].rules]
self.assertEqual(1, len(nat_rules_delta))
mangle_rules_delta = [
r for r in orig_mangle_rules
if r not in ri.iptables_manager.ipv4['mangle'].rules]
self.assertEqual(1, len(mangle_rules_delta))
self._verify_snat_mangle_rules(nat_rules_delta, mangle_rules_delta,
router)
self.assertEqual(1, self.send_adv_notif.call_count)
def test_process_router_snat_enabled(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data(enable_snat=False)
ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
ri.external_gateway_added = mock.Mock()
# Process without NAT
ri.process(agent)
orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:]
orig_mangle_rules = ri.iptables_manager.ipv4['mangle'].rules[:]
# Reprocess with NAT
router['enable_snat'] = True
# Reassign the router object to RouterInfo
ri.router = router
ri.process(agent)
# Set operations do not work reliably with IpTablesRule
# instances, so compute the delta with a list comprehension
nat_rules_delta = [r for r in ri.iptables_manager.ipv4['nat'].rules
if r not in orig_nat_rules]
self.assertEqual(1, len(nat_rules_delta))
mangle_rules_delta = [
r for r in ri.iptables_manager.ipv4['mangle'].rules
if r not in orig_mangle_rules]
self.assertEqual(1, len(mangle_rules_delta))
self._verify_snat_mangle_rules(nat_rules_delta, mangle_rules_delta,
router)
self.assertEqual(1, self.send_adv_notif.call_count)
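# Helper: a DVR edge router updates routes in the qrouter namespace
# and, only when this node hosts the SNAT namespace, in the snat
# namespace as well.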
def _test_update_routing_table(self, is_snat_host=True):
router = l3_test_common.prepare_router_data()
uuid = router['id']
s_netns = 'snat-' + uuid
q_netns = 'qrouter-' + uuid
fake_route1 = {'destination': '135.207.0.0/16',
'nexthop': '19.4.4.200'}
calls = [mock.call('replace', fake_route1, q_netns)]
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = dvr_router.DvrEdgeRouter(
agent,
HOSTNAME,
uuid,
router,
**self.ri_kwargs)
ri._update_routing_table = mock.Mock()
with mock.patch.object(ri, '_is_this_snat_host') as snat_host:
snat_host.return_value = is_snat_host
ri.update_routing_table('replace', fake_route1)
if is_snat_host:
ri._update_routing_table('replace', fake_route1, s_netns)
calls += [mock.call('replace', fake_route1, s_netns)]
ri._update_routing_table.assert_has_calls(calls, any_order=True)
def test_process_update_snat_routing_table(self):
self._test_update_routing_table()
def test_process_not_update_snat_routing_table(self):
self._test_update_routing_table(is_snat_host=False)
def test_process_router_interface_added(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data()
ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
ri.external_gateway_added = mock.Mock()
# Process with NAT
ri.process(agent)
# Add an interface and reprocess
l3_test_common.router_append_interface(router)
# Reassign the router object to RouterInfo
ri.router = router
ri.process(agent)
# send_ip_addr_adv_notif is called each time process() is called
self.assertEqual(2, self.send_adv_notif.call_count)
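# Helper: IPv4 NAT rules for the external gateway should only be
# installed when the gateway is dual-stack; an IPv6-only gateway must
# leave them untouched.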
def _test_process_ipv6_only_or_dual_stack_gw(self, dual_stack=False):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data(ip_version=6,
dual_stack=dual_stack)
# Get NAT rules without the gw_port
gw_port = router['gw_port']
router['gw_port'] = None
ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
ri.external_gateway_added = mock.Mock()
self._process_router_instance_for_agent(agent, ri, router)
orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:]
# Get NAT rules with the gw_port
router['gw_port'] = gw_port
ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
p = ri.external_gateway_nat_fip_rules
s = ri.external_gateway_nat_snat_rules
attrs_to_mock = dict(
[(a, mock.DEFAULT) for a in
['external_gateway_nat_fip_rules',
'external_gateway_nat_snat_rules']]
)
with mock.patch.multiple(ri, **attrs_to_mock) as mocks:
mocks['external_gateway_nat_fip_rules'].side_effect = p
mocks['external_gateway_nat_snat_rules'].side_effect = s
self._process_router_instance_for_agent(agent, ri, router)
new_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:]
# NAT rules should only change for dual_stack operation
if dual_stack:
self.assertTrue(
mocks['external_gateway_nat_fip_rules'].called)
self.assertTrue(
mocks['external_gateway_nat_snat_rules'].called)
self.assertNotEqual(orig_nat_rules, new_nat_rules)
else:
self.assertFalse(
mocks['external_gateway_nat_fip_rules'].called)
self.assertFalse(
mocks['external_gateway_nat_snat_rules'].called)
self.assertEqual(orig_nat_rules, new_nat_rules)
def test_process_ipv6_only_gw(self):
self._test_process_ipv6_only_or_dual_stack_gw()
def test_process_dual_stack_gw(self):
self._test_process_ipv6_only_or_dual_stack_gw(dual_stack=True)
def _process_router_ipv6_interface_added(
self, router, ra_mode=None, addr_mode=None):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
ri.external_gateway_added = mock.Mock()
# Process with NAT
ri.process(agent)
orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:]
# Add an IPv6 interface and reprocess
l3_test_common.router_append_interface(router, count=1,
ip_version=6, ra_mode=ra_mode,
addr_mode=addr_mode)
# Reassign the router object to RouterInfo
self._process_router_instance_for_agent(agent, ri, router)
# IPv4 NAT rules should not be changed by adding an IPv6 interface
nat_rules_delta = [r for r in ri.iptables_manager.ipv4['nat'].rules
if r not in orig_nat_rules]
self.assertFalse(nat_rules_delta)
return ri
def _radvd_expected_call_external_process(self, ri, enable=True):
expected_calls = [mock.call(uuid=ri.router['id'],
service='radvd',
default_cmd_callback=mock.ANY,
namespace=ri.ns_name,
conf=mock.ANY,
run_as_root=True)]
if enable:
expected_calls.append(mock.call().enable(reload_cfg=True))
else:
expected_calls.append(mock.call().disable())
return expected_calls
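# Helper: appends an IPv6 interface carrying one subnet per entry in
# ipv6_subnet_modes, reprocesses the router and returns the RouterInfo
# for radvd-related assertions.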
def _process_router_ipv6_subnet_added(self, router,
ipv6_subnet_modes=None, dns_nameservers=None, network_mtu=0):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
agent.external_gateway_added = mock.Mock()
self._process_router_instance_for_agent(agent, ri, router)
# Add an IPv6 interface with len(ipv6_subnet_modes) subnets
# and reprocess
l3_test_common.router_append_subnet(
router,
count=len(ipv6_subnet_modes),
ip_version=6,
ipv6_subnet_modes=ipv6_subnet_modes,
dns_nameservers=dns_nameservers,
network_mtu=network_mtu)
# Reassign the router object to RouterInfo
self._process_router_instance_for_agent(agent, ri, router)
return ri
def _assert_ri_process_enabled(self, ri):
"""Verify that process was enabled for a router instance."""
expected_calls = self._radvd_expected_call_external_process(ri)
self.assertEqual(expected_calls, self.external_process.mock_calls)
def _assert_ri_process_disabled(self, ri):
"""Verify that process was disabled for a router instance."""
expected_calls = self._radvd_expected_call_external_process(ri, False)
self.assertEqual(expected_calls, self.external_process.mock_calls)
def test_process_router_ipv6_interface_added(self):
router = l3_test_common.prepare_router_data()
ri = self._process_router_ipv6_interface_added(router)
self._assert_ri_process_enabled(ri)
# Expect radvd configured without prefix
self.assertNotIn('prefix', self.utils_replace_file.call_args[0][1])
def test_process_router_ipv6_slaac_interface_added(self):
router = l3_test_common.prepare_router_data()
ri = self._process_router_ipv6_interface_added(
router, ra_mode=l3_constants.IPV6_SLAAC)
self._assert_ri_process_enabled(ri)
# Expect radvd configured with prefix
radvd_config_str = self.utils_replace_file.call_args[0][1]
self.assertIn('prefix', radvd_config_str)
self.assertIn('AdvAutonomous on', radvd_config_str)
def test_process_router_ipv6_dhcpv6_stateful_interface_added(self):
router = l3_test_common.prepare_router_data()
ri = self._process_router_ipv6_interface_added(
router, ra_mode=l3_constants.DHCPV6_STATEFUL)
self._assert_ri_process_enabled(ri)
# Expect radvd configured with prefix
radvd_config_str = self.utils_replace_file.call_args[0][1]
self.assertIn('prefix', radvd_config_str)
self.assertIn('AdvAutonomous off', radvd_config_str)
def test_process_router_ipv6_subnets_added(self):
router = l3_test_common.prepare_router_data()
ri = self._process_router_ipv6_subnet_added(router, ipv6_subnet_modes=[
{'ra_mode': l3_constants.IPV6_SLAAC,
'address_mode': l3_constants.IPV6_SLAAC},
{'ra_mode': l3_constants.DHCPV6_STATELESS,
'address_mode': l3_constants.DHCPV6_STATELESS},
{'ra_mode': l3_constants.DHCPV6_STATEFUL,
'address_mode': l3_constants.DHCPV6_STATEFUL}])
self._assert_ri_process_enabled(ri)
radvd_config_str = self.utils_replace_file.call_args[0][1]
# Assert we have a prefix entry for each of the three subnets
# (IPV6_SLAAC, DHCPV6_STATELESS and DHCPV6_STATEFUL) on one interface
self.assertEqual(3, radvd_config_str.count("prefix"))
self.assertEqual(1, radvd_config_str.count("interface"))
self.assertEqual(2, radvd_config_str.count("AdvAutonomous on"))
self.assertEqual(1, radvd_config_str.count("AdvAutonomous off"))
def test_process_router_ipv6_subnets_added_to_existing_port(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data()
ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
agent.external_gateway_added = mock.Mock()
self._process_router_instance_for_agent(agent, ri, router)
# Add the first subnet on a new interface
l3_test_common.router_append_subnet(
router, count=1,
ip_version=6, ipv6_subnet_modes=[
{'ra_mode': l3_constants.IPV6_SLAAC,
'address_mode': l3_constants.IPV6_SLAAC}])
self._process_router_instance_for_agent(agent, ri, router)
self._assert_ri_process_enabled(ri)
radvd_config = self.utils_replace_file.call_args[0][1].split()
self.assertEqual(1, len(ri.internal_ports[1]['subnets']))
self.assertEqual(1, len(ri.internal_ports[1]['fixed_ips']))
self.assertEqual(1, radvd_config.count("prefix"))
self.assertEqual(1, radvd_config.count("interface"))
# Reset mocks to verify radvd enabled and configured correctly
# after second subnet added to interface
self.external_process.reset_mock()
self.utils_replace_file.reset_mock()
# Add the second subnet on the same interface
interface_id = router[l3_constants.INTERFACE_KEY][1]['id']
l3_test_common.router_append_subnet(
router, count=1,
ip_version=6,
ipv6_subnet_modes=[
{'ra_mode': l3_constants.IPV6_SLAAC,
'address_mode': l3_constants.IPV6_SLAAC}],
interface_id=interface_id)
self._process_router_instance_for_agent(agent, ri, router)
# radvd should have been enabled again and the interface
# should have two prefixes
self._assert_ri_process_enabled(ri)
radvd_config = self.utils_replace_file.call_args[0][1].split()
self.assertEqual(2, len(ri.internal_ports[1]['subnets']))
self.assertEqual(2, len(ri.internal_ports[1]['fixed_ips']))
self.assertEqual(2, radvd_config.count("prefix"))
self.assertEqual(1, radvd_config.count("interface"))
def test_process_router_ipv6v4_interface_added(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data()
ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
ri.external_gateway_added = mock.Mock()
# Process with NAT
ri.process(agent)
# Add an IPv4 and IPv6 interface and reprocess
l3_test_common.router_append_interface(router, count=1, ip_version=4)
l3_test_common.router_append_interface(router, count=1, ip_version=6)
# Reassign the router object to RouterInfo
self._process_router_instance_for_agent(agent, ri, router)
self._assert_ri_process_enabled(ri)
def test_process_router_interface_removed(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data(num_internal_ports=2)
ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
ri.external_gateway_added = mock.Mock()
# Process with NAT
ri.process(agent)
# Remove an interface and reprocess
del router[l3_constants.INTERFACE_KEY][1]
# Reassign the router object to RouterInfo
ri.router = router
ri.process(agent)
# send_ip_addr_adv_notif is called each time process() is called
self.assertEqual(2, self.send_adv_notif.call_count)
def test_process_router_ipv6_interface_removed(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data()
ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
ri.external_gateway_added = mock.Mock()
self._process_router_instance_for_agent(agent, ri, router)
# Add an IPv6 interface and reprocess
l3_test_common.router_append_interface(router, count=1, ip_version=6)
self._process_router_instance_for_agent(agent, ri, router)
self._assert_ri_process_enabled(ri)
# Reset the calls so we can check that radvd gets disabled
self.external_process.reset_mock()
self.process_monitor.reset_mock()
# Remove the IPv6 interface and reprocess
del router[l3_constants.INTERFACE_KEY][1]
self._process_router_instance_for_agent(agent, ri, router)
self._assert_ri_process_disabled(ri)
def test_process_router_ipv6_subnet_removed(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data()
ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
agent.external_gateway_added = mock.Mock()
self._process_router_instance_for_agent(agent, ri, router)
# Add an IPv6 interface with two subnets and reprocess
l3_test_common.router_append_subnet(
router, count=2, ip_version=6,
ipv6_subnet_modes=([{'ra_mode': l3_constants.IPV6_SLAAC,
'address_mode': l3_constants.IPV6_SLAAC}]
* 2))
self._process_router_instance_for_agent(agent, ri, router)
self._assert_ri_process_enabled(ri)
# Reset mocks to check for modified radvd config
self.utils_replace_file.reset_mock()
self.external_process.reset_mock()
# Remove one subnet from the interface and reprocess
interfaces = copy.deepcopy(router[l3_constants.INTERFACE_KEY])
del interfaces[1]['subnets'][0]
del interfaces[1]['fixed_ips'][0]
router[l3_constants.INTERFACE_KEY] = interfaces
self._process_router_instance_for_agent(agent, ri, router)
# Assert radvd was enabled again and that we only have one
# prefix on the interface
self._assert_ri_process_enabled(ri)
radvd_config = self.utils_replace_file.call_args[0][1].split()
self.assertEqual(1, len(ri.internal_ports[1]['subnets']))
self.assertEqual(1, len(ri.internal_ports[1]['fixed_ips']))
self.assertEqual(1, radvd_config.count("interface"))
self.assertEqual(1, radvd_config.count("prefix"))
def test_process_router_internal_network_added_unexpected_error(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data()
ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
ri.external_gateway_added = mock.Mock()
with mock.patch.object(
ri,
'internal_network_added') as internal_network_added:
# raise RuntimeError to simulate that an unexpected exception
# occurs
internal_network_added.side_effect = RuntimeError
self.assertRaises(RuntimeError, ri.process, agent)
self.assertNotIn(
router[l3_constants.INTERFACE_KEY][0], ri.internal_ports)
# Simulate that the cause of the unexpected exception has been fixed
internal_network_added.side_effect = None
# periodic_sync_routers_task finds out that _rpc_loop failed to
# process the router last time, so it will retry in the next run.
ri.process(agent)
# We were able to add the port to ri.internal_ports
self.assertIn(
router[l3_constants.INTERFACE_KEY][0], ri.internal_ports)
def test_process_router_internal_network_removed_unexpected_error(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data()
ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
ri.external_gateway_added = mock.Mock()
# add an internal port
ri.process(agent)
with mock.patch.object(
ri,
'internal_network_removed') as internal_net_removed:
# raise RuntimeError to simulate that an unexpected exception
# occurs
internal_net_removed.side_effect = RuntimeError
ri.internal_ports[0]['admin_state_up'] = False
# The above port is set to the down state, so it should be removed.
self.assertRaises(RuntimeError, ri.process, agent)
self.assertIn(
router[l3_constants.INTERFACE_KEY][0], ri.internal_ports)
# Simulate that the cause of the unexpected exception has been fixed
internal_net_removed.side_effect = None
# periodic_sync_routers_task finds out that _rpc_loop failed to
# process the router last time, so it will retry in the next run.
ri.process(agent)
# We were able to remove the port from ri.internal_ports
self.assertNotIn(
router[l3_constants.INTERFACE_KEY][0], ri.internal_ports)
def test_process_router_floatingip_nochange(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data(num_internal_ports=1)
fip1 = {'id': _uuid(), 'floating_ip_address': '8.8.8.8',
'fixed_ip_address': '7.7.7.7', 'status': 'ACTIVE',
'port_id': router[l3_constants.INTERFACE_KEY][0]['id']}
fip2 = copy.copy(fip1)
fip2.update({'id': _uuid(), 'status': 'DOWN',
'floating_ip_address': '9.9.9.9'})
router[l3_constants.FLOATINGIP_KEY] = [fip1, fip2]
ri = legacy_router.LegacyRouter(router['id'], router,
**self.ri_kwargs)
ri.external_gateway_added = mock.Mock()
with mock.patch.object(
agent.plugin_rpc, 'update_floatingip_statuses'
) as mock_update_fip_status,\
mock.patch.object(ri, 'get_router_cidrs') as mock_get_cidrs:
mock_get_cidrs.return_value = set(
[fip1['floating_ip_address'] + '/32'])
ri.process(agent)
# make sure only the one that wasn't in existing cidrs was sent
mock_update_fip_status.assert_called_once_with(
mock.ANY, ri.router_id, {fip2['id']: 'ACTIVE'})
def test_process_router_floatingip_status_update_if_processed(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data(num_internal_ports=1)
fip1 = {'id': _uuid(), 'floating_ip_address': '8.8.8.8',
'fixed_ip_address': '7.7.7.7', 'status': 'ACTIVE',
'port_id': router[l3_constants.INTERFACE_KEY][0]['id']}
fip2 = copy.copy(fip1)
fip2.update({'id': _uuid(), 'status': 'DOWN', })
router[l3_constants.FLOATINGIP_KEY] = [fip1, fip2]
ri = legacy_router.LegacyRouter(router['id'], router,
**self.ri_kwargs)
ri.external_gateway_added = mock.Mock()
with mock.patch.object(
agent.plugin_rpc, 'update_floatingip_statuses'
) as mock_update_fip_status,\
mock.patch.object(ri, 'get_router_cidrs') as mock_get_cidrs:
mock_get_cidrs.return_value = set()
ri.process(agent)
# make sure both were sent since neither existed in the existing cidrs
mock_update_fip_status.assert_called_once_with(
mock.ANY, ri.router_id, {fip1['id']: 'ACTIVE',
fip2['id']: 'ACTIVE'})
def test_process_router_floatingip_disabled(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
with mock.patch.object(
agent.plugin_rpc,
'update_floatingip_statuses') as mock_update_fip_status:
fip_id = _uuid()
router = l3_test_common.prepare_router_data(num_internal_ports=1)
router[l3_constants.FLOATINGIP_KEY] = [
{'id': fip_id,
'floating_ip_address': '8.8.8.8',
'fixed_ip_address': '7.7.7.7',
'status': 'DOWN',
'port_id': router[l3_constants.INTERFACE_KEY][0]['id']}]
ri = legacy_router.LegacyRouter(router['id'],
router,
**self.ri_kwargs)
ri.external_gateway_added = mock.Mock()
ri.process(agent)
# Assert that the call putting the floating IP up was performed
mock_update_fip_status.assert_called_once_with(
mock.ANY, ri.router_id,
{fip_id: l3_constants.FLOATINGIP_STATUS_ACTIVE})
mock_update_fip_status.reset_mock()
# Process the router again, this time without floating IPs
router[l3_constants.FLOATINGIP_KEY] = []
ri.router = router
ri.process(agent)
# Assert that the call putting the floating IP down was performed
mock_update_fip_status.assert_called_once_with(
mock.ANY, ri.router_id,
{fip_id: l3_constants.FLOATINGIP_STATUS_DOWN})
def test_process_router_floatingip_exception(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
with mock.patch.object(
agent.plugin_rpc,
'update_floatingip_statuses') as mock_update_fip_status:
fip_id = _uuid()
router = l3_test_common.prepare_router_data(num_internal_ports=1)
router[l3_constants.FLOATINGIP_KEY] = [
{'id': fip_id,
'floating_ip_address': '8.8.8.8',
'fixed_ip_address': '7.7.7.7',
'port_id': router[l3_constants.INTERFACE_KEY][0]['id']}]
ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
ri.process_floating_ip_addresses = mock.Mock(
side_effect=RuntimeError)
ri.external_gateway_added = mock.Mock()
ri.process(agent)
# Assert that the call putting the floating IP into the ERROR
# status was performed
mock_update_fip_status.assert_called_once_with(
mock.ANY, ri.router_id,
{fip_id: l3_constants.FLOATINGIP_STATUS_ERROR})
def test_process_external_iptables_exception(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
with mock.patch.object(
agent.plugin_rpc,
'update_floatingip_statuses') as mock_update_fip_status:
fip_id = _uuid()
router = l3_test_common.prepare_router_data(num_internal_ports=1)
router[l3_constants.FLOATINGIP_KEY] = [
{'id': fip_id,
'floating_ip_address': '8.8.8.8',
'fixed_ip_address': '7.7.7.7',
'port_id': router[l3_constants.INTERFACE_KEY][0]['id']}]
ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
ri.iptables_manager._apply = mock.Mock(side_effect=Exception)
ri.process_external(agent)
# Assert that the call putting the floating IP into the ERROR
# status was performed
mock_update_fip_status.assert_called_once_with(
mock.ANY, ri.router_id,
{fip_id: l3_constants.FLOATINGIP_STATUS_ERROR})
self.assertEqual(1, ri.iptables_manager._apply.call_count)
def test_handle_router_snat_rules_distributed_without_snat_manager(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = dvr_router.DvrEdgeRouter(
agent,
HOSTNAME,
'foo_router_id',
{},
**self.ri_kwargs)
ri.iptables_manager = mock.MagicMock()
ri._is_this_snat_host = mock.Mock(return_value=True)
ri.get_ex_gw_port = mock.Mock(return_value=None)
ri._handle_router_snat_rules(None, mock.ANY)
self.assertIsNone(ri.snat_iptables_manager)
self.assertFalse(ri.iptables_manager.called)
def test_handle_router_snat_rules_add_back_jump(self):
ri = l3router.RouterInfo(_uuid(), {}, **self.ri_kwargs)
ri.iptables_manager = mock.MagicMock()
port = {'fixed_ips': [{'ip_address': '192.168.1.4'}]}
ri._handle_router_snat_rules(port, "iface")
nat = ri.iptables_manager.ipv4['nat']
nat.empty_chain.assert_any_call('snat')
nat.add_rule.assert_any_call('snat', '-j $float-snat')
for call in nat.mock_calls:
name, args, kwargs = call
if name == 'add_rule':
self.assertEqual(('snat', '-j $float-snat'), args)
self.assertEqual({}, kwargs)
break
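# _handle_router_snat_rules() should install the float-snat jump before
# the SNAT rules in the nat table and add the routing-mark rule to the
# mangle table.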
def test_handle_router_snat_rules_add_rules(self):
ri = l3router.RouterInfo(_uuid(), {}, **self.ri_kwargs)
ex_gw_port = {'fixed_ips': [{'ip_address': '192.168.1.4'}]}
ri.router = {'distributed': False}
ri._handle_router_snat_rules(ex_gw_port, "iface")
nat_rules = list(map(str, ri.iptables_manager.ipv4['nat'].rules))
wrap_name = ri.iptables_manager.wrap_name
jump_float_rule = "-A %s-snat -j %s-float-snat" % (wrap_name,
wrap_name)
snat_rule1 = ("-A %s-snat -o iface -j SNAT --to-source %s") % (
wrap_name, ex_gw_port['fixed_ips'][0]['ip_address'])
snat_rule2 = ("-A %s-snat -m mark ! --mark 0x2/%s "
"-m conntrack --ctstate DNAT "
"-j SNAT --to-source %s") % (
wrap_name, l3_constants.ROUTER_MARK_MASK,
ex_gw_port['fixed_ips'][0]['ip_address'])
self.assertIn(jump_float_rule, nat_rules)
self.assertIn(snat_rule1, nat_rules)
self.assertIn(snat_rule2, nat_rules)
self.assertThat(nat_rules.index(jump_float_rule),
matchers.LessThan(nat_rules.index(snat_rule1)))
mangle_rules = list(map(str, ri.iptables_manager.ipv4['mangle'].rules))
mangle_rule = ("-A %s-mark -i iface "
"-j MARK --set-xmark 0x2/%s" %
(wrap_name, l3_constants.ROUTER_MARK_MASK))
self.assertIn(mangle_rule, mangle_rules)
def test_process_router_delete_stale_internal_devices(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
stale_devlist = [l3_test_common.FakeDev('qr-a1b2c3d4-e5'),
l3_test_common.FakeDev('qr-b2c3d4e5-f6')]
stale_devnames = [dev.name for dev in stale_devlist]
get_devices_return = []
get_devices_return.extend(stale_devlist)
self.mock_ip.get_devices.return_value = get_devices_return
router = l3_test_common.prepare_router_data(enable_snat=True,
num_internal_ports=1)
ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
internal_ports = ri.router.get(l3_constants.INTERFACE_KEY, [])
self.assertEqual(1, len(internal_ports))
internal_port = internal_ports[0]
with mock.patch.object(ri, 'internal_network_removed'
) as internal_network_removed,\
mock.patch.object(ri, 'internal_network_added'
) as internal_network_added,\
mock.patch.object(ri, 'external_gateway_removed'
) as external_gateway_removed,\
mock.patch.object(ri, 'external_gateway_added'
) as external_gateway_added:
ri.process(agent)
self.assertEqual(1, external_gateway_added.call_count)
self.assertFalse(external_gateway_removed.called)
self.assertFalse(internal_network_removed.called)
internal_network_added.assert_called_once_with(internal_port)
self.assertEqual(len(stale_devnames),
self.mock_driver.unplug.call_count)
calls = [mock.call(stale_devname,
namespace=ri.ns_name,
prefix=l3_agent.INTERNAL_DEV_PREFIX)
for stale_devname in stale_devnames]
self.mock_driver.unplug.assert_has_calls(calls, any_order=True)
def test_process_router_delete_stale_external_devices(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
stale_devlist = [l3_test_common.FakeDev('qg-a1b2c3d4-e5')]
stale_devnames = [dev.name for dev in stale_devlist]
router = l3_test_common.prepare_router_data(enable_snat=True,
num_internal_ports=1)
del router['gw_port']
ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
self.mock_ip.get_devices.return_value = stale_devlist
ri.process(agent)
self.mock_driver.unplug.assert_called_with(
stale_devnames[0],
bridge="br-ex",
namespace=ri.ns_name,
prefix=l3_agent.EXTERNAL_DEV_PREFIX)
def test_router_deleted(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent._queue = mock.Mock()
agent.router_deleted(None, FAKE_ID)
self.assertEqual(1, agent._queue.add.call_count)
def test_routers_updated(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent._queue = mock.Mock()
agent.routers_updated(None, [FAKE_ID])
self.assertEqual(1, agent._queue.add.call_count)
def test_removed_from_agent(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent._queue = mock.Mock()
agent.router_removed_from_agent(None, {'router_id': FAKE_ID})
self.assertEqual(1, agent._queue.add.call_count)
def test_added_to_agent(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent._queue = mock.Mock()
agent.router_added_to_agent(None, [FAKE_ID])
self.assertEqual(1, agent._queue.add.call_count)
def test_destroy_namespace(self):
namespace = 'qrouter-bar'
self.mock_ip.get_namespaces.return_value = [namespace]
self.mock_ip.get_devices.return_value = [
l3_test_common.FakeDev('qr-aaaa'),
l3_test_common.FakeDev('rfp-aaaa')]
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ns = namespaces.RouterNamespace(
'bar', self.conf, agent.driver, agent.use_ipv6)
ns.create()
ns.delete()
self.mock_driver.unplug.assert_called_once_with('qr-aaaa',
prefix='qr-',
namespace='qrouter'
'-bar')
self.mock_ip.del_veth.assert_called_once_with('rfp-aaaa')
def test_destroy_router_namespace(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ns = namespaces.Namespace(
'qrouter-bar', self.conf, agent.driver, agent.use_ipv6)
ns.create()
ns.delete()
self.mock_ip.netns.delete.assert_called_once_with("qrouter-bar")
def test_destroy_snat_namespace(self):
namespace = 'snat-bar'
self.mock_ip.get_namespaces.return_value = [namespace]
self.mock_ip.get_devices.return_value = [
l3_test_common.FakeDev('qg-aaaa'),
l3_test_common.FakeDev('sg-aaaa')]
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ns = dvr_snat_ns.SnatNamespace(
'bar', self.conf, agent.driver, agent.use_ipv6)
ns.create()
ns.delete()
calls = [mock.call('qg-aaaa',
bridge=agent.conf.external_network_bridge,
namespace=namespace,
prefix=l3_agent.EXTERNAL_DEV_PREFIX),
mock.call('sg-aaaa',
namespace=namespace,
prefix=dvr_snat_ns.SNAT_INT_DEV_PREFIX)]
self.mock_driver.unplug.assert_has_calls(calls, any_order=True)
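# Helper: the metadata proxy should be spawned on router add and
# destroyed on router removal only when enable_metadata_proxy is True.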
def _configure_metadata_proxy(self, enableflag=True):
if not enableflag:
self.conf.set_override('enable_metadata_proxy', False)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router_id = _uuid()
router = {'id': router_id,
'external_gateway_info': {},
'routes': [],
'distributed': False}
driver = metadata_driver.MetadataDriver
with mock.patch.object(
driver, 'destroy_monitored_metadata_proxy') as destroy_proxy:
with mock.patch.object(
driver, 'spawn_monitored_metadata_proxy') as spawn_proxy:
agent._process_added_router(router)
if enableflag:
spawn_proxy.assert_called_with(
mock.ANY,
mock.ANY,
self.conf.metadata_port,
mock.ANY,
router_id=router_id
)
else:
self.assertFalse(spawn_proxy.call_count)
agent._router_removed(router_id)
if enableflag:
destroy_proxy.assert_called_with(mock.ANY,
router_id,
mock.ANY)
else:
self.assertFalse(destroy_proxy.call_count)
def test_enable_metadata_proxy(self):
self._configure_metadata_proxy()
def test_disable_metadata_proxy_spawn(self):
self._configure_metadata_proxy(enableflag=False)
def test_router_id_specified_in_conf(self):
self.conf.set_override('router_id', '1234')
self._configure_metadata_proxy()
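# Helper: an RPC MessagingTimeout while processing a router update
# should schedule a resync of that router rather than triggering a full
# sync.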
def _test_process_routers_update_rpc_timeout(self, ext_net_call=False,
ext_net_call_failed=False):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent.fullsync = False
agent._process_router_if_compatible = mock.Mock()
if ext_net_call_failed:
agent._process_router_if_compatible.side_effect = (
oslo_messaging.MessagingTimeout)
agent._queue = mock.Mock()
agent._resync_router = mock.Mock()
update = mock.Mock()
update.router = None
agent._queue.each_update_to_next_router.side_effect = [
[(None, update)]]
agent._process_router_update()
self.assertFalse(agent.fullsync)
self.assertEqual(ext_net_call,
agent._process_router_if_compatible.called)
agent._resync_router.assert_called_with(update)
def test_process_routers_update_rpc_timeout_on_get_routers(self):
self.plugin_api.get_routers.side_effect = (
oslo_messaging.MessagingTimeout)
self._test_process_routers_update_rpc_timeout()
def test_process_routers_update_resyncs_failed_router(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
# Attempting to configure the router will fail
agent._process_router_if_compatible = mock.MagicMock()
agent._process_router_if_compatible.side_effect = RuntimeError()
# Queue an update from a full sync
update = router_processing_queue.RouterUpdate(
42,
router_processing_queue.PRIORITY_SYNC_ROUTERS_TASK,
router=mock.Mock(),
timestamp=timeutils.utcnow())
agent._queue.add(update)
agent._process_router_update()
# The update contained the router object, so get_routers won't be called
self.assertFalse(agent.plugin_rpc.get_routers.called)
# The previous update failed, so the retry should call get_routers
agent._process_router_update()
self.assertTrue(agent.plugin_rpc.get_routers.called)
def test_process_routers_update_rpc_timeout_on_get_ext_net(self):
self._test_process_routers_update_rpc_timeout(ext_net_call=True,
ext_net_call_failed=True)
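# Helper: a ROUTER_DELETED update deletes the RouterInfo and marks the
# update as processed; if the removal fails, the update is resynced
# instead.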
def _test_process_routers_update_router_deleted(self, error=False):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent._queue = mock.Mock()
update = mock.Mock()
update.router = None
update.action = 1 # ROUTER_DELETED
router_info = mock.MagicMock()
agent.router_info[update.id] = router_info
router_processor = mock.Mock()
agent._queue.each_update_to_next_router.side_effect = [
[(router_processor, update)]]
agent._resync_router = mock.Mock()
if error:
agent._safe_router_removed = mock.Mock()
agent._safe_router_removed.return_value = False
agent._process_router_update()
if error:
self.assertFalse(router_processor.fetched_and_processed.called)
agent._resync_router.assert_called_with(update)
else:
router_info.delete.assert_called_once_with(agent)
self.assertFalse(agent.router_info)
self.assertFalse(agent._resync_router.called)
router_processor.fetched_and_processed.assert_called_once_with(
update.timestamp)
def test_process_routers_update_router_deleted_success(self):
self._test_process_routers_update_router_deleted()
def test_process_routers_update_router_deleted_error(self):
self._test_process_routers_update_router_deleted(True)
def test_process_router_if_compatible_with_no_ext_net_in_conf(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.plugin_api.get_external_network_id.return_value = 'aaa'
router = {'id': _uuid(),
'routes': [],
'admin_state_up': True,
'external_gateway_info': {'network_id': 'aaa'}}
agent._process_router_if_compatible(router)
self.assertIn(router['id'], agent.router_info)
self.plugin_api.get_external_network_id.assert_called_with(
agent.context)
def test_process_router_if_compatible_with_cached_ext_net(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.plugin_api.get_external_network_id.return_value = 'aaa'
agent.target_ex_net_id = 'aaa'
router = {'id': _uuid(),
'routes': [],
'admin_state_up': True,
'external_gateway_info': {'network_id': 'aaa'}}
agent._process_router_if_compatible(router)
self.assertIn(router['id'], agent.router_info)
self.assertFalse(self.plugin_api.get_external_network_id.called)
def test_process_router_if_compatible_with_stale_cached_ext_net(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.plugin_api.get_external_network_id.return_value = 'aaa'
agent.target_ex_net_id = 'bbb'
router = {'id': _uuid(),
'routes': [],
'admin_state_up': True,
'external_gateway_info': {'network_id': 'aaa'}}
agent._process_router_if_compatible(router)
self.assertIn(router['id'], agent.router_info)
self.plugin_api.get_external_network_id.assert_called_with(
agent.context)
def test_process_router_if_compatible_w_no_ext_net_and_2_net_plugin(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = {'id': _uuid(),
'routes': [],
'admin_state_up': True,
'external_gateway_info': {'network_id': 'aaa'}}
agent.router_info = {}
self.plugin_api.get_external_network_id.side_effect = (
n_exc.TooManyExternalNetworks())
self.assertRaises(n_exc.TooManyExternalNetworks,
agent._process_router_if_compatible,
router)
self.assertNotIn(router['id'], agent.router_info)
def test_process_router_if_compatible_with_ext_net_in_conf(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.plugin_api.get_external_network_id.return_value = 'aaa'
router = {'id': _uuid(),
'routes': [],
'admin_state_up': True,
'external_gateway_info': {'network_id': 'bbb'}}
agent.router_info = {}
self.conf.set_override('gateway_external_network_id', 'aaa')
self.assertRaises(n_exc.RouterNotCompatibleWithAgent,
agent._process_router_if_compatible,
router)
self.assertNotIn(router['id'], agent.router_info)
def test_process_router_if_compatible_with_no_bridge_no_ext_net(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.plugin_api.get_external_network_id.return_value = 'aaa'
router = {'id': _uuid(),
'routes': [],
'admin_state_up': True,
'external_gateway_info': {'network_id': 'aaa'}}
agent.router_info = {}
self.conf.set_override('external_network_bridge', '')
agent._process_router_if_compatible(router)
self.assertIn(router['id'], agent.router_info)
def test_nonexistent_interface_driver(self):
self.conf.set_override('interface_driver', None)
self.assertRaises(SystemExit, l3_agent.L3NATAgent,
HOSTNAME, self.conf)
self.conf.set_override('interface_driver', 'wrong.driver')
self.assertRaises(SystemExit, l3_agent.L3NATAgent,
HOSTNAME, self.conf)
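# Helper: seeds the namespace manager with stale, valid and unrelated
# namespaces and verifies that only the stale qrouter-/snat- namespaces
# are deleted.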
@mock.patch.object(namespaces.RouterNamespace, 'delete')
@mock.patch.object(dvr_snat_ns.SnatNamespace, 'delete')
def _cleanup_namespace_test(self,
stale_namespace_list,
router_list,
other_namespaces,
mock_snat_ns,
mock_router_ns):
good_namespace_list = [namespaces.NS_PREFIX + r['id']
for r in router_list]
good_namespace_list += [dvr_snat_ns.SNAT_NS_PREFIX + r['id']
for r in router_list]
self.mock_ip.get_namespaces.return_value = (stale_namespace_list +
good_namespace_list +
other_namespaces)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.assertTrue(agent.namespaces_manager._clean_stale)
pm = self.external_process.return_value
pm.reset_mock()
with agent.namespaces_manager as ns_manager:
for r in router_list:
ns_manager.keep_router(r['id'])
qrouters = [n for n in stale_namespace_list
if n.startswith(namespaces.NS_PREFIX)]
self.assertEqual(len(qrouters), mock_router_ns.call_count)
self.assertEqual(
len(stale_namespace_list) - len(qrouters),
mock_snat_ns.call_count)
self.assertFalse(agent.namespaces_manager._clean_stale)
def test_cleanup_namespace(self):
self.conf.set_override('router_id', None)
stale_namespaces = [namespaces.NS_PREFIX + 'foo',
namespaces.NS_PREFIX + 'bar',
dvr_snat_ns.SNAT_NS_PREFIX + 'foo']
other_namespaces = ['unknown']
self._cleanup_namespace_test(stale_namespaces,
[],
other_namespaces)
def test_cleanup_namespace_with_registered_router_ids(self):
self.conf.set_override('router_id', None)
stale_namespaces = [namespaces.NS_PREFIX + 'cccc',
namespaces.NS_PREFIX + 'eeeee',
dvr_snat_ns.SNAT_NS_PREFIX + 'fffff']
router_list = [{'id': 'foo', 'distributed': False},
{'id': 'aaaa', 'distributed': False}]
other_namespaces = ['qdhcp-aabbcc', 'unknown']
self._cleanup_namespace_test(stale_namespaces,
router_list,
other_namespaces)
def test_cleanup_namespace_with_conf_router_id(self):
self.conf.set_override('router_id', 'bbbbb')
stale_namespaces = [namespaces.NS_PREFIX + 'cccc',
namespaces.NS_PREFIX + 'eeeee',
namespaces.NS_PREFIX + self.conf.router_id]
router_list = [{'id': 'foo', 'distributed': False},
{'id': 'aaaa', 'distributed': False}]
other_namespaces = ['qdhcp-aabbcc', 'unknown']
self._cleanup_namespace_test(stale_namespaces,
router_list,
other_namespaces)
def test_create_dvr_gateway(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data()
ri = dvr_router.DvrEdgeRouter(agent,
HOSTNAME,
router['id'],
router,
**self.ri_kwargs)
port_id = _uuid()
subnet_id = _uuid()
dvr_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30',
'prefixlen': 24,
'subnet_id': subnet_id}],
'subnets': [{'id': subnet_id,
'cidr': '20.0.0.0/24',
'gateway_ip': '20.0.0.1'}],
'id': port_id,
'network_id': _uuid(),
'mac_address': 'ca:fe:de:ad:be:ef'}
interface_name = ri._get_snat_int_device_name(port_id)
self.device_exists.return_value = False
with mock.patch.object(ri, 'get_snat_interfaces') as get_interfaces:
get_interfaces.return_value = self.snat_ports
ri._create_dvr_gateway(dvr_gw_port, interface_name)
# check 2 internal ports are plugged
# check 1 ext-gw-port is plugged
self.assertEqual(3, self.mock_driver.plug.call_count)
self.assertEqual(3, self.mock_driver.init_router_port.call_count)
def test_process_address_scope(self):
router = l3_test_common.prepare_router_data()
router['distributed'] = True
router['gw_port_host'] = HOSTNAME
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = dvr_router.DvrEdgeRouter(agent,
HOSTNAME,
router['id'],
router,
**self.ri_kwargs)
ri.get_ex_gw_port = mock.Mock(return_value=None)
# Make sure the code doesn't crash if ri.snat_iptables_manager is None.
ri.process_address_scope()
with mock.patch.object(ri, '_add_address_scope_mark') as mocked_func:
ri.snat_iptables_manager = iptables_manager.IptablesManager(
namespace=mock.ANY, use_ipv6=False)
ri.snat_iptables_manager.defer_apply_off = mock.Mock()
ri.process_address_scope()
self.assertEqual(2, mocked_func.call_count)
def test_get_service_plugin_list(self):
service_plugins = [p_const.L3_ROUTER_NAT]
self.plugin_api.get_service_plugin_list.return_value = service_plugins
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.assertEqual(service_plugins, agent.neutron_service_plugins)
self.assertTrue(self.plugin_api.get_service_plugin_list.called)
def test_get_service_plugin_list_failed(self):
raise_rpc = oslo_messaging.RemoteError()
self.plugin_api.get_service_plugin_list.side_effect = raise_rpc
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.assertIsNone(agent.neutron_service_plugins)
self.assertTrue(self.plugin_api.get_service_plugin_list.called)
def test_get_service_plugin_list_retried(self):
raise_timeout = oslo_messaging.MessagingTimeout()
# Raise a timeout on the first call to get_service_plugin_list,
# then return an empty tuple
self.plugin_api.get_service_plugin_list.side_effect = (
raise_timeout, tuple()
)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.assertEqual(tuple(), agent.neutron_service_plugins)
def test_external_gateway_removed_ext_gw_port_no_fip_ns(self):
self.conf.set_override('state_path', '/tmp')
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent.conf.agent_mode = 'dvr_snat'
router = l3_test_common.prepare_router_data(num_internal_ports=2)
router['gw_port_host'] = HOSTNAME
self.mock_driver.unplug.reset_mock()
external_net_id = router['gw_port']['network_id']
ri = dvr_router.DvrEdgeRouter(
agent, HOSTNAME, router['id'], router, **self.ri_kwargs)
ri.remove_floating_ip = mock.Mock()
agent._fetch_external_net_id = mock.Mock(return_value=external_net_id)
ri.ex_gw_port = ri.router['gw_port']
del ri.router['gw_port']
ri.fip_ns = None
nat = ri.iptables_manager.ipv4['nat']
nat.clear_rules_by_tag = mock.Mock()
nat.add_rule = mock.Mock()
ri.snat_namespace = mock.Mock()
ri.external_gateway_removed(
ri.ex_gw_port,
ri.get_external_device_name(ri.ex_gw_port['id']))
self.assertFalse(ri.remove_floating_ip.called)
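# radvd must be spawned with the generated config file, pid file and
# syslog logging, i.e. a command along the lines of (flag order is not
# asserted):
#   radvd -C /fake/radvd.conf -p /fake/radvd.pid -m syslog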
def test_spawn_radvd(self):
router = l3_test_common.prepare_router_data(ip_version=6)
conffile = '/fake/radvd.conf'
pidfile = '/fake/radvd.pid'
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
# We don't want the whole process manager to be mocked,
# so that we can catch the execute() calls
self.external_process_p.stop()
self.ip_cls_p.stop()
get_conf_file_name = 'neutron.agent.linux.utils.get_conf_file_name'
get_pid_file_name = ('neutron.agent.linux.external_process.'
'ProcessManager.get_pid_file_name')
utils_execute = 'neutron.agent.common.utils.execute'
mock.patch(get_conf_file_name).start().return_value = conffile
mock.patch(get_pid_file_name).start().return_value = pidfile
execute = mock.patch(utils_execute).start()
radvd = ra.DaemonMonitor(
router['id'],
namespaces.RouterNamespace._get_ns_name(router['id']),
agent.process_monitor,
l3_test_common.FakeDev,
self.conf)
radvd.enable(router['_interfaces'])
cmd = execute.call_args[0][0]
self.assertIn('radvd', cmd)
_join = lambda *args: ' '.join(args)
cmd = _join(*cmd)
self.assertIn(_join('-C', conffile), cmd)
self.assertIn(_join('-p', pidfile), cmd)
self.assertIn(_join('-m', 'syslog'), cmd)
def test_generate_radvd_mtu_conf(self):
router = l3_test_common.prepare_router_data()
ipv6_subnet_modes = [{'ra_mode': l3_constants.IPV6_SLAAC,
'address_mode': l3_constants.IPV6_SLAAC}]
network_mtu = '1446'
ri = self._process_router_ipv6_subnet_added(router,
ipv6_subnet_modes,
None,
network_mtu)
expected = "AdvLinkMTU 1446"
ri.agent_conf.set_override('advertise_mtu', False)
ri.radvd._generate_radvd_conf(router[l3_constants.INTERFACE_KEY])
self.assertNotIn(expected, self.utils_replace_file.call_args[0][1])
# Verify that MTU is advertised when advertise_mtu is True
ri.agent_conf.set_override('advertise_mtu', True)
ri.radvd._generate_radvd_conf(router[l3_constants.INTERFACE_KEY])
self.assertIn(expected, self.utils_replace_file.call_args[0][1])
def test_generate_radvd_conf_other_and_managed_flag(self):
# expected = {ra_mode: (AdvOtherConfigFlag, AdvManagedFlag), ...}
expected = {l3_constants.IPV6_SLAAC: (False, False),
l3_constants.DHCPV6_STATELESS: (True, False),
l3_constants.DHCPV6_STATEFUL: (False, True)}
modes = [l3_constants.IPV6_SLAAC, l3_constants.DHCPV6_STATELESS,
l3_constants.DHCPV6_STATEFUL]
mode_combos = list(iter_chain(*[[list(combo) for combo in
iter_combinations(modes, i)] for i in range(1, len(modes) + 1)]))
for mode_list in mode_combos:
ipv6_subnet_modes = [{'ra_mode': mode, 'address_mode': mode}
for mode in mode_list]
router = l3_test_common.prepare_router_data()
ri = self._process_router_ipv6_subnet_added(router,
ipv6_subnet_modes)
ri.radvd._generate_radvd_conf(router[l3_constants.INTERFACE_KEY])
def assertFlag(flag):
return (self.assertIn if flag else self.assertNotIn)
other_flag, managed_flag = (
any(expected[mode][0] for mode in mode_list),
any(expected[mode][1] for mode in mode_list))
assertFlag(other_flag)('AdvOtherConfigFlag on;',
self.utils_replace_file.call_args[0][1])
assertFlag(managed_flag)('AdvManagedFlag on;',
self.utils_replace_file.call_args[0][1])
def test_generate_radvd_intervals(self):
self.conf.set_override('min_rtr_adv_interval', 22)
self.conf.set_override('max_rtr_adv_interval', 66)
router = l3_test_common.prepare_router_data()
ipv6_subnet_modes = [{'ra_mode': l3_constants.IPV6_SLAAC,
'address_mode': l3_constants.IPV6_SLAAC}]
ri = self._process_router_ipv6_subnet_added(router,
ipv6_subnet_modes)
ri.radvd._generate_radvd_conf(router[l3_constants.INTERFACE_KEY])
self.assertIn("MinRtrAdvInterval 22",
self.utils_replace_file.call_args[0][1])
self.assertIn("MaxRtrAdvInterval 66",
self.utils_replace_file.call_args[0][1])
def test_generate_radvd_rdnss_conf(self):
router = l3_test_common.prepare_router_data()
ipv6_subnet_modes = [{'ra_mode': l3_constants.IPV6_SLAAC,
'address_mode': l3_constants.IPV6_SLAAC}]
dns_list = ['fd01:1::100', 'fd01:1::200', 'fd01::300', 'fd01::400']
ri = self._process_router_ipv6_subnet_added(router,
ipv6_subnet_modes,
dns_nameservers=dns_list)
ri.radvd._generate_radvd_conf(router[l3_constants.INTERFACE_KEY])
# Verify that radvd configuration file includes RDNSS entries
expected = "RDNSS "
for dns in dns_list[0:ra.MAX_RDNSS_ENTRIES]:
expected += "%s " % dns
self.assertIn(expected, self.utils_replace_file.call_args[0][1])
def _pd_expected_call_external_process(self, requestor, ri, enable=True):
expected_calls = []
if enable:
expected_calls.append(mock.call(uuid=requestor,
service='dibbler',
default_cmd_callback=mock.ANY,
namespace=ri.ns_name,
conf=mock.ANY,
pid_file=mock.ANY))
expected_calls.append(mock.call().enable(reload_cfg=False))
else:
expected_calls.append(mock.call(uuid=requestor,
service='dibbler',
namespace=ri.ns_name,
conf=mock.ANY,
pid_file=mock.ANY))
expected_calls.append(mock.call().disable(
get_stop_command=mock.ANY))
return expected_calls
def _pd_setup_agent_router(self):
router = l3_test_common.prepare_router_data()
ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent.external_gateway_added = mock.Mock()
ri.process(agent)
agent._router_added(router['id'], router)
# Make sure radvd monitor is created
if not ri.radvd:
ri.radvd = ra.DaemonMonitor(router['id'],
ri.ns_name,
agent.process_monitor,
ri.get_internal_device_name,
self.conf)
return agent, router, ri
def _pd_remove_gw_interface(self, intfs, agent, router, ri):
expected_pd_update = {}
expected_calls = []
for intf in intfs:
requestor_id = self._pd_get_requestor_id(intf, router, ri)
expected_calls += (self._pd_expected_call_external_process(
requestor_id, ri, False))
for subnet in intf['subnets']:
expected_pd_update[subnet['id']] = (
l3_constants.PROVISIONAL_IPV6_PD_PREFIX)
# Implement the prefix update notifier
# Keep track of the updated prefix
self.pd_update = {}
def pd_notifier(context, prefix_update):
self.pd_update = prefix_update
for subnet_id, prefix in six.iteritems(prefix_update):
for intf in intfs:
for subnet in intf['subnets']:
if subnet['id'] == subnet_id:
# Update the prefix
subnet['cidr'] = prefix
break
# Remove the gateway interface
agent.pd.notifier = pd_notifier
agent.pd.remove_gw_interface(router['id'])
self._pd_assert_dibbler_calls(expected_calls,
self.external_process.mock_calls[-len(expected_calls):])
self.assertEqual(expected_pd_update, self.pd_update)
def _pd_remove_interfaces(self, intfs, agent, router, ri):
expected_pd_update = []
expected_calls = []
for intf in intfs:
# Remove the router interface
router[l3_constants.INTERFACE_KEY].remove(intf)
requestor_id = self._pd_get_requestor_id(intf, router, ri)
expected_calls += (self._pd_expected_call_external_process(
requestor_id, ri, False))
for subnet in intf['subnets']:
expected_pd_update += [{subnet['id']:
l3_constants.PROVISIONAL_IPV6_PD_PREFIX}]
# Implement the prefix update notifier
# Keep track of the updated prefix
self.pd_update = []
def pd_notifier(context, prefix_update):
self.pd_update.append(prefix_update)
for intf in intfs:
for subnet in intf['subnets']:
if subnet['id'] in prefix_update:
# Update the prefix
subnet['cidr'] = prefix_update[subnet['id']]
# Process the router for removed interfaces
agent.pd.notifier = pd_notifier
ri.process(agent)
# The number of external process calls takes radvd into account.
# This is because there is no ipv6 interface any more after removing
# the interfaces, and radvd will be killed because of that
self._pd_assert_dibbler_calls(expected_calls,
self.external_process.mock_calls[-len(expected_calls) - 2:])
self._pd_assert_radvd_calls(ri, False)
self.assertEqual(expected_pd_update, self.pd_update)
def _pd_get_requestor_id(self, intf, router, ri):
ifname = ri.get_internal_device_name(intf['id'])
for subnet in intf['subnets']:
return dibbler.PDDibbler(router['id'],
subnet['id'], ifname).requestor_id
def _pd_assert_dibbler_calls(self, expected, actual):
        '''Check that the external process calls for dibbler are as expected.
        In the case of multiple pd-enabled router ports, the exact sequence
        of these calls is not deterministic. It is known, though, that each
        external_process call is followed by either an enable() or disable().
        '''
num_ext_calls = len(expected) // 2
expected_ext_calls = []
actual_ext_calls = []
expected_action_calls = []
actual_action_calls = []
for c in range(num_ext_calls):
expected_ext_calls.append(expected[c * 2])
actual_ext_calls.append(actual[c * 2])
expected_action_calls.append(expected[c * 2 + 1])
actual_action_calls.append(actual[c * 2 + 1])
self.assertEqual(expected_action_calls, actual_action_calls)
for exp in expected_ext_calls:
for act in actual_ext_calls:
if exp == act:
break
else:
msg = "Unexpected dibbler external process call."
self.fail(msg)
def _pd_assert_radvd_calls(self, ri, enable=True):
exp_calls = self._radvd_expected_call_external_process(ri, enable)
self.assertEqual(exp_calls,
self.external_process.mock_calls[-len(exp_calls):])
def _pd_get_prefixes(self, agent, router, ri,
existing_intfs, new_intfs, mock_get_prefix):
# First generate the prefixes that will be used for each interface
prefixes = {}
expected_pd_update = {}
expected_calls = []
for ifno, intf in enumerate(existing_intfs + new_intfs):
requestor_id = self._pd_get_requestor_id(intf, router, ri)
prefixes[requestor_id] = "2001:cafe:cafe:%d::/64" % ifno
if intf in new_intfs:
subnet_id = (intf['subnets'][0]['id'] if intf['subnets']
else None)
expected_pd_update[subnet_id] = prefixes[requestor_id]
expected_calls += (
self._pd_expected_call_external_process(requestor_id, ri))
# Implement the prefix update notifier
# Keep track of the updated prefix
self.pd_update = {}
def pd_notifier(context, prefix_update):
self.pd_update = prefix_update
for subnet_id, prefix in six.iteritems(prefix_update):
for intf in new_intfs:
for subnet in intf['subnets']:
if subnet['id'] == subnet_id:
# Update the prefix
subnet['cidr'] = prefix
break
# Start the dibbler client
agent.pd.notifier = pd_notifier
agent.pd.process_prefix_update()
# Get the prefix and check that the neutron server is notified
def get_prefix(pdo):
key = '%s:%s:%s' % (pdo.router_id, pdo.subnet_id, pdo.ri_ifname)
return prefixes[key]
mock_get_prefix.side_effect = get_prefix
agent.pd.process_prefix_update()
# Make sure that the updated prefixes are expected
self._pd_assert_dibbler_calls(expected_calls,
self.external_process.mock_calls[-len(expected_calls):])
self.assertEqual(expected_pd_update, self.pd_update)
def _pd_add_gw_interface(self, agent, router, ri):
gw_ifname = ri.get_external_device_name(router['gw_port']['id'])
agent.pd.add_gw_interface(router['id'], gw_ifname)
@mock.patch.object(dibbler.PDDibbler, 'get_prefix', autospec=True)
@mock.patch.object(dibbler.os, 'getpid', return_value=1234)
@mock.patch.object(pd.PrefixDelegation, '_is_lla_active',
return_value=True)
@mock.patch.object(dibbler.os, 'chmod')
@mock.patch.object(dibbler.shutil, 'rmtree')
@mock.patch.object(pd.PrefixDelegation, '_get_sync_data')
def test_pd_add_remove_subnet(self, mock1, mock2, mock3, mock4,
mock_getpid, mock_get_prefix):
'''Add and remove one pd-enabled subnet
Remove the interface by deleting it from the router
'''
# Initial setup
agent, router, ri = self._pd_setup_agent_router()
# Create one pd-enabled subnet and add router interface
intfs = l3_test_common.router_append_pd_enabled_subnet(router)
ri.process(agent)
# No client should be started since there is no gateway port
self.assertFalse(self.external_process.call_count)
self.assertFalse(mock_get_prefix.call_count)
# Add the gateway interface
self._pd_add_gw_interface(agent, router, ri)
# Get one prefix
self._pd_get_prefixes(agent, router, ri, [], intfs, mock_get_prefix)
# Update the router with the new prefix
ri.process(agent)
# Check that radvd is started and the router port is configured
# with the new prefix
self._pd_assert_radvd_calls(ri)
# Now remove the interface
self._pd_remove_interfaces(intfs, agent, router, ri)
@mock.patch.object(dibbler.PDDibbler, 'get_prefix', autospec=True)
@mock.patch.object(dibbler.os, 'getpid', return_value=1234)
@mock.patch.object(pd.PrefixDelegation, '_is_lla_active',
return_value=True)
@mock.patch.object(dibbler.os, 'chmod')
@mock.patch.object(dibbler.shutil, 'rmtree')
@mock.patch.object(pd.PrefixDelegation, '_get_sync_data')
def test_pd_remove_gateway(self, mock1, mock2, mock3, mock4,
mock_getpid, mock_get_prefix):
'''Add one pd-enabled subnet and remove the gateway port
Remove the gateway port and check the prefix is removed
'''
# Initial setup
agent, router, ri = self._pd_setup_agent_router()
# Create one pd-enabled subnet and add router interface
intfs = l3_test_common.router_append_pd_enabled_subnet(router)
ri.process(agent)
# Add the gateway interface
self._pd_add_gw_interface(agent, router, ri)
# Get one prefix
self._pd_get_prefixes(agent, router, ri, [], intfs, mock_get_prefix)
# Update the router with the new prefix
ri.process(agent)
# Check that radvd is started
self._pd_assert_radvd_calls(ri)
# Now remove the gw interface
self._pd_remove_gw_interface(intfs, agent, router, ri)
# There will be a router update
ri.process(agent)
@mock.patch.object(dibbler.PDDibbler, 'get_prefix', autospec=True)
@mock.patch.object(dibbler.os, 'getpid', return_value=1234)
@mock.patch.object(pd.PrefixDelegation, '_is_lla_active',
return_value=True)
@mock.patch.object(dibbler.os, 'chmod')
@mock.patch.object(dibbler.shutil, 'rmtree')
@mock.patch.object(pd.PrefixDelegation, '_get_sync_data')
def test_pd_add_remove_2_subnets(self, mock1, mock2, mock3, mock4,
mock_getpid, mock_get_prefix):
'''Add and remove two pd-enabled subnets
Remove the interfaces by deleting them from the router
'''
# Initial setup
agent, router, ri = self._pd_setup_agent_router()
# Create 2 pd-enabled subnets and add router interfaces
intfs = l3_test_common.router_append_pd_enabled_subnet(router, count=2)
ri.process(agent)
# No client should be started
self.assertFalse(self.external_process.call_count)
self.assertFalse(mock_get_prefix.call_count)
# Add the gateway interface
self._pd_add_gw_interface(agent, router, ri)
# Get prefixes
self._pd_get_prefixes(agent, router, ri, [], intfs, mock_get_prefix)
# Update the router with the new prefix
ri.process(agent)
# Check that radvd is started and the router port is configured
# with the new prefix
self._pd_assert_radvd_calls(ri)
# Now remove the interface
self._pd_remove_interfaces(intfs, agent, router, ri)
@mock.patch.object(dibbler.PDDibbler, 'get_prefix', autospec=True)
@mock.patch.object(dibbler.os, 'getpid', return_value=1234)
@mock.patch.object(pd.PrefixDelegation, '_is_lla_active',
return_value=True)
@mock.patch.object(dibbler.os, 'chmod')
@mock.patch.object(dibbler.shutil, 'rmtree')
@mock.patch.object(pd.PrefixDelegation, '_get_sync_data')
def test_pd_remove_gateway_2_subnets(self, mock1, mock2, mock3, mock4,
mock_getpid, mock_get_prefix):
'''Add one pd-enabled subnet, followed by adding another one
Remove the gateway port and check the prefix is removed
'''
# Initial setup
agent, router, ri = self._pd_setup_agent_router()
# Add the gateway interface
self._pd_add_gw_interface(agent, router, ri)
# Create 1 pd-enabled subnet and add router interface
intfs = l3_test_common.router_append_pd_enabled_subnet(router, count=1)
ri.process(agent)
# Get prefixes
self._pd_get_prefixes(agent, router, ri, [], intfs, mock_get_prefix)
# Update the router with the new prefix
ri.process(agent)
# Check that radvd is started
self._pd_assert_radvd_calls(ri)
# Now add another interface
# Create one pd-enabled subnet and add router interface
intfs1 = l3_test_common.router_append_pd_enabled_subnet(router,
count=1)
ri.process(agent)
# Get prefixes
self._pd_get_prefixes(agent, router, ri, intfs,
intfs1, mock_get_prefix)
# Update the router with the new prefix
ri.process(agent)
# Check that radvd is notified for the new prefix
self._pd_assert_radvd_calls(ri)
# Now remove the gw interface
self._pd_remove_gw_interface(intfs + intfs1, agent, router, ri)
ri.process(agent)
| 45.853389
| 79
| 0.605831
|
435099dae2522df08fca20c0261dbbf5e79db279
| 4,063
|
py
|
Python
|
huaweicloud-sdk-bss/huaweicloudsdkbss/v2/model/list_conversions_request.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 64
|
2020-06-12T07:05:07.000Z
|
2022-03-30T03:32:50.000Z
|
huaweicloud-sdk-bss/huaweicloudsdkbss/v2/model/list_conversions_request.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 11
|
2020-07-06T07:56:54.000Z
|
2022-01-11T11:14:40.000Z
|
huaweicloud-sdk-bss/huaweicloudsdkbss/v2/model/list_conversions_request.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 24
|
2020-06-08T11:42:13.000Z
|
2022-03-04T06:44:08.000Z
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListConversionsRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'x_language': 'str',
'measure_type': 'int'
}
attribute_map = {
'x_language': 'X-Language',
'measure_type': 'measure_type'
}
def __init__(self, x_language=None, measure_type=None):
"""ListConversionsRequest - a model defined in huaweicloud sdk"""
self._x_language = None
self._measure_type = None
self.discriminator = None
if x_language is not None:
self.x_language = x_language
if measure_type is not None:
self.measure_type = measure_type
@property
def x_language(self):
"""Gets the x_language of this ListConversionsRequest.
        |Case-insensitive. Default zh_CN: Chinese; en_US: English.|
:return: The x_language of this ListConversionsRequest.
:rtype: str
"""
return self._x_language
@x_language.setter
def x_language(self, x_language):
"""Sets the x_language of this ListConversionsRequest.
        |Case-insensitive. Default zh_CN: Chinese; en_US: English.|
:param x_language: The x_language of this ListConversionsRequest.
:type: str
"""
self._x_language = x_language
@property
def measure_type(self):
"""Gets the measure_type of this ListConversionsRequest.
        Measure type. 1: currency; 2: duration; 3: traffic; 4: quantity; 7: capacity; 9: number of rows;
        10: period; 11: frequency; 12: count; 16: bandwidth rate; 17: capacity-duration; 18: query rate;
        19: bandwidth rate (base 1000); 20: performance test usage; 21: area; 22: video; 23: throughput; 25: test type
:return: The measure_type of this ListConversionsRequest.
:rtype: int
"""
return self._measure_type
@measure_type.setter
def measure_type(self, measure_type):
"""Sets the measure_type of this ListConversionsRequest.
        Measure type. 1: currency; 2: duration; 3: traffic; 4: quantity; 7: capacity; 9: number of rows;
        10: period; 11: frequency; 12: count; 16: bandwidth rate; 17: capacity-duration; 18: query rate;
        19: bandwidth rate (base 1000); 20: performance test usage; 21: area; 22: video; 23: throughput; 25: test type
:param measure_type: The measure_type of this ListConversionsRequest.
:type: int
"""
self._measure_type = measure_type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListConversionsRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
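# A minimal usage sketch, assuming only what this module defines; the field
# values below are placeholders, not documented defaults.
if __name__ == "__main__":
    request = ListConversionsRequest(x_language="en_US", measure_type=1)
    # to_dict() walks openapi_types, so this prints the two populated fields,
    # e.g. {'x_language': 'en_US', 'measure_type': 1}
    print(request.to_dict())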
| 28.612676
| 120
| 0.582821
|
6ba079819c4c68750132637ad758d8e392efba83
| 10,953
|
py
|
Python
|
conans/client/conf/detect.py
|
ssaavedra/conan
|
e15dc7902fbbeaf469798a3b9948ead1ecfc8e3c
|
[
"MIT"
] | 1
|
2021-08-05T15:33:08.000Z
|
2021-08-05T15:33:08.000Z
|
conans/client/conf/detect.py
|
ssaavedra/conan
|
e15dc7902fbbeaf469798a3b9948ead1ecfc8e3c
|
[
"MIT"
] | 9
|
2017-10-07T06:23:10.000Z
|
2021-06-29T15:22:27.000Z
|
conans/client/conf/detect.py
|
ssaavedra/conan
|
e15dc7902fbbeaf469798a3b9948ead1ecfc8e3c
|
[
"MIT"
] | 1
|
2022-03-23T18:07:31.000Z
|
2022-03-23T18:07:31.000Z
|
import os
import platform
import re
import tempfile
import textwrap
from conans.client.conf.compiler_id import UNKNOWN_COMPILER, LLVM_GCC, detect_compiler_id
from conans.client.output import Color
from conans.client.tools import detected_os, OSInfo
from conans.client.tools.win import latest_visual_studio_version_installed
from conans.model.version import Version
from conans.util.conan_v2_mode import CONAN_V2_MODE_ENVVAR
from conans.util.env_reader import get_env
from conans.util.files import save
from conans.util.runners import detect_runner
def _get_compiler_and_version(output, compiler_exe):
compiler_id = detect_compiler_id(compiler_exe)
if compiler_id.name == LLVM_GCC:
output.error("%s detected as a frontend using apple-clang. "
"Compiler not supported" % compiler_exe)
return None
if compiler_id != UNKNOWN_COMPILER:
output.success("Found %s %s" % (compiler_id.name, compiler_id.major_minor))
return compiler_id.name, compiler_id.major_minor
return None
def _gcc_compiler(output, compiler_exe="gcc"):
try:
if platform.system() == "Darwin":
            # On Mac OS X, check whether gcc is a frontend using apple-clang
_, out = detect_runner("%s --version" % compiler_exe)
out = out.lower()
if "clang" in out:
return None
ret, out = detect_runner('%s -dumpversion' % compiler_exe)
if ret != 0:
return None
compiler = "gcc"
        installed_version = re.search(r"([0-9]+(\.[0-9])?)", out).group()
        # Since GCC 7.1, -dumpversion returns the major version number
        # only ("7"); -dumpfullversion is needed to get the full version
        # number ("7.1.1").
if installed_version:
output.success("Found %s %s" % (compiler, installed_version))
return compiler, installed_version
except Exception:
return None
def _clang_compiler(output, compiler_exe="clang"):
try:
ret, out = detect_runner('%s --version' % compiler_exe)
if ret != 0:
return None
if "Apple" in out:
compiler = "apple-clang"
elif "clang version" in out:
compiler = "clang"
        installed_version = re.search(r"([0-9]+\.[0-9])", out).group()
if installed_version:
output.success("Found %s %s" % (compiler, installed_version))
return compiler, installed_version
except Exception:
return None
def _sun_cc_compiler(output, compiler_exe="cc"):
try:
_, out = detect_runner('%s -V' % compiler_exe)
compiler = "sun-cc"
        installed_version = re.search(r"Sun C.*([0-9]+\.[0-9]+)", out)
if installed_version:
installed_version = installed_version.group(1)
else:
            installed_version = re.search(r"([0-9]+\.[0-9]+)", out).group()
if installed_version:
output.success("Found %s %s" % (compiler, installed_version))
return compiler, installed_version
except Exception:
return None
def _get_default_compiler(output):
"""
find the default compiler on the build machine
search order and priority:
1. CC and CXX environment variables are always top priority
2. Visual Studio detection (Windows only) via vswhere or registry or environment variables
3. Apple Clang (Mac only)
4. cc executable
5. gcc executable
6. clang executable
"""
v2_mode = get_env(CONAN_V2_MODE_ENVVAR, False)
cc = os.environ.get("CC", "")
cxx = os.environ.get("CXX", "")
if cc or cxx: # Env defined, use them
output.info("CC and CXX: %s, %s " % (cc or "None", cxx or "None"))
command = cc or cxx
if v2_mode:
compiler = _get_compiler_and_version(output, command)
if compiler:
return compiler
else:
if "gcc" in command:
gcc = _gcc_compiler(output, command)
if platform.system() == "Darwin" and gcc is None:
output.error("%s detected as a frontend using apple-clang. "
"Compiler not supported" % command)
return gcc
if "clang" in command.lower():
return _clang_compiler(output, command)
if platform.system() == "SunOS" and command.lower() == "cc":
return _sun_cc_compiler(output, command)
# I am not able to find its version
output.error("Not able to automatically detect '%s' version" % command)
return None
vs = cc = sun_cc = None
if detected_os() == "Windows":
version = latest_visual_studio_version_installed(output)
vs = ('Visual Studio', version) if version else None
if v2_mode:
cc = _get_compiler_and_version(output, "cc")
gcc = _get_compiler_and_version(output, "gcc")
clang = _get_compiler_and_version(output, "clang")
else:
gcc = _gcc_compiler(output)
clang = _clang_compiler(output)
if platform.system() == "SunOS":
sun_cc = _sun_cc_compiler(output)
if detected_os() == "Windows":
return vs or cc or gcc or clang
elif platform.system() == "Darwin":
return clang or cc or gcc
elif platform.system() == "SunOS":
return sun_cc or cc or gcc or clang
else:
return cc or gcc or clang
def _get_profile_compiler_version(compiler, version, output):
tokens = version.split(".")
major = tokens[0]
minor = tokens[1] if len(tokens) > 1 else 0
if compiler == "clang" and int(major) >= 8:
output.info("clang>=8, using the major as version")
return major
elif compiler == "gcc" and int(major) >= 5:
output.info("gcc>=5, using the major as version")
return major
elif compiler == "Visual Studio":
return major
elif compiler == "intel" and (int(major) < 19 or (int(major) == 19 and int(minor) == 0)):
return major
return version
def _detect_gcc_libcxx(executable, version, output, profile_name, profile_path):
# Assumes a working g++ executable
new_abi_available = Version(version) >= Version("5.1")
if not new_abi_available:
return "libstdc++"
if not get_env(CONAN_V2_MODE_ENVVAR, False):
msg = textwrap.dedent("""
Conan detected a GCC version > 5 but has adjusted the 'compiler.libcxx' setting to
'libstdc++' for backwards compatibility.
Your compiler is likely using the new CXX11 ABI by default (libstdc++11).
If you want Conan to use the new ABI for the {profile} profile, run:
$ conan profile update settings.compiler.libcxx=libstdc++11 {profile}
Or edit '{profile_path}' and set compiler.libcxx=libstdc++11
""".format(profile=profile_name, profile_path=profile_path))
output.writeln("\n************************* WARNING: GCC OLD ABI COMPATIBILITY "
"***********************\n %s\n************************************"
"************************************************\n\n\n" % msg,
Color.BRIGHT_RED)
return "libstdc++"
main = textwrap.dedent("""
#include <string>
using namespace std;
static_assert(sizeof(std::string) != sizeof(void*), "using libstdc++");
int main(){}
""")
t = tempfile.mkdtemp()
filename = os.path.join(t, "main.cpp")
save(filename, main)
old_path = os.getcwd()
os.chdir(t)
try:
error, out_str = detect_runner("%s main.cpp -std=c++11" % executable)
if error:
if "using libstdc++" in out_str:
output.info("gcc C++ standard library: libstdc++")
return "libstdc++"
            # Other error; we can't tell which, so keep libstdc++11
output.warn("compiler.libcxx check error: %s" % out_str)
output.warn("Couldn't deduce compiler.libcxx for gcc>=5.1, assuming libstdc++11")
else:
output.info("gcc C++ standard library: libstdc++11")
return "libstdc++11"
finally:
os.chdir(old_path)
def _detect_compiler_version(result, output, profile_path):
try:
compiler, version = _get_default_compiler(output)
except Exception:
compiler, version = None, None
if not compiler or not version:
output.error("Unable to find a working compiler")
return
result.append(("compiler", compiler))
result.append(("compiler.version", _get_profile_compiler_version(compiler, version, output)))
# Get compiler C++ stdlib
if compiler == "apple-clang":
result.append(("compiler.libcxx", "libc++"))
elif compiler == "gcc":
profile_name = os.path.basename(profile_path)
libcxx = _detect_gcc_libcxx("g++", version, output, profile_name, profile_path)
result.append(("compiler.libcxx", libcxx))
elif compiler == "cc":
if platform.system() == "SunOS":
result.append(("compiler.libstdcxx", "libstdcxx4"))
elif compiler == "clang":
if platform.system() == "FreeBSD":
result.append(("compiler.libcxx", "libc++"))
else:
result.append(("compiler.libcxx", "libstdc++"))
elif compiler == "sun-cc":
result.append(("compiler.libcxx", "libCstd"))
def _detect_os_arch(result, output):
architectures = {'i386': 'x86',
'i686': 'x86',
'i86pc': 'x86',
'amd64': 'x86_64',
'aarch64': 'armv8',
'sun4v': 'sparc'}
the_os = detected_os()
result.append(("os", the_os))
result.append(("os_build", the_os))
platform_machine = platform.machine().lower()
if platform_machine:
arch = architectures.get(platform_machine, platform_machine)
if arch.startswith('arm'):
for a in ("armv6", "armv7hf", "armv7", "armv8"):
if arch.startswith(a):
arch = a
break
else:
output.error("Your ARM '%s' architecture is probably not defined in settings.yml\n"
"Please check your conan.conf and settings.yml files" % arch)
elif OSInfo().is_aix:
arch = OSInfo.get_aix_architecture() or arch
result.append(("arch", arch))
result.append(("arch_build", arch))
def detect_defaults_settings(output, profile_path):
""" try to deduce current machine values without any constraints at all
:param output: Conan Output instance
:param profile_path: Conan profile file path
:return: A list with default settings
"""
result = []
_detect_os_arch(result, output)
_detect_compiler_version(result, output, profile_path)
result.append(("build_type", "Release"))
return result
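# A minimal sketch of calling the detection above, assuming nothing beyond this
# module: detect_defaults_settings() only needs an object exposing the output
# methods used here (info/success/error/warn/writeln), so a tiny stub suffices.
# The stub class and the "default" profile path are illustrative assumptions.
if __name__ == "__main__":
    class _StubOutput(object):
        # Accept any arguments and echo the positional ones to stdout.
        def _log(self, *args, **kwargs):
            print(" ".join(str(a) for a in args))
        info = success = error = warn = writeln = _log

    for _setting, _value in detect_defaults_settings(_StubOutput(), profile_path="default"):
        print("%s=%s" % (_setting, _value))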
| 37.768966
| 99
| 0.601114
|
4130d22e601be1fb94d742cff8a711f420447948
| 20,213
|
py
|
Python
|
model-optimizer/mo/ops/convolution_test.py
|
zhoub/dldt
|
e42c01cf6e1d3aefa55e2c5df91f1054daddc575
|
[
"Apache-2.0"
] | 3
|
2020-02-09T23:25:37.000Z
|
2021-01-19T09:44:12.000Z
|
model-optimizer/mo/ops/convolution_test.py
|
zhoub/dldt
|
e42c01cf6e1d3aefa55e2c5df91f1054daddc575
|
[
"Apache-2.0"
] | null | null | null |
model-optimizer/mo/ops/convolution_test.py
|
zhoub/dldt
|
e42c01cf6e1d3aefa55e2c5df91f1054daddc575
|
[
"Apache-2.0"
] | 2
|
2020-04-18T16:24:39.000Z
|
2021-01-19T09:42:19.000Z
|
"""
Copyright (c) 2018-2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
from mo.front.common.partial_infer.utils import int64_array
from mo.graph.graph import Node
from mo.ops.convolution import Convolution
from mo.utils.unittest.extractors import FakeValue
from mo.utils.unittest.graph import build_graph
nodes_attributes = {'conv_input': {'value': None, 'kind': 'data'},
'conv_node': {'type': 'Convolution', 'kind': 'op'},
'conv_weights': {'value': FakeValue(None), 'kind': 'data'},
'conv_output': {'value': None, 'kind': 'data'},
'op_output': {'kind': 'op', 'op': 'Result'}
}
class TestConvolutionPartialInfer(unittest.TestCase):
def test_caffe_conv2d_infer(self):
graph = build_graph(nodes_attributes,
[('conv_input', 'conv_node'),
('conv_weights', 'conv_node'),
('conv_node', 'conv_output'),
('conv_output', 'op_output')
],
{'conv_output': {'shape': None},
'conv_input': {'shape': np.array([1, 3, 227, 227])},
'conv_weights': {'shape': np.array([64, 3, 3, 3]),
'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']},
'conv_node': {'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
'conv_pad': np.array([[0, 0], [0, 0], [0, 0], [0, 0]]),
'dilation': np.array([1, 1, 1, 1]), 'bias_addable': True, 'bias_term': False,
'output_spatial_shape': None, 'output_shape': None,
'stride': np.array([1, 1, 1, 1]), 'group': 1,
'kernel_spatial_idx': np.array([2, 3]),
'input_feature_channel': 1,
'output_feature_channel': 0,
'output': 64, 'kernel_spatial': np.array([3, 3]),
'spatial_dims': np.array([2, 3]), 'channel_dims': np.array([1]),
'batch_dims': np.array([0])}
})
conv_node = Node(graph, 'conv_node')
Convolution.infer(conv_node)
exp_shape = np.array([1, 64, 225, 225])
res_shape = graph.node['conv_output']['shape']
for i in range(0, len(exp_shape)):
self.assertEqual(exp_shape[i], res_shape[i])
def test_caffe_conv2d_infer_no_shape(self):
graph = build_graph(nodes_attributes,
[('conv_input', 'conv_node'),
('conv_weights', 'conv_node'),
('conv_node', 'conv_output'),
('conv_output', 'op_output')
],
{'conv_output': {'shape': None},
'conv_input': {'shape': None},
'conv_weights': {'shape': None,
'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']},
'conv_node': {'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
'conv_pad': np.array([[0, 0], [0, 0], [0, 0], [0, 0]]),
'dilation': np.array([1, 1, 1, 1]), 'bias_addable': True, 'bias_term': False,
'output_spatial_shape': None, 'output_shape': None,
'stride': np.array([1, 1, 1, 1]), 'group': 1,
'output': 64, 'kernel_spatial': np.array([3, 3]),
'spatial_dims': np.array([2, 3]), 'channel_dims': np.array([1]),
'batch_dims': np.array([0])}
})
conv_node = Node(graph, 'conv_node')
Convolution.infer(conv_node)
res_shape = graph.node['conv_output']['shape']
self.assertIsNone(res_shape)
def test_deconv_infer_ideal(self):
graph = build_graph(nodes_attributes,
[('conv_input', 'conv_node'),
('conv_weights', 'conv_node'),
('conv_node', 'conv_output'),
('conv_output', 'op_output')
],
{'conv_output': {'shape': None},
'conv_input': {'shape': np.array([1, 21, 16, 16])},
'conv_weights': {'shape': np.array([1, 21, 4, 4]),
'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']},
'conv_node': {#'spatial_dims': np.array([2, 3]), 'batch_dims': np.array([0]),
'channel_dims': np.array([1]), 'bias_addable': True, 'bias_term': False,
'batch_dims': np.array([0]),
'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
'kernel_spatial': np.array([4, 4]), 'output_spatial_shape': None,
'kernel_spatial_idx': np.array([2, 3]),
'input_feature_channel': 1,
'output_feature_channel': 0,
'output_padding': np.array([0, 0, 1, 1]),
'type': 'Deconvolution', 'output': 21, 'dilation': np.array([1, 1, 1, 1]),
'group': 1, 'stride': np.array([1, 1, 2, 2]), 'output_shape': None}
})
deconv_node = Node(graph, 'conv_node')
Convolution.infer(deconv_node)
res_shape = deconv_node['output_shape']
exp_shape = np.array([1, 21, 35, 35])
for i in range(0, len(exp_shape)):
self.assertEqual(exp_shape[i], res_shape[i])
        # Check that shape and pad attrs do not change after a second infer
Convolution.infer(deconv_node)
for i in range(0, len(exp_shape)):
self.assertEqual(exp_shape[i], res_shape[i])
def test_deconv_infer_no_shape(self):
graph = build_graph(nodes_attributes,
[('conv_input', 'conv_node'),
('conv_weights', 'conv_node'),
('conv_node', 'conv_output'),
('conv_output', 'op_output')
],
{'conv_output': {'shape': None},
'conv_input': {'shape': None},
'conv_weights': {'shape': np.array([1, 21, 16, 16]),
'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']},
'conv_node': {'spatial_dims': np.array([2, 3]), 'batch_dims': np.array([0]),
'channel_dims': np.array([1]),
'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
'kernel_spatial': np.array([4, 4]), 'output_spatial_shape': None,
'kernel_spatial_idx': np.array([2, 3]),
'input_feature_channel': 1,
'output_feature_channel': 0,
'type': 'Deconvolution', 'output': 21, 'dilation': np.array([1, 1, 1, 1]),
'group': 1, 'stride': np.array([1, 1, 2, 2]), 'output_shape': None}
})
deconv_node = Node(graph, 'conv_node')
Convolution.infer(deconv_node)
res_shape = deconv_node['output_shape']
self.assertIsNone(res_shape)
def test_conv_infer_set_default_attrs_nchw(self):
graph = build_graph(nodes_attributes,
[
('conv_input', 'conv_node'),
('conv_weights', 'conv_node'),
('conv_node', 'conv_output'),
('conv_output', 'op_output')
],
{
'conv_output': {
'shape': None
},
'conv_input': {
'shape': int64_array([1, 3, 224, 224])
},
'conv_weights': {
'shape': int64_array([3, 64, 7, 7]),
'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']
},
'conv_node': {
'type': 'Convolution',
'bias_term': None,
'stride': None,
'dilation': None,
'batch_dims': int64_array([0]),
'channel_dims': int64_array([1]),
'output_spatial_shape': None,
'input_feature_channel': 0,
'output_feature_channel': 1,
'group': 1,
'output_shape': None,
'layout': 'NCHW'
}
})
conv_node = Node(graph, 'conv_node')
conv_output = Node(graph, 'conv_output')
Convolution.infer(conv_node)
# Check bias_term attribute
self.assertTrue(conv_node.has_valid('bias_term'))
self.assertTrue(not conv_node.bias_term)
# Check kernel_spatial_idx attr detection
self.assertTrue(conv_node.has_valid('kernel_spatial_idx'))
self.assertTrue(np.array_equal(int64_array([2, 3]), conv_node.kernel_spatial_idx))
# Check spatial_dims attr detection
self.assertTrue(conv_node.has_valid('spatial_dims'))
self.assertTrue(np.array_equal(int64_array([2, 3]), conv_node.spatial_dims))
# Check kernel_spatial attr detection
self.assertTrue(conv_node.has_valid('kernel_spatial'))
self.assertTrue(np.array_equal(int64_array([7, 7]), conv_node.kernel_spatial))
# Check output attribute
self.assertTrue(conv_node.has_valid('output'))
self.assertEqual(64, conv_node.output)
# Check dilation value. Should be set to default
self.assertTrue(conv_node.has_valid('dilation'))
self.assertTrue(np.array_equal(int64_array([1, 1, 1, 1]), conv_node.dilation))
# Check stride value. Should be set to default
self.assertTrue(conv_node.has_valid('stride'))
self.assertTrue(np.array_equal(int64_array([1, 1, 1, 1]), conv_node.stride))
# Check pad value. Should be set to default
self.assertTrue(conv_node.has_valid('pad'))
self.assertTrue(np.array_equal(int64_array([[0, 0], [0, 0], [0, 0], [0, 0]]), conv_node.pad))
# Check pad_spatial_shape
self.assertTrue(conv_node.has_valid('pad_spatial_shape'))
self.assertTrue(np.array_equal(int64_array([[0, 0], [0, 0]]), conv_node.pad_spatial_shape))
# Check resulting output shape
self.assertTrue(np.array_equal(int64_array([1, 64, 218, 218]), conv_output.shape))
def test_conv_infer_set_default_attrs_nhwc(self):
graph = build_graph(nodes_attributes,
[
('conv_input', 'conv_node'),
('conv_weights', 'conv_node'),
('conv_node', 'conv_output'),
('conv_output', 'op_output')
],
{
'conv_output': {
'shape': None
},
'conv_input': {
'shape': int64_array([1, 224, 224, 3])
},
'conv_weights': {
'shape': int64_array([3, 64, 7, 7]),
'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']
},
'conv_node': {
'type': 'Convolution',
'bias_term': None,
'stride': None,
'dilation': None,
'batch_dims': int64_array([0]),
'channel_dims': int64_array([3]),
'output_spatial_shape': None,
'input_feature_channel': 0,
'output_feature_channel': 1,
'group': 1,
'output_shape': None,
'layout': 'NHWC'
}
})
conv_node = Node(graph, 'conv_node')
conv_output = Node(graph, 'conv_output')
Convolution.infer(conv_node)
# Check bias_term attribute
self.assertTrue(conv_node.has_valid('bias_term'))
self.assertTrue(not conv_node.bias_term)
# Check kernel_spatial_idx attr detection
self.assertTrue(conv_node.has_valid('kernel_spatial_idx'))
self.assertTrue(np.array_equal(int64_array([2, 3]), conv_node.kernel_spatial_idx))
# Check spatial_dims attr detection
self.assertTrue(conv_node.has_valid('spatial_dims'))
self.assertTrue(np.array_equal(int64_array([1, 2]), conv_node.spatial_dims))
# Check kernel_spatial attr detection
self.assertTrue(conv_node.has_valid('kernel_spatial'))
self.assertTrue(np.array_equal(int64_array([7, 7]), conv_node.kernel_spatial))
# Check output attribute
self.assertTrue(conv_node.has_valid('output'))
self.assertEqual(64, conv_node.output)
# Check dilation value. Should be set to default
self.assertTrue(conv_node.has_valid('dilation'))
self.assertTrue(np.array_equal(int64_array([1, 1, 1, 1]), conv_node.dilation))
# Check stride value. Should be set to default
self.assertTrue(conv_node.has_valid('stride'))
self.assertTrue(np.array_equal(int64_array([1, 1, 1, 1]), conv_node.stride))
# Check pad value. Should be set to default
self.assertTrue(conv_node.has_valid('pad'))
self.assertTrue(np.array_equal(int64_array([[0, 0], [0, 0], [0, 0], [0, 0]]), conv_node.pad))
# Check pad_spatial_shape
self.assertTrue(conv_node.has_valid('pad_spatial_shape'))
self.assertTrue(np.array_equal(int64_array([[0, 0], [0, 0]]), conv_node.pad_spatial_shape))
# Check resulting output shape
self.assertTrue(np.array_equal(int64_array([1, 218, 218, 64]), conv_output.shape))
def test_conv_infer_3D_convolution(self):
graph = build_graph(nodes_attributes,
[
('conv_input', 'conv_node'),
('conv_weights', 'conv_node'),
('conv_node', 'conv_output'),
('conv_output', 'op_output')
],
{
'conv_output': {
'shape': None
},
'conv_input': {
'shape': int64_array([1, 3, 16, 224, 224])
},
'conv_weights': {
'shape': int64_array([3, 64, 1, 7, 7]),
'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']
},
'conv_node': {
'type': 'Convolution',
'bias_term': None,
'stride': None,
'dilation': None,
'batch_dims': int64_array([0]),
'channel_dims': int64_array([1]),
'output_spatial_shape': None,
'input_feature_channel': 0,
'output_feature_channel': 1,
'group': 1,
'output_shape': None,
'layout': 'NCHW'
}
})
conv_node = Node(graph, 'conv_node')
conv_output = Node(graph, 'conv_output')
Convolution.infer(conv_node)
# Check bias_term attribute
self.assertTrue(conv_node.has_valid('bias_term'))
self.assertTrue(not conv_node.bias_term)
# Check kernel_spatial_idx attr detection
self.assertTrue(conv_node.has_valid('kernel_spatial_idx'))
self.assertTrue(np.array_equal(int64_array([2, 3, 4]), conv_node.kernel_spatial_idx))
# Check spatial_dims attr detection
self.assertTrue(conv_node.has_valid('spatial_dims'))
self.assertTrue(np.array_equal(int64_array([2, 3, 4]), conv_node.spatial_dims))
# Check kernel_spatial attr detection
self.assertTrue(conv_node.has_valid('kernel_spatial'))
self.assertTrue(np.array_equal(int64_array([1, 7, 7]), conv_node.kernel_spatial))
# Check output attribute
self.assertTrue(conv_node.has_valid('output'))
self.assertEqual(64, conv_node.output)
# Check dilation value. Should be set to default
self.assertTrue(conv_node.has_valid('dilation'))
self.assertTrue(np.array_equal(int64_array([1, 1, 1, 1, 1]), conv_node.dilation))
# Check stride value. Should be set to default
self.assertTrue(conv_node.has_valid('stride'))
self.assertTrue(np.array_equal(int64_array([1, 1, 1, 1, 1]), conv_node.stride))
# Check pad value. Should be set to default
self.assertTrue(conv_node.has_valid('pad'))
self.assertTrue(np.array_equal(int64_array([[0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]), conv_node.pad))
# Check pad_spatial_shape
self.assertTrue(conv_node.has_valid('pad_spatial_shape'))
self.assertTrue(np.array_equal(int64_array([[0, 0], [0, 0], [0, 0]]), conv_node.pad_spatial_shape))
# Check resulting output shape
self.assertTrue(np.array_equal(int64_array([1, 64, 16, 218, 218]), conv_output.shape))
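# A short illustrative helper (not used by the tests) showing the output-size
# arithmetic the expected shapes above rely on, valid for the defaults these
# tests use (stride 1, dilation 1, zero padding):
#     out = (in + pad_begin + pad_end - kernel) // stride + 1
# e.g. 224 - 7 + 1 = 218 for the NCHW/NHWC cases and 227 - 3 + 1 = 225 for the
# Caffe conv2d case.
def _expected_out_size(in_size, kernel, stride=1, pad=(0, 0)):
    """Convolution output size for one spatial dimension (dilation assumed 1)."""
    return (in_size + pad[0] + pad[1] - kernel) // stride + 1
if __name__ == '__main__':
    assert _expected_out_size(224, 7) == 218
    assert _expected_out_size(227, 3) == 225
    unittest.main()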
| 53.052493
| 120
| 0.46381
|
106734a7c8f4de3ba29409b6682e3662f296d54c
| 9,070
|
py
|
Python
|
Algorithms/General/MergeStates.py
|
icsa-caps/HieraGen
|
4026c1718878d2ef69dd13d3e6e10cab69174fda
|
[
"MIT"
] | 6
|
2020-07-07T15:45:13.000Z
|
2021-08-29T06:44:29.000Z
|
Algorithms/General/MergeStates.py
|
icsa-caps/HieraGen
|
4026c1718878d2ef69dd13d3e6e10cab69174fda
|
[
"MIT"
] | null | null | null |
Algorithms/General/MergeStates.py
|
icsa-caps/HieraGen
|
4026c1718878d2ef69dd13d3e6e10cab69174fda
|
[
"MIT"
] | null | null | null |
from typing import List
from DataObjects.ClassState import State
########################################################################################################################
# 8) STATE MERGING
########################################################################################################################
class MergeStates:
def __init__(self, max_merging_iterations: int, access: List[str], evict: List[str]):
self.maxMergingIter = max_merging_iterations
self.access = access
self.evict = evict
def merge_states(self, statesets):
found = 1
itercnt = 0
while found and itercnt < self.maxMergingIter:
itercnt += 1
found = 0
setkeys = self._GetSetSeq(statesets)
for stateset in setkeys:
defercluster = self._ClusterStatesDeferred(statesets[stateset].getstates())
for deferkey in sorted(defercluster.keys()):
contextcluster = self._ClusterTransitionContext(defercluster[deferkey])
for contextkey in sorted(contextcluster.keys()):
accesscluster = self._ClusterAccessMerge(contextcluster[contextkey])
for accesskey in sorted(accesscluster.keys()):
dependencemap = self._ClusterIndependent(accesscluster[accesskey])
if len(dependencemap) > 1:
transitionmap = self._ClusterTransitions(dependencemap)
                                if all(len(transition) > 1 for transition in transitionmap.values()) \
                                        and len(transitionmap):
                                    # Testing that the states to be merged still exist is important:
                                    # this is a greedy algorithm, and a state might be mergeable with
                                    # several different states. Once it has been merged it disappears
                                    # and is dead, so any remaining merge involving it must be dropped.
if self._TestStateExistInSet(dependencemap, statesets[stateset]):
self._MergeGivenStates(dependencemap, transitionmap, statesets)
found = 1
def _GetSetSeq(self, statesets):
statesetlist = []
for stateset in statesets.values():
statesetlist.append(stateset.getstablestate())
statesetlist.sort(key=lambda x: (len(x.getaccesshit()), x.getstatename()))
return [state.getstatename() for state in statesetlist]
def _MergeGivenStates(self, mergestates, transitionmap, statesets):
mergestates.sort(key=lambda x: len(x.getstatename()))
# mergestates.sort(key=lambda x: len(x.getstatename()))
# Make new state
newstate = State(mergestates[0].getstatename(), self.access, self.evict)
for transition in transitionmap.values():
newstate.addtransitions(transition[0])
# Explore context
startstatesets = []
endstatesets = []
for state in mergestates:
startstatesets += state.getstartstatesets()
endstatesets += state.getendstatesets()
startstatesets = list(set(startstatesets))
endstatesets = list(set(endstatesets))
# Remove old states from all state sets
for stateset in statesets.values():
stateset.removestates(mergestates)
# Now add new state to sets
for stateset in startstatesets:
stateset.addstartstate(newstate)
for stateset in endstatesets:
stateset.addendstate(newstate)
# Update links
for stateset in statesets.values():
for state in stateset.getstates():
for replacestate in mergestates:
state.replaceremotestate(replacestate, newstate)
@staticmethod
def _TestStateExistInSet(states, stateset):
for state in states:
if not stateset.teststateexist(state):
return 0
return 1
####################################################################################################################
# CLUSTER STATES
####################################################################################################################
def _ClusterStatesDeferred(self, states):
ordereddeferred = 1
msgmap = {}
for state in states:
defermsgs = state.getdefermessages()
detectset = []
for defermsg in defermsgs:
detectset.append(defermsg.getmsgtype())
            # list.sort() returns None, so sort a copy with sorted() for the order-insensitive key
            detectkey = '$' + ''.join(detectset if ordereddeferred else sorted(detectset))
entry = msgmap.get(detectkey, 0)
if entry:
if state not in entry:
entry.append(state)
else:
msgmap.update({detectkey: [state]})
return self._RemoveSingleEntries(msgmap)
def _ClusterTransitionContext(self, defercluster):
msgcontextmap = {}
for state in defercluster:
for transition in state.getdataack():
cond = transition.getcond()[0] if transition.getcond() else "_"
inmsg = transition.getinmsg() if isinstance(transition.getinmsg(), str) else \
transition.getinmsg().getmsgtype()
identkey = inmsg + cond + transition.getfinalstate().getstatename()
entry = msgcontextmap.get(identkey, 0)
if entry:
entry.append(state)
else:
msgcontextmap.update({identkey: [state]})
return self._RemoveSingleEntries(msgcontextmap)
def _ClusterAccessMerge(self, contextcluster):
accessmap = {}
for state in contextcluster:
accesskey = "$"
for access in state.getaccesshit():
accesskey += access.getguard()
entry = accessmap.get(accesskey, 0)
if entry:
entry.append(state)
else:
accessmap.update({accesskey: [state]})
return self._RemoveSingleEntries(accessmap)
def _ClusterIndependent(self, accesscluster):
dependencemap = {}
for state in accesscluster:
for transition in state.gettransitions():
dependencemap.update({state: []})
finalstate = transition.getfinalstate()
if finalstate in accesscluster and finalstate != transition.getstartstate():
dependencemap[state].append(finalstate)
independentlist = []
keys = list(dependencemap.keys())
keys.sort(key=lambda x: x.getstatename())
for entry in keys:
if not len(dependencemap[entry]):
independentlist.append(entry)
return independentlist
def _ClusterTransitions(self, accesscluster):
transitionmap = {}
for state in accesscluster:
for transition in state.getremote() + state.getdataack():
cond = transition.getcond()[0] if transition.getcond() else "_"
inmsg = transition.getinmsg() if isinstance(transition.getinmsg(), str) else \
transition.getinmsg().getmsgtype()
self._AppendTransitionMap(transitionmap, transition, inmsg + cond)
for transition in state.getaccess() + state.getevict():
identkey = transition.getaccess()
if transition.getstartstate() == transition.getfinalstate():
identkey += "_l"
self._AppendTransitionMap(transitionmap, transition, identkey)
transitionmap = self._ClusterNonAmbigousTrans(transitionmap)
return transitionmap
@staticmethod
def _AppendTransitionMap(transitionmap, transition, identkey):
entry = transitionmap.get(identkey, 0)
if entry:
entry.append(transition)
else:
transitionmap.update({identkey: [transition]})
@staticmethod
def _ClusterNonAmbigousTrans(transitionmap):
match = 1
for transitionkey in sorted(transitionmap.keys()):
finalstates = []
for transition in transitionmap[transitionkey]:
finalstate = transition.getfinalstate()
if transition.getstartstate() != finalstate:
finalstates.append(finalstate)
if len(list(set(finalstates))) > 1:
match = 0
break
if match == 0:
return {}
return transitionmap
@staticmethod
def _RemoveSingleEntries(statedict):
removeguards = []
for guard in statedict:
if len(statedict[guard]) == 1:
removeguards.append(guard)
for guard in removeguards:
del statedict[guard]
return statedict
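# A minimal construction sketch, assuming only the constructor signature above:
# the access/evict labels are placeholders, and a real `statesets` dict would
# come from the surrounding HieraGen pipeline.
if __name__ == "__main__":
    merger = MergeStates(max_merging_iterations=10,
                         access=["load", "store"],
                         evict=["evict"])
    # merger.merge_states(statesets)  # statesets: {name: state set} built elsewhere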
| 36.572581
| 120
| 0.553032
|
a3bf0574f3e683591dfd6ab052a76784ab5a69b4
| 904
|
py
|
Python
|
microcosm_flask/tests/swagger/parameters/test_default.py
|
Sinon/microcosm-flask
|
c1404ebc94459c8156b04f5e04490a330117524c
|
[
"Apache-2.0"
] | 11
|
2017-01-30T21:53:20.000Z
|
2020-05-29T22:39:19.000Z
|
microcosm_flask/tests/swagger/parameters/test_default.py
|
Sinon/microcosm-flask
|
c1404ebc94459c8156b04f5e04490a330117524c
|
[
"Apache-2.0"
] | 139
|
2016-03-09T19:09:59.000Z
|
2021-09-03T17:14:00.000Z
|
microcosm_flask/tests/swagger/parameters/test_default.py
|
Sinon/microcosm-flask
|
c1404ebc94459c8156b04f5e04490a330117524c
|
[
"Apache-2.0"
] | 10
|
2016-12-19T22:39:42.000Z
|
2021-03-09T19:23:15.000Z
|
from hamcrest import assert_that, equal_to, is_
from marshmallow import Schema, fields
from microcosm_flask.swagger.api import build_parameter
class TestSchema(Schema):
id = fields.UUID()
foo = fields.String(description="Foo", default="bar")
payload = fields.Dict()
datetime = fields.DateTime()
def test_field_description_and_default():
parameter = build_parameter(TestSchema().fields["foo"])
assert_that(parameter, is_(equal_to({
"type": "string",
"description": "Foo",
"default": "bar",
})))
def test_field_uuid():
parameter = build_parameter(TestSchema().fields["id"])
assert_that(parameter, is_(equal_to({
"type": "string",
"format": "uuid",
})))
def test_field_dict():
parameter = build_parameter(TestSchema().fields["payload"])
assert_that(parameter, is_(equal_to({
"type": "object",
})))
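# A small exploratory sketch: the schema above also declares a `datetime` field
# that no test asserts on; this just prints whatever build_parameter produces
# for it, without assuming a particular mapping.
if __name__ == "__main__":
    print(build_parameter(TestSchema().fields["datetime"]))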
| 25.111111
| 63
| 0.659292
|
f34a6ae451e182d06ec6217aa282c77cd96c4737
| 288
|
py
|
Python
|
cms/admin.py
|
opendream/asip
|
20583aca6393102d425401d55ea32ac6b78be048
|
[
"MIT"
] | null | null | null |
cms/admin.py
|
opendream/asip
|
20583aca6393102d425401d55ea32ac6b78be048
|
[
"MIT"
] | 8
|
2020-03-24T17:11:49.000Z
|
2022-01-13T01:18:11.000Z
|
cms/admin.py
|
opendream/asip
|
20583aca6393102d425401d55ea32ac6b78be048
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
# Register your models here.
from cms.models import News
from cms.models import Event
class NewsAdmin(admin.ModelAdmin):
pass
class EventAdmin(admin.ModelAdmin):
pass
admin.site.register(News, NewsAdmin)
admin.site.register(Event, EventAdmin)
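# A hedged sketch of how the empty ModelAdmin subclasses above could be
# extended; the `title` field name is an assumption, since the News/Event
# model fields are not shown here.
#
#   class NewsAdmin(admin.ModelAdmin):
#       list_display = ("title",)
#       search_fields = ("title",)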
| 19.2
| 38
| 0.784722
|
1ad216932a4659e29b033611bdc81bfb7d71a330
| 2,109
|
py
|
Python
|
gurobi-test/test.py
|
jontev/postnord-optimization
|
38dd1738dd4a930531d70bd914ae34918ad7f1d2
|
[
"MIT"
] | null | null | null |
gurobi-test/test.py
|
jontev/postnord-optimization
|
38dd1738dd4a930531d70bd914ae34918ad7f1d2
|
[
"MIT"
] | null | null | null |
gurobi-test/test.py
|
jontev/postnord-optimization
|
38dd1738dd4a930531d70bd914ae34918ad7f1d2
|
[
"MIT"
] | null | null | null |
import gurobipy as gp
from gurobipy import GRB
import pandas as pd
import os
S = pd.read_csv('Sj.csv')
L = pd.read_csv('Lj.csv')
m = pd.read_csv('mj.csv')
dij = pd.read_csv('dij.csv')
aip = pd.read_csv('aip.csv')
S = S.to_numpy()
L = L.to_numpy()
m = m.to_numpy()
dij = dij.to_numpy()
aip = aip.to_numpy()
P = list(range(40))
I = list(range(9))
J = list(range(dij.shape[1]))
T = list(range(2))
aip = aip.T
Kl = 18
Ks = 30
m = [m[j][0] + 1 for j in J]
S = [S[j][0] for j in J]
L = [L[j][0] for j in J]
# create model instance
model = gp.Model()
# define variables in optimization problem
x = model.addVars(I, P, J, vtype=GRB.INTEGER, name="x", lb = 0)
yl = model.addVars(P, J, T, vtype=GRB.BINARY, name="yl")
ys = model.addVars(P, J, T, vtype=GRB.BINARY, name="ys")
u = model.addVars(J, T, vtype=GRB.BINARY, name="u")
w = model.addVars(P, J, vtype=GRB.BINARY, name="w")
# Objective function
obj = sum(aip[i,p]*x[i,p,j] for p in P for i in I for j in J)
# Constraints
A = model.addConstrs(sum( yl[p,j,t] + ys[p,j,t] for j in J) <= 1 for p in P for t in T )
B = model.addConstrs(sum( x[i,p,j] for p in P) == dij[i,j] for i in I for j in J )
C = model.addConstrs(sum( yl[p,j,t] for p in P) <= u[j,t]*L[j] for j in J for t in T)
D = model.addConstrs(sum( ys[p,j,t] for p in P) <= u[j,t]*S[j] for j in J for t in T)
E = model.addConstrs(sum( x[i,p,j] for i in I) <= sum(Kl*yl[p,j,t] + Ks*ys[p,j,t] for t in T) for p in P for j in J )
F = model.addConstrs(sum( u[j,t] for t in T) == 1 for j in J )
G = model.addConstrs(sum( (t+1)*u[j,t] for t in T) <= m[j] for j in J )
H = model.addConstrs(sum( w[p,j] for p in P ) <= 1 for j in J )
Q = model.addConstrs(w[p,j]*dij[0,j] == x[0,p,j] for p in P for j in J )
# NOTE: 'friplock' and 'helpall' are item indices that are never defined in this
# script; they must be set (or imported) before these two constraints are built.
R = model.addConstrs(sum( x[friplock,p,j] for p in P[20:] ) == 0 for j in J)
AA = model.addConstrs(sum( x[helpall, p, j] for p in P[0:19]) == 0 for j in J )
# Min-problem
model.setObjective(obj, GRB.MINIMIZE)
# time limit
#model.Params.TimeLimit = 300
model.update()
# call for solver
model.optimize()
# optimal objective value
model.objVal
# optimal solution (variables)
model.printAttr('x')
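# A hedged sketch of reading the solution back out: Model.getVars(), Var.VarName
# and Var.X are standard gurobipy attributes; the 0.5 threshold just filters the
# integer assignment variables x[i,p,j] that are actually used.
if model.SolCount > 0:
    for v in model.getVars():
        if v.VarName.startswith("x") and v.X > 0.5:
            print(v.VarName, v.X)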
| 29.704225
| 117
| 0.626363
|
5d87ef8f3887cbc92ad9250cf847974199e0a850
| 6,801
|
py
|
Python
|
hubspot/crm/deals/models/error_category.py
|
fakepop/hubspot-api-python
|
f04103a09f93f5c26c99991b25fa76801074f3d3
|
[
"Apache-2.0"
] | null | null | null |
hubspot/crm/deals/models/error_category.py
|
fakepop/hubspot-api-python
|
f04103a09f93f5c26c99991b25fa76801074f3d3
|
[
"Apache-2.0"
] | null | null | null |
hubspot/crm/deals/models/error_category.py
|
fakepop/hubspot-api-python
|
f04103a09f93f5c26c99991b25fa76801074f3d3
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Deals
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from hubspot.crm.deals.configuration import Configuration
class ErrorCategory(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {"http_status": "str", "name": "str"}
attribute_map = {"http_status": "httpStatus", "name": "name"}
def __init__(
self, http_status=None, name=None, local_vars_configuration=None
): # noqa: E501
"""ErrorCategory - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._http_status = None
self._name = None
self.discriminator = None
self.http_status = http_status
self.name = name
@property
def http_status(self):
"""Gets the http_status of this ErrorCategory. # noqa: E501
:return: The http_status of this ErrorCategory. # noqa: E501
:rtype: str
"""
return self._http_status
@http_status.setter
def http_status(self, http_status):
"""Sets the http_status of this ErrorCategory.
:param http_status: The http_status of this ErrorCategory. # noqa: E501
:type: str
"""
if (
self.local_vars_configuration.client_side_validation and http_status is None
): # noqa: E501
raise ValueError(
"Invalid value for `http_status`, must not be `None`"
) # noqa: E501
allowed_values = [
"CONTINUE",
"SWITCHING_PROTOCOLS",
"PROCESSING",
"OK",
"CREATED",
"ACCEPTED",
"NON_AUTHORITATIVE_INFORMATION",
"NO_CONTENT",
"RESET_CONTENT",
"PARTIAL_CONTENT",
"MULTI_STATUS",
"ALREADY_REPORTED",
"IM_USED",
"MULTIPLE_CHOICES",
"MOVED_PERMANENTLY",
"FOUND",
"SEE_OTHER",
"NOT_MODIFIED",
"USE_PROXY",
"TEMPORARY_REDIRECT",
"PERMANENT_REDIRECT",
"BAD_REQUEST",
"UNAUTHORIZED",
"PAYMENT_REQUIRED",
"FORBIDDEN",
"NOT_FOUND",
"METHOD_NOT_ALLOWED",
"NOT_ACCEPTABLE",
"PROXY_AUTHENTICATION_REQUIRED",
"REQUEST_TIMEOUT",
"CONFLICT",
"GONE",
"LENGTH_REQUIRED",
"PRECONDITION_FAILED",
"REQUEST_ENTITY_TOO_LARGE",
"REQUEST_URI_TOO_LONG",
"UNSUPPORTED_MEDIA_TYPE",
"REQUESTED_RANGE_NOT_SATISFIABLE",
"EXPECTATION_FAILED",
"IM_A_TEAPOT",
"MISDIRECTED_REQUEST",
"UNPROCESSABLE_ENTITY",
"LOCKED",
"FAILED_DEPENDENCY",
"UPGRADE_REQUIRED",
"PRECONDITION_REQUIRED",
"TOO_MANY_REQUESTS",
"REQUEST_HEADERS_FIELDS_TOO_LARGE",
"INTERNAL_STALE_SERVICE_DISCOVERY",
"UNAVAILABLE_FOR_LEGAL_REASONS",
"INTERNAL_SERVER_ERROR",
"NOT_IMPLEMENTED",
"BAD_GATEWAY",
"SERVICE_UNAVAILABLE",
"GATEWAY_TIMEOUT",
"HTTP_VERSION_NOT_SUPPORTED",
"VARIANT_ALSO_NEGOTIATES",
"INSUFFICIENT_STORAGE",
"LOOP_DETECTED",
"NOT_EXTENDED",
"NETWORK_AUTHENTICATION_REQUIRED",
] # noqa: E501
if (
self.local_vars_configuration.client_side_validation
and http_status not in allowed_values
): # noqa: E501
raise ValueError(
"Invalid value for `http_status` ({0}), must be one of {1}".format( # noqa: E501
http_status, allowed_values
)
)
self._http_status = http_status
@property
def name(self):
"""Gets the name of this ErrorCategory. # noqa: E501
:return: The name of this ErrorCategory. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ErrorCategory.
:param name: The name of this ErrorCategory. # noqa: E501
:type: str
"""
if (
self.local_vars_configuration.client_side_validation and name is None
): # noqa: E501
raise ValueError(
"Invalid value for `name`, must not be `None`"
) # noqa: E501
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ErrorCategory):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ErrorCategory):
return True
return self.to_dict() != other.to_dict()
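A minimal usage sketch for the generated model above; the import path is taken from the file path in this record, and the status and name values are illustrative (the status must be one of the allowed_values enforced by the http_status setter).

from hubspot.crm.deals.models.error_category import ErrorCategory

# Build a category with a status the setter accepts and inspect it
category = ErrorCategory(http_status="NOT_FOUND", name="OBJECT_NOT_FOUND")
print(category.to_dict())  # {'http_status': 'NOT_FOUND', 'name': 'OBJECT_NOT_FOUND'}
print(category == ErrorCategory(http_status="NOT_FOUND", name="OBJECT_NOT_FOUND"))  # True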
| 29.960352
| 124
| 0.545802
|
2e744bdbea3ef1528e5cc99fad0837fc41280333
| 2,774
|
py
|
Python
|
python/shopping_store_v2/main.py
|
matheuskiser/pdx_code_guild
|
49a5c62fb468253eb4d9a1fb11166df79bb10873
|
[
"MIT"
] | null | null | null |
python/shopping_store_v2/main.py
|
matheuskiser/pdx_code_guild
|
49a5c62fb468253eb4d9a1fb11166df79bb10873
|
[
"MIT"
] | null | null | null |
python/shopping_store_v2/main.py
|
matheuskiser/pdx_code_guild
|
49a5c62fb468253eb4d9a1fb11166df79bb10873
|
[
"MIT"
] | null | null | null |
from store import Store
from login import Login
import os
# Starts store and sets up all items
store = Store("Fred Meyer", "Portland, OR")
# Temporary code to load or not load items
load = raw_input("Load items? ")
if load == "y":
store.load_items_store()
# Starts user's login
user = Login()
# Global user ID
USER_ID = user.user.get_user_id(user.user.username)
def clear():
os.system('clear')
def user_menu():
clear()
option = 0
while option != 5:
print "==================================="
print "Menu"
print "==================================="
print "1. Browse all items"
print "2. Search"
print "3. User account"
print "4. Checkout"
print "5. Exit"
option = int(raw_input("Pick an option: "))
user_pick_menu_option(option)
def user_pick_menu_option(option):
if option == 1:
"""Show Items"""
show_items()
elif option == 2:
"""Search item"""
search_item()
elif option == 3:
"""Show account info"""
display_user_account()
elif option == 4:
"""Checkout"""
checkout()
elif option == 5:
"""Exit Program"""
clear()
print "Thanks for shopping!"
# Outputs all items from store
def show_items():
clear()
store.print_all_items()
# Searches item in store and asks user to buy item
def search_item():
clear()
print "Search item:"
item_name = raw_input("What is the item's name? ")
# Shows item searched
store.print_single_item(item_name)
# Asks user to buy item
choice = raw_input("Buy item? (y/n): ")
if choice.lower() == "y" or choice.lower() == "yes":
user_qty = raw_input("How many? ")
item_qty = store.get_item_qty(item_name)
item_cost = store.get_item_cost(item_name)
if int(item_qty) > 0:
# If user is trying to buy more qty than store has
while int(user_qty) > int(item_qty):
print "Sorry, quantity entered is too high. Try again"
user_qty = int(raw_input("How many? "))
# Gets item amount after adding to cart
qty_left = int(item_qty) - int(user_qty)
# Adds item to cart
user.user.add_to_cart(USER_ID, item_name, user_qty, item_cost)
# Remove item qty from item in store
store.remove_item_qty(item_name, qty_left)
print "Item added to cart."
else:
print "Item not in stock."
def display_user_account():
clear()
user.user.display_user_account_info()
def checkout():
clear()
user.user.checkout_cart(USER_ID)
# RUNS PROGRAM
# Checks if user is logged in
if user.is_logged_in:
user_menu()
| 24.333333
| 74
| 0.582913
|
c63da9c501d44ecf97a3883c4b9f6e890b7acacc
| 5,786
|
py
|
Python
|
costar_task_plan/python/costar_task_plan/mcts/node.py
|
cpaxton/costar_plan
|
be5c12f9d0e9d7078e6a5c283d3be059e7f3d040
|
[
"Apache-2.0"
] | 66
|
2018-10-31T04:58:53.000Z
|
2022-03-17T02:32:25.000Z
|
costar_task_plan/python/costar_task_plan/mcts/node.py
|
cpaxton/costar_plan
|
be5c12f9d0e9d7078e6a5c283d3be059e7f3d040
|
[
"Apache-2.0"
] | 8
|
2018-10-23T21:19:25.000Z
|
2018-12-03T02:08:41.000Z
|
costar_task_plan/python/costar_task_plan/mcts/node.py
|
cpaxton/costar_plan
|
be5c12f9d0e9d7078e6a5c283d3be059e7f3d040
|
[
"Apache-2.0"
] | 25
|
2018-10-19T00:54:17.000Z
|
2021-10-10T08:28:15.000Z
|
from costar_task_plan.abstract import *
'''
An MCTS node is a TYPE of state, but contains a different type of state.
Why? So that we can do interesting learning over MCTS states.
'''
class Node(AbstractState):
next_idx = 0
def __init__(self, world=None, action=None, prior=1., root=False):
'''
You only need one of world or action. Ways to use this:
- provide a world and set root to true: ROOT node
- provide no world, but an action: non-instantiated child node
Parameters:
-----------
world: world state including all current actors
action: the current MCTS action (wraps a policy)
prior: weight for how good this is going to be
root: is this the root of a tree?
'''
if world is None and action is None:
raise RuntimeError('must provide either a world or an action!')
self.parent = None
self.n_visits = 0
self.n_rollouts = 0
self.world = world
self.action = action
self.children = []
self.max_reward = -float('inf')
self.total_reward = 0
self.avg_reward = 0
self.prev_reward = 0
self.max_final_reward = -float('inf')
self.prior = prior
self.initialized = self.world is not None
self.terminal = self.world is not None and self.world.done
if self.action is not None and self.action.tag is not None:
self.tag = self.action.tag
elif root:
self.tag = 'ROOT()'
if self.world.done:
raise RuntimeError('cannot create tree from terminal node')
else:
self.tag = 'NODE'
if self.world is not None:
self.state = self.world.actors[0].state
else:
self.state = None
self.traj = []
self.rewards = []
self.reward = 0
'''
MCTS update step
'''
def update(self, reward, final_reward, steps):
self.total_reward += reward
self.max_reward = max(reward, self.max_reward)
        self.max_final_reward = max(final_reward, self.max_final_reward)
self.avg_reward = self.total_reward / self.n_visits
self.reward = 0 # reset counter for this trace
'''
expand() creates a new child world from a particular environment. We can
then tick() this world forward however long we want to follow a particular
policy.
'''
def expand(self, action):
if action is None:
raise RuntimeError('Cannot expand with an empty action!')
if not isinstance(action, AbstractAction):
raise TypeError(
'node.expand() takes an Environment action, not an MCTS action.')
new_world = self.world.fork(action)
n = Node(world=new_world)
n.prev_reward = self.prev_reward + self.reward
n.parent = self
return n
'''
Alternate version:
- if all of our children have been created ahead of time, we may want to
'''
def instantiate(self, child):
if child.parent is None:
child.parent = self
elif child.parent is not self:
raise RuntimeError(
'Cannot instantiate node with a node other than its parent!')
if child.world is None:
if not isinstance(child, Node):
raise RuntimeError(
                    'Cannot instantiate something that is not an MCTS node.')
if child.action is None:
raise RuntimeError(
'Cannot instantiate a node with an empty action!')
action = child.action.getAction(self)
if action is None:
failed = True
else:
failed = False
new_world = self.world.fork(action)
child.world = new_world
child.state = child.world.actors[0].state
child.initialized = True
child.terminal = child.world.done or failed
child.rewards = [new_world.initial_reward]
child.reward += new_world.initial_reward
child.parent = self
child.prev_reward = self.prev_reward + self.reward
child.traj.append((self.world.actors[0].state, action))
child.action.update(child)
else:
raise RuntimeError(
'Cannot instantiate a node that already has been instantiated!')
@property
def ticks(self):
return self.world.ticks
'''
tick() to advance the state of the world associated with a particular
action or policy.
'''
def tick(self, action):
        if len(self.children) != 0:
raise RuntimeError(
'Cannot tick a node that has already been expanded!')
elif self.terminal:
raise RuntimeError('Tried to advance from a terminal state!')
else:
if action is None:
failed = True
else:
failed = False
# advance the current state of the world
(res, S0, A0, S1, F1, r) = self.world.tick(action)
self.traj.append((S0, A0))
self.rewards.append(r)
self.reward += r
self.terminal = self.world.done or failed
self.state = S1
return res and not failed, S0, A0, S1, F1, r
''' -----------------------------------------------------------------------
NOTE: these are helper functions for accessing and updating the world state
----------------------------------------------------------------------- '''
def features(self):
if self.world is None:
raise RuntimeError('node.instantiate() not called yet!')
return self.world.initial_features
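A hypothetical construction sketch based only on the constructor docstring above: my_world and my_mcts_action stand in for a concrete world state and MCTS action from costar_task_plan and are not defined here; the import path is assumed from the file location.

# from costar_task_plan.mcts.node import Node  (assumed module layout)

# Root node: wraps a live, non-terminal world state
root = Node(world=my_world, root=True)

# Non-instantiated child: carries an MCTS action and a prior weight;
# it only receives a forked world once the parent instantiates it
child = Node(action=my_mcts_action, prior=0.5)
root.children.append(child)
root.instantiate(child)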
| 33.252874
| 81
| 0.566367
|
2cbac16d3c0517f00cfe53f177d6717a71a2d97e
| 1,635
|
py
|
Python
|
module_factions.py
|
invisiblebob395/awefawe
|
42daf9d3ae06bcdb3b91973d94eed8bed1303e2b
|
[
"BSD-3-Clause"
] | 17
|
2015-01-19T07:53:57.000Z
|
2021-07-10T02:26:51.000Z
|
module_factions.py
|
qt911025/pw_module_system
|
dbd257e5231d16c47f17091a3ab18972be7687e5
|
[
"BSD-3-Clause"
] | 1
|
2017-08-31T03:55:09.000Z
|
2017-08-31T03:55:09.000Z
|
module_factions.py
|
qt911025/pw_module_system
|
dbd257e5231d16c47f17091a3ab18972be7687e5
|
[
"BSD-3-Clause"
] | 14
|
2015-05-03T05:20:01.000Z
|
2021-12-29T17:10:50.000Z
|
from header_factions import *
####################################################################################################################
# Each faction record contains the following fields:
# 1) Faction id: used for referencing factions in other files.
# The prefix fac_ is automatically added before each faction id.
# 2) Faction name.
# 3) Faction flags. See header_factions.py for a list of available flags
# 4) Faction coherence. Relation between members of this faction.
# 5) Relations. This is a list of relation records.
# Each relation record is a tuple that contains the following fields:
# 5.1) Faction. Which other faction this relation is referring to
# 5.2) Value: Relation value between the two factions.
# Values range between -1 and 1.
# 6) Ranks
# 7) Faction color (default is gray)
####################################################################################################################
# Adding more factions to this list will not work: many scripts are limited to 10 factions by the scene file format.
factions = [
("commoners","Commoners", 0, 0, [], [], 0x990099),
("outlaws","Outlaws", 0, 0, [], [], 0x775500),
("1","Red Faction", 0, 0, [], [], 0xDD0000),
("2","White Faction", 0, 0, [], [], 0xFFFFFF),
("3","Blue Faction", 0, 0, [], [], 0x3333FF),
("4","Green Faction", 0, 0, [], [], 0x00DD00),
("5","Yellow Faction", 0, 0, [], [], 0xCCAA44),
("6","Purple Faction", 0, 0, [], [], 0x663333),
("7","Orange Faction", 0, 0, [], [], 0x884411),
("8","Black Faction", 0, 0, [], [], 0x000000),
("factions_end","factions_end", 0, 0, [], []),
]
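Purely to illustrate the record layout documented in the comment block above (the file itself warns that extra factions will not work in-game), a hypothetical entry with one relation record and a custom color might look like this:

# ("<id>", "<name>", <flags>, <coherence>, [(<other faction>, <value>)], [<ranks>], <color>)
("9", "Example Faction", 0, 0.5, [("outlaws", -0.3)], [], 0x446688),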
| 48.088235
| 116
| 0.545566
|
aa4c074f87300d5bfab6688794b519d2b8710174
| 241
|
py
|
Python
|
backend/urls.py
|
sauravsharmaz/Quote_app_DRF
|
3a634a7be95d1c812d69c98590963cec946d24bc
|
[
"MIT"
] | 1
|
2021-09-03T15:44:27.000Z
|
2021-09-03T15:44:27.000Z
|
backend/urls.py
|
sauravsharmaz/Quote_app_DRF
|
3a634a7be95d1c812d69c98590963cec946d24bc
|
[
"MIT"
] | null | null | null |
backend/urls.py
|
sauravsharmaz/Quote_app_DRF
|
3a634a7be95d1c812d69c98590963cec946d24bc
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.urls import path
from django.urls.conf import include
urlpatterns = [
path('admin/', admin.site.urls),
    path('api/', include('API.urls')),
    path('scrapper/', include('scrapper.urls')),
]
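The include() calls above expect each app to define its own urlpatterns. A hypothetical API/urls.py matching that expectation could look like the following; the view class is a placeholder, not part of the original project.

# API/urls.py (hypothetical)
from django.urls import path
from . import views

urlpatterns = [
    path('quotes/', views.QuoteListView.as_view(), name='quote-list'),
]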
| 24.1
| 47
| 0.697095
|
b749aba6260cd374342434aa517543a36b619616
| 9,452
|
py
|
Python
|
nova/virt/hyperv/volumeops.py
|
bopopescu/nova-40
|
d8d5e4c4e30d0e605001ebab9f19005d2ea96f99
|
[
"Apache-2.0"
] | null | null | null |
nova/virt/hyperv/volumeops.py
|
bopopescu/nova-40
|
d8d5e4c4e30d0e605001ebab9f19005d2ea96f99
|
[
"Apache-2.0"
] | null | null | null |
nova/virt/hyperv/volumeops.py
|
bopopescu/nova-40
|
d8d5e4c4e30d0e605001ebab9f19005d2ea96f99
|
[
"Apache-2.0"
] | 1
|
2020-07-24T09:44:17.000Z
|
2020-07-24T09:44:17.000Z
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Pedro Navarro Perez
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for Storage-related functions (attach, detach, etc).
"""
import time
from oslo.config import cfg
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.virt import driver
from nova.virt.hyperv import utilsfactory
from nova.virt.hyperv import vmutils
LOG = logging.getLogger(__name__)
hyper_volumeops_opts = [
cfg.IntOpt('volume_attach_retry_count',
default=10,
help='The number of times to retry to attach a volume'),
cfg.IntOpt('volume_attach_retry_interval',
default=5,
help='Interval between volume attachment attempts, in seconds'),
]
CONF = cfg.CONF
CONF.register_opts(hyper_volumeops_opts, 'hyperv')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
class VolumeOps(object):
"""
Management class for Volume-related tasks
"""
def __init__(self):
self._hostutils = utilsfactory.get_hostutils()
self._vmutils = utilsfactory.get_vmutils()
self._volutils = utilsfactory.get_volumeutils()
self._initiator = None
self._default_root_device = 'vda'
def ebs_root_in_block_devices(self, block_device_info):
return self._volutils.volume_in_mapping(self._default_root_device,
block_device_info)
def attach_volumes(self, block_device_info, instance_name, ebs_root):
mapping = driver.block_device_info_get_mapping(block_device_info)
if ebs_root:
self.attach_volume(mapping[0]['connection_info'],
instance_name, True)
mapping = mapping[1:]
for vol in mapping:
self.attach_volume(vol['connection_info'], instance_name)
def login_storage_targets(self, block_device_info):
mapping = driver.block_device_info_get_mapping(block_device_info)
for vol in mapping:
self._login_storage_target(vol['connection_info'])
def _login_storage_target(self, connection_info):
data = connection_info['data']
target_lun = data['target_lun']
target_iqn = data['target_iqn']
target_portal = data['target_portal']
# Check if we already logged in
if self._volutils.get_device_number_for_target(target_iqn, target_lun):
LOG.debug(_("Already logged in on storage target. No need to "
"login. Portal: %(target_portal)s, "
"IQN: %(target_iqn)s, LUN: %(target_lun)s"),
{'target_portal': target_portal,
'target_iqn': target_iqn, 'target_lun': target_lun})
else:
LOG.debug(_("Logging in on storage target. Portal: "
"%(target_portal)s, IQN: %(target_iqn)s, "
"LUN: %(target_lun)s"),
{'target_portal': target_portal,
'target_iqn': target_iqn, 'target_lun': target_lun})
self._volutils.login_storage_target(target_lun, target_iqn,
target_portal)
# Wait for the target to be mounted
self._get_mounted_disk_from_lun(target_iqn, target_lun, True)
def attach_volume(self, connection_info, instance_name, ebs_root=False):
"""
Attach a volume to the SCSI controller or to the IDE controller if
ebs_root is True
"""
LOG.debug(_("Attach_volume: %(connection_info)s to %(instance_name)s"),
{'connection_info': connection_info,
'instance_name': instance_name})
        try:
            data = connection_info['data']
            target_lun = data['target_lun']
            target_iqn = data['target_iqn']
            self._login_storage_target(connection_info)
#Getting the mounted disk
mounted_disk_path = self._get_mounted_disk_from_lun(target_iqn,
target_lun)
if ebs_root:
#Find the IDE controller for the vm.
ctrller_path = self._vmutils.get_vm_ide_controller(
instance_name, 0)
#Attaching to the first slot
slot = 0
else:
#Find the SCSI controller for the vm
ctrller_path = self._vmutils.get_vm_scsi_controller(
instance_name)
slot = self._get_free_controller_slot(ctrller_path)
self._vmutils.attach_volume_to_controller(instance_name,
ctrller_path,
slot,
mounted_disk_path)
except Exception as exn:
LOG.exception(_('Attach volume failed: %s'), exn)
self._volutils.logout_storage_target(target_iqn)
raise vmutils.HyperVException(_('Unable to attach volume '
'to instance %s') % instance_name)
def _get_free_controller_slot(self, scsi_controller_path):
#Slots starts from 0, so the length of the disks gives us the free slot
return self._vmutils.get_attached_disks_count(scsi_controller_path)
def detach_volumes(self, block_device_info, instance_name):
mapping = driver.block_device_info_get_mapping(block_device_info)
for vol in mapping:
self.detach_volume(vol['connection_info'], instance_name)
def logout_storage_target(self, target_iqn):
LOG.debug(_("Logging off storage target %s"), target_iqn)
self._volutils.logout_storage_target(target_iqn)
def detach_volume(self, connection_info, instance_name):
"""Detach a volume to the SCSI controller."""
LOG.debug(_("Detach_volume: %(connection_info)s "
"from %(instance_name)s"),
{'connection_info': connection_info,
'instance_name': instance_name})
data = connection_info['data']
target_lun = data['target_lun']
target_iqn = data['target_iqn']
#Getting the mounted disk
mounted_disk_path = self._get_mounted_disk_from_lun(target_iqn,
target_lun)
LOG.debug(_("Detaching physical disk from instance: %s"),
mounted_disk_path)
self._vmutils.detach_vm_disk(instance_name, mounted_disk_path)
self.logout_storage_target(target_iqn)
def get_volume_connector(self, instance):
if not self._initiator:
self._initiator = self._volutils.get_iscsi_initiator()
if not self._initiator:
LOG.warn(_('Could not determine iscsi initiator name'),
instance=instance)
return {
'ip': CONF.my_ip,
'host': CONF.host,
'initiator': self._initiator,
}
def _get_mounted_disk_from_lun(self, target_iqn, target_lun,
wait_for_device=False):
device_number = self._volutils.get_device_number_for_target(target_iqn,
target_lun)
if device_number is None:
raise exception.NotFound(_('Unable to find a mounted disk for '
'target_iqn: %s') % target_iqn)
LOG.debug(_('Device number: %(device_number)s, '
'target lun: %(target_lun)s'),
{'device_number': device_number, 'target_lun': target_lun})
#Finding Mounted disk drive
for i in range(0, CONF.hyperv.volume_attach_retry_count):
mounted_disk_path = self._vmutils.get_mounted_disk_by_drive_number(
device_number)
if mounted_disk_path or not wait_for_device:
break
time.sleep(CONF.hyperv.volume_attach_retry_interval)
if not mounted_disk_path:
raise exception.NotFound(_('Unable to find a mounted disk '
'for target_iqn: %s') % target_iqn)
return mounted_disk_path
def disconnect_volume(self, physical_drive_path):
#Get the session_id of the ISCSI connection
session_id = self._volutils.get_session_id_from_mounted_disk(
physical_drive_path)
#Logging out the target
self._volutils.execute_log_out(session_id)
def get_target_from_disk_path(self, physical_drive_path):
return self._volutils.get_target_from_disk_path(physical_drive_path)
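Based solely on the keys read inside attach_volume and detach_volume above, the connection_info argument is expected to carry at least the shape sketched below; the concrete values are placeholders, not taken from a real deployment.

connection_info = {
    'data': {
        'target_iqn': 'iqn.2010-10.org.openstack:volume-0001',  # placeholder IQN
        'target_lun': 1,
        'target_portal': '192.0.2.10:3260',  # documentation-range address
    },
}
# volume_ops = VolumeOps()
# volume_ops.attach_volume(connection_info, 'instance-00000001', ebs_root=False)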
| 42.38565
| 79
| 0.614473
|