Dataset columns:
- hexsha: string (length 40)
- size: int64 (4 to 1.02M)
- ext: string (8 classes)
- lang: string (1 class)
- max_stars_repo_path: string (length 4 to 209)
- max_stars_repo_name: string (length 5 to 121)
- max_stars_repo_head_hexsha: string (length 40)
- max_stars_repo_licenses: list (length 1 to 10)
- max_stars_count: int64 (1 to 191k, nullable)
- max_stars_repo_stars_event_min_datetime: string (length 24, nullable)
- max_stars_repo_stars_event_max_datetime: string (length 24, nullable)
- max_issues_repo_path: string (length 4 to 209)
- max_issues_repo_name: string (length 5 to 121)
- max_issues_repo_head_hexsha: string (length 40)
- max_issues_repo_licenses: list (length 1 to 10)
- max_issues_count: int64 (1 to 67k, nullable)
- max_issues_repo_issues_event_min_datetime: string (length 24, nullable)
- max_issues_repo_issues_event_max_datetime: string (length 24, nullable)
- max_forks_repo_path: string (length 4 to 209)
- max_forks_repo_name: string (length 5 to 121)
- max_forks_repo_head_hexsha: string (length 40)
- max_forks_repo_licenses: list (length 1 to 10)
- max_forks_count: int64 (1 to 105k, nullable)
- max_forks_repo_forks_event_min_datetime: string (length 24, nullable)
- max_forks_repo_forks_event_max_datetime: string (length 24, nullable)
- content: string (length 4 to 1.02M)
- avg_line_length: float64 (1.07 to 66.1k)
- max_line_length: int64 (4 to 266k)
- alphanum_fraction: float64 (0.01 to 1)
hexsha: bb734538ec2a9233cef1f9e991b5131a0b62c0dc | size: 10,301 | ext: py | lang: Python
max_stars: path=flink-ai-flow/examples/python_examples/sklearn_stream_train_batch_predict/python_codes/stream_train_batch_predict_executor.py | repo=shanshanpt/flink-ai-extended @ c9f4a980ac229188a2bc09558952f7e0085bda70 | licenses=["Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0", "BSD-3-Clause"] | count=1 | events=2021-06-03T05:37:21.000Z to 2021-06-03T05:37:21.000Z
max_issues: path=flink-ai-flow/examples/python_examples/sklearn_stream_train_batch_predict/python_codes/stream_train_batch_predict_executor.py | repo=sentimentist/flink-ai-extended @ 689d000f2db8919fd80e0725a1609918ca4a26f4 | licenses=["Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0", "BSD-3-Clause"] | count=null | events=null
max_forks: path=flink-ai-flow/examples/python_examples/sklearn_stream_train_batch_predict/python_codes/stream_train_batch_predict_executor.py | repo=sentimentist/flink-ai-extended @ 689d000f2db8919fd80e0725a1609918ca4a26f4 | licenses=["Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0", "BSD-3-Clause"] | count=null | events=null
content:
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import threading
import time
import shutil
from typing import List
import numpy as np
from joblib import dump, load
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import StandardScaler
from sklearn.utils import check_random_state
from streamz import Stream
import ai_flow as af
from ai_flow.model_center.entity.model_version_stage import ModelVersionStage
from python_ai_flow import FunctionContext, Executor, ExampleExecutor
from ai_flow.common.path_util import get_file_dir
def preprocess_data(x_data, y_data=None):
random_state = check_random_state(0)
permutation = random_state.permutation(x_data.shape[0])
if y_data is None:
return x_data[permutation]
else:
return x_data[permutation], y_data[permutation]
class ExampleTrainThread(threading.Thread):
"""Create stream training data"""
def __init__(self, stream_uri):
super().__init__()
self.stream_uri = stream_uri
self.stream = Stream()
def run(self) -> None:
for i in range(0, 5):
with np.load(self.stream_uri) as f:
x_train, y_train = f['x_train'], f['y_train']
self.stream.emit((x_train, y_train))
time.sleep(30)
class TrainExampleReader(ExampleExecutor):
def __init__(self):
super().__init__()
self.thread = None
def setup(self, function_context: FunctionContext):
stream_uri = function_context.node_spec.example_meta.stream_uri
self.thread = ExampleTrainThread(stream_uri)
self.thread.start()
def execute(self, function_context: FunctionContext, input_list: List) -> List:
return [self.thread.stream]
class TrainExampleTransformer(Executor):
def execute(self, function_context: FunctionContext, input_list: List) -> List:
def transform(df):
x_train, y_train = preprocess_data(df[0], df[1])
x_train = x_train.reshape((x_train.shape[0], -1))
return StandardScaler().fit_transform(x_train), y_train
return [input_list[0].map(transform)]
class ModelTrainer(Executor):
def execute(self, function_context: FunctionContext, input_list: List) -> List:
print("### {}".format(self.__class__.__name__))
def train(df):
# https://scikit-learn.org/stable/auto_examples/linear_model/plot_sparse_logistic_regression_mnist.html
clf = LogisticRegression(C=50. / 5000, penalty='l1', solver='saga', tol=0.1)
x_train, y_train = df[0], df[1]
clf.fit(x_train, y_train)
model_path = get_file_dir(__file__) + '/saved_model'
if not os.path.exists(model_path):
os.makedirs(model_path)
model_timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime())
model_path = model_path + '/' + model_timestamp
dump(clf, model_path)
model = function_context.node_spec.output_model
print(model.name)
print(model_timestamp)
# When registering a model, corresponding type of event will be sent to downstream job as well.
af.register_model_version(model=model, model_path=model_path, current_stage=ModelVersionStage.GENERATED)
return df
def sink(df):
pass
input_list[0].map(train).sink(sink)
return []
class ValidateExampleReader(ExampleExecutor):
def execute(self, function_context: FunctionContext, input_list: List) -> List:
with np.load(function_context.node_spec.example_meta.stream_uri) as f:
x_test, y_test = f['x_test'], f['y_test']
return [[x_test, y_test]]
class ValidateTransformer(Executor):
def execute(self, function_context: FunctionContext, input_list: List) -> List:
x_test, y_test = preprocess_data(input_list[0][0], input_list[0][1])
x_test = x_test.reshape((x_test.shape[0], -1))
return [[StandardScaler().fit_transform(x_test), y_test]]
class ModelValidator(Executor):
def __init__(self, artifact_name):
super().__init__()
self.artifact_name = artifact_name
self.model_name = None
self.model_path = None
self.model_version = None
self.model_meta = None
def setup(self, function_context: FunctionContext):
self.model_name = function_context.node_spec.model.name
self.model_meta = af.get_latest_generated_model_version(self.model_name)
self.model_path = self.model_meta.model_path
self.model_version = self.model_meta.version
def execute(self, function_context: FunctionContext, input_list: List) -> List:
deployed_model_version = af.get_deployed_model_version(model_name=self.model_name)
x_validate, y_validate = input_list[0][0], input_list[0][1]
clf = load(self.model_path)
scores = cross_val_score(clf, x_validate, y_validate, scoring='precision_macro')
stream_uri = af.get_artifact_by_name(self.artifact_name).stream_uri
if deployed_model_version is None:
with open(stream_uri, 'a') as f:
f.write('generated model version[{}] scores: {}\n'.format(self.model_version, np.mean(scores)))
af.update_model_version(model_name=self.model_name,
model_version=self.model_version,
current_stage=ModelVersionStage.VALIDATED)
else:
deployed_clf = load(deployed_model_version.model_path)
deployed_scores = cross_val_score(deployed_clf, x_validate, y_validate, scoring='precision_macro')
f = open(stream_uri, 'a')
f.write('current model version[{}] scores: {}\n'.format(deployed_model_version.version,
np.mean(deployed_scores)))
f.write('new generated model version[{}] scores: {}\n'.format(self.model_version, np.mean(scores)))
if np.mean(scores) > np.mean(deployed_scores):
# Make latest generated model to be validated
af.update_model_version(model_name=self.model_name,
model_version=self.model_version,
current_stage=ModelVersionStage.VALIDATED)
f.write('new generated model version[{}] pass validation.\n'.format(self.model_version))
else:
f.write('new generated model version[{}] fail validation.\n'.format(self.model_version))
f.close()
return []
class ModelPusher(Executor):
def __init__(self, artifact):
super().__init__()
self.artifact = artifact
def execute(self, function_context: FunctionContext, input_list: List) -> List:
model_name = function_context.node_spec.model.name
validated_model = af.get_latest_validated_model_version(model_name)
# Deprecate deployed model
deployed_model_version = af.get_deployed_model_version(model_name)
if deployed_model_version is not None:
af.update_model_version(model_name=model_name,
model_version=deployed_model_version.version,
current_stage=ModelVersionStage.DEPRECATED)
af.update_model_version(model_name=model_name,
model_version=validated_model.version,
current_stage=ModelVersionStage.DEPLOYED)
# Copy deployed model to deploy_model_dir
deployed_model_dir = af.get_artifact_by_name(self.artifact).stream_uri
if not os.path.exists(deployed_model_dir):
os.makedirs(deployed_model_dir)
for file in os.listdir(deployed_model_dir):
file_path = os.path.join(deployed_model_dir, file)
if os.path.isfile(file_path):
os.remove(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path, True)
deployed_model_version = af.get_deployed_model_version(model_name=model_name)
shutil.copy(deployed_model_version.model_path, deployed_model_dir)
return []
class PredictExampleReader(ExampleExecutor):
def execute(self, function_context: FunctionContext, input_list: List) -> List:
with np.load(function_context.node_spec.example_meta.batch_uri) as f:
x_test = f['x_test']
return [[x_test]]
class PredictTransformer(Executor):
def execute(self, function_context: FunctionContext, input_list: List) -> List:
x_test = preprocess_data(input_list[0][0], None)
x_test = x_test.reshape((x_test.shape[0], -1))
return [[StandardScaler().fit_transform(x_test)]]
class ModelPredictor(Executor):
def __init__(self):
super().__init__()
def execute(self, function_context: FunctionContext, input_list: List) -> List:
model_name = function_context.node_spec.model.name
while af.get_deployed_model_version(model_name) is None:
time.sleep(2)
model_meta = af.get_deployed_model_version(model_name)
clf = load(model_meta.model_path)
return [clf.predict(input_list[0][0])]
class ExampleWriter(ExampleExecutor):
def execute(self, function_context: FunctionContext, input_list: List) -> List:
np.savetxt(function_context.node_spec.example_meta.batch_uri, input_list[0])
return []
avg_line_length: 41.704453 | max_line_length: 116 | alphanum_fraction: 0.67275
hexsha: be69f5d54594307d796b3ab6d29edb9ae1e3d037 | size: 27 | ext: py | lang: Python
max_stars: path=test_rational/__init__.py | repo=daniel-dinu/rational-python @ 917ad8ff8b319a66a676aa84227ff76770f6277b | licenses=["MIT"] | count=null | events=null
max_issues: path=test_rational/__init__.py | repo=daniel-dinu/rational-python @ 917ad8ff8b319a66a676aa84227ff76770f6277b | licenses=["MIT"] | count=null | events=null
max_forks: path=test_rational/__init__.py | repo=daniel-dinu/rational-python @ 917ad8ff8b319a66a676aa84227ff76770f6277b | licenses=["MIT"] | count=null | events=null
content:
__author__ = 'Daniel Dinu'
avg_line_length: 13.5 | max_line_length: 26 | alphanum_fraction: 0.740741
hexsha: 5ddf37a58a616a620dbe66ad503c3548bd0d6ca8 | size: 259 | ext: py | lang: Python
max_stars: path=src/python_autocite/formatter/__init__.py | repo=thenaterhood/python-autocite @ e4930ff8240ed97733e0708af6946b1d05ca820e | licenses=["MIT"] | count=26 | events=2017-06-21T01:12:59.000Z to 2022-02-13T03:06:35.000Z
max_issues: path=src/python_autocite/formatter/__init__.py | repo=nutellaweera/python-autocite @ 2009cc8362a7d7a2c0e28ace2d9b36a2e713109a | licenses=["MIT"] | count=6 | events=2020-02-07T17:18:14.000Z to 2021-07-10T02:35:05.000Z
max_forks: path=src/python_autocite/formatter/__init__.py | repo=nutellaweera/python-autocite @ 2009cc8362a7d7a2c0e28ace2d9b36a2e713109a | licenses=["MIT"] | count=8 | events=2019-12-15T16:42:21.000Z to 2021-12-19T10:27:33.000Z
content:
class CitationFormatter(object):
AUTHOR_UNKNOWN = "[[[AUTHORS]]]"
PUBDATE_UNKNOWN = "[[[PUBLICATION DATE]]]"
ACCESSDATE_UNKNOWN = "[[[ACCESS DATE]]]"
    def format(self, citation):
        raise NotImplementedError("Citation format not implemented")
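# --- Added illustrative sketch (not part of the original package) ---
# A minimal concrete formatter showing how the placeholder constants above
# might be used; the `citation` attributes (authors, pub_date, access_date)
# are assumptions made for this sketch only.
class PlainTextCitationFormatter(CitationFormatter):
    def format(self, citation):
        authors = getattr(citation, 'authors', None) or self.AUTHOR_UNKNOWN
        pub_date = getattr(citation, 'pub_date', None) or self.PUBDATE_UNKNOWN
        access_date = getattr(citation, 'access_date', None) or self.ACCESSDATE_UNKNOWN
        return "{} ({}). Accessed {}.".format(authors, pub_date, access_date)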
avg_line_length: 25.9 | max_line_length: 67 | alphanum_fraction: 0.675676
hexsha: 8cd8dc2ab24a78fe237656a48f3720257150a62a | size: 1,919 | ext: py | lang: Python
max_stars: path=wal_e/worker/s3/s3_deleter.py | repo=paalkr/wal-e @ ff6102a1d19cf0f683c7232f618dbe8d14e6e017 | licenses=["BSD-3-Clause"] | count=2,739 | events=2015-01-05T03:57:02.000Z to 2022-03-22T10:51:38.000Z
max_issues: path=wal_e/worker/s3/s3_deleter.py | repo=0xgpapad/wal-e @ f5b3e790fe10daa098b8cbf01d836c4885dc13c7 | licenses=["BSD-3-Clause"] | count=264 | events=2015-01-13T01:07:19.000Z to 2022-02-08T00:56:14.000Z
max_forks: path=wal_e/worker/s3/s3_deleter.py | repo=0xgpapad/wal-e @ f5b3e790fe10daa098b8cbf01d836c4885dc13c7 | licenses=["BSD-3-Clause"] | count=245 | events=2015-01-11T15:13:42.000Z to 2022-02-10T10:58:09.000Z
content:
import csv
import sys
from wal_e import exception
from wal_e import retries
from wal_e.worker.base import _Deleter
class Deleter(_Deleter):
@retries.retry(retries.critical_stop_exception_processor)
def _delete_batch(self, page):
# Check that all keys are in the same bucket; this code is not
# designed to deal with fast deletion of keys from multiple
# buckets at the same time, and not checking this could result
# in deleting similarly named keys from the wrong bucket.
#
# In wal-e's use, homogeneity of the bucket retaining the keys
# is presumed to be always the case.
bucket_name = page[0].bucket.name
for key in page:
if key.bucket.name != bucket_name:
raise exception.UserCritical(
msg='submitted keys are not part of the same bucket',
detail=('The clashing bucket names are {0} and {1}.'
.format(key.bucket.name, bucket_name)),
hint='This should be reported as a bug.')
bucket = page[0].bucket
result = bucket.delete_keys([key.name for key in page])
if result and hasattr(result, 'errors'):
if len(result.errors) > 0:
w_csv = csv.writer(sys.stdout, dialect='excel-tab')
w_csv.writerow(('key', 'error_code', 'error_message'))
for error in result.errors:
w_csv.writerow((error.key, error.code,
error.message.replace('\n', ' ')))
sys.stdout.flush()
raise exception.UserCritical(
msg='Some keys were not deleted',
detail=('Failed keys: first {0}, last {1}, {2} total'
.format(result.errors[0], result.errors[-1],
len(result.errors))))
avg_line_length: 42.644444 | max_line_length: 73 | alphanum_fraction: 0.56592
hexsha: f0990c49497509a33d496692fbe2c8607d885385 | size: 1,569 | ext: py | lang: Python
max_stars: path=develop/mongodb.py | repo=adamwang0705/cross_media_affect_analysis @ ff156e42914775231e20f0f5e2e2026d3a06791c | licenses=["MIT"] | count=1 | events=2017-10-17T15:12:43.000Z to 2017-10-17T15:12:43.000Z
max_issues: path=develop/mongodb.py | repo=adamwang0705/cross_media_affect_analysis @ ff156e42914775231e20f0f5e2e2026d3a06791c | licenses=["MIT"] | count=null | events=null
max_forks: path=develop/mongodb.py | repo=adamwang0705/cross_media_affect_analysis @ ff156e42914775231e20f0f5e2e2026d3a06791c | licenses=["MIT"] | count=null | events=null
content:
"""
Get connection to (local) MongoDB database
"""
import pymongo
from pymongo import MongoClient
from pymongo import errors
def initialize(db_name: object, collection_name: object, host: object = 'localhost', port: object = 27017) -> object:
"""
Initialize connection to MongoDB database and get collection object
:param db_name:
:param collection_name:
:param host:
:param port:
:return: collection obj
"""
try:
client = MongoClient(host, port)
db = client[db_name]
collection = db[collection_name]
print('MongoDB on {}:{}/{}.{} connected successfully!'.format(host, port, db_name, collection_name))
return collection
except pymongo.errors.ConnectionFailure as e:
print('MongoDB on {}:{} connection failed: {}'.format(host, port, e))
def initialize_db(db_name: object, host: object = 'localhost', port: object = 27017) -> object:
"""
Initialize connection to MongoDB database and get database object
:param db_name:
:param host:
:param port:
:return: db obj
"""
try:
client = MongoClient(host, port)
db = client[db_name]
print('MongoDB on {}:{}/{} connected successfully!'.format(host, port, db_name))
return db
except pymongo.errors.ConnectionFailure as e:
print('MongoDB on {}:{} connection failed: {}'.format(host, port, e))
def test_connection():
print('Test MongoDB connection successful!')
if __name__ == '__main__':
test_connection()
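# --- Added usage sketch (illustration only, not part of the original module) ---
# Shows how initialize() might be called; it assumes a MongoDB server on
# localhost:27017, and the database/collection names below are hypothetical.
#
#     collection = initialize('example_db', 'example_collection')
#     if collection is not None:
#         collection.insert_one({'probe': 'connection check'})
#         print(collection.count_documents({}))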
avg_line_length: 29.055556 | max_line_length: 117 | alphanum_fraction: 0.650096
hexsha: 9460cf356a142e5cd9d3470736459552e0124aa0 | size: 47 | ext: py | lang: Python
max_stars: path=comment/__init__.py | repo=cxq1/c @ 52507017f676b4ebed561581ced0d4edf15cdc70 | licenses=["MIT"] | count=1 | events=2019-03-22T05:54:24.000Z to 2019-03-22T05:54:24.000Z
max_issues: path=comment/__init__.py | repo=cxq1/c @ 52507017f676b4ebed561581ced0d4edf15cdc70 | licenses=["MIT"] | count=4 | events=2021-04-08T18:40:39.000Z to 2021-06-10T17:40:34.000Z
max_forks: path=comment/__init__.py | repo=cxq1/c @ 52507017f676b4ebed561581ced0d4edf15cdc70 | licenses=["MIT"] | count=null | events=null
content:
default_app_config='comment.apps.CommentConfig'
avg_line_length: 47 | max_line_length: 47 | alphanum_fraction: 0.893617
hexsha: 4cc270efbfc1330b86113d0459c56b44aaf577b6 | size: 12,955 | ext: py | lang: Python
max_stars: path=mfi_customization/mfi/doctype/task.py | repo=anuradha-88/mfi_customization @ eb19ed43d0178b461f1d9914d2f7b6b55c9d030c | licenses=["MIT"] | count=null | events=null
max_issues: path=mfi_customization/mfi/doctype/task.py | repo=anuradha-88/mfi_customization @ eb19ed43d0178b461f1d9914d2f7b6b55c9d030c | licenses=["MIT"] | count=null | events=null
max_forks: path=mfi_customization/mfi/doctype/task.py | repo=anuradha-88/mfi_customization @ eb19ed43d0178b461f1d9914d2f7b6b55c9d030c | licenses=["MIT"] | count=null | events=null
content:
# -*- coding: utf-8 -*-
# Copyright (c) 2020, bizmap technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils.data import getdate,today
from frappe.model.mapper import get_mapped_doc
from frappe.permissions import add_user_permission
def validate(doc,method):
# machine_reading=""
for d in doc.get("current_reading"):
# machine_reading=d.machine_reading
if d.idx>1:
frappe.throw("More than one row not allowed")
last_reading=today()
if doc.asset and len(doc.get("last_readings"))==0:
doc.set("last_readings", [])
fltr={"project":doc.project,"asset":doc.asset,"reading_date":("<=",last_reading)}
# if machine_reading:
# fltr.update({"name":("!=",machine_reading)})
for d in frappe.get_all("Machine Reading",filters=fltr,fields=["name","reading_date","asset","black_and_white_reading","colour_reading","total","machine_type"],limit=1,order_by="reading_date desc,name desc"):
doc.append("last_readings", {
"date" : d.get('reading_date'),
"type" : d.get('machine_type'),
"asset":d.get('asset'),
"reading":d.get('black_and_white_reading'),
"reading_2":d.get('colour_reading'),
"total":( int(d.get('black_and_white_reading') or 0) + int(d.get('colour_reading') or 0))
})
set_field_values(doc)
if doc.get('__islocal'):
for d in frappe.get_all("Task",{"issue":doc.issue}):
frappe.throw("Task <b>{0}</b> Already Exist Against This Issue".format(doc.name))
else:
create_user_permission(doc)
def after_insert(doc,method):
if doc.get('issue'):
frappe.db.set_value('Issue',doc.get('issue'),'status','Assigned')
if doc.failure_date_and_time and doc.issue:
doc.failure_date_and_time=frappe.db.get_value("Issue",doc.issue,"failure_date_and_time")
if doc.issue:
doc.description=frappe.db.get_value("Issue",doc.issue,"description")
create_user_permission(doc)
# docperm = frappe.new_doc("DocShare")
# docperm.update({
# "user": doc.completed_by,
# "share_doctype": 'Task',
# "share_name": doc.name ,
# "read": 1,
# "write": 1
# })
# docperm.save(ignore_permissions=True)
def on_change(doc,method):
if doc.get("issue"):
set_reading_from_task_to_issue(doc)
validate_reading(doc)
existed_mr=[]
for d in doc.get('current_reading'):
existed_mr = frappe.get_all("Machine Reading",{"task":doc.name,"project":doc.project, 'row_id':d.get('name')}, 'name')
if existed_mr :
update_machine_reading(doc, existed_mr)
else:
create_machine_reading(doc)
if doc.issue and doc.status != 'Open':
frappe.db.set_value("Issue",doc.issue,'status',doc.status)
if doc.status == 'Completed':
validate_if_material_request_is_not_submitted(doc)
attachment_validation(doc)
issue=frappe.get_doc("Issue",doc.issue)
issue.status="Task Completed"
issue.closing_date_time=doc.completion_date_time
issue.set("task_attachments",[])
for d in doc.get("attachments"):
issue.append("task_attachments",{
"attach":d.attach
})
issue.save()
elif doc.status=="Working" and doc.attended_date_time:
frappe.db.set_value("Issue",doc.issue,'first_responded_on',doc.attended_date_time)
def after_delete(doc,method):
for t in frappe.get_all('Asset Repair',filters={'task':doc.name}):
frappe.delete_doc('Asset Repair',t.name)
def set_field_values(doc):
if doc.get("issue"):
issue = frappe.get_doc("Issue",{"name":doc.get("issue")})
if doc.get("completed_by"):
issue.assign_to = doc.get("completed_by")
if doc.get("assign_date"):
issue.assign_date = doc.get("assign_date")
issue.save()
@frappe.whitelist()
def make_material_req(source_name, target_doc=None):
def set_missing_values(source, target):
target.company=frappe.db.get_value("Employee",{"user_id":frappe.session.user},"company")
doclist = get_mapped_doc("Task", source_name, {
"Task": {
"doctype": "Material Request",
"name":"custom_task"
}
}, target_doc,set_missing_values )
return doclist
@frappe.whitelist()
def make_asset_movement(source_name, target_doc=None, ignore_permissions=False):
def set_missing_values(source, target):
customer = frappe.db.get_value("Task", source_name,'customer')
company = frappe.db.get_value("Project",{"customer":customer},'company')
target.purpose = "Transfer"
target.company = company
target.task=source_name
doclist = get_mapped_doc("Task", source_name, {
"Task": {
"doctype": "Asset Movement",
}
}, target_doc ,set_missing_values, ignore_permissions=ignore_permissions)
return doclist
@frappe.whitelist()
def set_readings(project,asset,target_doc=None):
reading_list=[]
for d in frappe.get_all('Asset Readings',filters={'parent':project,'asset':asset,'parenttype':'Project'},fields=['date','type','asset','reading','reading_2']):
reading_list.append({
'date':d.date,
'type':d.type,
'asset':d.asset,
'black_white':d.get("reading"),
'colour':d.get("reading_2")
})
return reading_list
def set_item_from_material_req(doc,method):
if doc.get('task_') and doc.status=="Issued":
task=frappe.get_doc('Task',doc.get('task_'))
items=[]
for t in task.get('refilled__items'):
items.append(t.item)
for d in doc.get('items'):
if d.get('item_code') not in items:
task.append("refilled__items", {
"item": d.get('item_code'),
"warehouse": d.get('warehouse'),
"qty": d.get('qty')
})
task.material_request=doc.name
task.save()
@frappe.whitelist()
def get_tech(doctype, txt, searchfield, start, page_len, filters):
tch_lst = []
fltr = {}
dct = {}
if txt:
fltr.update({"full_name": ("like", "{0}%".format(txt))})
for i in frappe.get_roles(filters.get("user")):
for ss in frappe.db.get_all('Support Setting Table',{'back_office_team_role':i},['technician_role','back_office_team_role']):
for usr in frappe.get_all('User',fltr,['name','full_name']):
if ss.get('technician_role') in frappe.get_roles(usr.get("name")) and not usr.get("name") == 'Administrator':
if usr.name not in tch_lst:
tch_lst.append(usr.name)
dct.update({usr.full_name:usr.name})
return [(y,d) for d,y in dct.items()]
@frappe.whitelist()
def check_material_request_status(task):
flag = False
for i in frappe.get_all('Material Request',{'task_':task},['status']):
if i.get('status') not in ['Stopped','Cancelled','Issued']:
flag = True
return flag
@frappe.whitelist()
def get_location(doctype, txt, searchfield, start, page_len, filters):
lst = []
fltr = {}
if txt:
fltr.update({"location": ("like", "{0}%".format(txt))})
for i in frappe.get_all('Project',filters,['name']):
fltr.update({'project':i.get('name')})
for a in frappe.get_all('Asset',fltr,['location']):
if a.location not in lst:
lst.append(a.location)
return [(d,) for d in lst]
@frappe.whitelist()
def get_asset_in_task(doctype, txt, searchfield, start, page_len, filters):
cond1 = ''
cond2 = ''
cond3 = ''
if txt:
cond3 = "and name = '{0}'".format(txt)
if filters.get("customer"):
cond2+="where customer ='{0}'".format(filters.get("customer"))
if filters.get("location"):
cond1+="and location='{0}'".format(filters.get("location"))
data = frappe.db.sql("""select asset from `tabAsset Serial No`
where asset IN (select name from
`tabAsset` where docstatus = 1 {0}
and project = (select name
from `tabProject` {1} {2}))
""".format(cond1,cond2,cond3))
return data
@frappe.whitelist()
def get_serial_no_list(doctype, txt, searchfield, start, page_len, filters):
if txt:
filters.update({"name": ("like", "{0}%".format(txt))})
return frappe.get_all("Asset Serial No",filters=filters,fields = ["name"], as_list=1)
@frappe.whitelist()
def get_serial_on_cust_loc(doctype, txt, searchfield, start, page_len, filters):
fltr1 = {}
fltr2 = {}
lst = []
if filters.get('customer'):
fltr1.update({'customer':filters.get('customer')})
if filters.get('location'):
fltr2.update({'location':filters.get('location')})
if txt:
fltr2.update({'serial_no':txt})
for i in frappe.get_all('Project',fltr1,['name']):
fltr2.update({'project':i.get('name'),'docstatus':1})
for j in frappe.get_all('Asset',fltr2,['serial_no']):
if j.serial_no not in lst:
lst.append(j.serial_no)
return [(d,) for d in lst]
@frappe.whitelist()
def get_asset_serial_on_cust(doctype, txt, searchfield, start, page_len, filters):
fltr = {}
asst = {}
lst = []
if filters.get('customer'):
fltr.update({'customer':filters.get('customer')})
if txt:
asst.update({'serial_no':("like", "{0}%".format(txt))})
# asst.update()
for i in frappe.get_all('Project',fltr,['name']):
asst.update({'project':i.get('name'),'docstatus':1})
for ass in frappe.get_all('Asset',asst,['serial_no']):
if ass.serial_no not in lst:
lst.append(ass.serial_no)
return [(d,) for d in lst]
@frappe.whitelist()
def get_customer(serial_no,asset):
project = frappe.get_value('Asset',{'serial_no':serial_no},'project')
customer = frappe.db.get_value('Project',{'name':project},'customer')
name = frappe.db.get_value('Customer',{'name':customer},'name')
return name
@frappe.whitelist()
def get_asset_on_cust(doctype, txt, searchfield, start, page_len, filters):
fltr = {}
asst = {}
lst = []
if filters.get('customer'):
fltr.update({'customer':filters.get('customer')})
if txt:
asst.update({'name':("like", "{0}%".format(txt))})
# asst.update()
for i in frappe.get_all('Project',fltr,['name']):
asst.update({'project':i.get('name'),'docstatus':1})
for ass in frappe.get_all('Asset',asst,['name']):
if ass.name not in lst:
lst.append(ass.name)
return [(d,) for d in lst]
def create_machine_reading(doc):
for d in doc.get('current_reading'):
if len(frappe.get_all("Machine Reading",{"task":doc.name,"project":doc.project,"asset":d.get('asset'),"reading_date":d.get('date')}))<1:
mr=frappe.new_doc("Machine Reading")
mr.reading_date=d.get('date')
mr.asset=d.get('asset')
mr.black_and_white_reading=d.get("reading")
mr.colour_reading=d.get("reading_2")
mr.machine_type=d.get('type')
mr.total=d.get("total")
mr.project=doc.project
mr.task=doc.name
mr.row_id = d.name
mr.save()
# d.machine_reading=mr.name
def update_machine_reading(doc, existed_mr):
for d in doc.get('current_reading'):
for mr in existed_mr:
mr_doc=frappe.get_doc("Machine Reading", mr)
mr_doc.reading_date=d.get('date')
mr_doc.asset=d.get('asset')
mr_doc.black_and_white_reading=d.get("reading")
mr_doc.colour_reading=d.get("reading_2")
mr_doc.machine_type=d.get('type')
mr_doc.total=d.get("total")
mr_doc.save()
def set_reading_from_task_to_issue(doc):
issue_doc=frappe.get_doc('Issue',{'name':doc.get("issue")})
for d in doc.get('current_reading'):
if issue_doc.get("current_reading") and len(issue_doc.get("current_reading"))>0:
for isu in doc.get("current_reading"):
isu.date=d.get('date')
isu.type=d.get('type')
isu.asset=d.get('asset')
isu.reading=d.get('reading')
isu.reading_2=d.get('reading_2')
issue_doc.save()
else:
issue_doc.append("current_reading",{
"date":d.get('date'),
"type":d.get('type'),
"asset":d.get('asset'),
"reading":d.get('reading'),
"reading_2":d.get('reading_2')
})
if doc.get("asset"):
issue_doc.asset = doc.get("asset")
if doc.get("serial_no"):
issue_doc.serial_no = doc.get("serial_no")
issue_doc.save()
def validate_reading(doc):
for cur in doc.get('current_reading'):
cur.total=( int(cur.get('reading') or 0) + int(cur.get('reading_2') or 0))
for lst in doc.get('last_readings'):
lst.total=( int(lst.get('reading') or 0) + int(lst.get('reading_2') or 0))
if int(lst.total)>int(cur.total):
frappe.throw("Current Reading Must be Greater than Last Reading")
if getdate(lst.date)>getdate(cur.date):
frappe.throw("Current Reading <b>Date</b> Must be Greater than Last Reading")
def validate_if_material_request_is_not_submitted(doc):
for mr in frappe.get_all("Material Request",{"task":doc.name,"docstatus":0}):
frappe.throw("Material Request is not completed yet. Name <b>{0}</b>".format(mr.name))
def attachment_validation(doc):
if not doc.attachments or len(doc.attachments)==0:
frappe.throw("Cann't Completed Task Without Attachment")
def create_user_permission(doc):
if len(frappe.get_all("User Permission",{"allow":"Task","for_value":doc.name,"user":doc.completed_by}))==0:
for d in frappe.get_all("User Permission",{"allow":"Task","for_value":doc.name}):
frappe.delete_doc("User Permission",d.name)
add_user_permission("Task",doc.name,doc.completed_by)
for emp in frappe.get_all("Employee",{"user_id":doc.completed_by},['material_request_approver']):
if emp.material_request_approver:
for emp2 in frappe.get_all("Employee",{"name":emp.material_request_approver},['user_id']):
if emp2.user_id:
add_user_permission("Task",doc.name,emp2.user_id)
avg_line_length: 33.825065 | max_line_length: 210 | alphanum_fraction: 0.689772
hexsha: deea1da4661bc78f99e8c156b5b4ed453a09ebd4 | size: 873 | ext: py | lang: Python
max_stars: path=api/tests/python/end_points/auto_analyse/_data/xpro_names/data_xp_xul.py | repo=sumesh-aot/namex @ 53e11aed5ea550b71b7b983f1b57b65db5a06766 | licenses=["Apache-2.0"] | count=4 | events=2018-10-05T23:41:05.000Z to 2019-06-19T16:17:50.000Z
max_issues: path=api/tests/python/end_points/auto_analyse/_data/xpro_names/data_xp_xul.py | repo=sumesh-aot/namex @ 53e11aed5ea550b71b7b983f1b57b65db5a06766 | licenses=["Apache-2.0"] | count=635 | events=2018-05-31T04:12:46.000Z to 2022-03-31T18:45:42.000Z
max_forks: path=api/tests/python/end_points/auto_analyse/_data/xpro_names/data_xp_xul.py | repo=sumesh-aot/namex @ 53e11aed5ea550b71b7b983f1b57b65db5a06766 | licenses=["Apache-2.0"] | count=71 | events=2018-05-14T20:47:55.000Z to 2022-03-31T23:08:30.000Z
content:
def data_for_contains_unclassifiable_word_request_test():
return {
'name': 'MOUNTAIN VIEW FOOD BLOGGINS ULC.',
'location': 'CA',
'entity_type_cd': 'XUL',
'request_action_cd': 'NEW'
}
def data_for_contains_words_to_avoid_request_test():
return {
'name': 'MOUNTAIN VIEW VSC ULC.',
'location': 'CA',
'entity_type_cd': 'XUL',
'request_action_cd': 'NEW'
}
def data_for_name_requires_consent_request_test():
return {
'name': 'MOUNTAIN VIEW FOOD ENGINEERING ULC.',
'location': 'CA',
'entity_type_cd': 'XUL',
'request_action_cd': 'NEW'
}
def data_for_corporate_name_conflict_request_test():
return {
'name': 'MOUNTAIN VIEW FOOD GROWERS ULC.',
'location': 'CA',
'entity_type_cd': 'XUL',
'request_action_cd': 'NEW'
}
avg_line_length: 24.942857 | max_line_length: 57 | alphanum_fraction: 0.600229
hexsha: 5c514e4b5024204e00e0404aa7f37a9cbf18df17 | size: 428 | ext: py | lang: Python
max_stars: path=strings/phone_num.py | repo=ethyl2/code_challenges @ 3c9ccca1782f92728e60a515a7ca797f6d470e81 | licenses=["MIT"] | count=null | events=null
max_issues: path=strings/phone_num.py | repo=ethyl2/code_challenges @ 3c9ccca1782f92728e60a515a7ca797f6d470e81 | licenses=["MIT"] | count=null | events=null
max_forks: path=strings/phone_num.py | repo=ethyl2/code_challenges @ 3c9ccca1782f92728e60a515a7ca797f6d470e81 | licenses=["MIT"] | count=null | events=null
content:
"""
https://www.codewars.com/kata/525f50e3b73515a6db000b83
Given an array of 10 ints, return a string with format of a phone num.
Example:
create_phone_number([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]) # => returns "(123) 456-7890"
"""
def create_phone_number(n):
# return "({0}{1}{2}) {3}{4}{5}-{6}{7}{8}{9}".format(*n)
return "({}{}{}) {}{}{}-{}{}{}{}".format(*n)
print(create_phone_number([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]))
avg_line_length: 25.176471 | max_line_length: 81 | alphanum_fraction: 0.577103
hexsha: c81095ba1c5e0c8a6afbf3b195b63e20c913f79e | size: 2,355 | ext: py | lang: Python
max_stars: path=Fooling/utilities.py | repo=alexborio/Projects @ a85ad4aab370b009de14e3696e06aad92ca4859f | licenses=["MIT"] | count=null | events=null
max_issues: path=Fooling/utilities.py | repo=alexborio/Projects @ a85ad4aab370b009de14e3696e06aad92ca4859f | licenses=["MIT"] | count=null | events=null
max_forks: path=Fooling/utilities.py | repo=alexborio/Projects @ a85ad4aab370b009de14e3696e06aad92ca4859f | licenses=["MIT"] | count=null | events=null
content:
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
from layer import DenseLayer, ConvLayer, MaxPoolingLayer
import matplotlib.pyplot as plt
import os
class _Hook(tf.train.SessionRunHook):
def __init__(self, params_dict, is_training=False):
self.params_dict = params_dict
self.assign_ops = [] # list for assignment operations
        self.assignment_performed = False # indicates whether weights have been loaded
self.is_training = is_training
self.train_vars = []
"""Append assignment ops to a graph = load trained weights and biases"""
def begin(self):
if (len(self.params_dict) > 0):
graph = tf.get_default_graph()
variables = graph._collections['trainable_variables']
for variable in variables:
if variable.name in self.params_dict:
self.assign_ops.append( variable.assign(self.params_dict[variable.name]))
"""Perform assignment operations"""
def before_run(self, run_context):
if (len(self.assign_ops) > 0 and not self.assignment_performed):
for op in self.assign_ops:
run_context.session.run(op)
self.assignment_performed = True
"""Save trained params into a dictionary provided"""
def end(self, session):
if self.is_training:
variables = session.graph._collections['trainable_variables']
for variable in variables:
self.params_dict.update({variable.name: session.run(variable)})
def make_dataset(train_data, batch_sz):
n_data = len(train_data[0])
images_data = np.reshape(train_data[0], [-1, 28, 28, 1]).astype(np.float32)
nb_classes = 10
labels_data = (np.eye(nb_classes)[train_data[1]]).astype(np.float32)
dataset = tf.data.Dataset.from_tensor_slices((images_data, labels_data))
capacity = n_data // batch_sz
return dataset, capacity
def calculate_accuracy(predictions, labels):
accuracy = tf.reduce_mean(
tf.cast(
tf.equal(tf.argmax(predictions, 1), tf.argmax(labels, 1))
, dtype=tf.float32)
)
return accuracy
def corrupt_data(data, corruption_level=0.3):
data = np.array([x * (np.random.uniform(size=(28, 28)) < (1-corruption_level)) for x in data])
return data
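# --- Added usage sketch (not part of the original module) ---
# Exercises corrupt_data on a synthetic batch of 28x28 "images"; the fraction
# of zeroed pixels printed should land near the requested corruption level.
if __name__ == "__main__":
    fake_batch = np.ones((4, 28, 28), dtype=np.float32)
    noisy = corrupt_data(fake_batch, corruption_level=0.3)
    print("fraction of pixels zeroed:", 1.0 - noisy.mean())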
avg_line_length: 34.130435 | max_line_length: 98 | alphanum_fraction: 0.670064
hexsha: 6030b1bd8e41fe46c0ec553bc0deb40bc23224d4 | size: 24,350 | ext: py | lang: Python
max_stars: path=src/common/yolo/utils/general.py | repo=wenksi/pren-robo-cube-ipcv @ e2cf655a7e33aa63dae6e2b2a91abaa11d587f8f | licenses=["MIT"] | count=null | events=null
max_issues: path=src/common/yolo/utils/general.py | repo=wenksi/pren-robo-cube-ipcv @ e2cf655a7e33aa63dae6e2b2a91abaa11d587f8f | licenses=["MIT"] | count=null | events=null
max_forks: path=src/common/yolo/utils/general.py | repo=wenksi/pren-robo-cube-ipcv @ e2cf655a7e33aa63dae6e2b2a91abaa11d587f8f | licenses=["MIT"] | count=null | events=null
content:
# YOLOv5 general utils
import glob
import logging
import math
import os
import platform
import re
import subprocess
import time
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
import cv2
import numpy as np
import pkg_resources as pkg
import torch
import torchvision
# Settings
torch.set_printoptions(linewidth=320, precision=5, profile='long')
np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5
cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)
os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8)) # NumExpr max threads
def set_logging(rank=-1, verbose=True):
logging.basicConfig(
format="%(message)s",
level=logging.INFO if (verbose and rank in [-1, 0]) else logging.WARN)
def get_latest_run(search_dir='.'):
# Return path to most recent 'last.pt' in /runs (i.e. to --resume from)
last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
return max(last_list, key=os.path.getctime) if last_list else ''
def is_docker():
# Is environment a Docker container
return Path('/workspace').exists() # or Path('/.dockerenv').exists()
def is_colab():
# Is environment a Google Colab instance
try:
import google.colab
return True
except Exception as e:
return False
def emojis(str=''):
# Return platform-dependent emoji-safe version of string
return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str
def file_size(file):
# Return file size in MB
return Path(file).stat().st_size / 1e6
def check_online():
# Check internet connectivity
import socket
try:
        socket.create_connection(("1.1.1.1", 443), 5) # check host accessibility
return True
except OSError:
return False
def check_git_status():
# Recommend 'git pull' if code is out of date
print(colorstr('github: '), end='')
try:
assert Path('.git').exists(), 'skipping check (not a git repository)'
assert not is_docker(), 'skipping check (Docker image)'
assert check_online(), 'skipping check (offline)'
cmd = 'git fetch && git config --get remote.origin.url'
url = subprocess.check_output(cmd, shell=True).decode().strip().rstrip('.git') # github repo url
branch = subprocess.check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out
n = int(subprocess.check_output(f'git rev-list {branch}..origin/master --count', shell=True)) # commits behind
if n > 0:
s = f"⚠️ WARNING: code is out of date by {n} commit{'s' * (n > 1)}. " \
f"Use 'git pull' to update or 'git clone {url}' to download latest."
else:
s = f'up to date with {url} ✅'
print(emojis(s)) # emoji-safe
except Exception as e:
print(e)
def check_python(minimum='3.7.0', required=True):
# Check current python version vs. required python version
current = platform.python_version()
result = pkg.parse_version(current) >= pkg.parse_version(minimum)
if required:
assert result, f'Python {minimum} required by YOLOv5, but Python {current} is currently installed'
return result
def check_requirements(requirements='requirements.txt', exclude=()):
# Check installed dependencies meet requirements (pass *.txt file or list of packages)
prefix = colorstr('red', 'bold', 'requirements:')
check_python() # check python version
if isinstance(requirements, (str, Path)): # requirements.txt file
file = Path(requirements)
if not file.exists():
print(f"{prefix} {file.resolve()} not found, check failed.")
return
requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude]
else: # list or tuple of packages
requirements = [x for x in requirements if x not in exclude]
n = 0 # number of packages updates
for r in requirements:
try:
pkg.require(r)
except Exception as e: # DistributionNotFound or VersionConflict if requirements not met
n += 1
print(f"{prefix} {r} not found and is required by YOLOv5, attempting auto-update...")
print(subprocess.check_output(f"pip install '{r}'", shell=True).decode())
if n: # if packages updated
source = file.resolve() if 'file' in locals() else requirements
s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \
f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n"
print(emojis(s)) # emoji-safe
def check_img_size(img_size, s=32):
# Verify img_size is a multiple of stride s
new_size = make_divisible(img_size, int(s)) # ceil gs-multiple
if new_size != img_size:
print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size))
return new_size
def check_imshow():
# Check if environment supports image displays
try:
assert not is_docker(), 'cv2.imshow() is disabled in Docker environments'
assert not is_colab(), 'cv2.imshow() is disabled in Google Colab environments'
cv2.imshow('test', np.zeros((1, 1, 3)))
cv2.waitKey(1)
cv2.destroyAllWindows()
cv2.waitKey(1)
return True
except Exception as e:
print(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}')
return False
def check_file(file):
# Search for file if not found
if Path(file).is_file() or file == '':
return file
else:
files = glob.glob('./**/' + file, recursive=True) # find file
assert len(files), f'File Not Found: {file}' # assert file was found
assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}" # assert unique
return files[0] # return file
def check_dataset(dict):
# Download dataset if not found locally
val, s = dict.get('val'), dict.get('download')
if val and len(val):
val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path
if not all(x.exists() for x in val):
print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()])
if s and len(s): # download script
if s.startswith('http') and s.endswith('.zip'): # URL
f = Path(s).name # filename
print(f'Downloading {s} ...')
torch.hub.download_url_to_file(s, f)
r = os.system(f'unzip -q {f} -d ../ && rm {f}') # unzip
elif s.startswith('bash '): # bash script
print(f'Running {s} ...')
r = os.system(s)
else: # python script
r = exec(s) # return None
print('Dataset autodownload %s\n' % ('success' if r in (0, None) else 'failure')) # print result
else:
raise Exception('Dataset not found.')
def download(url, dir='.', unzip=True, delete=True, curl=False, threads=1):
# Multi-threaded file download and unzip function
def download_one(url, dir):
# Download 1 file
f = dir / Path(url).name # filename
if not f.exists():
print(f'Downloading {url} to {f}...')
if curl:
os.system(f"curl -L '{url}' -o '{f}' --retry 9 -C -") # curl download, retry and resume on fail
else:
torch.hub.download_url_to_file(url, f, progress=True) # torch download
if unzip and f.suffix in ('.zip', '.gz'):
print(f'Unzipping {f}...')
if f.suffix == '.zip':
s = f'unzip -qo {f} -d {dir} && rm {f}' # unzip -quiet -overwrite
elif f.suffix == '.gz':
s = f'tar xfz {f} --directory {f.parent}' # unzip
if delete: # delete zip file after unzip
s += f' && rm {f}'
os.system(s)
dir = Path(dir)
dir.mkdir(parents=True, exist_ok=True) # make directory
if threads > 1:
pool = ThreadPool(threads)
pool.imap(lambda x: download_one(*x), zip(url, repeat(dir))) # multi-threaded
pool.close()
pool.join()
else:
for u in tuple(url) if isinstance(url, str) else url:
download_one(u, dir)
def make_divisible(x, divisor):
# Returns x evenly divisible by divisor
return math.ceil(x / divisor) * divisor
def clean_str(s):
# Cleans a string by replacing special characters with underscore _
return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s)
def one_cycle(y1=0.0, y2=1.0, steps=100):
# lambda function for sinusoidal ramp from y1 to y2
return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1
def colorstr(*input):
# Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world')
*args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string
colors = {'black': '\033[30m', # basic colors
'red': '\033[31m',
'green': '\033[32m',
'yellow': '\033[33m',
'blue': '\033[34m',
'magenta': '\033[35m',
'cyan': '\033[36m',
'white': '\033[37m',
'bright_black': '\033[90m', # bright colors
'bright_red': '\033[91m',
'bright_green': '\033[92m',
'bright_yellow': '\033[93m',
'bright_blue': '\033[94m',
'bright_magenta': '\033[95m',
'bright_cyan': '\033[96m',
'bright_white': '\033[97m',
'end': '\033[0m', # misc
'bold': '\033[1m',
'underline': '\033[4m'}
return ''.join(colors[x] for x in args) + f'{string}' + colors['end']
def labels_to_class_weights(labels, nc=80):
# Get class weights (inverse frequency) from training labels
if labels[0] is None: # no labels loaded
return torch.Tensor()
labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO
classes = labels[:, 0].astype(np.int) # labels = [class xywh]
weights = np.bincount(classes, minlength=nc) # occurrences per class
# Prepend gridpoint count (for uCE training)
# gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum() # gridpoints per image
# weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start
weights[weights == 0] = 1 # replace empty bins with 1
weights = 1 / weights # number of targets per class
weights /= weights.sum() # normalize
return torch.from_numpy(weights)
def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
# Produces image weights based on class_weights and image contents
class_counts = np.array([np.bincount(x[:, 0].astype(np.int), minlength=nc) for x in labels])
image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
# index = random.choices(range(n), weights=image_weights, k=1) # weight image sample
return image_weights
def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)
# https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
# a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
# b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
# x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco
# x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet
x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
return x
def xyxy2xywh(x):
# Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center
y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center
y[:, 2] = x[:, 2] - x[:, 0] # width
y[:, 3] = x[:, 3] - x[:, 1] # height
return y
def xywh2xyxy(x):
# Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x
y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y
y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x
y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y
return y
def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):
# Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x
y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y
y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x
y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y
return y
def xyn2xy(x, w=640, h=640, padw=0, padh=0):
# Convert normalized segments into pixel segments, shape (n,2)
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = w * x[:, 0] + padw # top left x
y[:, 1] = h * x[:, 1] + padh # top left y
return y
def segment2box(segment, width=640, height=640):
# Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy)
x, y = segment.T # segment xy
inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height)
x, y, = x[inside], y[inside]
return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4)) # xyxy
def segments2boxes(segments):
# Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh)
boxes = []
for s in segments:
x, y = s.T # segment xy
boxes.append([x.min(), y.min(), x.max(), y.max()]) # cls, xyxy
return xyxy2xywh(np.array(boxes)) # cls, xywh
def resample_segments(segments, n=1000):
# Up-sample an (n,2) segment
for i, s in enumerate(segments):
x = np.linspace(0, len(s) - 1, n)
xp = np.arange(len(s))
segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T # segment xy
return segments
def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
# Rescale coords (xyxy) from img1_shape to img0_shape
if ratio_pad is None: # calculate from img0_shape
gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new
pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding
else:
gain = ratio_pad[0][0]
pad = ratio_pad[1]
coords[:, [0, 2]] -= pad[0] # x padding
coords[:, [1, 3]] -= pad[1] # y padding
coords[:, :4] /= gain
clip_coords(coords, img0_shape)
return coords
def clip_coords(boxes, img_shape):
# Clip bounding xyxy bounding boxes to image shape (height, width)
boxes[:, 0].clamp_(0, img_shape[1]) # x1
boxes[:, 1].clamp_(0, img_shape[0]) # y1
boxes[:, 2].clamp_(0, img_shape[1]) # x2
boxes[:, 3].clamp_(0, img_shape[0]) # y2
def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7):
# Returns the IoU of box1 to box2. box1 is 4, box2 is nx4
box2 = box2.T
# Get the coordinates of bounding boxes
if x1y1x2y2: # x1, y1, x2, y2 = box1
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
else: # transform from xywh to xyxy
b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2
# Intersection area
inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
(torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)
# Union Area
w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
union = w1 * h1 + w2 * h2 - inter + eps
iou = inter / union
if GIoU or DIoU or CIoU:
cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width
ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height
if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared
rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 +
(b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared
if DIoU:
return iou - rho2 / c2 # DIoU
elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
with torch.no_grad():
alpha = v / (v - iou + (1 + eps))
return iou - (rho2 / c2 + v * alpha) # CIoU
else: # GIoU https://arxiv.org/pdf/1902.09630.pdf
c_area = cw * ch + eps # convex area
return iou - (c_area - union) / c_area # GIoU
else:
return iou # IoU
def box_iou(box1, box2):
# https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
"""
Return intersection-over-union (Jaccard index) of boxes.
Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
Arguments:
box1 (Tensor[N, 4])
box2 (Tensor[M, 4])
Returns:
iou (Tensor[N, M]): the NxM matrix containing the pairwise
IoU values for every element in boxes1 and boxes2
"""
def box_area(box):
# box = 4xn
return (box[2] - box[0]) * (box[3] - box[1])
area1 = box_area(box1.T)
area2 = box_area(box2.T)
# inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter)
def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,
labels=()):
"""Runs Non-Maximum Suppression (NMS) on inference results
Returns:
list of detections, on (n,6) tensor per image [xyxy, conf, cls]
"""
nc = prediction.shape[2] - 5 # number of classes
xc = prediction[..., 4] > conf_thres # candidates
# Checks
assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'
assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'
# Settings
min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height
max_det = 300 # maximum number of detections per image
max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()
time_limit = 10.0 # seconds to quit after
redundant = True # require redundant detections
multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)
merge = False # use merge-NMS
t = time.time()
output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]
for xi, x in enumerate(prediction): # image index, image inference
# Apply constraints
# x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height
x = x[xc[xi]] # confidence
# Cat apriori labels if autolabelling
if labels and len(labels[xi]):
l = labels[xi]
v = torch.zeros((len(l), nc + 5), device=x.device)
v[:, :4] = l[:, 1:5] # box
v[:, 4] = 1.0 # conf
v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls
x = torch.cat((x, v), 0)
# If none remain process next image
if not x.shape[0]:
continue
# Compute conf
x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf
# Box (center x, center y, width, height) to (x1, y1, x2, y2)
box = xywh2xyxy(x[:, :4])
# Detections matrix nx6 (xyxy, conf, cls)
if multi_label:
i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
else: # best class only
conf, j = x[:, 5:].max(1, keepdim=True)
x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]
# Filter by class
if classes is not None:
x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]
# Apply finite constraint
# if not torch.isfinite(x).all():
# x = x[torch.isfinite(x).all(1)]
# Check shape
n = x.shape[0] # number of boxes
if not n: # no boxes
continue
elif n > max_nms: # excess boxes
x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence
# Batched NMS
c = x[:, 5:6] * (0 if agnostic else max_wh) # classes
boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores
i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS
if i.shape[0] > max_det: # limit detections
i = i[:max_det]
if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)
# update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix
weights = iou * scores[None] # box weights
x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes
if redundant:
i = i[iou.sum(1) > 1] # require redundancy
output[xi] = x[i]
if (time.time() - t) > time_limit:
print(f'WARNING: NMS time limit {time_limit}s exceeded')
break # time limit exceeded
return output
def save_one_box(xyxy, im, file='image.jpg', gain=1.02, pad=10, square=False, BGR=False):
# Save an image crop as {file} with crop size multiplied by {gain} and padded by {pad} pixels
xyxy = torch.tensor(xyxy).view(-1, 4)
b = xyxy2xywh(xyxy) # boxes
if square:
b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square
b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad
xyxy = xywh2xyxy(b).long()
clip_coords(xyxy, im.shape)
crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2])]
cv2.imwrite(str(increment_path(file, mkdir=True).with_suffix('.jpg')), crop if BGR else crop[..., ::-1])
def increment_path(path, exist_ok=False, sep='', mkdir=False):
# Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc.
path = Path(path) # os-agnostic
if path.exists() and not exist_ok:
suffix = path.suffix
path = path.with_suffix('')
dirs = glob.glob(f"{path}{sep}*") # similar paths
matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs]
i = [int(m.groups()[0]) for m in matches if m] # indices
n = max(i) + 1 if i else 2 # increment number
path = Path(f"{path}{sep}{n}{suffix}") # update path
dir = path if path.suffix == '' else path.parent # directory
if not dir.exists() and mkdir:
dir.mkdir(parents=True, exist_ok=True) # make directory
return path
avg_line_length: 41.766724 | max_line_length: 120 | alphanum_fraction: 0.578686
hexsha: aa1fbe2e4a6dc448ba0bf5b7a8eba868777e3c3c | size: 1,100 | ext: py | lang: Python
max_stars: path=0066 Plus One.py | repo=AtharvRedij/leetcode-solutions @ 7194d202302989d53c241b12c9befb06923b1510 | licenses=["MIT"] | count=null | events=null
max_issues: path=0066 Plus One.py | repo=AtharvRedij/leetcode-solutions @ 7194d202302989d53c241b12c9befb06923b1510 | licenses=["MIT"] | count=null | events=null
max_forks: path=0066 Plus One.py | repo=AtharvRedij/leetcode-solutions @ 7194d202302989d53c241b12c9befb06923b1510 | licenses=["MIT"] | count=1 | events=2021-03-06T06:15:48.000Z to 2021-03-06T06:15:48.000Z
content:
'''
URL: https://leetcode.com/problems/plus-one/
Difficulty: Easy
Description: Plus One
Given a non-empty array of decimal digits representing a non-negative integer, increment one to the integer.
The digits are stored such that the most significant digit is at the head of the list, and each element in the array contains a single digit.
You may assume the integer does not contain any leading zero, except the number 0 itself.
Example 1:
Input: digits = [1,2,3]
Output: [1,2,4]
Explanation: The array represents the integer 123.
Example 2:
Input: digits = [4,3,2,1]
Output: [4,3,2,2]
Explanation: The array represents the integer 4321.
Example 3:
Input: digits = [0]
Output: [1]
Constraints:
1 <= digits.length <= 100
0 <= digits[i] <= 9
'''
class Solution:
def plusOne(self, digits):
carry = 1
for i in range(len(digits)-1, -1, -1):
s = digits[i] + carry
digits[i] = s % 10
carry = s//10
if carry == 0:
break
if carry > 0:
return [carry] + digits
return digits
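# Quick self-check (editor's addition, not part of the original submission):
if __name__ == "__main__":
    s = Solution()
    assert s.plusOne([1, 2, 3]) == [1, 2, 4]
    assert s.plusOne([9, 9]) == [1, 0, 0]
    assert s.plusOne([0]) == [1]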
| 19.298246
| 141
| 0.631818
|
1c04e272e2cad32ccf0aaabba47ce2e61da69124
| 846
|
py
|
Python
|
stft_core/utils/imports.py
|
lingyunwu14/STFT
|
1af5d26c1d27388ef8b143b1de5713d5da8eb787
|
[
"BSD-2-Clause"
] | 22
|
2021-07-09T12:42:33.000Z
|
2022-03-31T08:36:39.000Z
|
stft_core/utils/imports.py
|
lingyunwu14/STFT
|
1af5d26c1d27388ef8b143b1de5713d5da8eb787
|
[
"BSD-2-Clause"
] | 1
|
2021-10-05T06:19:13.000Z
|
2021-11-12T09:12:48.000Z
|
stft_core/utils/imports.py
|
lingyunwu14/STFT
|
1af5d26c1d27388ef8b143b1de5713d5da8eb787
|
[
"BSD-2-Clause"
] | 3
|
2021-07-09T12:42:55.000Z
|
2022-03-31T08:36:40.000Z
|
# Copyright (c) SenseTime Research and its affiliates. All Rights Reserved.
import torch
if torch._six.PY3:
import importlib
import importlib.util
import sys
# from https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa
def import_file(module_name, file_path, make_importable=False):
spec = importlib.util.spec_from_file_location(module_name, file_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
if make_importable:
sys.modules[module_name] = module
return module
else:
import imp
def import_file(module_name, file_path, make_importable=None):
module = imp.load_source(module_name, file_path)
return module
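# Usage sketch (editor's addition). The path below is a hypothetical placeholder,
# not a file shipped with this repository:
#
#   cfg = import_file("my_config", "/path/to/experiment_config.py")
#   value = cfg.SOME_SETTING  # attributes of the loaded module are then available as usual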
| 36.782609
| 168
| 0.735225
|
2bcaee12a1c430e910092c19013b0a0a3e20a884
| 107
|
py
|
Python
|
src/observer/Observer.py
|
fuqinshen/Python--
|
aaa5230354258e1bba761e483c8b9fb6be00402a
|
[
"MIT"
] | 31
|
2018-10-19T15:28:36.000Z
|
2022-02-14T03:01:25.000Z
|
src/observer/Observer.py
|
fuqinshen/Python--
|
aaa5230354258e1bba761e483c8b9fb6be00402a
|
[
"MIT"
] | null | null | null |
src/observer/Observer.py
|
fuqinshen/Python--
|
aaa5230354258e1bba761e483c8b9fb6be00402a
|
[
"MIT"
] | 10
|
2019-01-10T04:02:12.000Z
|
2021-11-17T01:52:15.000Z
|
class Observer(object):
@classmethod
def update(cls, generator):
raise NotImplementedError
| 21.4
| 33
| 0.700935
|
501408a1244c4eebd2f395045ba64576c33da9ff
| 798
|
py
|
Python
|
taso-ae/onnx_test.py
|
hgl71964/PET
|
4cedb25c5dce0c49eebb693125235fc4ad1e26f8
|
[
"Apache-2.0"
] | 69
|
2021-06-01T03:19:12.000Z
|
2022-03-26T00:14:20.000Z
|
taso-ae/onnx_test.py
|
hgl71964/PET
|
4cedb25c5dce0c49eebb693125235fc4ad1e26f8
|
[
"Apache-2.0"
] | null | null | null |
taso-ae/onnx_test.py
|
hgl71964/PET
|
4cedb25c5dce0c49eebb693125235fc4ad1e26f8
|
[
"Apache-2.0"
] | 4
|
2021-07-10T07:21:11.000Z
|
2022-02-06T18:56:56.000Z
|
import taso
import onnx
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", help="Path to input ONNX file", required=True)
args = parser.parse_args()
#graph = taso.load_onnx("/home/ubuntu/taso/onnx/squeezenet1.1.onnx")
#graph = taso.load_onnx("/home/ubuntu/taso/onnx/bertsquad10.onnx")
graph = taso.load_onnx(args.file)
#graph = xflow.load("/home/ubuntu/resnext-101.onnx")
#graph = xflow.load("/home/ubuntu/ONNXModel/inception_v2/model.onnx")
print(" original_cost = {}".format(graph.cost()))
new_graph = taso.optimize(graph, alpha = 1.0, budget = 100, print_subst=True)
print("optimized_cost = {}".format(new_graph.cost()))
onnx_model = taso.export_onnx(new_graph)
onnx.checker.check_model(onnx_model)
onnx.save(onnx_model, "{}.taso.onnx".format(args.file))
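# Example invocation (editor's addition; the model path is a placeholder):
#   python onnx_test.py -f /path/to/model.onnx
# The optimized graph is saved alongside the input as /path/to/model.onnx.taso.onnx.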
| 36.272727
| 82
| 0.745614
|
80e62f604e2e8ea3c2ec13cf55291c7f5566c597
| 10,969
|
py
|
Python
|
tuframework/run/run_training_DP.py
|
Magnety/tuFramework_win
|
4c49e1fdb292aaf1945c81d5d9bc57db0eea42b3
|
[
"Apache-2.0"
] | null | null | null |
tuframework/run/run_training_DP.py
|
Magnety/tuFramework_win
|
4c49e1fdb292aaf1945c81d5d9bc57db0eea42b3
|
[
"Apache-2.0"
] | null | null | null |
tuframework/run/run_training_DP.py
|
Magnety/tuFramework_win
|
4c49e1fdb292aaf1945c81d5d9bc57db0eea42b3
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from batchgenerators.utilities.file_and_folder_operations import *
from tuframework.run.default_configuration import get_default_configuration
from tuframework.paths import default_plans_identifier
from tuframework.training.cascade_stuff.predict_next_stage import predict_next_stage
from tuframework.training.network_training.tuTrainer import tuframeworkTrainer
from tuframework.training.network_training.tuTrainerCascadeFullRes import tuframeworkTrainerCascadeFullRes
from tuframework.utilities.task_name_id_conversion import convert_id_to_task_name
def main():
parser = argparse.ArgumentParser()
parser.add_argument("network")
parser.add_argument("network_trainer")
parser.add_argument("task", help="can be task name or task id")
parser.add_argument("fold", help='0, 1, ..., 5 or \'all\'')
parser.add_argument("-val", "--validation_only", help="use this if you want to only run the validation",
action="store_true")
parser.add_argument("-c", "--continue_training", help="use this if you want to continue a training",
action="store_true")
parser.add_argument("-p", help="plans identifier. Only change this if you created a custom experiment planner",
default=default_plans_identifier, required=False)
parser.add_argument("--use_compressed_data", default=False, action="store_true",
help="If you set use_compressed_data, the training cases will not be decompressed. Reading compressed data "
"is much more CPU and RAM intensive and should only be used if you know what you are "
"doing", required=False)
parser.add_argument("--deterministic",
help="Makes training deterministic, but reduces training speed substantially. I (Fabian) think "
"this is not necessary. Deterministic training will make you overfit to some random seed. "
"Don't use that.",
required=False, default=False, action="store_true")
parser.add_argument("-gpus", help="number of gpus", required=True,type=int)
parser.add_argument("--dbs", required=False, default=False, action="store_true", help="distribute batch size. If "
"True then whatever "
"batch_size is in plans will "
"be distributed over DDP "
"models, if False then each "
"model will have batch_size "
"for a total of "
"GPUs*batch_size")
parser.add_argument("--npz", required=False, default=False, action="store_true", help="if set then tuframework will "
"export npz files of "
"predicted segmentations "
"in the vlaidation as well. "
"This is needed to run the "
"ensembling step so unless "
"you are developing tuframework "
"you should enable this")
parser.add_argument("--valbest", required=False, default=False, action="store_true", help="")
parser.add_argument("--find_lr", required=False, default=False, action="store_true", help="")
parser.add_argument("--fp32", required=False, default=False, action="store_true",
help="disable mixed precision training and run old school fp32")
parser.add_argument("--val_folder", required=False, default="validation_raw",
help="name of the validation folder. No need to use this for most people")
parser.add_argument("--disable_saving", required=False, action='store_true',
help="If set nnU-Net will not save any parameter files. Useful for development when you are "
"only interested in the results and want to save some disk space")
parser.add_argument("--disable_postprocessing_on_folds", required=False, action='store_true',
help="Running postprocessing on each fold only makes sense when developing with nnU-Net and "
"closely observing the model performance on specific configurations. You do not need it "
"when applying nnU-Net because the postprocessing for this will be determined only once "
"all five folds have been trained and tuframework_find_best_configuration is called. Usually "
"running postprocessing on each fold is computationally cheap, but some users have "
"reported issues with very large images. If your images are large (>600x600x600 voxels) "
"you should consider setting this flag.")
# parser.add_argument("--interp_order", required=False, default=3, type=int,
# help="order of interpolation for segmentations. Testing purpose only. Hands off")
# parser.add_argument("--interp_order_z", required=False, default=0, type=int,
# help="order of interpolation along z if z is resampled separately. Testing purpose only. "
# "Hands off")
# parser.add_argument("--force_separate_z", required=False, default="None", type=str,
# help="force_separate_z resampling. Can be None, True or False. Testing purpose only. Hands off")
args = parser.parse_args()
task = args.task
fold = args.fold
network = args.network
network_trainer = args.network_trainer
validation_only = args.validation_only
plans_identifier = args.p
disable_postprocessing_on_folds = args.disable_postprocessing_on_folds
use_compressed_data = args.use_compressed_data
decompress_data = not use_compressed_data
deterministic = args.deterministic
valbest = args.valbest
find_lr = args.find_lr
num_gpus = args.gpus
fp32 = args.fp32
val_folder = args.val_folder
# interp_order = args.interp_order
# interp_order_z = args.interp_order_z
# force_separate_z = args.force_separate_z
if not task.startswith("Task"):
task_id = int(task)
task = convert_id_to_task_name(task_id)
if fold == 'all':
pass
else:
fold = int(fold)
# if force_separate_z == "None":
# force_separate_z = None
# elif force_separate_z == "False":
# force_separate_z = False
# elif force_separate_z == "True":
# force_separate_z = True
# else:
# raise ValueError("force_separate_z must be None, True or False. Given: %s" % force_separate_z)
plans_file, output_folder_name, dataset_directory, batch_dice, stage, \
trainer_class = get_default_configuration(network, task, network_trainer, plans_identifier)
if trainer_class is None:
raise RuntimeError("Could not find trainer class")
if network == "3d_cascade_fullres":
assert issubclass(trainer_class, tuframeworkTrainerCascadeFullRes), "If running 3d_cascade_fullres then your " \
"trainer class must be derived from " \
"tuframeworkTrainerCascadeFullRes"
else:
assert issubclass(trainer_class, tuframeworkTrainer), "network_trainer was found but is not derived from " \
"tuframeworkTrainer"
trainer = trainer_class(plans_file, fold, output_folder=output_folder_name,
dataset_directory=dataset_directory, batch_dice=batch_dice, stage=stage,
unpack_data=decompress_data, deterministic=deterministic,
distribute_batch_size=args.dbs, num_gpus=num_gpus, fp16=not fp32)
if args.disable_saving:
trainer.save_latest_only = False # if false it will not store/overwrite _latest but separate files each
trainer.save_intermediate_checkpoints = False # whether or not to save checkpoint_latest
trainer.save_best_checkpoint = False # whether or not to save the best checkpoint according to self.best_val_eval_criterion_MA
trainer.save_final_checkpoint = False # whether or not to save the final checkpoint
trainer.initialize(not validation_only)
if find_lr:
trainer.find_lr()
else:
if not validation_only:
if args.continue_training:
trainer.load_latest_checkpoint()
trainer.run_training()
else:
if valbest:
trainer.load_best_checkpoint(train=False)
else:
trainer.load_final_checkpoint(train=False)
trainer.network.eval()
# predict validation
trainer.validate(save_softmax=args.npz, validation_folder_name=val_folder,
run_postprocessing_on_folds=not disable_postprocessing_on_folds)
if network == '3d_lowres':
print("predicting segmentations for the next stage of the cascade")
predict_next_stage(trainer, dataset_directory+"/"+ trainer.plans['data_identifier'] + "_stage%d" % 1)
if __name__ == "__main__":
main()
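# Example invocation (editor's addition). The trainer class and task id below are
# placeholders -- substitute names that exist in your tuframework installation:
#   python run_training_DP.py 3d_lowres <TrainerClassName> <TaskNameOrId> 0 -gpus 2 --dbs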
| 59.939891
| 135
| 0.592944
|
c9182b716c55284ac11d6f83c3d2d9503b5f914d
| 11,503
|
py
|
Python
|
pdm/utils.py
|
orions-stardom/pdm
|
005e6ed1549330479c12d9152fab14c5f04e22cd
|
[
"MIT"
] | null | null | null |
pdm/utils.py
|
orions-stardom/pdm
|
005e6ed1549330479c12d9152fab14c5f04e22cd
|
[
"MIT"
] | null | null | null |
pdm/utils.py
|
orions-stardom/pdm
|
005e6ed1549330479c12d9152fab14c5f04e22cd
|
[
"MIT"
] | null | null | null |
"""
Utility functions
"""
import atexit
import functools
import importlib
import json
import os
import shutil
import subprocess
import tempfile
import urllib.parse as parse
from contextlib import contextmanager
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
from distlib.wheel import Wheel
from pdm._types import Source
from pdm.models.pip_shims import (
InstallCommand,
InstallRequirement,
PackageFinder,
get_package_finder,
url_to_path,
)
try:
from functools import cached_property
except ImportError:
class cached_property:
def __init__(self, func):
self.func = func
self.attr_name = func.__name__
self.__doc__ = func.__doc__
def __get__(self, inst, cls=None):
if inst is None:
return self
if self.attr_name not in inst.__dict__:
inst.__dict__[self.attr_name] = self.func(inst)
return inst.__dict__[self.attr_name]
def prepare_pip_source_args(
sources: List[Source], pip_args: Optional[List[str]] = None
) -> List[str]:
if pip_args is None:
pip_args = []
if sources:
# Add the source to pip9.
pip_args.extend(["-i", sources[0]["url"]]) # type: ignore
# Trust the host if it's not verified.
if not sources[0].get("verify_ssl", True):
pip_args.extend(
["--trusted-host", parse.urlparse(sources[0]["url"]).hostname]
) # type: ignore
# Add additional sources as extra indexes.
if len(sources) > 1:
for source in sources[1:]:
pip_args.extend(["--extra-index-url", source["url"]]) # type: ignore
# Trust the host if it's not verified.
if not source.get("verify_ssl", True):
pip_args.extend(
["--trusted-host", parse.urlparse(source["url"]).hostname]
) # type: ignore
return pip_args
def get_pypi_source():
"""Get what is defined in pip.conf as the index-url."""
install_cmd = InstallCommand()
options, _ = install_cmd.parser.parse_args([])
index_url = options.index_url
parsed = parse.urlparse(index_url)
verify_ssl = parsed.scheme == "https"
if any(parsed.hostname.startswith(host) for host in options.trusted_hosts):
verify_ssl = False
return index_url, verify_ssl
def get_finder(
sources: List[Source],
cache_dir: Optional[str] = None,
python_version: Optional[Tuple[int, int]] = None,
ignore_requires_python: bool = False,
) -> PackageFinder:
install_cmd = InstallCommand()
pip_args = prepare_pip_source_args(sources)
options, _ = install_cmd.parser.parse_args(pip_args)
if cache_dir:
options.cache_dir = cache_dir
finder = get_package_finder(
install_cmd=install_cmd,
options=options,
python_version=python_version,
ignore_requires_python=ignore_requires_python,
)
if not hasattr(finder, "session"):
finder.session = finder._link_collector.session
return finder
def create_tracked_tempdir(
suffix: Optional[str] = None, prefix: Optional[str] = "", dir: Optional[str] = None
) -> str:
name = tempfile.mkdtemp(suffix, prefix, dir)
os.makedirs(name, mode=0o777, exist_ok=True)
def clean_up():
shutil.rmtree(name, ignore_errors=True)
atexit.register(clean_up)
return name
def parse_name_version_from_wheel(filename: str) -> Tuple[str, str]:
w = Wheel(filename)
return w.name, w.version
def url_without_fragments(url: str) -> str:
return parse.urlunparse(parse.urlparse(url)._replace(fragment=""))
def is_readonly_property(cls, name):
"""Tell whether a attribute can't be setattr'ed."""
attr = getattr(cls, name, None)
return attr and isinstance(attr, property) and not attr.fset
def join_list_with(items: List[Any], sep: Any) -> List[Any]:
new_items = []
for item in items:
new_items.extend([item, sep])
return new_items[:-1]
def _wheel_supported(self, tags=None):
# Ignore current platform. Support everything.
return True
def _wheel_support_index_min(self, tags=None):
# All wheels are equal priority for sorting.
return 0
@contextmanager
def allow_all_wheels(enable: bool = True):
"""Monkey patch pip.Wheel to allow all wheels
The usual checks against platforms and Python versions are ignored to allow
fetching all available entries in PyPI. This also saves the candidate cache
    and sets a new one, or else the results from the previous non-patched calls
will interfere.
"""
from pdm.models.pip_shims import PipWheel
if not enable:
yield
return
original_wheel_supported = PipWheel.supported
original_support_index_min = PipWheel.support_index_min
PipWheel.supported = _wheel_supported
PipWheel.support_index_min = _wheel_support_index_min
yield
PipWheel.supported = original_wheel_supported
PipWheel.support_index_min = original_support_index_min
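# Usage sketch (editor's addition): inside the patched block every wheel is
# treated as compatible, so candidate lookups return entries for all platforms.
#
#   with allow_all_wheels():
#       populate_link(finder, ireq)  # 'finder' and 'ireq' are assumed to exist already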
def find_project_root(cwd: str = ".", max_depth: int = 5) -> Optional[str]:
"""Recursively find a `pyproject.toml` at given path or current working directory.
    If none is found, go to the parent directory; at most `max_depth` levels will be
    searched.
"""
original_path = Path(cwd).absolute()
path = original_path
for _ in range(max_depth):
if path.joinpath("pyproject.toml").exists():
return path.as_posix()
if path.parent == path:
# Root path is reached
break
path = path.parent
return None
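# Usage sketch (editor's addition):
#   root = find_project_root()              # search upward from the current directory
#   root = find_project_root("/some/dir")   # or from a hypothetical explicit path
# Returns the directory containing pyproject.toml as a POSIX string, or None.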
@functools.lru_cache()
def get_python_version(executable, as_string=False):
"""Get the version of the Python interperter."""
args = [
executable,
"-c",
"import sys,json;print(json.dumps(tuple(sys.version_info[:3])))",
]
result = tuple(json.loads(subprocess.check_output(args)))
if not as_string:
return result
return ".".join(map(str, result))
def get_sys_config_paths(executable: str, vars=None) -> Dict[str, str]:
"""Return the sys_config.get_paths() result for the python interpreter"""
if not vars:
args = [
executable,
"-c",
"import sysconfig,json;print(json.dumps(sysconfig.get_paths()))",
]
return json.loads(subprocess.check_output(args))
else:
env = os.environ.copy()
env.update(SYSCONFIG_VARS=json.dumps(vars))
args = [
executable,
"-c",
"import os,sysconfig,json;print(json.dumps(sysconfig."
"get_paths(vars=json.loads(os.getenv('SYSCONFIG_VARS')))))",
]
return json.loads(subprocess.check_output(args, env=env))
def get_pep508_environment(executable: str) -> Dict[str, Any]:
script = importlib.import_module("pdm.pep508").__file__.rstrip("co")
args = [executable, script]
return json.loads(subprocess.check_output(args))
def convert_hashes(hashes: Dict[str, str]) -> Dict[str, List[str]]:
"""Convert Pipfile.lock hash lines into InstallRequirement option format.
The option format uses a str-list mapping. Keys are hash algorithms, and
the list contains all values of that algorithm.
"""
result = {}
for hash_value in hashes.values():
try:
name, hash_value = hash_value.split(":")
except ValueError:
name = "sha256"
setdefault(result, name, []).append(hash_value)
return result
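# Worked example (editor's addition), using made-up digests:
#   convert_hashes({"a": "sha256:1111", "b": "md5:2222", "c": "3333"})
#   -> {"sha256": ["1111", "3333"], "md5": ["2222"]}
# Values without an explicit "<algorithm>:" prefix are treated as sha256.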
def get_user_email_from_git() -> Tuple[str, str]:
"""Get username and email from git config.
Return empty if not configured or git is not found.
"""
git = shutil.which("git")
if not git:
return "", ""
try:
username = subprocess.check_output(
[git, "config", "user.name"], text=True
).strip()
except subprocess.CalledProcessError:
username = ""
try:
email = subprocess.check_output(
[git, "config", "user.email"], text=True
).strip()
except subprocess.CalledProcessError:
email = ""
return username, email
def add_ssh_scheme_to_git_uri(uri: str) -> str:
"""Cleans VCS uris from pip format"""
# Add scheme for parsing purposes, this is also what pip does
if "://" not in uri:
uri = "ssh://" + uri
parsed = parse.urlparse(uri)
if ":" in parsed.netloc:
netloc, _, path_start = parsed.netloc.rpartition(":")
path = "/{0}{1}".format(path_start, parsed.path)
uri = parse.urlunparse(parsed._replace(netloc=netloc, path=path))
return uri
def get_venv_python(root: Path) -> Optional[str]:
"""Get the python interpreter path of venv"""
if os.name == "nt":
suffix = ".exe"
scripts = "Scripts"
else:
suffix = ""
scripts = "bin"
venv = None
if "VIRTUAL_ENV" in os.environ:
venv = os.environ["VIRTUAL_ENV"]
else:
for possible_dir in ("venv", ".venv", "env"):
if (root / possible_dir / scripts / f"python{suffix}").exists():
venv = str(root / possible_dir)
break
if venv:
return os.path.join(venv, scripts, f"python{suffix}")
return None
@contextmanager
def atomic_open_for_write(filename: Union[Path, str], *, encoding: str = "utf-8"):
fd, name = tempfile.mkstemp("-atomic-write", "pdm-")
filename = str(filename)
try:
f = open(fd, "w", encoding=encoding)
yield f
except Exception:
f.close()
os.unlink(name)
raise
else:
f.close()
try:
os.unlink(filename)
except OSError:
pass
shutil.move(name, filename)
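# Usage sketch (editor's addition): writes go to a temporary file and are moved
# into place only if the block completes without raising, so a reader never sees
# a half-written file.
#
#   with atomic_open_for_write("pyproject.toml") as f:   # hypothetical target file
#       f.write(new_content)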
@contextmanager
def cd(path: str):
_old_cwd = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(_old_cwd)
@contextmanager
def temp_environ():
environ = os.environ.copy()
try:
yield
finally:
os.environ.clear()
os.environ.update(environ)
@contextmanager
def open_file(url, session=None):
if url.startswith("file://"):
local_path = url_to_path(url)
if os.path.isdir(local_path):
raise ValueError("Cannot open directory for read: {}".format(url))
else:
with open(local_path, "rb") as local_file:
yield local_file
else:
headers = {"Accept-Encoding": "identity"}
with session.get(url, headers=headers, stream=True) as resp:
try:
raw = getattr(resp, "raw", None)
result = raw if raw else resp
yield result
finally:
if raw:
conn = getattr(raw, "_connection")
if conn is not None:
conn.close()
result.close()
def populate_link(
finder: PackageFinder,
ireq: InstallRequirement,
upgrade: bool = False,
):
"""Populate ireq's link attribute"""
if not ireq.link:
link = finder.find_requirement(ireq, upgrade)
if not link:
return
link = getattr(link, "link", link)
ireq.link = link
def setdefault(document, key, value):
"""A compatiable dict.setdefault() for tomlkit data structures."""
if key not in document:
document[key] = value
return document[key]
| 29.646907
| 87
| 0.624533
|
956c4a43422ddfce6a5da22e216722e524db1c2c
| 1,482
|
py
|
Python
|
getheadshot.py
|
pierce403/nweb-agent
|
912803883a221eb45abc5a2c78366f52876fa8a9
|
[
"Apache-2.0"
] | null | null | null |
getheadshot.py
|
pierce403/nweb-agent
|
912803883a221eb45abc5a2c78366f52876fa8a9
|
[
"Apache-2.0"
] | null | null | null |
getheadshot.py
|
pierce403/nweb-agent
|
912803883a221eb45abc5a2c78366f52876fa8a9
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
import subprocess
import os
# wkhtmltoimage --width 50 --quality 80 -f jpg <target> out.jpg
# vncsnapshot -quality 50 <target> out.jpg
def getheadshot(ip,rand, service):
# display hack, wkhtmltoimage doesn't like to run headless
# this requires you to run a vncserver or something
# os.environ["DISPLAY"]=':1'
FNULL=open(os.devnull, 'w') # open devnull to get rid of output
if service in ("vnc"):
print("[+] (%s) Attempting to take vnc snapshot" % rand)
process = subprocess.Popen(["vncsnapshot","-quality","50",ip,"data/nweb."+rand+ "." + service + ".headshot.jpg"], stdout=FNULL, stderr=FNULL)
try:
out, err = process.communicate(timeout=60)
if process.returncode == 0:
return True
except:
try:
print("[+] (%s) Killing slacker process" % rand)
process.kill()
except:
pass
if service in ("http", "https"):
print("[+] (%s) Attempting to take %s snapshot" % (rand, service))
process = subprocess.Popen(["wkhtmltoimage","--javascript-delay","3000","--width","800","--height","600","--quality","80","-f","jpg",service+"://"+ip,"data/nweb."+rand+"." + service + ".headshot.jpg"], stdout=FNULL, stderr=FNULL)
try:
out, err = process.communicate(timeout=60)
if process.returncode == 0:
return True
except:
try:
print("[+] (%s) Killing slacker process" % rand)
process.kill()
except:
pass
FNULL.close()
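# Usage sketch (editor's addition; the IP address and tag are placeholders):
#   ok = getheadshot("192.0.2.10", "ab12cd", "http")
#   # writes data/nweb.ab12cd.http.headshot.jpg and returns True on success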
| 32.933333
| 233
| 0.614035
|
705f36eef62448a2d8075b038a84bccbb151fd16
| 8,085
|
py
|
Python
|
sim.py
|
Solomoriah/level1sim
|
f809c51a8caf305c027108a96efb9d655f651a91
|
[
"MIT"
] | null | null | null |
sim.py
|
Solomoriah/level1sim
|
f809c51a8caf305c027108a96efb9d655f651a91
|
[
"MIT"
] | null | null | null |
sim.py
|
Solomoriah/level1sim
|
f809c51a8caf305c027108a96efb9d655f651a91
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# sim.py -- Very simple simulator for first level BFRPG combat, used to test
# out the effects of house rules.
#
# Copyright 2019 Chris Gonnerman
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This program simulates a battle between a canonical group of first-level
adventurers (all human, one fighter, one cleric, one thief, one magic-user) and
a group of four goblins.
It is assumed that the fight takes place in a 10' wide corridor, and that the
fighter and cleric form the front line, soaking up all attacks by the goblins
as long as they survive. If either of them is killed, the thief moves up and
takes the place of the dead adventurer. The magic-user is only subject to
attack if two other adventurers are killed.
Ranged attacks can be assigned to characters or monsters, with limited
ammunition. Also, our magic-user has access to Magic Missile, which will be
cast in his or her first round.
Finally, surprise is ignored in this combat. Initiative is rolled for each
combatant, and reach is ignored (so that identical numbers always mean
simultaneous attacks).
"""
import random, sqlite3, os
def die(n, s, b):
return random.randint(n, s) + b
class Combatant:
def __init__(self):
self.hp = 0
self.curr_hp = 0
self.init = 0
self.damage = (1, 6, 0)
self.now_init = 0
self.ac = 11
self.spell = 0
self.ab = 1
self.description = "undefined"
self.missiles = 0
self.missileab = 0
self.missiledamage = (1, 4, 0)
def rollinit(self):
self.now_init = die(1, 6, self.init)
def attack(self, other):
attackroll = die(1, 20, self.ab)
if attackroll >= other.ac:
damage = die(*self.damage)
other.curr_hp -= damage
def ranged(self, other):
attackroll = die(1, 20, self.missileab)
if attackroll >= other.ac:
damage = die(*self.missiledamage)
other.curr_hp -= damage
self.missiles -= 1
def __lt__(self, other):
return self.now_init < other.now_init
def __le__(self, other):
return self.now_init <= other.now_init
def __gt__(self, other):
return self.now_init > other.now_init
def __ge__(self, other):
return self.now_init >= other.now_init
def __eq__(self, other):
return self.now_init == other.now_init
def reap(combatants):
newdead = []
i = 0
while i < len(combatants):
if combatants[i].curr_hp < 1:
newdead.append(combatants.pop(i))
else:
i += 1
return newdead
def runcombat(pcs, monsters):
dead = []
round = 0
while pcs and monsters:
round += 1
for pc in pcs:
pc.rollinit()
for monster in monsters:
monster.rollinit()
for init in range(9, -3, -1):
for i in range(len(pcs)):
if monsters and pcs[i].now_init == init:
if pcs[i].spell > 0:
monster = monsters[die(1, len(monsters), -1)]
monster.curr_hp -= die(1, 6, 1)
pcs[i].spell -= 1
elif i < 2:
pcs[i].attack(monsters[min(i, len(monsters)-1)])
elif pcs[i].missiles > 0 and len(monsters) > 2:
pcs[i].ranged(monsters[min(i, len(monsters)-1)])
for i in range(len(monsters)):
if pcs and monsters[i].now_init == init:
if i < 2:
monsters[i].attack(pcs[min(i, len(pcs)-1)])
elif monsters[i].missiles > 0 and len(pcs) > 2:
monsters[i].ranged(pcs[min(i, len(pcs)-1)])
dead += reap(pcs)
dead += reap(monsters)
winner = "tie"
if pcs:
winner = "pcs"
if monsters:
winner = "monsters"
pchp = 0
monsterhp = 0
pcdam = 0
monsterdam = 0
for pc in pcs:
pchp += pc.hp
pcdam += pc.curr_hp - pc.hp
for monster in monsters:
monsterhp += monster.hp
monsterdam += monster.curr_hp - monster.hp
return (winner, round, len(pcs), pchp, pcdam, len(monsters), monsterhp, monsterdam)
def pcsetup():
# allocate a fighter
ftr = Combatant()
ftr.description = "fighter"
ftr.damage = (1, 8, 1)
ftr.curr_hp = ftr.hp = die(1, 8, 0)
ftr.curr_hp = ftr.hp = max(die(1, 8, 0), die(1, 8, 0))
# ftr.curr_hp = ftr.hp = 8 # max hit points
ftr.ac = 16
ftr.ab = 2
# allocate a cleric
clr = Combatant()
clr.description = "cleric"
clr.damage = (1, 8, 0)
clr.curr_hp = clr.hp = die(1, 6, 0)
clr.curr_hp = clr.hp = max(die(1, 6, 0), die(1, 6, 0))
# clr.curr_hp = clr.hp = 6 # max hit points
clr.ac = 16
# allocate a thief
thf = Combatant()
thf.description = "thief"
thf.curr_hp = thf.hp = die(1, 4, 0)
thf.curr_hp = thf.hp = max(die(1, 4, 0), die(1, 4, 0))
# thf.curr_hp = thf.hp = 4 # max hit points
thf.ac = 14
thf.missiles = 5
thf.missileab = 2
thf.missiledamage = (1, 4, 0)
# allocate a magic-user
mag = Combatant()
mag.description = "magic-user"
mag.curr_hp = mag.hp = die(1, 4, 0)
mag.curr_hp = mag.hp = max(die(1, 4, 0), die(1, 4, 0))
# mag.curr_hp = mag.hp = 4 # max hit points
mag.ac = 11
mag.spell = 1
# mag.spell = 2 # bonus spells
mag.missiles = 5
mag.missileab = 1
# mag.missiles = 100 # for "arcane bolt" testing
# mag.missileab = 2 # for "arcane bolt" testing
mag.missiledamage = (1, 4, 0)
return [ ftr, clr, thf, mag ]
def csvformat(seq):
result = []
for item in seq:
if type(item) is type(""):
result.append('"%s"' % item)
else:
result.append(str(item))
return ",".join(result)
def monstersetup():
monsters = []
for i in range(4):
gob = Combatant()
gob.ac = 14
gob.curr_hp = gob.hp = max(1, die(1, 8, -1))
gob.description = "goblin"
monsters.append(gob)
return monsters
###############################################################################
# start of main program
try:
os.remove("sim.db")
except:
pass
db = sqlite3.connect("sim.db")
cursor = db.cursor()
cursor.execute("""
create table simulations (
winner text,
rounds integer,
pcs integer,
pchp integer,
pcdam integer,
monsters integer,
monsterhp integer,
monsterdam integer
);
""")
for i in range(10000):
pcs = pcsetup()
monsters = monstersetup()
results = runcombat(pcs, monsters)
# if pcs:
# monsters = monstersetup()
# results = runcombat(pcs, monsters)
cursor.execute("""
insert into simulations (winner, rounds, pcs, pchp, pcdam, monsters, monsterhp, monsterdam)
values (?, ?, ?, ?, ?, ?, ?, ?)
""", results)
db.commit()
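# Result inspection sketch (editor's addition): once a run finishes, win rates
# can be pulled straight out of sim.db, for example:
#   sqlite3 sim.db "select winner, count(*) from simulations group by winner;"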
# end of file.
| 28.772242
| 99
| 0.586518
|
4cebb7119d13df73a333664919a4a9d0b65fa023
| 2,641
|
py
|
Python
|
41.first-missing-positive.py
|
windard/leeeeee
|
0107a5f95746592ca4fe78d2b5875cf65b1910e7
|
[
"MIT"
] | null | null | null |
41.first-missing-positive.py
|
windard/leeeeee
|
0107a5f95746592ca4fe78d2b5875cf65b1910e7
|
[
"MIT"
] | null | null | null |
41.first-missing-positive.py
|
windard/leeeeee
|
0107a5f95746592ca4fe78d2b5875cf65b1910e7
|
[
"MIT"
] | null | null | null |
# coding=utf-8
#
# @lc app=leetcode id=41 lang=python
#
# [41] First Missing Positive
#
# https://leetcode.com/problems/first-missing-positive/description/
#
# algorithms
# Hard (29.46%)
# Likes: 1957
# Dislikes: 625
# Total Accepted: 234.9K
# Total Submissions: 795.2K
# Testcase Example: '[1,2,0]'
#
# Given an unsorted integer array, find the smallest missing positive integer.
#
# Example 1:
#
#
# Input: [1,2,0]
# Output: 3
#
#
# Example 2:
#
#
# Input: [3,4,-1,1]
# Output: 2
#
#
# Example 3:
#
#
# Input: [7,8,9,11,12]
# Output: 1
#
#
# Note:
#
# Your algorithm should run in O(n) time and use constant extra space.
#
#
class Solution(object):
def _firstMissingPositive(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
# MemoryError
# [2147483647]
if not nums:
return 1
zeros = [0] * (max(nums)+2)
for num in nums:
if num > 0:
zeros[num] = 1
for i in range(1, len(zeros)):
if zeros[i] != 1:
return i
def __firstMissingPositive(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
nums = set(nums)
for i in range(1, len(nums)+2):
if i not in nums:
return i
def ___firstMissingPositive(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
zeros = [0] * (len(nums)+2)
for num in nums:
if num < 0 or num > len(nums):
continue
zeros[num] = 1
for i in range(1, len(zeros)):
if zeros[i] != 1:
return i
    def firstMissingPositive(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        if not nums:
            return 1
        # If 1 is absent, it is the answer.
        for i in nums:
            if i == 1:
                break
        else:
            return 1
        # Replace out-of-range values with 1 so every entry lies in [1, len(nums)].
        for i in range(len(nums)):
            if nums[i] <= 0 or nums[i] > len(nums):
                nums[i] = 1
        # Mark the presence of value v by negating nums[v-1].
        for i in range(len(nums)):
            nums[abs(nums[i])-1] = -abs(nums[abs(nums[i])-1])
        # The first index whose value stayed positive is the missing number.
        for i in range(len(nums)):
            if nums[i] > 0:
                return i + 1
        # All of 1..len(nums) are present, so the answer is len(nums) + 1.
        return i + 2
# if __name__ == '__main__':
# s = Solution()
# print s.firstMissingPositive([])
# print s.firstMissingPositive([0])
# print s.firstMissingPositive([1])
# print s.firstMissingPositive([5])
# print s.firstMissingPositive([1,2,0])
# print s.firstMissingPositive([3,4,-1,1])
# print s.firstMissingPositive([7,8,9,11,12])
| 20.960317
| 78
| 0.497917
|
aad34887d8b0fb2324debacf4900b8e30f84f779
| 13,790
|
py
|
Python
|
bclaw_runner/tests/test_repo.py
|
jack-e-tabaska/BayerCLAW
|
7e94777f4a5b155a3beead625c460117ffcb56af
|
[
"BSD-3-Clause"
] | 7
|
2021-06-25T18:14:08.000Z
|
2022-03-20T18:27:29.000Z
|
bclaw_runner/tests/test_repo.py
|
jack-e-tabaska/BayerCLAW
|
7e94777f4a5b155a3beead625c460117ffcb56af
|
[
"BSD-3-Clause"
] | 1
|
2022-03-30T20:15:44.000Z
|
2022-03-30T20:15:44.000Z
|
bclaw_runner/tests/test_repo.py
|
jack-e-tabaska/BayerCLAW
|
7e94777f4a5b155a3beead625c460117ffcb56af
|
[
"BSD-3-Clause"
] | 3
|
2021-06-07T18:26:24.000Z
|
2021-12-02T17:59:04.000Z
|
from contextlib import closing
import jmespath
import json
import os
import boto3
import moto
import pytest
from ..src.runner.repo import _is_glob, _s3_file_exists, _expand_s3_glob, _inputerator, _download_this, _outputerator, \
_upload_that, Repository
TEST_BUCKET = "test-bucket"
JOB_DATA = {"job": "data"}
FILE1_CONTENT = "file one"
FILE2_CONTENT = "file two"
FILE3_CONTENT = "file three"
OTHER_FILE_CONTENT = "other file"
DIFFERENT_BUCKET = "different-bucket"
DIFFERENT_FILE_CONTENT = "different file"
@pytest.fixture(scope="module")
def repo_bucket():
with moto.mock_s3():
yld = boto3.resource("s3", region_name="us-east-1").Bucket(TEST_BUCKET)
yld.create()
yld.put_object(Key="repo/path/_JOB_DATA_", Body=json.dumps(JOB_DATA).encode("utf-8"))
yld.put_object(Key="repo/path/file1", Body=FILE1_CONTENT.encode("utf-8"))
yld.put_object(Key="repo/path/file2", Body=FILE2_CONTENT.encode("utf-8"))
yld.put_object(Key="repo/path/file3", Body=FILE3_CONTENT.encode("utf-8"))
yld.put_object(Key="repo/path/other_file", Body=OTHER_FILE_CONTENT.encode("utf-8"))
yld.put_object(Key="repo/path/_control_/test_step.complete", Body=b"")
yield yld
@pytest.fixture(scope="module")
def different_bucket():
with moto.mock_s3():
boto3.client("s3", region_name="us-east-1").create_bucket(Bucket=DIFFERENT_BUCKET)
yld = boto3.resource("s3", region_name="us-east-1").Bucket(DIFFERENT_BUCKET)
yld.put_object(Key="different/path/different_file", Body=DIFFERENT_FILE_CONTENT.encode("utf-8"))
yield yld
@pytest.mark.parametrize("path, expect", [
("st*r", True),
("/question/mark?", True),
("/character/[set]", True),
("/not/a/glob", False),
])
def test_is_glob(path, expect):
result = _is_glob(path)
assert result == expect
@pytest.mark.parametrize("key, expect", [
("repo/path/file1", True),
("repo/path/file99", False),
])
def test_s3_file_exists(key, expect, repo_bucket):
result = _s3_file_exists(key, TEST_BUCKET)
assert result == expect
@pytest.mark.parametrize("glob, expect", [
("file*", ["file1", "file2", "file3"]),
("file?", ["file1", "file2", "file3"]),
("file[12]", ["file1", "file2"]),
("*file*", ["file1", "file2", "file3", "other_file"]),
("nothing*", []),
])
def test_expand_s3_glob(repo_bucket, glob, expect):
ext_glob = f"s3://{TEST_BUCKET}/repo/path/{glob}"
result = sorted(list(_expand_s3_glob(ext_glob)))
ext_expect = [f"s3://{TEST_BUCKET}/repo/path/{x}" for x in expect]
assert result == ext_expect
def test_inputerator(repo_bucket):
boto3.client("s3", region_name="us-east-1").create_bucket(Bucket=DIFFERENT_BUCKET)
different_bucket = boto3.resource("s3", region_name="us-east-1").Bucket(DIFFERENT_BUCKET)
different_bucket.put_object(Key="different/path/different_file", Body=DIFFERENT_FILE_CONTENT.encode("utf-8"))
paths = [
"s3://test-bucket/repo/path/file*",
"s3://different-bucket/path/different_file",
]
result = sorted(list(_inputerator(paths)))
expect = sorted([
"s3://test-bucket/repo/path/file1",
"s3://test-bucket/repo/path/file2",
"s3://test-bucket/repo/path/file3",
"s3://different-bucket/path/different_file",
])
assert result == expect
@pytest.mark.parametrize("optional", [True, False])
def test_download_this(optional, tmp_path, repo_bucket):
os.chdir(tmp_path)
_download_this("s3://test-bucket/repo/path/file1", optional)
expected_file = tmp_path / "file1"
assert os.path.isfile(expected_file)
with expected_file.open() as fp:
line = fp.readline()
assert line == FILE1_CONTENT
def test_download_this_missing_required_file(tmp_path, repo_bucket):
target = "s3://test-bucket/repo/path/file99"
os.chdir(tmp_path)
with pytest.raises(RuntimeError, match=f"download failed: {target}"):
_download_this(target, False)
def test_download_this_missing_optional_file(tmp_path, repo_bucket, caplog):
target = "s3://test-bucket/repo/path/file99"
os.chdir(tmp_path)
_download_this(target, True)
unexpected_file = tmp_path / "file99"
assert "optional file not found" in caplog.text
assert os.path.exists(unexpected_file) is False
def test_outputerator(tmp_path, caplog):
filenames = "output1 output2 output3 other_thing ignore_me".split()
for filename in filenames:
file = tmp_path / filename
file.open("w").close()
request = ["output*", "other_thing", "non_thing*"]
os.chdir(tmp_path)
result = sorted(list(_outputerator(request)))
expect = sorted("output1 output2 output3 other_thing".split())
assert result == expect
assert caplog.messages[0] == "no file matching 'non_thing*' found in workspace"
def test_upload_that(monkeypatch, tmp_path, repo_bucket):
monkeypatch.setenv("BC_EXECUTION_ID", "ELVISLIVES")
target_file = tmp_path / "output1"
with target_file.open("w") as fp:
print("target file", file=fp)
_upload_that(str(target_file.absolute()), TEST_BUCKET, "repo/path")
chek = repo_bucket.Object("repo/path/output1").get()
expected_metadata = {"execution_id": "ELVISLIVES"}
assert chek["Metadata"] == expected_metadata
with closing(chek["Body"]) as fp:
line = next(fp)
assert line == "target file\n".encode("utf-8")
def test_upload_that_missing_file(tmp_path, caplog, repo_bucket):
target_file = tmp_path / "missing"
_upload_that(str(target_file.absolute()), TEST_BUCKET, "repo/path")
assert f"{target_file} not found; skipping upload" in caplog.text
def test_upload_that_fail(tmp_path, repo_bucket):
target_file = tmp_path / "outputx"
with target_file.open("w") as fp:
print("target file", file=fp)
with pytest.raises(RuntimeError, match="upload failed: outputx -> s3://unbucket/repo/path/outputx"):
_upload_that(str(target_file.absolute()), "unbucket", "repo/path")
def test_repository(monkeypatch):
monkeypatch.setenv("BC_STEP_NAME", "test_step")
path = "s3://test-bucket/repo/path"
repo = Repository(path)
assert repo.full_path == path
assert repo.bucket == "test-bucket"
assert repo.prefix == "repo/path"
assert repo.run_status_obj == "repo/path/_control_/test_step.complete"
def test_read_job_data(monkeypatch, repo_bucket):
monkeypatch.setenv("BC_STEP_NAME", "test_step")
repo = Repository(f"s3://{TEST_BUCKET}/repo/path")
job_data = repo.read_job_data()
assert job_data == JOB_DATA
@pytest.mark.parametrize("name, expect", [
("file1", f"s3://{TEST_BUCKET}/repo/path/file1"),
("s3://some/path/to/a/file", "s3://some/path/to/a/file"),
])
def test_add_s3_path(monkeypatch, name, expect):
monkeypatch.setenv("BC_STEP_NAME", "test_step")
repo = Repository(f"s3://{TEST_BUCKET}/repo/path")
result = repo.add_s3_path(name)
assert result == expect
@pytest.mark.parametrize("files, expect", [
(["file1", "file2", "file3"], True),
(["file1", "file99", "file3"], False),
(["file1", "file*", "file3"], False),
([], True),
])
def test_files_exist(monkeypatch, repo_bucket, files, expect):
monkeypatch.setenv("BC_STEP_NAME", "test_step")
repo = Repository(f"s3://{TEST_BUCKET}/repo/path")
result = repo.files_exist(files)
assert result == expect
@pytest.mark.parametrize("optional", [True, False])
def test_download_inputs(optional, monkeypatch, tmp_path, repo_bucket):
monkeypatch.setenv("BC_STEP_NAME", "test_step")
repo = Repository(f"s3://{TEST_BUCKET}/repo/path")
boto3.client("s3", region_name="us-east-1").create_bucket(Bucket=DIFFERENT_BUCKET)
different_bucket = boto3.resource("s3", region_name="us-east-1").Bucket(DIFFERENT_BUCKET)
different_bucket.put_object(Key="different/path/different_file", Body=DIFFERENT_FILE_CONTENT.encode("utf-8"))
file_spec = {
"files": "file*",
"other_file": "other_file",
"different_file": f"s3://{DIFFERENT_BUCKET}/different/path/different_file"
}
os.chdir(tmp_path)
result = repo.download_inputs(file_spec, optional)
expect = {
"files": "file*",
"other_file": "other_file",
"different_file": "different_file"
}
for filename in "file1 file2 file3 other_file different_file".split():
chek = tmp_path / filename
assert chek.exists()
assert result == expect
def test_download_inputs_missing_required_file(monkeypatch, tmp_path, repo_bucket):
monkeypatch.setenv("BC_STEP_NAME", "test_step")
repo = Repository(f"s3://{TEST_BUCKET}/repo/path")
boto3.client("s3", region_name="us-east-1").create_bucket(Bucket=DIFFERENT_BUCKET)
different_bucket = boto3.resource("s3", region_name="us-east-1").Bucket(DIFFERENT_BUCKET)
different_bucket.put_object(Key="different/path/different_file", Body=DIFFERENT_FILE_CONTENT.encode("utf-8"))
file_spec = {
"files": "file*",
"other_file": "other_file",
"different_file": f"s3://{DIFFERENT_BUCKET}/different/path/different_file",
"missing_file": f"s3://{DIFFERENT_BUCKET}/missing_path/missing_file"
}
os.chdir(tmp_path)
with pytest.raises(RuntimeError, match=f"download failed: {file_spec['missing_file']}"):
_ = repo.download_inputs(file_spec, False)
def test_download_inputs_missing_optional_file(monkeypatch, tmp_path, caplog, repo_bucket):
monkeypatch.setenv("BC_STEP_NAME", "test_step")
repo = Repository(f"s3://{TEST_BUCKET}/repo/path")
boto3.client("s3", region_name="us-east-1").create_bucket(Bucket=DIFFERENT_BUCKET)
different_bucket = boto3.resource("s3", region_name="us-east-1").Bucket(DIFFERENT_BUCKET)
different_bucket.put_object(Key="different/path/different_file", Body=DIFFERENT_FILE_CONTENT.encode("utf-8"))
file_spec = {
"files": "file*",
"other_file": "other_file",
"different_file": f"s3://{DIFFERENT_BUCKET}/different/path/different_file",
"missing_file": f"s3://{DIFFERENT_BUCKET}/missing_path/missing_file"
}
os.chdir(tmp_path)
result = repo.download_inputs(file_spec, True)
expect = {
"files": "file*",
"other_file": "other_file",
"different_file": "different_file",
"missing_file": "missing_file",
}
assert result == expect
assert f"optional file not found: {file_spec['missing_file']}; skipping" in caplog.text
assert os.path.exists(tmp_path / "file1")
assert os.path.exists(tmp_path / "file2")
assert os.path.exists(tmp_path / "file3")
assert os.path.exists(tmp_path / "other_file")
assert os.path.exists(tmp_path / "different_file")
assert os.path.exists(tmp_path / "missing_file") is False
@pytest.mark.parametrize("optional", [True, False])
def test_download_inputs_empty_inputs(optional, monkeypatch, repo_bucket):
monkeypatch.setenv("BC_STEP_NAME", "test_step")
repo = Repository(f"s3://{TEST_BUCKET}/repo/path")
file_spec = {}
result = repo.download_inputs(file_spec, optional)
assert len(result) == 0
def test_upload_outputs(monkeypatch, tmp_path, repo_bucket):
monkeypatch.setenv("BC_STEP_NAME", "test_step")
repo = Repository(f"s3://{TEST_BUCKET}/repo_two/path")
for output_filename in "output1 output2 output3 other_output".split():
file = tmp_path / output_filename
with file.open("w") as fp:
print(output_filename, file=fp)
file_spec = {
"outputs": "output*",
"other_output": "other_output",
"missing_file": "missing_file",
}
os.chdir(tmp_path)
repo.upload_outputs(file_spec)
repo_objects = boto3.client("s3", region_name="us-east-1").list_objects_v2(Bucket=TEST_BUCKET, Prefix="repo_two/path")
repo_contents = sorted(jmespath.search("Contents[].Key", repo_objects))
expect = sorted([
"repo_two/path/output1",
"repo_two/path/output2",
"repo_two/path/output3",
"repo_two/path/other_output",
])
assert repo_contents == expect
def test_upload_outputs_fail(monkeypatch, tmp_path, repo_bucket):
monkeypatch.setenv("BC_STEP_NAME", "test_step")
repo = Repository(f"s3://unbucket/repo_x/path")
for output_filename in "output1 output2".split():
file = tmp_path / output_filename
with file.open("w") as fp:
print(output_filename, file=fp)
file_spec = {
"outputs": "output*",
}
os.chdir(tmp_path)
with pytest.raises(RuntimeError, match=f"upload failed:"):
repo.upload_outputs(file_spec)
def test_upload_outputs_empty_outputs(monkeypatch, repo_bucket):
monkeypatch.setenv("BC_STEP_NAME", "test_step")
repo = Repository(f"s3://{TEST_BUCKET}/repo/path")
file_spec = {}
repo.upload_outputs(file_spec)
@pytest.mark.parametrize("step_name, expect", [
("test_step", True),
("non_step", False),
])
def test_check_for_previous_run(monkeypatch, repo_bucket, step_name, expect):
monkeypatch.setenv("BC_STEP_NAME", step_name)
repo = Repository(f"s3://{TEST_BUCKET}/repo/path")
result = repo.check_for_previous_run()
assert result == expect
def test_clear_run_status(monkeypatch, repo_bucket):
monkeypatch.setenv("BC_STEP_NAME", "test_step")
repo = Repository(f"s3://{TEST_BUCKET}/repo/path")
assert repo.check_for_previous_run() is True
repo.clear_run_status()
assert repo.check_for_previous_run() is False
def test_put_run_status(monkeypatch, repo_bucket):
monkeypatch.setenv("BC_STEP_NAME", "test_step_two")
repo = Repository(f"s3://{TEST_BUCKET}/repo/path")
assert repo.check_for_previous_run() is False
repo.put_run_status()
assert repo.check_for_previous_run() is True
| 34.823232
| 122
| 0.688905
|
d5c8dd59ed765641b96b42829fdf866676ae63b2
| 1,170
|
py
|
Python
|
plugins/misp/komand_misp/actions/add_attachment/action.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 46
|
2019-06-05T20:47:58.000Z
|
2022-03-29T10:18:01.000Z
|
plugins/misp/komand_misp/actions/add_attachment/action.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 386
|
2019-06-07T20:20:39.000Z
|
2022-03-30T17:35:01.000Z
|
plugins/misp/komand_misp/actions/add_attachment/action.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 43
|
2019-07-09T14:13:58.000Z
|
2022-03-28T12:04:46.000Z
|
import komand
from .schema import AddAttachmentInput, AddAttachmentOutput
# Custom imports below
import shutil
import tempfile
import base64
class AddAttachment(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name="add_attachment",
description="Add attachment to event",
input=AddAttachmentInput(),
output=AddAttachmentOutput(),
)
def run(self, params={}):
attachment = params.get("attachment")
filename = params.get("filename")
path = tempfile.mkdtemp() + "/"
fname = "tmp.txt"
with open(path + fname, "w") as f:
f.write(base64.b64decode(attachment).decode("utf-8"))
client = self.connection.client
in_event = client.get_event(params.get("event"))
out = client.add_attachment(in_event, attachment=path + fname, filename=filename)
self.logger.info(out)
shutil.rmtree(path)
return {"status": True}
def test(self):
client = self.connection.client
output = client.test_connection()
self.logger.info(output)
return {"status": True}
| 28.536585
| 89
| 0.626496
|
de0825bfc4255fe02b69595ea4429b75091b6cb8
| 23
|
py
|
Python
|
__init__.py
|
cove9988/git-talk
|
9f549d8565948a150834bcaa704b55ae15c094c1
|
[
"MIT"
] | 5
|
2020-04-06T11:00:27.000Z
|
2020-09-30T15:16:56.000Z
|
__init__.py
|
ggdrg/git-talk
|
89ed00caa6a426ea9d5fa84cbef588d07aebc1f0
|
[
"MIT"
] | 3
|
2020-09-26T02:53:30.000Z
|
2020-10-09T01:46:37.000Z
|
__init__.py
|
ggdrg/git-talk
|
89ed00caa6a426ea9d5fa84cbef588d07aebc1f0
|
[
"MIT"
] | 1
|
2020-09-25T23:41:54.000Z
|
2020-09-25T23:41:54.000Z
|
__version__ = '1.6.5'
| 11.5
| 22
| 0.608696
|
6b73397140a04b27ae38d74a8e1615d4aa193b33
| 1,069
|
py
|
Python
|
venv/lib/python3.6/site-packages/pelican/tests/test_rstdirectives.py
|
Patrisha-de-Boon/CMPUT404-Lab5
|
cc235c56d707a96345e1093c8f346eee78e3644d
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python3.6/site-packages/pelican/tests/test_rstdirectives.py
|
Patrisha-de-Boon/CMPUT404-Lab5
|
cc235c56d707a96345e1093c8f346eee78e3644d
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python3.6/site-packages/pelican/tests/test_rstdirectives.py
|
Patrisha-de-Boon/CMPUT404-Lab5
|
cc235c56d707a96345e1093c8f346eee78e3644d
|
[
"Apache-2.0"
] | null | null | null |
from unittest.mock import Mock
from pelican.tests.support import unittest
class Test_abbr_role(unittest.TestCase):
def call_it(self, text):
from pelican.rstdirectives import abbr_role
rawtext = text
lineno = 42
inliner = Mock(name='inliner')
nodes, system_messages = abbr_role(
'abbr', rawtext, text, lineno, inliner)
self.assertEqual(system_messages, [])
self.assertEqual(len(nodes), 1)
return nodes[0]
def test(self):
node = self.call_it("Abbr (Abbreviation)")
self.assertEqual(node.astext(), "Abbr")
self.assertEqual(node['explanation'], "Abbreviation")
def test_newlines_in_explanation(self):
node = self.call_it("CUL (See you\nlater)")
self.assertEqual(node.astext(), "CUL")
self.assertEqual(node['explanation'], "See you\nlater")
def test_newlines_in_abbr(self):
node = self.call_it("US of\nA \n (USA)")
self.assertEqual(node.astext(), "US of\nA")
self.assertEqual(node['explanation'], "USA")
| 33.40625
| 63
| 0.63985
|
e271921fa859350679be5ed5d3b8ca62da266349
| 3,748
|
py
|
Python
|
contrib/seeds/makeseeds.py
|
wargo32/DOTCoin
|
24e9c5e5a547a6b782a8b4fe1fdb51f6a2a7e6fa
|
[
"MIT"
] | null | null | null |
contrib/seeds/makeseeds.py
|
wargo32/DOTCoin
|
24e9c5e5a547a6b782a8b4fe1fdb51f6a2a7e6fa
|
[
"MIT"
] | null | null | null |
contrib/seeds/makeseeds.py
|
wargo32/DOTCoin
|
24e9c5e5a547a6b782a8b4fe1fdb51f6a2a7e6fa
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# Generate seeds.txt from Pieter's DNS seeder
#
NSEEDS=512
MAX_SEEDS_PER_ASN=2
MIN_BLOCKS = 337600
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = set([
"130.211.129.106", "178.63.107.226",
"83.81.130.26", "88.198.17.7", "148.251.238.178", "176.9.46.6",
"54.173.72.127", "54.174.10.182", "54.183.64.54", "54.194.231.211",
"54.66.214.167", "54.66.220.137", "54.67.33.14", "54.77.251.214",
"54.94.195.96", "54.94.200.247"
])
import re
import sys
import dns.resolver
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):19745$")
PATTERN_AGENT = re.compile(r"^(\/Satoshi:0.8.6\/|\/Satoshi:0.9.(2|3)\/|\/Satoshi:0.10.\d{1,2}\/)$")
def parseline(line):
sline = line.split()
if len(sline) < 11:
return None
# Match only IPv4
m = PATTERN_IPV4.match(sline[0])
if m is None:
return None
# Do IPv4 sanity check
ip = 0
for i in range(0,4):
if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
return None
ip = ip + (int(m.group(i+2)) << (8*(3-i)))
if ip == 0:
return None
# Skip bad results.
if sline[1] == 0:
return None
# Extract uptime %.
uptime30 = float(sline[7][:-1])
# Extract Unix timestamp of last success.
lastsuccess = int(sline[2])
# Extract protocol version.
version = int(sline[10])
# Extract user agent.
agent = sline[11][1:-1]
# Extract service flags.
service = int(sline[9], 16)
# Extract blocks.
blocks = int(sline[8])
# Construct result.
return {
'ip': m.group(1),
'ipnum': ip,
'uptime': uptime30,
'lastsuccess': lastsuccess,
'version': version,
'agent': agent,
'service': service,
'blocks': blocks,
}
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
result = []
asn_count = {}
for ip in ips:
if len(result) == max_total:
break
try:
asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
if asn not in asn_count:
asn_count[asn] = 0
if asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
result.append(ip)
except:
sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
return result
def main():
lines = sys.stdin.readlines()
ips = [parseline(line) for line in lines]
    # Keep only entries with a valid IPv4 address.
ips = [ip for ip in ips if ip is not None]
# Skip entries from suspicious hosts.
ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
# Enforce minimal number of blocks.
ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
# Require service bit 1.
ips = [ip for ip in ips if (ip['service'] & 1) == 1]
# Require at least 50% 30-day uptime.
ips = [ip for ip in ips if ip['uptime'] > 50]
# Require a known and recent user agent.
ips = [ip for ip in ips if PATTERN_AGENT.match(ip['agent'])]
# Sort by availability (and use last success as tie breaker)
ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
# Look up ASNs and limit results, both per ASN and globally.
ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['ipnum']))
for ip in ips:
print ip['ip']
if __name__ == '__main__':
main()
| 31.495798
| 186
| 0.580843
|
8e1f0c83aa9d0c9f4c0b27c6d8a6a1350fbcfaf6
| 666
|
py
|
Python
|
aiida_phonopy/utils/resources.py
|
bastonero/aiida-phonopy
|
0202372dc3e877c07d1990b9669468dc200e5e0c
|
[
"MIT"
] | 1
|
2018-06-22T21:23:55.000Z
|
2018-06-22T21:23:55.000Z
|
aiida_phonopy/utils/resources.py
|
bastonero/aiida-phonopy
|
0202372dc3e877c07d1990b9669468dc200e5e0c
|
[
"MIT"
] | 4
|
2018-02-09T02:23:37.000Z
|
2018-05-22T13:37:54.000Z
|
aiida_phonopy/utils/resources.py
|
bastonero/aiida-phonopy
|
0202372dc3e877c07d1990b9669468dc200e5e0c
|
[
"MIT"
] | 4
|
2018-02-08T02:54:47.000Z
|
2018-07-04T08:23:38.000Z
|
# -*- coding: utf-8 -*-
"""Utilities for CalcJob resources."""
def get_default_options(max_num_machines=1, max_wallclock_seconds=300, with_mpi=False):
"""Return an instance of the options dictionary with the minimally required parameters for a `CalcJob`.
:param max_num_machines: set the number of nodes, default=1
    :param max_wallclock_seconds: set the maximum number of wallclock seconds, default=300
:param with_mpi: whether to run the calculation with MPI enabled
"""
return {
"resources": {"num_machines": int(max_num_machines)},
"max_wallclock_seconds": int(max_wallclock_seconds),
"withmpi": with_mpi,
}
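# Usage sketch (editor's addition): the returned dictionary is intended to be
# attached to a CalcJob builder's options, e.g. (the builder name is hypothetical):
#
#   builder.metadata.options = get_default_options(max_num_machines=2, with_mpi=True)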
| 39.176471
| 107
| 0.717718
|
522b02b7dd4ce6c18e49eaf00929454e9b9ac5b8
| 1,939
|
py
|
Python
|
Configuration/Theme.py
|
olmedoluis/pix
|
872fc75a3cef0d8cb152b1565a831874b9fd3fb5
|
[
"MIT"
] | null | null | null |
Configuration/Theme.py
|
olmedoluis/pix
|
872fc75a3cef0d8cb152b1565a831874b9fd3fb5
|
[
"MIT"
] | null | null | null |
Configuration/Theme.py
|
olmedoluis/pix
|
872fc75a3cef0d8cb152b1565a831874b9fd3fb5
|
[
"MIT"
] | null | null | null |
ANSI = "\u001b["
CODE = f"{ANSI}38;5;"
THEME = {
"th_added": f"{CODE}48;1m",
"th_conflicted": f"{CODE}209;1m",
"th_deleted": f"{CODE}203;1m",
"th_dim": f"{ANSI}2m",
"th_error": f"{CODE}9;1m",
"th_keyword": f"{CODE}171;1m",
"th_modified": f"{CODE}221;1m",
"th_normal": f"{CODE}15;1m",
"th_renamed": f"{CODE}203;1m",
"th_reset": f"{ANSI}0m",
"th_success": f"{CODE}47;1m",
"th_untracked": f"{CODE}69;1m",
"th_stash": f"{CODE}69;1m",
}
ICONS = {
"ic_modified": "☢",
"ic_untracked": "✱",
"ic_renamed": "✦",
"ic_deleted": "✝",
"ic_conflicted": "■",
"ic_resetted": "⧗",
"ic_removed": "−",
"ic_patch_add": "▲",
"ic_patch_remove": "▼",
"ic_added": "✚",
"ic_error": "✖",
"ic_stash": "★",
"ic_selection": "❤",
"ic_normal": "•",
"ic_success": "⚑",
"ic_branch": "⚲",
"ic_log_selected": "♦",
"ic_log": "⋅",
}
EMPTY = {}
INPUT_THEME = {
"ADD_SELECTION": {"selection": f"{CODE}48;1m"},
"BRANCH_CREATION_ABOUT": EMPTY,
"BRANCH_CREATION_CONFIRM": EMPTY,
"BRANCH_CREATION_ID": EMPTY,
"BRANCH_CREATION_SWITCH": EMPTY,
"BRANCH_CREATION_TYPE": {"selection": f"{CODE}221;1m"},
"BRANCH_SELECTION": {"selection": f"{CODE}171;1m"},
"BRANCH_RENAME": {"font": f"{CODE}221;1m"},
"COMMIT_CREATION_ABOUT": EMPTY,
"COMMIT_CREATION_CONFIRM": EMPTY,
"COMMIT_CREATION_SCOPE": EMPTY,
"COMMIT_CREATION_TYPE": {"selection": f"{CODE}221;1m"},
"LOG_LOG": EMPTY,
"PATCH_SELECTION": EMPTY,
"REMOVE_SELECTION": {"selection": f"{CODE}9;1m"},
"RESET_SELECTION": {"selection": f"{CODE}48;1m"},
"STASH_CREATION_NAME": EMPTY,
"STASH_SELECTION": EMPTY,
}
INPUT_ICONS = {
"+": ICONS["ic_modified"],
"-": ICONS["ic_error"],
"selection": ICONS["ic_selection"],
"normal": ICONS["ic_normal"],
"log_selection": ICONS["ic_log_selected"],
"log_normal": ICONS["ic_log"],
}
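
# A small, hypothetical helper (added for illustration, not part of the
# original file) showing how the THEME escape codes and ICONS are meant to be
# combined: open with a colour code and always close with "th_reset".
def _demo_render(text, color_key="th_success", icon_key="ic_success"):
    return f"{THEME[color_key]}{ICONS[icon_key]} {text}{THEME['th_reset']}"
# Example: print(_demo_render("committed"))  # bold green flag icon followed by the text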
| 27.309859
| 59
| 0.574007
|
387e8826576626e1918e23f03e15e24ae0e7e1e4
| 1,627
|
py
|
Python
|
src/yafs/__init__.py
|
diegopso/YAFS
|
5e9d22bcea836cb3c8caef1d3f721f889536a54e
|
[
"MIT"
] | 1
|
2020-03-12T13:16:09.000Z
|
2020-03-12T13:16:09.000Z
|
src/yafs/__init__.py
|
mikesneider/YAFS
|
1b805cb0dc5ceb438ab335c347750e1cb1cdd34a
|
[
"MIT"
] | null | null | null |
src/yafs/__init__.py
|
mikesneider/YAFS
|
1b805cb0dc5ceb438ab335c347750e1cb1cdd34a
|
[
"MIT"
] | null | null | null |
"""
The ``yafs`` module is the main component that performs the simulation.
The following tables list all of the available components in this module.
{toc}
"""
from pkgutil import extend_path
from yafs.core import Sim
from yafs.placement import Placement,ClusterPlacement
from yafs.selection import Selection,OneRandomPath,First_ShortestPath
from yafs.topology import Topology
from yafs.population import Population,Statical
from yafs.application import Application, Message
from yafs.metrics import Metrics
from yafs.distribution import *
import yafs.utils
def compile_toc(entries, section_marker='='):
"""Compiles a list of sections with objects into sphinx formatted
autosummary directives."""
toc = ''
for section, objs in entries:
toc += '\n\n%s\n%s\n\n' % (section, section_marker * len(section))
toc += '.. autosummary::\n\n'
for obj in objs:
toc += ' ~%s.%s\n' % (obj.__module__, obj.__name__)
return toc
toc = (
('Core', [Sim]),
('Topology', [Topology]),
('Application', [Application, Message]),
('Population', [Population, Statical]),
('Placement', [Placement,ClusterPlacement]),
('Selection', [Selection,OneRandomPath,First_ShortestPath]),
('Metrics', [Metrics]),
('Distribution',[Distribution,deterministic_distribution,exponential_distribution])
)
# Use the toc to keep the documentation and the implementation in sync.
if __doc__:
__doc__ = __doc__.format(toc=compile_toc(toc))
__all__ = [obj.__name__ for section, objs in toc for obj in objs]
__path__ = extend_path(__path__, __name__)
__version__ = '0.2'
| 29.581818
| 87
| 0.712354
|
f2913634af5aaa1f88bbf9351e34263eeff6e79a
| 727
|
py
|
Python
|
LeetcodeAlgorithms/055. Jump Game/jump-game.py
|
Fenghuapiao/PyLeetcode
|
d804a62643fe935eb61808196a2c093ea9583654
|
[
"MIT"
] | 3
|
2019-08-20T06:54:38.000Z
|
2022-01-07T12:56:46.000Z
|
LeetcodeAlgorithms/055. Jump Game/jump-game.py
|
yhangf/PyLeetcode
|
d804a62643fe935eb61808196a2c093ea9583654
|
[
"MIT"
] | null | null | null |
LeetcodeAlgorithms/055. Jump Game/jump-game.py
|
yhangf/PyLeetcode
|
d804a62643fe935eb61808196a2c093ea9583654
|
[
"MIT"
] | 2
|
2018-11-01T16:10:34.000Z
|
2020-06-02T03:24:43.000Z
|
class Solution(object):
def canJump(self, nums):
"""
:type nums: List[int]
        :rtype: bool
"""
pos = 0
bound = len(nums)
while pos < len(nums) - 1:
dis = nums[pos]
if dis == 0:
return False
farthest = posToFarthest = 0
            for i in range(pos + 1, min(pos + dis + 1, bound)):  # range works on both Python 2 and 3
canReach = i + nums[i]
if i == len(nums) - 1:
return True
if canReach > farthest:
farthest = canReach
posToFarthest = i
pos = posToFarthest
        return pos >= len(nums) - 1
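
# Illustrative checks (added here; not part of the original submission).
# nums[i] is the maximum jump length available from index i.
if __name__ == "__main__":
    s = Solution()
    assert s.canJump([2, 3, 1, 1, 4])        # reachable: 0 -> 1 -> 4
    assert not s.canJump([3, 2, 1, 0, 4])    # every path gets stuck at index 3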
| 33.045455
| 65
| 0.416781
|
2c9845b16fcdbb10780ef664170002fe5463a85e
| 3,633
|
py
|
Python
|
main.py
|
jiharal/multi_camera
|
006c3afed64302bda067ad19080173fee5c11206
|
[
"MIT"
] | null | null | null |
main.py
|
jiharal/multi_camera
|
006c3afed64302bda067ad19080173fee5c11206
|
[
"MIT"
] | null | null | null |
main.py
|
jiharal/multi_camera
|
006c3afed64302bda067ad19080173fee5c11206
|
[
"MIT"
] | null | null | null |
from threading import Thread
from multicamera import MultiCapture, FramesThreadBody, NormalizerCLAHE
import time, sys, queue, cv2
import numpy as np
def check_pressed_keys(key):
if key == 32: # Pause
while True:
key = cv2.waitKey(0)
if key == 27 or key == 32 or key == 13: # enter: resume, space: next frame, esc: exit
break
else:
key = cv2.waitKey(1)
return key
def get_target_size(frame_sizes,
vis=None,
max_window_size=(1920, 1080),
stack_frames='vertical',
**kwargs):
if vis is None:
width = 0
height = 0
for size in frame_sizes:
if width > 0 and height > 0:
if stack_frames == 'vertical':
height += size[1]
elif stack_frames == 'horizontal':
width += size[0]
else:
width, height = size
else:
height, width = vis.shape[:2]
if stack_frames == 'vertical':
target_height = max_window_size[1]
target_ratio = target_height / height
target_width = int(width * target_ratio)
elif stack_frames == 'horizontal':
target_width = max_window_size[0]
target_ratio = target_width / width
target_height = int(height * target_ratio)
return target_width, target_height
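
# Worked example of the scaling above (illustrative numbers, not from the
# original file): two 1280x720 frames stacked horizontally give width=2560 and
# height=720, so target_width=1920 and target_height=int(720 * 1920 / 2560)=540.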
def visualize_multicam_detections(frames,
max_window_size=(1920, 1080),
stack_frames='horizontal'):
assert stack_frames in ['vertical', 'horizontal']
vis = None
for i, frame in enumerate(frames):
if vis is not None:
if stack_frames == 'vertical':
vis = np.vstack([vis, frame])
elif stack_frames == 'horizontal':
vis = np.hstack([vis, frame])
else:
vis = frame
# print(len(frames))
target_width, target_height = get_target_size(frames, vis, max_window_size,
stack_frames)
vis = cv2.resize(vis, (target_width, target_height))
return vis
if __name__ == "__main__":
print("Program started")
sources = [
"/home/thinkbook/Videos/cctv/cctv4.mp4",
"/home/thinkbook/Videos/cctv/cctv3.mp4"
]
capture = MultiCapture(sources=sources, loop=False)
capture.add_transform(
NormalizerCLAHE(clip_limit=1.0, tile_size=8)
)
thread_body = FramesThreadBody(capture,
max_queue_length=len(capture.captures) * 2)
frames_thread = Thread(target=thread_body)
frames_thread.start()
frames_read = False
set_output_frames = False
prev_frames = thread_body.frames_queue.get()
key = -1
frame_number = 0
# stack_frames='vertical'
# max_window_size=(1920, 1080)
while thread_body.process:
key = check_pressed_keys(key)
if key == 27:
break
start = time.perf_counter()
try:
frames = thread_body.frames_queue.get_nowait()
frames_read = True
except queue.Empty:
frames = None
if frames is None:
continue
frame_number += 1
# print(prev_frames)
vis = visualize_multicam_detections(prev_frames)
prev_frames, frames = frames, prev_frames
# for frame in frames:
# print(len(frame))
# cv2.resize(frame, (1000, 1000))
cv2.imshow("video", vis)
thread_body.process = False
frames_thread.join()
| 29.536585
| 98
| 0.566199
|
b6b964cb72d8dc9de805702c25a4a5cf51057d01
| 10,926
|
py
|
Python
|
m3tl/predefined_problems/ner_data.py
|
vishalbelsare/bert-multitask-learning
|
688c2bab1dcbcd8ab6c795c116d252a19b66b793
|
[
"Apache-2.0"
] | 456
|
2018-12-11T09:43:10.000Z
|
2021-11-14T17:33:21.000Z
|
m3tl/predefined_problems/ner_data.py
|
vishalbelsare/bert-multitask-learning
|
688c2bab1dcbcd8ab6c795c116d252a19b66b793
|
[
"Apache-2.0"
] | 57
|
2018-12-24T05:59:53.000Z
|
2021-11-16T05:58:52.000Z
|
m3tl/predefined_problems/ner_data.py
|
vishalbelsare/bert-multitask-learning
|
688c2bab1dcbcd8ab6c795c116d252a19b66b793
|
[
"Apache-2.0"
] | 123
|
2018-12-25T03:41:03.000Z
|
2021-11-12T18:00:53.000Z
|
# AUTOGENERATED! DO NOT EDIT! File to edit: source_nbs/09_predefined_problems_ner.ipynb (unless otherwise specified).
__all__ = ['gold_horse_ent_type_process_fn', 'chinese_literature_ent_type_process_fn', 'read_ner_data',
'get_weibo_ner_fn', 'gold_horse_segment_process_fn', 'get_weibo_cws_fn', 'read_bosonnlp_data', 'read_msra',
'get_msra_ner_fn', 'get_boson_ner_fn', 'NER_TYPE']
# Cell
from glob import glob
import re
import random
from sklearn.model_selection import train_test_split
from ..utils import cluster_alphnum
from ..preproc_decorator import preprocessing_fn
NER_TYPE = ['LOC', # location
'GPE',
'PER', # person
'ORG', # organization
'PRD', # Product
]
def gold_horse_ent_type_process_fn(d):
"""golden horse ent type process fn
Source: https://github.com/hltcoe/golden-ho rse
Entity type:
B, I, O: Begining \ In middle \ Outside of entity
GPE: Country, City, District...
LOC: Location, zoo, school...
PER: Person
ORG: Organiazation
NAM: Entity
NOM: More general, 女生, 男的...
Example:
B-PER.NAM
Only keep NAM here
So after process:
B-PER
Arguments:
ent_type {str} -- ent type from gold_horse data
Returns:
str -- processed enttype
"""
ent_type = d.split('\t')[1].replace('\n', '')
# keep nam only
ent_type = ent_type if 'NAM' in ent_type else 'O'
ent_type = ent_type.replace('.NAM', '')
return ent_type
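
# Illustrative behaviour (added comment, not in the original file): the input
# is a raw "<char>\t<tag>\n" line from the golden-horse files, e.g.
#   gold_horse_ent_type_process_fn("张\tB-PER.NAM\n") -> "B-PER"
#   gold_horse_ent_type_process_fn("人\tB-PER.NOM\n") -> "O"
#   gold_horse_ent_type_process_fn("的\tO\n")          -> "O"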
def chinese_literature_ent_type_process_fn(d):
"""Not match my need
Arguments:
d {[type]} -- [description]
Returns:
[type] -- [description]
"""
ent_type = d.split(' ')[1].replace('\n', '')
return ent_type
def read_ner_data(file_pattern='data/ner/weiboNER*', proc_fn=None):
"""Read data from golden horse data
Arguments:
file_pattern {str} -- file patterns
Returns:
dict -- dict, key: 'train', 'eval', value: dict {'inputs', 'target'}
"""
result_dict = {
'train': {
'inputs': [],
'target': []
},
'eval': {
'inputs': [],
'target': []
}
}
file_list = glob(file_pattern)
for file_path in file_list:
with open(file_path, 'r', encoding='utf8') as f:
raw_data = f.readlines()
inputs_list = [[]]
target_list = [[]]
for d in raw_data:
if d != '\n':
# put first char to input
inputs_list[-1].append(d[0])
ent_type = proc_fn(d)
target_list[-1].append(ent_type)
else:
inputs_list.append([])
target_list.append([])
# remove trailing empty str/list
if not inputs_list[-1]:
del inputs_list[-1]
if not target_list[-1]:
del target_list[-1]
inputs_with_ent = []
target_with_ent = []
for inputs, target in zip(inputs_list, target_list):
# if len(set(target)) > 1:
inputs_with_ent.append(inputs)
target_with_ent.append(target)
if 'train' in file_path or 'dev' in file_path:
result_dict['train']['inputs'] = inputs_with_ent
result_dict['train']['target'] = target_with_ent
else:
result_dict['eval']['inputs'] = inputs_with_ent
result_dict['eval']['target'] = target_with_ent
return result_dict
def get_weibo_ner_fn(file_path):
@preprocessing_fn
def weibo_ner(params, mode):
data = read_ner_data(file_pattern=file_path,
proc_fn=gold_horse_ent_type_process_fn)
if mode == 'train':
data = data['train']
else:
data = data['eval']
inputs_list = data['inputs']
target_list = data['target']
return inputs_list, target_list
return weibo_ner
def gold_horse_segment_process_fn(d):
ent_type = d.split('\t')[0][-1]
if ent_type not in ['0', '1', '2']:
ent_type = '0'
return ent_type
def get_weibo_cws_fn(file_path):
@preprocessing_fn
def weibo_cws(params, mode):
data = read_ner_data(file_pattern=file_path,
proc_fn=gold_horse_segment_process_fn)
if mode == 'train':
data = data['train']
else:
data = data['eval']
inputs_list = data['inputs']
target_list = data['target']
return inputs_list, target_list
return weibo_cws
def read_bosonnlp_data(file_pattern, eval_size=0.2):
file_list = glob(file_pattern)
sentence_split = r'[!?。?!]'
project_table = {
'person_name': 'PER',
'company_name': 'ORG',
'location': 'LOC',
'product_name': 'PRD',
'time': 'TME',
'org_name': 'ORG2'
}
input_list = []
target_list = []
if not file_list:
raise FileNotFoundError('Please make sure you have downloaded BosonNLP\
data and put it in the path you specified. \
Download: https://bosonnlp.com/resources/BosonNLP_NER_6C.zip')
for file_path in file_list:
with open(file_path, 'r', encoding='utf8') as f:
data_list = f.readlines()
for doc in data_list:
if '}}}}' in doc:
continue
splited_doc = re.split(sentence_split, doc)
for sentence in splited_doc:
# split doc into sentences
input_list.append([])
target_list.append([])
# split by {{
doc_chunk_list = sentence.split('{{')
for chunk in doc_chunk_list:
if '}}' not in chunk or ':' not in chunk:
target_list[-1] += ['O']*len(chunk)
input_list[-1] += list(chunk)
else:
ent_chunk, text_chunk = chunk.split('}}')
punc_ind = ent_chunk.index(':')
ent_type = ent_chunk[:punc_ind]
ent = ent_chunk[punc_ind+1:]
if ent_type in project_table:
ent = cluster_alphnum(ent)
for char_ind, ent_char in enumerate(ent):
if char_ind == 0:
loc_char = 'B'
else:
loc_char = 'I'
target_list[-1].append(loc_char +
'-'+project_table[ent_type])
input_list[-1].append(ent_char)
else:
target_list[-1] += ['O']*len(ent)
input_list[-1] += list(ent)
target_list[-1] += ['O']*len(text_chunk)
input_list[-1] += list(text_chunk)
return_input, return_target = [], []
for inp, tar in zip(input_list, target_list):
if inp and tar:
return_input.append(inp)
return_target.append(tar)
assert len(inp) == len(tar)
train_input, eval_input, train_target, eval_target = train_test_split(
return_input, return_target, test_size=eval_size, random_state=1024)
result_dict = {
'train': {},
'eval': {}
}
result_dict['train']['inputs'] = train_input
result_dict['train']['target'] = train_target
result_dict['eval']['inputs'] = eval_input
result_dict['eval']['target'] = eval_target
return result_dict
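
# Worked example of the BosonNLP markup handled above (illustrative sentence,
# not from the original file): the raw text "{{person_name:张三}}去{{location:北京}}"
# is split on "{{" and its characters are tagged
#   张 -> B-PER, 三 -> I-PER, 去 -> O, 北 -> B-LOC, 京 -> I-LOC
# before the train/eval split is applied.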
def read_msra(file_pattern, eval_size):
file_list = glob(file_pattern)
project_table = {
'nr': 'PER',
'nt': 'ORG',
'ns': 'LOC'
}
input_list = []
target_list = []
for file_path in file_list:
with open(file_path, 'r', encoding='utf8') as f:
data_list = f.readlines()
for sentence in data_list:
sentence = sentence.replace('\n', '')
input_list.append([])
target_list.append([])
sentence_word_list = sentence.split(' ')
for word in sentence_word_list:
if word:
ent, ent_type = word.split('/')
ent = cluster_alphnum(ent)
if ent_type not in project_table:
input_list[-1] += list(ent)
target_list[-1] += ['O'] * len(ent)
else:
for char_ind, ent_char in enumerate(ent):
if char_ind == 0:
loc_char = 'B'
else:
loc_char = 'I'
target_list[-1].append(loc_char +
'-'+project_table[ent_type])
input_list[-1].append(ent_char)
return_input, return_target = [], []
for inp, tar in zip(input_list, target_list):
if inp and tar:
return_input.append(inp)
return_target.append(tar)
assert len(inp) == len(tar)
train_input, eval_input, train_target, eval_target = train_test_split(
return_input, return_target, test_size=eval_size, random_state=1024)
result_dict = {
'train': {},
'eval': {}
}
result_dict['train']['inputs'] = train_input
result_dict['train']['target'] = train_target
result_dict['eval']['inputs'] = eval_input
result_dict['eval']['target'] = eval_target
return result_dict
def get_msra_ner_fn(file_path):
@preprocessing_fn
def msra_ner(params, mode):
msra_data = read_msra(
file_pattern=file_path, eval_size=0.2)
inputs_list = []
target_list = []
for data in [msra_data]:
if mode == 'train':
inputs_list += data['train']['inputs']
target_list += data['train']['target']
else:
inputs_list += data['eval']['inputs']
target_list += data['eval']['target']
return inputs_list, target_list
return msra_ner
def get_boson_ner_fn(file_path):
@preprocessing_fn
def boson_ner(params, mode):
boson_data = read_bosonnlp_data(
file_pattern=file_path, eval_size=0.2)
inputs_list = []
target_list = []
for data in [boson_data]:
if mode == 'train':
inputs_list += data['train']['inputs']
target_list += data['train']['target']
else:
inputs_list += data['eval']['inputs']
target_list += data['eval']['target']
return inputs_list, target_list
return boson_ner
| 30.777465
| 118
| 0.528739
|
79269e8cdd6f3cc7473709460bfe901f53d26d14
| 4,686
|
py
|
Python
|
contrib/python-sdk/openpaisdk/io_utils.py
|
vdedyukhin/pai
|
0b63e1590f63b9de2f7038900664af4622d22354
|
[
"MIT"
] | 2
|
2018-07-16T03:40:31.000Z
|
2018-11-15T02:45:59.000Z
|
contrib/python-sdk/openpaisdk/io_utils.py
|
ydye/patch-for-openpai-build
|
8a004492326db398ddc7b006f67f8997c0b15801
|
[
"MIT"
] | 139
|
2019-07-05T06:22:28.000Z
|
2020-07-07T06:33:57.000Z
|
contrib/python-sdk/openpaisdk/io_utils.py
|
ydye/patch-for-openpai-build
|
8a004492326db398ddc7b006f67f8997c0b15801
|
[
"MIT"
] | null | null | null |
import os
import errno
import shutil
from webbrowser import open_new_tab
from contextlib import contextmanager
import json
from openpaisdk import __logger__, __local_default_file__
from urllib.request import urlopen
from urllib.parse import urlparse, urlsplit
from urllib.request import urlretrieve
import cgi
__yaml_exts__ = ['.yaml', '.yml']
__json_exts__ = ['.json', '.jsn']
def get_defaults():
if os.path.isfile(__local_default_file__):
return from_file(__local_default_file__, default="==FATAL==")
return {}
def browser_open(url: str):
__logger__.info("open in browser: %s", url)
try:
open_new_tab(url)
except Exception as e:
__logger__.warn("failed to open %s due to %s", url, e)
def return_default_if_error(func):
def f(*args, default="==FATAL==", **kwargs):
try:
return func(*args, **kwargs)
except Exception as identifier:
if default == "==FATAL==":
__logger__.error('Error: %s', identifier, exc_info=True)
__logger__.warn('error occurs when reading %s (%s), return default (%s)', args, identifier, default)
return default
return f
@return_default_if_error
def from_json_file(fname: str, **kwargs):
import json
with open(fname) as fp:
return json.load(fp, **kwargs)
@return_default_if_error
def from_yaml_file(fname: str, **kwargs):
import yaml
with open(fname) as fp:
kwargs.setdefault('Loader', yaml.FullLoader)
return yaml.load(fp, **kwargs)
def get_url_filename_from_server(url):
try:
blah = urlopen(url).info()['Content-Disposition']
_, params = cgi.parse_header(blah)
return params["filename"]
except Exception as e:
__logger__.warn('Failed to get filename from server: %s', e)
return None
def web_download_to_folder(url: str, folder: str, filename: str=None):
if not filename:
split = urlsplit(url)
filename = split.path.split("/")[-1]
filename = os.path.join(folder, filename)
os.makedirs(folder, exist_ok=True)
try:
urlretrieve(url, filename)
__logger__.info('download from %s to %s', url, filename)
return filename
except Exception as e:
__logger__.error("failed to download", exc_info=True)
def from_file(fname: str, default={}, fmt: str=None, **kwargs):
if fmt == "json" or os.path.splitext(fname)[1] in __json_exts__:
return from_json_file(fname, default=default, **kwargs)
if fmt == "yaml" or os.path.splitext(fname)[1] in __yaml_exts__:
return from_yaml_file(fname, default=default, **kwargs)
def mkdir_for(pth: str):
d = os.path.dirname(pth)
if d:
os.makedirs(d, exist_ok=True)
return d
def file_func(kwargs: dict, func=shutil.copy2, tester: str='dst'):
try:
return func(**kwargs)
except IOError as identifier:
# ENOENT(2): file does not exist, raised also on missing dest parent dir
if identifier.errno != errno.ENOENT:
print(identifier.__dict__)
assert tester in kwargs.keys(), 'wrong parameter {}'.format(tester)
os.makedirs(os.path.dirname(kwargs[tester]), exist_ok=True)
return func(**kwargs)
except Exception as identifier:
print(identifier)
return None
@contextmanager
def safe_open(filename: str, mode: str='r', **kwargs):
"if directory of filename doesnot exist, create it first"
args = dict(kwargs)
args.update({'file':filename, 'mode':mode})
fn = file_func(args, func=open, tester='file')
yield fn
fn.close()
@contextmanager
def safe_chdir(pth:str):
"safely change directory to pth, and then go back"
currdir = os.getcwd()
try:
if not pth:
pth = currdir
os.chdir(pth)
__logger__.info("changing directory to %s", pth)
yield pth
finally:
os.chdir(currdir)
__logger__.info("changing directory back to %s", currdir)
def safe_copy(src: str, dst: str):
"if directory of filename doesnot exist, create it first"
return file_func({'src':src, 'dst':dst})
def to_file(obj, fname: str, fmt=None, **kwargs):
if not fmt:
_, ext = os.path.splitext(fname)
if ext in __json_exts__:
fmt, dic = json, dict(indent=4)
elif ext in __yaml_exts__:
import yaml
fmt, dic = yaml, dict(default_flow_style=False)
else:
raise NotImplementedError
dic.update(kwargs)
else:
dic = kwargs
with safe_open(fname, 'w') as fp:
fmt.dump(obj, fp, **dic)
__logger__.debug("serialize object to file %s", fname)
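
# A minimal usage sketch (added for illustration, not part of the SDK): to_file
# picks the serializer from the file extension, and safe_open creates missing
# parent directories first. The path below is arbitrary.
if __name__ == "__main__":
    cfg = {"cluster": "example", "jobs": 3}
    to_file(cfg, "/tmp/openpai-demo/config.yaml")  # written as YAML because of the extension
    print(from_file("/tmp/openpai-demo/config.yaml"))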
| 29.847134
| 112
| 0.644473
|
4633e0fbd06a016ff934d0e8d1e0ad2875a10f04
| 1,294
|
py
|
Python
|
dashboard/dashboard/common/bot_configurations.py
|
bopopescu/chromium72-third-party-catapult
|
774e1355b871e13bb858147a136e9cb476f55030
|
[
"BSD-3-Clause"
] | 1
|
2019-01-04T10:08:58.000Z
|
2019-01-04T10:08:58.000Z
|
dashboard/dashboard/common/bot_configurations.py
|
kind-john/catapult
|
29635376119833f172a58a48a3282d353ce55d2b
|
[
"BSD-3-Clause"
] | null | null | null |
dashboard/dashboard/common/bot_configurations.py
|
kind-john/catapult
|
29635376119833f172a58a48a3282d353ce55d2b
|
[
"BSD-3-Clause"
] | 1
|
2019-04-21T23:48:15.000Z
|
2019-04-21T23:48:15.000Z
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import string
from google.appengine.ext import ndb
from dashboard.common import namespaced_stored_object
BOT_CONFIGURATIONS_KEY = 'bot_configurations'
def Get(name):
configurations = namespaced_stored_object.Get(BOT_CONFIGURATIONS_KEY)
configuration = configurations[name]
if 'alias' in configuration:
return configurations[configuration['alias']]
return configuration
@ndb.tasklet
def GetAliasesAsync(bot):
aliases = {bot}
configurations = yield namespaced_stored_object.GetAsync(
BOT_CONFIGURATIONS_KEY)
if not configurations or bot not in configurations:
raise ndb.Return(aliases)
if 'alias' in configurations[bot]:
bot = configurations[bot]['alias']
aliases.add(bot)
for name, configuration in configurations.iteritems():
if configuration.get('alias') == bot:
aliases.add(name)
raise ndb.Return(aliases)
def List():
bot_configurations = namespaced_stored_object.Get(BOT_CONFIGURATIONS_KEY)
canonical_names = [name for name, value in bot_configurations.iteritems()
if 'alias' not in value]
return sorted(canonical_names, key=string.lower)
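
# Illustrative behaviour (added comment, not part of the original module),
# assuming the stored BOT_CONFIGURATIONS_KEY value is
#   {'linux-perf': {'builder': 'x'}, 'linux': {'alias': 'linux-perf'}}
# then Get('linux') follows the alias and returns {'builder': 'x'}, while
# List() returns only the canonical names, i.e. ['linux-perf'].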
| 29.409091
| 75
| 0.757342
|
13e30ce01654129e27047def0c3f660c1403563a
| 5,414
|
py
|
Python
|
tests/commands/test_teams.py
|
Teja-Nagoori/platformio-core
|
e0e97a36297852128016b2ba8360d77f64b276db
|
[
"Apache-2.0"
] | 1
|
2020-06-09T05:03:45.000Z
|
2020-06-09T05:03:45.000Z
|
tests/commands/test_teams.py
|
Teja7048/platformio-core
|
e0e97a36297852128016b2ba8360d77f64b276db
|
[
"Apache-2.0"
] | null | null | null |
tests/commands/test_teams.py
|
Teja7048/platformio-core
|
e0e97a36297852128016b2ba8360d77f64b276db
|
[
"Apache-2.0"
] | 1
|
2020-06-05T18:50:48.000Z
|
2020-06-05T18:50:48.000Z
|
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import time
import pytest
from platformio.commands.account import cli as cmd_account
from platformio.commands.org import cli as cmd_org
from platformio.commands.team import cli as cmd_team
pytestmark = pytest.mark.skipif(
not (
os.environ.get("PLATFORMIO_TEST_ACCOUNT_LOGIN")
and os.environ.get("PLATFORMIO_TEST_ACCOUNT_PASSWORD")
),
reason="requires PLATFORMIO_TEST_ACCOUNT_LOGIN, PLATFORMIO_TEST_ACCOUNT_PASSWORD environ variables",
)
@pytest.fixture(scope="session")
def credentials():
return {
"login": os.environ["PLATFORMIO_TEST_ACCOUNT_LOGIN"],
"password": os.environ["PLATFORMIO_TEST_ACCOUNT_PASSWORD"],
}
def test_teams(clirunner, credentials, validate_cliresult, isolated_pio_home):
orgname = ""
teamname = "test-" + str(int(time.time() * 1000))
try:
result = clirunner.invoke(
cmd_account,
["login", "-u", credentials["login"], "-p", credentials["password"]],
)
validate_cliresult(result)
assert "Successfully logged in!" in result.output
result = clirunner.invoke(cmd_org, ["list", "--json-output"],)
validate_cliresult(result)
json_result = json.loads(result.output.strip())
if len(json_result) < 3:
for i in range(3 - len(json_result)):
result = clirunner.invoke(
cmd_org,
[
"create",
"%s-%s" % (i, credentials["login"]),
"--email",
"test@test.com",
"--display-name",
"TEST ORG %s" % i,
],
)
validate_cliresult(result)
result = clirunner.invoke(cmd_org, ["list", "--json-output"],)
validate_cliresult(result)
json_result = json.loads(result.output.strip())
assert len(json_result) >= 3
orgname = json_result[0].get("orgname")
result = clirunner.invoke(
cmd_team,
[
"create",
"%s:%s" % (orgname, teamname),
"--description",
"team for CI test",
],
)
validate_cliresult(result)
result = clirunner.invoke(cmd_team, ["list", "%s" % orgname, "--json-output"],)
validate_cliresult(result)
json_result = json.loads(result.output.strip())
assert len(json_result) >= 1
check = False
for team in json_result:
assert team["id"]
assert team["name"]
if team["name"] == teamname:
check = True
assert "description" in team
assert "members" in team
assert check
result = clirunner.invoke(
cmd_team, ["add", "%s:%s" % (orgname, teamname), credentials["login"]],
)
validate_cliresult(result)
result = clirunner.invoke(cmd_team, ["list", "%s" % orgname, "--json-output"],)
validate_cliresult(result)
json_result = json.loads(result.output.strip())
check = False
for team in json_result:
assert team["id"]
assert team["name"]
assert "description" in team
assert "members" in team
if (
len(team["members"]) > 0
and team["members"][0]["username"] == credentials["login"]
):
check = True
assert check
result = clirunner.invoke(
cmd_team, ["remove", "%s:%s" % (orgname, teamname), credentials["login"]],
)
validate_cliresult(result)
result = clirunner.invoke(cmd_team, ["list", "%s" % orgname, "--json-output"],)
validate_cliresult(result)
result = clirunner.invoke(
cmd_team,
[
"update",
"%s:%s" % (orgname, teamname),
"--description",
"Updated Description",
],
)
validate_cliresult(result)
result = clirunner.invoke(cmd_team, ["list", "%s" % orgname, "--json-output"],)
validate_cliresult(result)
json_result = json.loads(result.output.strip())
assert len(json_result) >= 1
check = False
for team in json_result:
assert team["id"]
assert team["name"]
assert "description" in team
if team.get("description") == "Updated Description":
check = True
assert "members" in team
assert check
finally:
clirunner.invoke(
cmd_team, ["destroy", "%s:%s" % (orgname, teamname),],
)
clirunner.invoke(cmd_account, ["logout"])
| 34.050314
| 104
| 0.561507
|
48e1c17d55fdc70de9b3b9ba649c2ffc96f82ebc
| 17,431
|
py
|
Python
|
scripts/releaser/release.py
|
aag09/azurecli
|
30c98a75c36c02a657f1753ff5c48502dc7f7933
|
[
"MIT"
] | null | null | null |
scripts/releaser/release.py
|
aag09/azurecli
|
30c98a75c36c02a657f1753ff5c48502dc7f7933
|
[
"MIT"
] | null | null | null |
scripts/releaser/release.py
|
aag09/azurecli
|
30c98a75c36c02a657f1753ff5c48502dc7f7933
|
[
"MIT"
] | 1
|
2017-12-28T04:51:44.000Z
|
2017-12-28T04:51:44.000Z
|
#!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
from __future__ import print_function, unicode_literals
import os
import sys
import tempfile
import glob
import re
import time
import fileinput
import requests
import hashlib
from datetime import datetime
from subprocess import check_call, check_output, CalledProcessError
from uritemplate import URITemplate, expand
script_env = {}
def add_script_env(name):
script_env[name] = os.environ.get(name)
add_script_env('REPO_NAME')
add_script_env('GITHUB_USER')
add_script_env('GITHUB_USER_TOKEN')
add_script_env('PYPI_REPO')
# although not used directly here, twine env vars are needed for releasing
add_script_env('TWINE_USERNAME')
add_script_env('TWINE_PASSWORD')
# the new version of the CLI
add_script_env('CLI_VERSION')
add_script_env('AZURE_STORAGE_CONNECTION_STRING')
assert all(script_env[n] is not None for n in script_env), "Not all required environment variables have been set. {}".format(script_env)
GITHUB_API_AUTH = (script_env.get('GITHUB_USER'), script_env.get('GITHUB_USER_TOKEN'))
GITHUB_API_HEADERS = {'Accept': 'application/vnd.github.v3+json', 'user-agent': 'azure-cli-pypi-github-releaser/v1'}
SOURCE_ARCHIVE_NAME = 'source.tar.gz'
GITHUB_RELEASE_BODY_TMPL = """
The module has been published to PyPI.
View HISTORY.rst of the module for a changelog.
{}
Full release notes at https://docs.microsoft.com/en-us/cli/azure/release-notes-azure-cli
"""
COMMAND_MODULE_PREFIX = 'azure-cli-'
MODULES_TO_ALWAYS_RELEASE = ['azure-cli']
MODULES_TO_SKIP = ['azure-cli-testsdk']
def give_chance_to_cancel(msg_prefix=''):
cancel_time_secs = 10
msg_tmpl = '{}: Starting in {} seconds.'
for i in range(cancel_time_secs, 0, -1):
print_status(msg_tmpl.format(msg_prefix, i))
time.sleep(1)
def print_env_vars():
for n in script_env:
print('{} = {}'.format(n, script_env[n]))
def print_status(msg=''):
print('-- '+msg)
def print_heading(heading):
print('{0}\n{1}\n{0}'.format('=' * len(heading), heading))
def _get_core_modules_paths(repo_working_dir):
for path in glob.glob(repo_working_dir + '/src/*/setup.py'):
yield os.path.basename(os.path.dirname(path)), os.path.dirname(path)
def _get_command_modules_paths(repo_working_dir, include_prefix=False):
for path in glob.glob(repo_working_dir + '/src/command_modules/{}*/setup.py'.format(
COMMAND_MODULE_PREFIX)):
folder = os.path.dirname(path)
name = os.path.basename(folder)
if not include_prefix:
name = name[len(COMMAND_MODULE_PREFIX):]
yield name, folder
def _get_all_module_paths(repo_working_dir):
return list(_get_core_modules_paths(repo_working_dir)) + list(_get_command_modules_paths(repo_working_dir, include_prefix=True))
def _get_current_module_version(mod_path):
mod_version = None
with open(os.path.join(mod_path, 'setup.py'), 'r') as fh:
version_re = re.compile('VERSION = *')
lines = fh.readlines()
for _, line in enumerate(lines):
if version_re.match(line):
mod_version = line.split('=')[1].strip(' "\'').split('+')[0]
return mod_version
def clone_repo(repo_working_dir):
check_call(['git', 'clone', 'https://github.com/{}'.format(script_env.get('REPO_NAME')), repo_working_dir])
check_call(['git', 'checkout', 'master'], cwd=repo_working_dir)
def should_release_module(mod_name, mod_path, repo_working_dir):
if mod_name in MODULES_TO_ALWAYS_RELEASE:
print_status('We always release {}.'.format(mod_name))
return True
if mod_name in MODULES_TO_SKIP:
print_status('Skipping module {} as in modules to skip list.'.format(mod_name))
return False
# Determine if should release based on the current version
cur_mod_version = _get_current_module_version(mod_path)
r_start = '{}-{}'.format(mod_name, cur_mod_version)
revision_range = "{}..{}".format(r_start, 'HEAD')
try:
module_changes = check_output(["git", "log", "--pretty=format:* %s", revision_range, "--", mod_path, ":(exclude)*/tests/*"],
cwd=repo_working_dir)
except CalledProcessError:
# Maybe the revision_range is invalid if this is a new module.
return True
if module_changes:
print_status('Begin changes in {}'.format(mod_name))
print(str(module_changes, 'utf-8'))
print_status('End changes in {}'.format(mod_name))
return True
print_status('Skipping module {} as there are no changes.'.format(mod_name))
return False
def modify_setuppy_version(mod_name, mod_path):
setuppy_path = os.path.join(mod_path, 'setup.py')
with open(setuppy_path, 'r') as fh:
version_re = re.compile('VERSION = *')
lines = fh.readlines()
for index, line in enumerate(lines):
if version_re.match(line):
old_version = line.split('=')[1].strip(' "\'').split('+')[0]
major, minor, rev = old_version.split('.')
rev = int(rev) + 1
version = '{}.{}.{}'.format(major, minor, rev)
lines[index] = 'VERSION = "{}+dev"\n'.format(version)
update_setup = lines
break
else:
raise ValueError('In the setup file {}, version is not found.'.format(setuppy_path))
if update_setup:
with open(setuppy_path, 'w') as fh:
fh.writelines(update_setup)
else:
raise ValueError('No updated content for setup.py in {}.'.format(mod_name))
return old_version, version
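
# Worked example of the bump above (illustrative version number): a setup.py
# line VERSION = "2.0.27+dev" yields old_version "2.0.27"; the revision part is
# incremented and written back as VERSION = "2.0.28+dev", so the function
# returns ("2.0.27", "2.0.28").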
def modify_initpy_version(mod_name, mod_path, old_version, new_version):
if mod_name == 'azure-cli':
path_to_init = os.path.join(mod_path, 'azure', 'cli', '__init__.py')
elif mod_name == 'azure-cli-core':
path_to_init = os.path.join(mod_path, 'azure', 'cli', 'core', '__init__.py')
for _, line in enumerate(fileinput.input(path_to_init, inplace=1)):
if line.startswith('__version__'):
sys.stdout.write(line.replace(old_version, new_version))
else:
sys.stdout.write(line)
def modify_historyrst(mod_name, mod_path, old_version, new_version):
historyrst_path = os.path.join(mod_path, 'HISTORY.rst')
new_history_lines = []
just_seen_unreleased = False
contains_unreleased = False
with open(historyrst_path, 'r') as fq:
lines = fq.readlines()
for _, line in enumerate(lines):
if 'unreleased' in line.lower() and not line.startswith('* '):
contains_unreleased = True
if contains_unreleased:
for _, line in enumerate(lines):
if just_seen_unreleased:
# skip the line as it's just a heading for the old unreleased section
just_seen_unreleased = False
continue
if 'unreleased' in line.lower() and not line.startswith('* '):
new_heading = '{} ({})'.format(new_version, datetime.utcnow().strftime('%Y-%m-%d'))
line = '{}\n{}\n'.format(new_heading, '+' * len(new_heading))
just_seen_unreleased = True
new_history_lines.append(line)
else:
for index, line in enumerate(lines):
if line.startswith('Release History'):
begin = index + 2
if old_version in line:
end = index
break
new_heading = '{} ({})'.format(new_version, datetime.utcnow().strftime('%Y-%m-%d'))
line = '{}\n{}\n'.format(new_heading, '+' * len(new_heading))
release_notes = [line]
if mod_name in MODULES_TO_ALWAYS_RELEASE:
release_notes.append('* no changes\n\n')
else:
release_notes.append('* minor fixes\n\n')
new_history_lines = lines[:begin] + release_notes + lines[end:]
with open(historyrst_path, 'w') as fq:
fq.writelines(new_history_lines)
def release_module(mod_name, mod_path, repo_working_dir):
# Change version in setup.py
old_version, new_version = modify_setuppy_version(mod_name, mod_path)
# Need to modify __init__.py for these modules as well
if mod_name in ['azure-cli', 'azure-cli-core']:
modify_initpy_version(mod_name, mod_path, old_version, new_version)
# Modify HISTORY.rst
modify_historyrst(mod_name, mod_path, old_version, new_version)
# Create commit with appropriate message.
commit_message = 'Release {} {}'.format(mod_name, new_version)
check_call(['git', 'commit', '-am', commit_message], cwd=repo_working_dir)
commitish = check_output(['git', 'rev-parse', 'HEAD'], cwd=repo_working_dir)
commitish = str(commitish, 'utf-8')
commitish = commitish.strip()
return mod_name, commitish, new_version
def install_cli_into_venv():
venv_dir = tempfile.mkdtemp()
check_call(['virtualenv', venv_dir])
path_to_pip = os.path.join(venv_dir, 'bin', 'pip')
extra_index_url = 'https://testpypi.python.org/simple' if script_env.get('PYPI_REPO') == 'https://test.pypi.org/legacy/' else None
args = [path_to_pip, 'install', 'azure-cli']
if extra_index_url:
args.extend(['--extra-index-url', extra_index_url])
check_call(args)
deps = check_output([path_to_pip, 'freeze'])
deps = str(deps, 'utf-8')
deps = deps.split('\n')
cli_components = []
for dep in deps:
if dep.startswith('azure-cli'):
cli_components.append(dep.split('=='))
return cli_components
def run_push_to_git():
repo_working_dir = tempfile.mkdtemp()
clone_repo(repo_working_dir)
configure_git(repo_working_dir)
commitish_list = []
for mod_name, mod_path in _get_all_module_paths(repo_working_dir):
print_heading(mod_name.upper())
if should_release_module(mod_name, mod_path, repo_working_dir):
mod_name, commitish, new_version = release_module(mod_name, mod_path, repo_working_dir)
commitish_list.append((mod_name, commitish, new_version))
else:
print_status('Skipped {}'.format(mod_name))
# Push all commits to master.
check_call(['git', 'push', '-f', 'origin', 'master'], cwd=repo_working_dir)
return commitish_list
def set_up_cli_repo_dir():
working_dir = tempfile.mkdtemp()
check_call(['git', 'clone', 'https://github.com/{}'.format(script_env.get('REPO_NAME')), working_dir])
check_call(['pip', 'install', '-e', 'tools'], cwd=working_dir)
return working_dir
def publish_to_pypi(working_dir, commitish_list):
# Publish all in commitish list to PyPI
assets_dir_map = {}
for mod_name, commitish, _ in commitish_list:
assets_dir = tempfile.mkdtemp()
check_call(['git', 'checkout', commitish], cwd=working_dir)
check_call(['python', '-m', 'tools.automation.release.run', '-c', mod_name,
'-r', script_env.get('PYPI_REPO'), '--dest', assets_dir], cwd=working_dir)
assets_dir_map[mod_name] = assets_dir
# reset back
check_call(['git', 'checkout', 'master'], cwd=working_dir)
return assets_dir_map
def upload_asset(upload_uri_tmpl, filepath, label):
filename = os.path.basename(filepath)
upload_url = URITemplate(upload_uri_tmpl).expand(name=filename, label=label)
headers = GITHUB_API_HEADERS
headers['Content-Type'] = 'application/octet-stream'
with open(filepath, 'rb') as payload:
requests.post(upload_url, data=payload, auth=GITHUB_API_AUTH, headers=headers)
def upload_assets_for_github_release(upload_uri_tmpl, component_name, component_version, assets_dir):
for filename in os.listdir(assets_dir):
fullpath = os.path.join(assets_dir, filename)
if filename == SOURCE_ARCHIVE_NAME:
upload_asset(upload_uri_tmpl, fullpath, '{} {} source code (.tar.gz)'.format(component_name, component_version))
elif filename.endswith('.tar.gz'):
upload_asset(upload_uri_tmpl, fullpath, '{} {} Source Distribution (.tar.gz)'.format(component_name, component_version))
elif filename.endswith('.whl'):
upload_asset(upload_uri_tmpl, fullpath, '{} {} Python Wheel (.whl)'.format(component_name, component_version))
def run_create_github_release(commitish_list, assets_dir_map):
# Create Github release (inc. the artifacts .whl etc.).
print_heading('Creating GitHub releases')
for mod_name, commitish, mod_version in commitish_list:
print_status('Publishing GitHub release for {} {}'.format(mod_name, mod_version))
tag_name = '{}-{}'.format(mod_name, mod_version)
release_name = "{} {}".format(mod_name, mod_version)
if script_env.get('PYPI_REPO') == 'https://upload.pypi.org/legacy/':
released_pypi_url = 'https://pypi.org/project/{}/{}'.format(mod_name, mod_version)
elif script_env.get('PYPI_REPO') == 'https://test.pypi.org/legacy/':
released_pypi_url = 'https://test.pypi.org/project/{}/{}'.format(mod_name, mod_version)
else:
released_pypi_url = ''
payload = {'tag_name': tag_name, "target_commitish": commitish, "name": release_name, "body": GITHUB_RELEASE_BODY_TMPL.format(released_pypi_url), "prerelease": False}
r = requests.post('https://api.github.com/repos/{}/releases'.format(script_env.get('REPO_NAME')), json=payload, auth=GITHUB_API_AUTH, headers=GITHUB_API_HEADERS)
if r.status_code == 201:
upload_url = r.json()['upload_url']
upload_assets_for_github_release(upload_url, mod_name, mod_version, assets_dir_map[mod_name])
print_status('Published GitHub release for {} {}'.format(mod_name, mod_version))
else:
print_status('ERROR: Failed to create GitHub release for {} {}'.format(mod_name, mod_version))
def run_create_packaged_release(working_dir):
# After releasing, create a new venv, and pip install and verify then create
# list of components for the package release step.
print_status('Start installing CLI into venv')
components_list = install_cli_into_venv()
print_status('Finished installing CLI into venv')
archive_dir = tempfile.mkdtemp()
# create the packaged releases automatically
args = ['python', '-m', 'tools.automation.release.packaged', '--version', script_env.get('CLI_VERSION'), '--dest', archive_dir, '--components']
for name, version in components_list:
# The tag for this module is slightly different so make that change.
if name == 'azure-cli-command-modules-nspkg':
name = 'azure-cli-command_modules-nspkg'
args.append('{}={}'.format(name, version))
print_status(' '.join(args))
check_call(args, cwd=working_dir)
print_status('Created packaged release in dir {}'.format(archive_dir))
# Get the sha256sum
archive_file_name = os.listdir(archive_dir)[0]
archive_file_path = os.path.join(archive_dir, archive_file_name)
sha256 = hashlib.sha256()
with open(archive_file_path, 'rb') as f:
sha256.update(f.read())
computed_hash = sha256.hexdigest()
print_status('SHA256 of {} is {}'.format(archive_file_path, computed_hash))
# Upload release archive to Azure Storage
check_call(['az', 'storage', 'blob', 'upload', '--file', archive_file_path, '--name', archive_file_name, '--container-name', 'releases', '--connection-string', script_env.get('AZURE_STORAGE_CONNECTION_STRING')])
archive_url = check_output(['az', 'storage', 'blob', 'url', '--name', archive_file_name, '--container-name', 'releases', '--connection-string', script_env.get('AZURE_STORAGE_CONNECTION_STRING'), '--output', 'tsv'])
archive_url = str(archive_url, 'utf-8')
archive_url = archive_url.strip()
print_status('Archive URL is {}'.format(archive_url))
def configure_git(repo_working_dir):
check_call(['git', 'config', 'user.email', '{}@users.noreply.github.com'.format(script_env.get('GITHUB_USER'))], cwd=repo_working_dir)
check_call(['git', 'config', 'user.name', script_env.get('GITHUB_USER')], cwd=repo_working_dir)
check_call(['git', 'remote', 'set-url', 'origin', 'https://{}:{}@github.com/{}'.format(script_env.get('GITHUB_USER'), script_env.get('GITHUB_USER_TOKEN'), script_env.get('REPO_NAME'))], cwd=repo_working_dir)
if __name__ == "__main__":
print_env_vars()
give_chance_to_cancel('Create Git release commits')
release_commitish_list = run_push_to_git()
cli_repo_dir = set_up_cli_repo_dir()
give_chance_to_cancel('Publish to PyPI')
release_assets_dir_map = publish_to_pypi(cli_repo_dir, release_commitish_list)
give_chance_to_cancel('Create GitHub releases and tags')
run_create_github_release(release_commitish_list, release_assets_dir_map)
give_chance_to_cancel('Create Packaged Release archive')
# We need to clone the repo again as we've now pushed the git tags and we need them to create the packaged release.
# (we could do 'git pull' but this is easier and uses a clean directory just to be safe)
cli_repo_dir = set_up_cli_repo_dir()
run_create_packaged_release(cli_repo_dir)
print_status('Done.')
| 46.731903
| 218
| 0.674373
|
697622be9db126fbf5d638b17dadece2f92b3638
| 23,758
|
py
|
Python
|
tests/test_objects.py
|
AvRajath/infoblox-client
|
357317e00db0f95e50afff4493fa7ff67f2bfcb5
|
[
"Apache-2.0"
] | null | null | null |
tests/test_objects.py
|
AvRajath/infoblox-client
|
357317e00db0f95e50afff4493fa7ff67f2bfcb5
|
[
"Apache-2.0"
] | null | null | null |
tests/test_objects.py
|
AvRajath/infoblox-client
|
357317e00db0f95e50afff4493fa7ff67f2bfcb5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
import copy
import mock
from infoblox_client import objects
REC = 'ZG5zLmJpbmRfbXgkLjQuY29tLm15X3pvbmUuZGVtby5teC5kZW1vLm15X3pvbmUuY29tLjE'
DEFAULT_HOST_RECORD = {
'_ref': 'record:host/ZG5zLmhvc3QkLl9kZWZhdWx0LmNvbS5nbG9iYWwuY22NA'
':test_host_name.testsubnet.cloud.global.com/default',
'ipv4addrs': [{
'configure_for_dhcp': False,
'_ref': 'record:host_ipv4addr/lMmQ3ZjkuM4Zj5Mi00Y2:22.0.0.2/'
'test_host_name.testsubnet.cloud.global.com/default',
'ipv4addr': '22.0.0.2',
'mac': 'fa:16:3e:29:87:70',
'host': '2c8f8e97-0d92-4cac-a350-096ff2b79.cloud.global.com'}],
'extattrs': {
'Account': {'value': '8a21c40495f04f30a1b2dc6fd1d9ed1a'},
'Cloud API Owned': {'value': 'True'},
'VM ID': {'value': 'None'},
'IP Type': {'value': 'Fixed'},
'CMP Type': {'value': 'OpenStack'},
'Port ID': {'value': '136ef9ad-9c88-41ea-9fa6-bd48d8ec789a'},
'Tenant ID': {'value': '00fd80791dee4112bb538c872b206d4c'}}
}
DEFAULT_MX_RECORD = {
'_ref': 'record:mx/%s'
'mx.demo.my_zone.com/my_dns_view' % REC,
'view': 'my_dns_view',
'name': 'mx.demo.my_zone.com',
'preference': '1',
'mail_exchanger': 'demo.my_zone.com'
}
DEFAULT_TXT_RECORD = {
'_ref': 'record:txt/%s'
'text_test.my_zone.com/my_dns_view' % REC,
'view': 'my_dns_view',
'name': 'text_test.my_zone.com',
'text': 'hello_test'
}
class TestObjects(unittest.TestCase):
def _mock_connector(self, get_object=None, create_object=None,
delete_object=None):
connector = mock.Mock()
connector.get_object.return_value = get_object
connector.create_object.return_value = create_object
connector.delete_object.return_value = delete_object
return connector
def test_search_network(self):
connector = self._mock_connector()
objects.Network.search(connector,
network_view='some-view',
cidr='192.68.1.0/20')
connector.get_object.assert_called_once_with(
'network',
{'network_view': 'some-view', 'network': '192.68.1.0/20'},
extattrs=None, force_proxy=False, return_fields=mock.ANY,
max_results=None)
def test_search_network_v6(self):
connector = self._mock_connector()
objects.Network.search(connector,
network_view='some-view',
cidr='fffe:2312::/64')
connector.get_object.assert_called_once_with(
'ipv6network',
{'network_view': 'some-view', 'network': 'fffe:2312::/64'},
extattrs=None, force_proxy=False, return_fields=mock.ANY,
max_results=None)
def test_search_network_v6_using_network_field(self):
connector = self._mock_connector()
objects.Network.search(connector,
network_view='some-view',
network='fffe:2312::/64')
connector.get_object.assert_called_once_with(
'ipv6network',
{'network_view': 'some-view', 'network': 'fffe:2312::/64'},
extattrs=None, force_proxy=False, return_fields=mock.ANY,
max_results=None)
def test_search_network_with_results(self):
found = {"_ref": "network/ZG5zLm5ldHdvcmskMTAuMzkuMTEuMC8yNC8w"
":10.39.11.0/24/default",
"network_view": 'some-view',
"network": '192.68.1.0/20'}
connector = self._mock_connector(get_object=[found])
network = objects.Network.search(connector,
network_view='some-view',
cidr='192.68.1.0/20')
connector.get_object.assert_called_once_with(
'network',
{'network_view': 'some-view', 'network': '192.68.1.0/20'},
extattrs=None, force_proxy=False, return_fields=mock.ANY,
max_results=None)
self.assertEqual('192.68.1.0/20', network.network)
self.assertEqual('some-view', network.network_view)
# verify aliased fields works too
self.assertEqual('192.68.1.0/20', network.cidr)
def test_create_IP(self):
ip = objects.IP.create(ip='192.168.1.12', mac='4a:ac:de:12:34:45')
self.assertIsInstance(ip, objects.IPv4)
self.assertEqual('192.168.1.12', ip.ip)
self.assertEqual('192.168.1.12', ip.ipv4addr)
self.assertEqual('4a:ac:de:12:34:45', ip.mac)
self.assertEqual(None, ip.configure_for_dhcp)
self.assertEqual(None, ip.host)
def test_Create_MX_Record(self):
mock_record = DEFAULT_MX_RECORD
mx_record_copy = copy.deepcopy(mock_record)
connector = self._mock_connector(create_object=mx_record_copy)
mx = objects.MXRecord.create(connector, name='mx.demo.my_zone.com',
mail_exchanger='demo.my_zone.com',
view='my_dns_view', preference=1)
self.assertIsInstance(mx, objects.MXRecord)
connector.create_object.assert_called_once_with(
'record:mx',
{'mail_exchanger': 'demo.my_zone.com',
'name': 'mx.demo.my_zone.com',
'preference': 1,
'view': 'my_dns_view'
}, ['extattrs', 'mail_exchanger','name', 'preference', 'view'])
def test_update_MX_Record(self):
mx_record_copy = [
{'_ref': 'record:mx/%s' % REC,
'name': 'mx.demo.my_zone.com',
'preference': 1,
'mail_exchanger': 'demo.my_zone.com'}]
connector = self._mock_connector(get_object=mx_record_copy)
mx = objects.MXRecord.create(
connector, name='mx1.demo.my_zone.com',
mail_exchanger='demo2.my_zone.com',
preference=1,
update_if_exists=True)
connector.update_object.assert_called_once_with(
mx_record_copy[0]['_ref'],
{'mail_exchanger': 'demo2.my_zone.com',
'name': 'mx1.demo.my_zone.com', 'preference': 1},
['extattrs', 'mail_exchanger', 'name', 'preference', 'view'])
def test_search_and_delete_MX_Record(self):
mx_record_copy = copy.deepcopy(DEFAULT_MX_RECORD)
connector = self._mock_connector(get_object=[mx_record_copy])
mx_record = objects.MXRecord.search(connector,
view='some_view',
name='some_name')
connector.get_object.assert_called_once_with(
'record:mx', {'view': 'some_view',
'name': 'some_name'},
extattrs=None, force_proxy=False, max_results=None,
return_fields=['extattrs', 'mail_exchanger', 'name', 'preference', 'view'])
mx_record.delete()
connector.delete_object.assert_called_once_with(
DEFAULT_MX_RECORD['_ref'])
def test_create_host_record_with_ttl(self):
mock_record = DEFAULT_HOST_RECORD
host_record_copy = copy.deepcopy(mock_record)
connector = self._mock_connector(create_object=host_record_copy)
ip = objects.IP.create(ip='22.0.0.2', mac='fa:16:3e:29:87:70')
self.assertIsInstance(ip, objects.IPv4)
host_record = objects.HostRecord.create(connector,
ttl=42,
view='some-dns-view',
ip=[ip])
self.assertIsInstance(host_record, objects.HostRecordV4)
connector.create_object.assert_called_once_with(
'record:host',
{'ttl': 42,
'ipv4addrs': [
{'mac': 'fa:16:3e:29:87:70',
'ipv4addr': '22.0.0.2'}],
'view': 'some-dns-view'},
['extattrs', 'ipv4addrs', 'name', 'view', 'aliases'])
def test_create_host_record_with_ip(self):
mock_record = DEFAULT_HOST_RECORD
host_record_copy = copy.deepcopy(mock_record)
connector = self._mock_connector(create_object=host_record_copy)
ip = objects.IP.create(ip='22.0.0.2', mac='fa:16:3e:29:87:70')
self.assertIsInstance(ip, objects.IPv4)
host_record = objects.HostRecord.create(connector,
view='some-dns-view',
ip=[ip])
# Validate that ip object was converted to simple ip
# as a string representation for searching
connector.get_object.assert_called_once_with(
'record:host',
{'view': 'some-dns-view', 'ipv4addr': '22.0.0.2'},
return_fields=mock.ANY)
# Validate create_object call
ip_dict = {'ipv4addr': '22.0.0.2', 'mac': 'fa:16:3e:29:87:70'}
connector.create_object.assert_called_once_with(
'record:host',
{'view': 'some-dns-view', 'ipv4addrs': [ip_dict]}, mock.ANY)
self.assertIsInstance(host_record, objects.HostRecordV4)
# validate nios reply was parsed correctly
self.assertEqual(mock_record['_ref'], host_record._ref)
nios_ip = host_record.ipv4addrs[0]
self.assertIsInstance(ip, objects.IPv4)
self.assertEqual(mock_record['ipv4addrs'][0]['mac'], nios_ip.mac)
self.assertEqual(mock_record['ipv4addrs'][0]['ipv4addr'],
nios_ip.ipv4addr)
self.assertEqual(mock_record['ipv4addrs'][0]['host'],
nios_ip.host)
self.assertEqual(mock_record['ipv4addrs'][0]['configure_for_dhcp'],
nios_ip.configure_for_dhcp)
# Validate 'host' field is not send on update
new_ip = objects.IP.create(ip='22.0.0.10', mac='fa:16:3e:29:87:71',
configure_for_dhcp=False)
host_record.ip.append(new_ip)
host_record.extattrs = {}
host_record.update()
ip_dict['configure_for_dhcp'] = False
ip_dict_new = {'ipv4addr': '22.0.0.10', 'mac': 'fa:16:3e:29:87:71',
'configure_for_dhcp': False}
connector.update_object.assert_called_once_with(
host_record.ref,
{'ipv4addrs': [ip_dict, ip_dict_new],
'extattrs': {}}, mock.ANY)
def test_search_and_delete_host_record(self):
host_record_copy = copy.deepcopy(DEFAULT_HOST_RECORD)
connector = self._mock_connector(get_object=[host_record_copy])
host_record = objects.HostRecord.search(connector,
view='some-dns-view',
ip='192.168.15.20',
network_view='test-netview')
connector.get_object.assert_called_once_with(
'record:host',
{'view': 'some-dns-view', 'ipv4addr': '192.168.15.20',
'network_view': 'test-netview'},
extattrs=None, force_proxy=False, return_fields=mock.ANY,
max_results=None)
# Validate extattrs in host_record are converted to EA object
self.assertIsInstance(host_record.extattrs, objects.EA)
host_record.delete()
connector.delete_object.assert_called_once_with(
DEFAULT_HOST_RECORD['_ref'])
def test_create_fixed_address(self):
mock_fixed_address = {
'_ref': 'fixedaddress/ZG5zLmhvc3QkLl9kZWZhdWx0LmNvbS5nbG9iYWw2NA',
'ipv4addr': '192.168.1.15',
'mac': 'aa:ac:cd:11:22:33',
}
connector = self._mock_connector(create_object=mock_fixed_address)
fixed_addr = objects.FixedAddress.create(
connector,
ip='192.168.1.15',
network_view='some-view',
mac='aa:ac:cd:11:22:33',
ms_server={'_struct': 'msdhcpserver',
'ipv4addr': '192.168.1.0'})
connector.get_object.assert_called_once_with(
'fixedaddress',
{'network_view': 'some-view', 'ipv4addr': '192.168.1.15',
'mac': 'aa:ac:cd:11:22:33'},
return_fields=mock.ANY)
self.assertIsInstance(fixed_addr, objects.FixedAddressV4)
connector.create_object.assert_called_once_with(
'fixedaddress',
{'network_view': 'some-view',
'ipv4addr': '192.168.1.15',
'mac': 'aa:ac:cd:11:22:33',
'ms_server': {'_struct': 'msdhcpserver',
'ipv4addr': '192.168.1.0'}}, mock.ANY)
def test_create_fixed_address_v6(self):
mock_fixed_address = {
'_ref': 'ipv6fixedaddress/ZG5zLmhvc3QkLl9kZWZhdWx0LmNvbS5nbG9iYA',
'ipv6addr': 'fffe:1234:1234::1',
'duid': '00:23:97:49:aa:ac:cd:11:22:33',
}
connector = self._mock_connector(create_object=mock_fixed_address)
fixed_addr = objects.FixedAddress.create(
connector,
ip='fffe:1234:1234::1',
network_view='some-view',
mac='aa:ac:cd:11:22:33',
ms_server={'_struct': 'msdhcpserver',
'ipv4addr': '192.168.1.0'})
self.assertIsInstance(fixed_addr, objects.FixedAddressV6)
self.assertEqual(mock_fixed_address['duid'], fixed_addr.duid)
connector.get_object.assert_called_once_with(
'ipv6fixedaddress',
{'duid': mock.ANY, 'ipv6addr': 'fffe:1234:1234::1',
'network_view': 'some-view' },
return_fields=mock.ANY)
connector.create_object.assert_called_once_with(
'ipv6fixedaddress',
{'duid': mock.ANY, 'ipv6addr': 'fffe:1234:1234::1',
'network_view': 'some-view' }, mock.ANY)
@mock.patch('infoblox_client.utils.generate_duid')
def test_fixed_address_v6(self, generate):
mac = 'aa:ac:cd:11:22:33'
duid = '00:0a:d3:9b:aa:ac:cd:11:22:33'
generate.return_value = duid
connector = self._mock_connector()
fixed_addr = objects.FixedAddress(
connector,
ip='fffe:1234:1234::1',
network_view='some-view',
mac=mac)
self.assertIsInstance(fixed_addr, objects.FixedAddressV6)
self.assertEqual(mac, fixed_addr.mac)
self.assertEqual(duid, fixed_addr.duid)
generate.assert_called_once_with(mac)
def test_search_ipaddress(self):
ip_mock = [{'_ref': ('ipv4address/Li5pcHY0X2FkZHJlc3MkMTky'
'LjE2OC4xLjEwLzE:192.168.1.10/my_view'),
'objects': ['ref_1', 'ref_2']}]
connector = self._mock_connector(get_object=ip_mock)
ip = objects.IPAddress.search(connector,
network_view='some_view',
ip_address='192.168.1.5')
payload = {'network_view': 'some_view', 'ip_address': '192.168.1.5'}
connector.get_object.assert_called_once_with(
'ipv4address', payload, return_fields=mock.ANY,
extattrs=None, force_proxy=mock.ANY, max_results=None)
self.assertIsInstance(ip, objects.IPv4Address)
self.assertEqual(ip_mock[0]['objects'], ip.objects)
def test__process_value(self):
data = (([1, 2, 3], ['1', '2', '3']),
((1, 2), ['1', '2']),
(1, '1'),
('string', 'string'))
for input, output in data:
self.assertEqual(output, objects.EA._process_value(str, input))
def test_ea_parse_generate(self):
eas = {'Subnet ID': {'value': 'some-id'},
'Tenant Name': {'value': 'tenant-name'},
'Cloud API Owned': {'value': 'True'},
'Some EA': {'value': 'False'},
'Zero EA': {'value': '0'}}
ea = objects.EA.from_dict(eas)
self.assertIsInstance(ea, objects.EA)
# validate True and False are converted to booleans
self.assertEqual(True, ea.get('Cloud API Owned'))
self.assertEqual(False, ea.get('Some EA'))
self.assertEqual('0', ea.get('Zero EA'))
self.assertEqual(eas, ea.to_dict())
def test_ea_to_dict(self):
ea = {'Subnet ID': 'some-id',
'Tenant Name': 'tenant-name',
'Cloud API Owned': 'True',
'DNS Record Types': ['record_a', 'record_ptr'],
'False String EA': 'False',
'Empty String EA': '',
'False EA': False,
'Zero EA': 0,
'None EA': None,
'None String EA': 'None',
'Empty List EA': [],
'Zero String EA': '0'}
processed_ea = {'Subnet ID': 'some-id',
'Tenant Name': 'tenant-name',
'Cloud API Owned': 'True',
'DNS Record Types': ['record_a', 'record_ptr'],
'False String EA': 'False',
'False EA': 'False',
'Zero EA': '0',
'None String EA': 'None',
'Zero String EA': '0'}
ea_exist = ['Subnet ID',
'Tenant Name',
'Cloud API Owned',
'DNS Record Types',
'False String EA',
'False EA',
'Zero EA',
'None String EA',
'Zero String EA']
ea_purged = ['Empty String EA',
'None EA',
'Empty List EA']
ea_dict = objects.EA(ea).to_dict()
self.assertIsInstance(ea_dict, dict)
for key in ea_exist:
self.assertEqual(True, key in ea_dict)
for key in ea_purged:
self.assertEqual(False, key in ea_dict)
for key in processed_ea:
self.assertEqual(processed_ea[key], ea_dict.get(key).get('value'))
def test_ea_returns_none(self):
for ea in (None, '', 0):
self.assertEqual(None, objects.EA.from_dict(ea))
def test_ea_set_get(self):
ea = objects.EA()
ea_name = 'Subnet ID'
id = 'subnet-id'
generated_eas = {ea_name: {'value': id}}
ea.set(ea_name, id)
self.assertEqual(id, ea.get(ea_name))
self.assertEqual(generated_eas, ea.to_dict())
def test_ea_returns_ea_dict(self):
ea_dict = {'Subnet ID': 'some-id'}
ea = objects.EA(ea_dict)
ea_dict_from_EA_object = ea.ea_dict
self.assertEqual(ea_dict, ea_dict_from_EA_object)
# Make sure a copy of dict is returned,
# and updating returned value do not affect EA object
ea_dict_from_EA_object['Subnet ID'] = 'another-id'
self.assertEqual('some-id', ea.get('Subnet ID'))
def test_update_from_dict(self):
net = objects.Network(mock.Mock(), network='192.168.1.0/24')
self.assertEqual(None, net._ref)
reply = {'_ref': 'network/asdwdqwecaszxcrqqwe',
'network': '192.168.100.0/24',
'network_view': 'default'}
net.update_from_dict(reply, only_ref=True)
self.assertEqual(reply['_ref'], net._ref)
self.assertEqual('192.168.1.0/24', net.network)
self.assertEqual(None, net.network_view)
def test_update_fields_on_create(self):
a_record = [{'_ref': 'record:a/Awsdrefsasdwqoijvoriibtrni',
'ip': '192.168.1.52',
'name': 'other_name'}]
connector = self._mock_connector(get_object=a_record)
objects.ARecordBase.create(connector,
ip='192.168.1.52',
name='some-new_name',
view='view',
update_if_exists=True)
connector.get_object.assert_called_once_with(
'record:a',
{'view': 'view', 'ipv4addr': '192.168.1.52'},
return_fields=[])
connector.update_object.assert_called_once_with(
a_record[0]['_ref'],
{'name': 'some-new_name', 'ipv4addr': '192.168.1.52'},
mock.ANY)
def test_update_fields_on_create_v6(self):
aaaa_record = [{'_ref': 'record:aaaa/Awsdrefsasdwqoijvoriibtrni',
'ip': '2001:610:240:22::c100:68b',
'name': 'other_name'}]
connector = self._mock_connector(get_object=aaaa_record)
objects.ARecordBase.create(connector,
ip='2001:610:240:22::c100:68b',
name='some-new_name',
view='view',
update_if_exists=True)
connector.get_object.assert_called_once_with(
'record:aaaa',
{'view': 'view', 'ipv6addr': '2001:610:240:22::c100:68b'},
return_fields=[])
connector.update_object.assert_called_once_with(
aaaa_record[0]['_ref'],
{'name': 'some-new_name'},
mock.ANY)
def test_ip_version(self):
conn = mock.Mock()
net_v4 = objects.Network(conn, network='192.168.1.0/24')
self.assertEqual(4, net_v4.ip_version)
net_v6 = objects.Network(conn, network='fffe::/64')
self.assertEqual(6, net_v6.ip_version)
def test_get_tenant(self):
id = 'tenant_id'
fake_tenant = {
'_ref': 'grid:cloudapi:tenant/ZG5zLm5ldHdvcmskMTAuMzk',
'id': id,
'name': 'Tenant Name',
'comment': 'Some comment'}
conn = self._mock_connector(get_object=[fake_tenant])
tenant = objects.Tenant.search(conn, id=id)
conn.get_object.assert_called_once_with(
'grid:cloudapi:tenant', {'id': id},
return_fields=mock.ANY, extattrs=None, force_proxy=mock.ANY,
max_results=None)
self.assertEqual(fake_tenant['id'], tenant.id)
self.assertEqual(fake_tenant['name'], tenant.name)
self.assertEqual(fake_tenant['comment'], tenant.comment)
def test__remap_fields_support_unknown_fields(self):
data = {'host_name': 'cp.com',
'unknown_field': 'some_data'}
self.assertEqual(data, objects.Member._remap_fields(data))
def test_TXT_Record(self):
mock_record = DEFAULT_TXT_RECORD
txt_record_copy = copy.deepcopy(mock_record)
connector = self._mock_connector(create_object=txt_record_copy)
txt = objects.TXTRecord.create(connector, name='text_test.my_zone.com',
text='hello_text',
view='my_dns_view')
self.assertIsInstance(txt, objects.TXTRecord)
connector.create_object.assert_called_once_with(
'record:txt',
{'name': 'text_test.my_zone.com',
'text': 'hello_text',
'view': 'my_dns_view',
}, ['extattrs', 'name', 'text', 'view'])
| 43.196364
| 87
| 0.56743
|
9319e7de7a61f6e5843bb1f421ed06d0a08e4869
| 34,065
|
py
|
Python
|
tools/pythonpoint/pythonpoint.py
|
sbluen/reportlab
|
98758940eeae30db80bbc9c555e42b8c89b86be8
|
[
"BSD-3-Clause"
] | 8
|
2018-11-01T10:40:18.000Z
|
2021-12-16T03:20:48.000Z
|
tools/pythonpoint/pythonpoint.py
|
sbluen/reportlab
|
98758940eeae30db80bbc9c555e42b8c89b86be8
|
[
"BSD-3-Clause"
] | 2
|
2015-03-16T18:32:58.000Z
|
2019-03-20T07:17:04.000Z
|
tools/pythonpoint/pythonpoint.py
|
sbluen/reportlab
|
98758940eeae30db80bbc9c555e42b8c89b86be8
|
[
"BSD-3-Clause"
] | 26
|
2015-03-16T18:27:04.000Z
|
2022-03-25T10:08:33.000Z
|
#!/usr/bin/env python
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
"""
This is PythonPoint!
The idea is a simple markup language for describing presentation
slides, and other documents which run page by page. I expect most
of it will be reusable in other page layout stuff.
Look at the sample near the top, which shows how the presentation
should be coded up.
The parser, which is in a separate module to allow for multiple
parsers, turns the XML sample into an object tree. There is a
simple class hierarchy of items, the inner levels of which create
flowable objects to go in the frames. These know how to draw
themselves.
The currently available 'Presentation Objects' are:
The main hierarchy...
PPPresentation
PPSection
PPSlide
PPFrame
PPAuthor, PPTitle and PPSubject are optional
Things to flow within frames...
PPPara - flowing text
PPPreformatted - text with line breaks and tabs, for code..
PPImage
PPTable - bulk formatted tabular data
PPSpacer
Things to draw directly on the page...
PPRect
PPRoundRect
PPDrawingElement - user base class for graphics
PPLine
PPEllipse
Features added by H. Turgut Uyar <uyar@cs.itu.edu.tr>
- TrueType support (actually, just an import in the style file);
this also enables the use of Unicode symbols
- para, image, table, line, rectangle, roundrect, ellipse, polygon
and string elements can now have effect attributes
(careful: new slide for each effect!)
- added printout mode (no new slides for effects, see item above)
- added a second-level bullet: Bullet2
- small bugfixes in handleHiddenSlides:
corrected the outlineEntry of included hidden slide
and made sure to include the last slide even if hidden
Recently added features are:
- file globbing
- package structure
- named colors throughout (using names from reportlab/lib/colors.py)
- handout mode with arbitrary number of columns per page
- stripped off pages hidden in the outline tree (hackish)
- new <notes> tag for speaker notes (paragraphs only)
- new <pycode> tag for syntax-colorized Python code
- reformatted pythonpoint.xml and monterey.xml demos
- written/extended DTD
- arbitrary font support
- print proper speaker notes (TODO)
- fix bug with partially hidden graphics (TODO)
- save in combined presentation/handout mode (TODO)
- add pyRXP support (TODO)
"""
__version__='''$Id$'''
import os, sys, imp, string, pprint, getopt, glob
from reportlab import rl_config
from reportlab.lib import styles
from reportlab.lib import colors
from reportlab.lib.units import cm
from reportlab.lib.utils import getStringIO
from reportlab.lib.enums import TA_LEFT, TA_RIGHT, TA_CENTER, TA_JUSTIFY
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfgen import canvas
from reportlab.platypus.doctemplate import SimpleDocTemplate
from reportlab.platypus.flowables import Flowable
from reportlab.platypus.xpreformatted import PythonPreformatted
from reportlab.platypus import Preformatted, Paragraph, Frame, \
Image, Table, TableStyle, Spacer
USAGE_MESSAGE = """\
PythonPoint - a tool for making presentations in PDF.
Usage:
pythonpoint.py [options] file1.xml [file2.xml [...]]
where options can be any of these:
-h / --help prints this message
-n / --notes leave room for comments
-v / --verbose verbose mode
-s / --silent silent mode (NO output)
--handout produce handout document
--printout produce printout document
--cols specify number of columns
on handout pages (default: 2)
To create the PythonPoint user guide, do:
pythonpoint.py pythonpoint.xml
"""
#####################################################################
# This should probably go into reportlab/lib/fonts.py...
#####################################################################
class FontNameNotFoundError(Exception):
pass
class FontFilesNotFoundError(Exception):
pass
##def findFontName(path):
## "Extract a Type-1 font name from an AFM file."
##
## f = open(path)
##
## found = 0
## while not found:
## line = f.readline()[:-1]
## if not found and line[:16] == 'StartCharMetrics':
## raise FontNameNotFoundError, path
## if line[:8] == 'FontName':
## fontName = line[9:]
## found = 1
##
## return fontName
##
##
##def locateFilesForFontWithName(name):
## "Search known paths for AFM/PFB files describing T1 font with given name."
##
## join = os.path.join
## splitext = os.path.splitext
##
## afmFile = None
## pfbFile = None
##
## found = 0
## while not found:
## for p in rl_config.T1SearchPath:
## afmFiles = glob.glob(join(p, '*.[aA][fF][mM]'))
## for f in afmFiles:
## T1name = findFontName(f)
## if T1name == name:
## afmFile = f
## found = 1
## break
## if afmFile:
## break
## break
##
## if afmFile:
## pfbFile = glob.glob(join(splitext(afmFile)[0] + '.[pP][fF][bB]'))[0]
##
## return afmFile, pfbFile
##
##
##def registerFont(name):
## "Register Type-1 font for future use."
##
## rl_config.warnOnMissingFontGlyphs = 0
## rl_config.T1SearchPath.append(r'C:\Programme\Python21\reportlab\test')
##
## afmFile, pfbFile = locateFilesForFontWithName(name)
## if not afmFile and not pfbFile:
## raise FontFilesNotFoundError
##
## T1face = pdfmetrics.EmbeddedType1Face(afmFile, pfbFile)
## T1faceName = name
## pdfmetrics.registerTypeFace(T1face)
## T1font = pdfmetrics.Font(name, T1faceName, 'WinAnsiEncoding')
## pdfmetrics.registerFont(T1font)
def registerFont0(sourceFile, name, path):
"Register Type-1 font for future use, simple version."
rl_config.warnOnMissingFontGlyphs = 0
p = os.path.join(os.path.dirname(sourceFile), path)
afmFiles = glob.glob(p + '.[aA][fF][mM]')
pfbFiles = glob.glob(p + '.[pP][fF][bB]')
assert len(afmFiles) == len(pfbFiles) == 1, FontFilesNotFoundError
T1face = pdfmetrics.EmbeddedType1Face(afmFiles[0], pfbFiles[0])
T1faceName = name
pdfmetrics.registerTypeFace(T1face)
T1font = pdfmetrics.Font(name, T1faceName, 'WinAnsiEncoding')
pdfmetrics.registerFont(T1font)
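# A hedged usage sketch for registerFont0 (paths and names below are hypothetical):
# given fonts/MyFace.afm and fonts/MyFace.pfb next to the presentation source,
#
#   registerFont0('/path/to/talk.xml', 'MyFace', 'fonts/MyFace')
#
# resolves the glob relative to the directory of sourceFile and requires exactly
# one matching .afm/.pfb pair, otherwise the assertion above fires.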
#####################################################################
def checkColor(col):
"Converts a color name to an RGB tuple, if possible."
if type(col) == type('') and col in dir(colors):
col = getattr(colors, col)
col = (col.red, col.green, col.blue)
return col
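# Illustrative behaviour of checkColor, assuming the standard names in
# reportlab.lib.colors:
#   checkColor('red')        -> (1.0, 0.0, 0.0)   # name resolved, unpacked to RGB
#   checkColor((0, 0.5, 1))  -> (0, 0.5, 1)       # non-name values pass through unchanged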
def handleHiddenSlides(slides):
"""Filters slides from a list of slides.
In a sequence of hidden slides all but the last one are
removed. Also, the slide before the sequence of hidden
ones is removed.
    This is meant to leave only those slides in the handout
    that also appear in the outline, hoping to reduce
    sequences where each new slide only adds one new line
    to a list of items...
"""
itd = indicesToDelete = map(lambda s:s.outlineEntry == None, slides)
for i in range(len(itd)-1):
if itd[i] == 1:
if itd[i+1] == 0:
itd[i] = 0
if i > 0 and itd[i-1] == 0:
itd[i-1] = 1
itd[len(itd)-1] = 0
for i in range(len(itd)):
if slides[i].outlineEntry:
curOutlineEntry = slides[i].outlineEntry
if itd[i] == 1:
slides[i].delete = 1
else:
slides[i].outlineEntry = curOutlineEntry
slides[i].delete = 0
slides = filter(lambda s:s.delete == 0, slides)
return slides
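# A worked example of the pass above (None = hidden slide, letters are slides
# with an outlineEntry):
#   [A, hidden, B]                 -> keeps [hidden, B]         (A is dropped)
#   [A, hidden, hidden, hidden, B] -> keeps [A, last hidden, B] (earlier hidden dropped)
# Only the last slide of a hidden run survives, the deck's final slide is always
# kept, and the most recent outlineEntry is carried forward onto kept slides.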
def makeSlideTable(slides, pageSize, docWidth, numCols):
"""Returns a table containing a collection of SlideWrapper flowables.
"""
slides = handleHiddenSlides(slides)
# Set table style.
tabStyle = TableStyle(
[('GRID', (0,0), (-1,-1), 0.25, colors.black),
('ALIGN', (0,0), (-1,-1), 'CENTRE')
])
# Build table content.
width = docWidth/numCols
height = width * pageSize[1]/pageSize[0]
matrix = []
row = []
for slide in slides:
sw = SlideWrapper(width, height, slide, pageSize)
if (len(row)) < numCols:
row.append(sw)
else:
matrix.append(row)
row = []
row.append(sw)
if len(row) > 0:
for i in range(numCols-len(row)):
row.append('')
matrix.append(row)
# Make Table flowable.
t = Table(matrix,
[width + 5]*len(matrix[0]),
[height + 5]*len(matrix))
t.setStyle(tabStyle)
return t
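# Note on the sizing above: each thumbnail is docWidth/numCols wide, its height
# keeps the original page aspect ratio, and the Table adds 5 points of slack per
# cell, so a two-column handout shows two proportionally scaled slide images per row.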
class SlideWrapper(Flowable):
"""A Flowable wrapping a PPSlide object.
"""
def __init__(self, width, height, slide, pageSize):
Flowable.__init__(self)
self.width = width
self.height = height
self.slide = slide
self.pageSize = pageSize
def __repr__(self):
return "SlideWrapper(w=%s, h=%s)" % (self.width, self.height)
def draw(self):
"Draw the slide in our relative coordinate system."
slide = self.slide
pageSize = self.pageSize
canv = self.canv
canv.saveState()
canv.scale(self.width/pageSize[0], self.height/pageSize[1])
slide.effectName = None
slide.drawOn(self.canv)
canv.restoreState()
class PPPresentation:
def __init__(self):
self.sourceFilename = None
self.filename = None
self.outDir = None
self.description = None
self.title = None
self.author = None
self.subject = None
self.notes = 0 # different printing mode
self.handout = 0 # prints many slides per page
self.printout = 0 # remove hidden slides
self.cols = 0 # columns per handout page
self.slides = []
self.effectName = None
self.showOutline = 1 #should it be displayed when opening?
self.compression = rl_config.pageCompression
self.pageDuration = None
#assume landscape
self.pageWidth = rl_config.defaultPageSize[1]
self.pageHeight = rl_config.defaultPageSize[0]
self.verbose = rl_config.verbose
def saveAsPresentation(self):
"""Write the PDF document, one slide per page."""
if self.verbose:
print 'saving presentation...'
pageSize = (self.pageWidth, self.pageHeight)
if self.sourceFilename:
filename = os.path.splitext(self.sourceFilename)[0] + '.pdf'
if self.outDir: filename = os.path.join(self.outDir,os.path.basename(filename))
if self.verbose:
print filename
#canv = canvas.Canvas(filename, pagesize = pageSize)
outfile = getStringIO()
if self.notes:
#translate the page from landscape to portrait
pageSize= pageSize[1], pageSize[0]
canv = canvas.Canvas(outfile, pagesize = pageSize)
canv.setPageCompression(self.compression)
canv.setPageDuration(self.pageDuration)
if self.title:
canv.setTitle(self.title)
if self.author:
canv.setAuthor(self.author)
if self.subject:
canv.setSubject(self.subject)
slideNo = 0
for slide in self.slides:
#need diagnostic output if something wrong with XML
slideNo = slideNo + 1
if self.verbose:
print 'doing slide %d, id = %s' % (slideNo, slide.id)
if self.notes:
#frame and shift the slide
#canv.scale(0.67, 0.67)
scale_amt = (min(pageSize)/float(max(pageSize)))*.95
#canv.translate(self.pageWidth / 6.0, self.pageHeight / 3.0)
#canv.translate(self.pageWidth / 2.0, .025*self.pageHeight)
canv.translate(.025*self.pageHeight, (self.pageWidth/2.0) + 5)
#canv.rotate(90)
canv.scale(scale_amt, scale_amt)
canv.rect(0,0,self.pageWidth, self.pageHeight)
slide.drawOn(canv)
canv.showPage()
#ensure outline visible by default
if self.showOutline:
canv.showOutline()
canv.save()
return self.savetofile(outfile, filename)
def saveAsHandout(self):
"""Write the PDF document, multiple slides per page."""
styleSheet = getSampleStyleSheet()
h1 = styleSheet['Heading1']
bt = styleSheet['BodyText']
if self.sourceFilename :
filename = os.path.splitext(self.sourceFilename)[0] + '.pdf'
outfile = getStringIO()
doc = SimpleDocTemplate(outfile, pagesize=rl_config.defaultPageSize, showBoundary=0)
doc.leftMargin = 1*cm
doc.rightMargin = 1*cm
doc.topMargin = 2*cm
doc.bottomMargin = 2*cm
multiPageWidth = rl_config.defaultPageSize[0] - doc.leftMargin - doc.rightMargin - 50
story = []
orgFullPageSize = (self.pageWidth, self.pageHeight)
t = makeSlideTable(self.slides, orgFullPageSize, multiPageWidth, self.cols)
story.append(t)
## #ensure outline visible by default
## if self.showOutline:
## doc.canv.showOutline()
doc.build(story)
return self.savetofile(outfile, filename)
def savetofile(self, pseudofile, filename):
"""Save the pseudo file to disk and return its content as a
string of text."""
pseudofile.flush()
content = pseudofile.getvalue()
pseudofile.close()
if filename :
outf = open(filename, "wb")
outf.write(content)
outf.close()
return content
def save(self):
"Save the PDF document."
if self.handout:
return self.saveAsHandout()
else:
return self.saveAsPresentation()
#class PPSection:
# """A section can hold graphics which will be drawn on all
# pages within it, before frames and other content are done.
# In other words, a background template."""
# def __init__(self, name):
# self.name = name
# self.graphics = []
#
# def drawOn(self, canv):
# for graphic in self.graphics:
### graphic.drawOn(canv)
#
# name = str(hash(graphic))
# internalname = canv._doc.hasForm(name)
#
# canv.saveState()
# if not internalname:
# canv.beginForm(name)
# graphic.drawOn(canv)
# canv.endForm()
# canv.doForm(name)
# else:
# canv.doForm(name)
# canv.restoreState()
definedForms = {}
class PPSection:
"""A section can hold graphics which will be drawn on all
pages within it, before frames and other content are done.
In other words, a background template."""
def __init__(self, name):
self.name = name
self.graphics = []
def drawOn(self, canv):
for graphic in self.graphics:
graphic.drawOn(canv)
continue
name = str(hash(graphic))
#internalname = canv._doc.hasForm(name)
if name in definedForms:
internalname = 1
else:
internalname = None
definedForms[name] = 1
if not internalname:
canv.beginForm(name)
canv.saveState()
graphic.drawOn(canv)
canv.restoreState()
canv.endForm()
canv.doForm(name)
else:
canv.doForm(name)
class PPNotes:
def __init__(self):
self.content = []
def drawOn(self, canv):
print self.content
class PPSlide:
def __init__(self):
self.id = None
self.title = None
self.outlineEntry = None
self.outlineLevel = 0 # can be higher for sub-headings
self.effectName = None
self.effectDirection = 0
self.effectDimension = 'H'
self.effectMotion = 'I'
self.effectDuration = 1
self.frames = []
self.notes = []
self.graphics = []
self.section = None
def drawOn(self, canv):
if self.effectName:
canv.setPageTransition(
effectname=self.effectName,
direction = self.effectDirection,
dimension = self.effectDimension,
motion = self.effectMotion,
duration = self.effectDuration
)
if self.outlineEntry:
#gets an outline automatically
self.showOutline = 1
#put an outline entry in the left pane
tag = self.title
canv.bookmarkPage(tag)
canv.addOutlineEntry(tag, tag, self.outlineLevel)
if self.section:
self.section.drawOn(canv)
for graphic in self.graphics:
graphic.drawOn(canv)
for frame in self.frames:
frame.drawOn(canv)
## # Need to draw the notes *somewhere*...
## for note in self.notes:
## print note
class PPFrame:
def __init__(self, x, y, width, height):
self.x = x
self.y = y
self.width = width
self.height = height
self.content = []
self.showBoundary = 0
def drawOn(self, canv):
#make a frame
frame = Frame( self.x,
self.y,
self.width,
self.height
)
frame.showBoundary = self.showBoundary
#build a story for the frame
story = []
for thingy in self.content:
#ask it for any flowables
story.append(thingy.getFlowable())
#draw it
frame.addFromList(story,canv)
class PPPara:
"""This is a placeholder for a paragraph."""
def __init__(self):
self.rawtext = ''
self.style = None
def escapeAgain(self, text):
"""The XML has been parsed once, so '>' became '>'
in rawtext. We need to escape this to get back to
something the Platypus parser can accept"""
pass
def getFlowable(self):
## print 'rawText for para:'
## print repr(self.rawtext)
p = Paragraph(
self.rawtext,
getStyles()[self.style],
self.bulletText
)
return p
class PPPreformattedText:
"""Use this for source code, or stuff you do not want to wrap"""
def __init__(self):
self.rawtext = ''
self.style = None
def getFlowable(self):
return Preformatted(self.rawtext, getStyles()[self.style])
class PPPythonCode:
"""Use this for colored Python source code"""
def __init__(self):
self.rawtext = ''
self.style = None
def getFlowable(self):
return PythonPreformatted(self.rawtext, getStyles()[self.style])
class PPImage:
"""Flowing image within the text"""
def __init__(self):
self.filename = None
self.width = None
self.height = None
def getFlowable(self):
return Image(self.filename, self.width, self.height)
class PPTable:
"""Designed for bulk loading of data for use in presentations."""
def __init__(self):
self.rawBlocks = [] #parser stuffs things in here...
self.fieldDelim = ',' #tag args can override
self.rowDelim = '\n' #tag args can override
self.data = None
self.style = None #tag args must specify
self.widths = None #tag args can override
self.heights = None #tag args can override
def getFlowable(self):
self.parseData()
t = Table(
self.data,
self.widths,
self.heights)
if self.style:
t.setStyle(getStyles()[self.style])
return t
def parseData(self):
"""Try to make sense of the table data!"""
rawdata = string.strip(string.join(self.rawBlocks, ''))
lines = string.split(rawdata, self.rowDelim)
#clean up...
lines = map(string.strip, lines)
self.data = []
for line in lines:
cells = string.split(line, self.fieldDelim)
self.data.append(cells)
#get the width list if not given
if not self.widths:
self.widths = [None] * len(self.data[0])
if not self.heights:
self.heights = [None] * len(self.data)
## import pprint
## print 'table data:'
## print 'style=',self.style
## print 'widths=',self.widths
## print 'heights=',self.heights
## print 'fieldDelim=',repr(self.fieldDelim)
## print 'rowDelim=',repr(self.rowDelim)
## pprint.pprint(self.data)
class PPSpacer:
def __init__(self):
self.height = 24 #points
def getFlowable(self):
return Spacer(72, self.height)
#############################################################
#
# The following are things you can draw on a page directly.
#
##############################################################
##class PPDrawingElement:
## """Base class for something which you draw directly on the page."""
## def drawOn(self, canv):
## raise "NotImplementedError", "Abstract base class!"
class PPFixedImage:
"""You place this on the page, rather than flowing it"""
def __init__(self):
self.filename = None
self.x = 0
self.y = 0
self.width = None
self.height = None
def drawOn(self, canv):
if self.filename:
x, y = self.x, self.y
w, h = self.width, self.height
canv.drawImage(self.filename, x, y, w, h)
class PPRectangle:
def __init__(self, x, y, width, height):
self.x = x
self.y = y
self.width = width
self.height = height
self.fillColor = None
self.strokeColor = (1,1,1)
self.lineWidth=0
def drawOn(self, canv):
canv.saveState()
canv.setLineWidth(self.lineWidth)
if self.fillColor:
r,g,b = checkColor(self.fillColor)
canv.setFillColorRGB(r,g,b)
if self.strokeColor:
r,g,b = checkColor(self.strokeColor)
canv.setStrokeColorRGB(r,g,b)
canv.rect(self.x, self.y, self.width, self.height,
stroke=(self.strokeColor!=None),
fill = (self.fillColor!=None)
)
canv.restoreState()
class PPRoundRect:
def __init__(self, x, y, width, height, radius):
self.x = x
self.y = y
self.width = width
self.height = height
self.radius = radius
self.fillColor = None
self.strokeColor = (1,1,1)
self.lineWidth=0
def drawOn(self, canv):
canv.saveState()
canv.setLineWidth(self.lineWidth)
if self.fillColor:
r,g,b = checkColor(self.fillColor)
canv.setFillColorRGB(r,g,b)
if self.strokeColor:
r,g,b = checkColor(self.strokeColor)
canv.setStrokeColorRGB(r,g,b)
canv.roundRect(self.x, self.y, self.width, self.height,
self.radius,
stroke=(self.strokeColor!=None),
fill = (self.fillColor!=None)
)
canv.restoreState()
class PPLine:
def __init__(self, x1, y1, x2, y2):
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
self.fillColor = None
self.strokeColor = (1,1,1)
self.lineWidth=0
def drawOn(self, canv):
canv.saveState()
canv.setLineWidth(self.lineWidth)
if self.strokeColor:
r,g,b = checkColor(self.strokeColor)
canv.setStrokeColorRGB(r,g,b)
canv.line(self.x1, self.y1, self.x2, self.y2)
canv.restoreState()
class PPEllipse:
def __init__(self, x1, y1, x2, y2):
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
self.fillColor = None
self.strokeColor = (1,1,1)
self.lineWidth=0
def drawOn(self, canv):
canv.saveState()
canv.setLineWidth(self.lineWidth)
if self.strokeColor:
r,g,b = checkColor(self.strokeColor)
canv.setStrokeColorRGB(r,g,b)
if self.fillColor:
r,g,b = checkColor(self.fillColor)
canv.setFillColorRGB(r,g,b)
canv.ellipse(self.x1, self.y1, self.x2, self.y2,
stroke=(self.strokeColor!=None),
fill = (self.fillColor!=None)
)
canv.restoreState()
class PPPolygon:
def __init__(self, pointlist):
self.points = pointlist
self.fillColor = None
self.strokeColor = (1,1,1)
self.lineWidth=0
def drawOn(self, canv):
canv.saveState()
canv.setLineWidth(self.lineWidth)
if self.strokeColor:
r,g,b = checkColor(self.strokeColor)
canv.setStrokeColorRGB(r,g,b)
if self.fillColor:
r,g,b = checkColor(self.fillColor)
canv.setFillColorRGB(r,g,b)
path = canv.beginPath()
(x,y) = self.points[0]
path.moveTo(x,y)
for (x,y) in self.points[1:]:
path.lineTo(x,y)
path.close()
canv.drawPath(path,
stroke=(self.strokeColor!=None),
fill=(self.fillColor!=None))
canv.restoreState()
class PPString:
def __init__(self, x, y):
self.text = ''
self.x = x
self.y = y
self.align = TA_LEFT
self.font = 'Times-Roman'
self.size = 12
self.color = (0,0,0)
self.hasInfo = 0 # these can have data substituted into them
def normalizeText(self):
"""It contains literal XML text typed over several lines.
We want to throw away
        tabs, newlines and so on, and only accept the embedded
        escape sequence '\\n' as an explicit line break"""
lines = string.split(self.text, '\n')
newtext = []
for line in lines:
newtext.append(string.strip(line))
            #accept all the '\\n' sequences as explicit line breaks
self.text = newtext
def drawOn(self, canv):
# for a string in a section, this will be drawn several times;
# so any substitution into the text should be in a temporary
# variable
if self.hasInfo:
# provide a dictionary of stuff which might go into
# the string, so they can number pages, do headers
# etc.
info = {}
info['title'] = canv._doc.info.title
info['author'] = canv._doc.info.author
info['subject'] = canv._doc.info.subject
info['page'] = canv.getPageNumber()
drawText = self.text % info
else:
drawText = self.text
if self.color is None:
return
lines = string.split(string.strip(drawText), '\\n')
canv.saveState()
canv.setFont(self.font, self.size)
r,g,b = checkColor(self.color)
canv.setFillColorRGB(r,g,b)
cur_y = self.y
for line in lines:
if self.align == TA_LEFT:
canv.drawString(self.x, cur_y, line)
elif self.align == TA_CENTER:
canv.drawCentredString(self.x, cur_y, line)
elif self.align == TA_RIGHT:
canv.drawRightString(self.x, cur_y, line)
cur_y = cur_y - 1.2*self.size
canv.restoreState()
class PPDrawing:
def __init__(self):
self.drawing = None
def getFlowable(self):
return self.drawing
class PPFigure:
def __init__(self):
self.figure = None
def getFlowable(self):
return self.figure
def getSampleStyleSheet():
from tools.pythonpoint.styles.standard import getParagraphStyles
return getParagraphStyles()
def toolsDir():
import tools
return tools.__path__[0]
#make a singleton and a function to access it
_styles = None
def getStyles():
global _styles
if not _styles:
_styles = getSampleStyleSheet()
return _styles
def setStyles(newStyleSheet):
global _styles
_styles = newStyleSheet
_pyRXP_Parser = None
def validate(rawdata):
global _pyRXP_Parser
if not _pyRXP_Parser:
try:
import pyRXP
except ImportError:
return
from reportlab.lib.utils import open_and_read, rl_isfile
dtd = 'pythonpoint.dtd'
if not rl_isfile(dtd):
dtd = os.path.join(toolsDir(),'pythonpoint','pythonpoint.dtd')
if not rl_isfile(dtd): return
def eocb(URI,dtdText=open_and_read(dtd),dtd=dtd):
if os.path.basename(URI)=='pythonpoint.dtd': return dtd,dtdText
return URI
_pyRXP_Parser = pyRXP.Parser(eoCB=eocb)
return _pyRXP_Parser.parse(rawdata)
def process(datafile, notes=0, handout=0, printout=0, cols=0, verbose=0, outDir=None, datafilename=None, fx=1):
"Process one PythonPoint source file."
if not hasattr(datafile, "read"):
if not datafilename: datafilename = datafile
datafile = open(datafile)
else:
if not datafilename: datafilename = "PseudoFile"
rawdata = datafile.read()
#if pyRXP present, use it to check and get line numbers for errors...
validate(rawdata)
return _process(rawdata, datafilename, notes, handout, printout, cols, verbose, outDir, fx)
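# A hedged usage sketch (the file name below is hypothetical): building a
# 3-column handout PDF from a PythonPoint source and getting the PDF bytes back:
#
#   pdf_bytes = process('talk.xml', handout=1, cols=3, verbose=1)
#
# process() also writes talk.pdf (derived from the source name, placed under
# outDir when that argument is given).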
def _process(rawdata, datafilename, notes=0, handout=0, printout=0, cols=0, verbose=0, outDir=None, fx=1):
#print 'inner process fx=%d' % fx
from tools.pythonpoint.stdparser import PPMLParser
parser = PPMLParser()
parser.fx = fx
parser.sourceFilename = datafilename
parser.feed(rawdata)
pres = parser.getPresentation()
pres.sourceFilename = datafilename
pres.outDir = outDir
pres.notes = notes
pres.handout = handout
pres.printout = printout
pres.cols = cols
pres.verbose = verbose
if printout:
pres.slides = handleHiddenSlides(pres.slides)
#this does all the work
pdfcontent = pres.save()
if verbose:
print 'saved presentation %s.pdf' % os.path.splitext(datafilename)[0]
parser.close()
return pdfcontent
##class P:
## def feed(self, text):
## parser = stdparser.PPMLParser()
## d = pyRXP.parse(text)
##
##
##def process2(datafilename, notes=0, handout=0, cols=0):
## "Process one PythonPoint source file."
##
## import pyRXP, pprint
##
## rawdata = open(datafilename).read()
## d = pyRXP.parse(rawdata)
## pprint.pprint(d)
def handleOptions():
# set defaults
from reportlab import rl_config
options = {'cols':2,
'handout':0,
'printout':0,
'help':0,
'notes':0,
'fx':1,
'verbose':rl_config.verbose,
'silent':0,
'outDir': None}
args = sys.argv[1:]
args = filter(lambda x: x and x[0]=='-',args) + filter(lambda x: not x or x[0]!='-',args)
try:
shortOpts = 'hnvsx'
longOpts = string.split('cols= outdir= handout help notes printout verbose silent nofx')
optList, args = getopt.getopt(args, shortOpts, longOpts)
except getopt.error, msg:
options['help'] = 1
if not args and os.path.isfile('pythonpoint.xml'):
args = ['pythonpoint.xml']
# Remove leading dashes (max. two).
for i in range(len(optList)):
o, v = optList[i]
while o[0] == '-':
o = o[1:]
optList[i] = (o, v)
if o == 'cols': options['cols'] = int(v)
elif o=='outdir': options['outDir'] = v
if filter(lambda ov: ov[0] == 'handout', optList):
options['handout'] = 1
if filter(lambda ov: ov[0] == 'printout', optList):
options['printout'] = 1
if optList == [] and args == [] or \
filter(lambda ov: ov[0] in ('h', 'help'), optList):
options['help'] = 1
if filter(lambda ov: ov[0] in ('n', 'notes'), optList):
options['notes'] = 1
if filter(lambda ov: ov[0] in ('x', 'nofx'), optList):
options['fx'] = 0
if filter(lambda ov: ov[0] in ('v', 'verbose'), optList):
options['verbose'] = 1
#takes priority over verbose. Used by our test suite etc.
#to ensure no output at all
if filter(lambda ov: ov[0] in ('s', 'silent'), optList):
options['silent'] = 1
options['verbose'] = 0
return options, args
def main():
options, args = handleOptions()
if options['help']:
print USAGE_MESSAGE
sys.exit(0)
if options['verbose'] and options['notes']:
print 'speaker notes mode'
if options['verbose'] and options['handout']:
print 'handout mode'
if options['verbose'] and options['printout']:
print 'printout mode'
if not options['fx']:
print 'suppressing special effects'
for fileGlobs in args:
files = glob.glob(fileGlobs)
if not files:
print fileGlobs, "not found"
return
for datafile in files:
if os.path.isfile(datafile):
file = os.path.join(os.getcwd(), datafile)
notes, handout, printout, cols, verbose, fx = options['notes'], options['handout'], options['printout'], options['cols'], options['verbose'], options['fx']
process(file, notes, handout, printout, cols, verbose, options['outDir'], fx=fx)
else:
print 'Data file not found:', datafile
if __name__ == '__main__':
main()
| 30.28
| 172
| 0.582122
|
9671be561a1455cf16f32621b5af0192d7bdc626
| 5,065
|
py
|
Python
|
compiler/test_add.py
|
SandraKersevan/BioProc
|
be267ff78a6d2aca98950e58534962437c831260
|
[
"CC-BY-4.0"
] | null | null | null |
compiler/test_add.py
|
SandraKersevan/BioProc
|
be267ff78a6d2aca98950e58534962437c831260
|
[
"CC-BY-4.0"
] | 1
|
2020-12-15T14:35:13.000Z
|
2020-12-15T14:35:13.000Z
|
compiler/test_add.py
|
SandraKersevan/BioProc
|
be267ff78a6d2aca98950e58534962437c831260
|
[
"CC-BY-4.0"
] | 1
|
2021-01-09T20:36:03.000Z
|
2021-01-09T20:36:03.000Z
|
from bioproc.hill_functions import *
def model(Y, T, params):
a0, not_a0, q0, not_q0,a1, not_a1, q1, not_q1,a2, not_a2, q2, not_q2,a3, not_a3, q3, not_q3,i1, i2, i3, i4, i5, i6, i7, i8,A,B,C,=Y
alpha1, alpha2, alpha3, alpha4, delta1, delta2, Kd, n, deltaE, KM,alpha_a, delta_a, Kd_a, n_a,KD_cond,=params
clk = get_clock(T)
prog_Kd_C=10
prog_Kd_A=10
prog_delta_A=0.1
prog_n_C=2
prog_alpha_B=10
prog_n_B=2
prog_alpha_C=10
prog_delta_B=0.1
prog_delta_C=0.1
prog_n_A=2
prog_alpha_A=10
prog_Kd_B=10
dA_dt=+prog_alpha_A*activate_1(i1,prog_Kd_A,prog_n_A)-prog_delta_A*A
dB_dt=+prog_alpha_B*activate_1(i2,prog_Kd_B,prog_n_B)-prog_delta_B*B
dC_dt=+prog_alpha_C*activate_2(i3,A,prog_Kd_C,prog_n_C)+prog_alpha_C*activate_2(i3,B,prog_Kd_C,prog_n_C)-prog_delta_C*C
RESET0=0 if T > 1 else 100
SET0=0
RESET1=0 if T > 1 else 100
SET1=0
RESET2=0 if T > 1 else 100
SET2=0
RESET3=0 if T > 1 else 100
SET3=0
d0=not_q3
sum_one = a0 + q0
sum_zero = not_a0 + not_q0
da0_dt = alpha1*(pow(d0/Kd, n)/(1 + pow(d0/Kd, n) + pow(clk/Kd, n) + pow(d0/Kd, n)*pow(clk/Kd, n))) + alpha2*(1/(1 + pow(not_a0/Kd, n))) - delta1 *a0 -a0*(deltaE*RESET0/(KM+sum_one))
dnot_a0_dt = alpha1*(1/(1 + pow(d0/Kd, n) + pow(clk/Kd, n) + pow(d0/Kd, n)*pow(clk/Kd, n))) + alpha2*(1/(1 + pow(a0/Kd, n))) - delta1*not_a0-not_a0*(deltaE*SET0/(KM+sum_zero))
dq0_dt = alpha3*((pow(a0/Kd, n)*pow(clk/Kd, n))/(1 + pow(a0/Kd, n) + pow(clk/Kd, n) + pow(a0/Kd, n)*pow(clk/Kd, n))) + alpha4*(1/(1 + pow(not_q0/Kd, n))) - delta2*q0-q0*(deltaE*RESET0/(KM+sum_one))
dnot_q0_dt = alpha3*((pow(not_a0/Kd, n)*pow(clk/Kd, n))/(1 + pow(not_a0/Kd, n) + pow(clk/Kd, n) + pow(not_a0/Kd, n)*pow(clk/Kd, n))) + alpha4*(1/(1 + pow(q0/Kd, n))) - delta2*not_q0 -not_q0*(deltaE*SET0/(KM+sum_zero))
d1=q0
sum_one = a1 + q1
sum_zero = not_a1 + not_q1
da1_dt = alpha1*(pow(d1/Kd, n)/(1 + pow(d1/Kd, n) + pow(clk/Kd, n) + pow(d1/Kd, n)*pow(clk/Kd, n))) + alpha2*(1/(1 + pow(not_a1/Kd, n))) - delta1 *a1 -a1*(deltaE*RESET1/(KM+sum_one))
dnot_a1_dt = alpha1*(1/(1 + pow(d1/Kd, n) + pow(clk/Kd, n) + pow(d1/Kd, n)*pow(clk/Kd, n))) + alpha2*(1/(1 + pow(a1/Kd, n))) - delta1*not_a1-not_a1*(deltaE*SET1/(KM+sum_zero))
dq1_dt = alpha3*((pow(a1/Kd, n)*pow(clk/Kd, n))/(1 + pow(a1/Kd, n) + pow(clk/Kd, n) + pow(a1/Kd, n)*pow(clk/Kd, n))) + alpha4*(1/(1 + pow(not_q1/Kd, n))) - delta2*q1-q1*(deltaE*RESET1/(KM+sum_one))
dnot_q1_dt = alpha3*((pow(not_a1/Kd, n)*pow(clk/Kd, n))/(1 + pow(not_a1/Kd, n) + pow(clk/Kd, n) + pow(not_a1/Kd, n)*pow(clk/Kd, n))) + alpha4*(1/(1 + pow(q1/Kd, n))) - delta2*not_q1 -not_q1*(deltaE*SET1/(KM+sum_zero))
d2=q1
sum_one = a2 + q2
sum_zero = not_a2 + not_q2
da2_dt = alpha1*(pow(d2/Kd, n)/(1 + pow(d2/Kd, n) + pow(clk/Kd, n) + pow(d2/Kd, n)*pow(clk/Kd, n))) + alpha2*(1/(1 + pow(not_a2/Kd, n))) - delta1 *a2 -a2*(deltaE*RESET2/(KM+sum_one))
dnot_a2_dt = alpha1*(1/(1 + pow(d2/Kd, n) + pow(clk/Kd, n) + pow(d2/Kd, n)*pow(clk/Kd, n))) + alpha2*(1/(1 + pow(a2/Kd, n))) - delta1*not_a2-not_a2*(deltaE*SET2/(KM+sum_zero))
dq2_dt = alpha3*((pow(a2/Kd, n)*pow(clk/Kd, n))/(1 + pow(a2/Kd, n) + pow(clk/Kd, n) + pow(a2/Kd, n)*pow(clk/Kd, n))) + alpha4*(1/(1 + pow(not_q2/Kd, n))) - delta2*q2-q2*(deltaE*RESET2/(KM+sum_one))
dnot_q2_dt = alpha3*((pow(not_a2/Kd, n)*pow(clk/Kd, n))/(1 + pow(not_a2/Kd, n) + pow(clk/Kd, n) + pow(not_a2/Kd, n)*pow(clk/Kd, n))) + alpha4*(1/(1 + pow(q2/Kd, n))) - delta2*not_q2 -not_q2*(deltaE*SET2/(KM+sum_zero))
d3=q2
sum_one = a3 + q3
sum_zero = not_a3 + not_q3
da3_dt = alpha1*(pow(d3/Kd, n)/(1 + pow(d3/Kd, n) + pow(clk/Kd, n) + pow(d3/Kd, n)*pow(clk/Kd, n))) + alpha2*(1/(1 + pow(not_a3/Kd, n))) - delta1 *a3 -a3*(deltaE*RESET3/(KM+sum_one))
dnot_a3_dt = alpha1*(1/(1 + pow(d3/Kd, n) + pow(clk/Kd, n) + pow(d3/Kd, n)*pow(clk/Kd, n))) + alpha2*(1/(1 + pow(a3/Kd, n))) - delta1*not_a3-not_a3*(deltaE*SET3/(KM+sum_zero))
dq3_dt = alpha3*((pow(a3/Kd, n)*pow(clk/Kd, n))/(1 + pow(a3/Kd, n) + pow(clk/Kd, n) + pow(a3/Kd, n)*pow(clk/Kd, n))) + alpha4*(1/(1 + pow(not_q3/Kd, n))) - delta2*q3-q3*(deltaE*RESET3/(KM+sum_one))
dnot_q3_dt = alpha3*((pow(not_a3/Kd, n)*pow(clk/Kd, n))/(1 + pow(not_a3/Kd, n) + pow(clk/Kd, n) + pow(not_a3/Kd, n)*pow(clk/Kd, n))) + alpha4*(1/(1 + pow(q3/Kd, n))) - delta2*not_q3 -not_q3*(deltaE*SET3/(KM+sum_zero))
di1_dt = alpha_a * activate_2(not_q0, not_q3, Kd_a, n_a) - delta_a * i1
di2_dt = alpha_a * activate_2(q0, not_q1, Kd_a, n_a) - delta_a * i2
di3_dt = alpha_a * activate_2(q1, not_q2, Kd_a, n_a) - delta_a * i3
di4_dt = alpha_a * activate_2(q2, not_q3, Kd_a, n_a) - delta_a * i4
di5_dt = alpha_a * activate_2(q0, q3, Kd_a, n_a) - delta_a * i5
di6_dt = alpha_a * activate_2(not_q0, q1, Kd_a, n_a) - delta_a * i6
di7_dt = alpha_a * activate_2(not_q1, q2, Kd_a, n_a) - delta_a * i7
di8_dt = alpha_a * activate_2(not_q2, q3, Kd_a, n_a) - delta_a * i8
return [da0_dt,dnot_a0_dt,dq0_dt,dnot_q0_dt,da1_dt,dnot_a1_dt,dq1_dt,dnot_q1_dt,da2_dt,dnot_a2_dt,dq2_dt,dnot_q2_dt,da3_dt,dnot_a3_dt,dq3_dt,dnot_q3_dt,di1_dt,di2_dt,di3_dt,di4_dt,di5_dt,di6_dt,di7_dt,di8_dt,dA_dt,dB_dt,dC_dt]
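# A minimal, hypothetical driver for the ODE system above, assuming scipy and
# numpy are available. Y0 packs the 27 state variables in the same order they
# are unpacked at the top of model(); the parameter values are placeholders
# that the repository's run scripts are expected to define.
#
#   import numpy as np
#   from scipy.integrate import odeint
#   Y0 = np.zeros(27)
#   T = np.linspace(0, 100, 1000)
#   params = (alpha1, alpha2, alpha3, alpha4, delta1, delta2, Kd, n,
#             deltaE, KM, alpha_a, delta_a, Kd_a, n_a, KD_cond)
#   sol = odeint(model, Y0, T, args=(params,))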
| 77.923077
| 227
| 0.640079
|
26c09961d2bae31a5e0630fbef90058f14b00e82
| 885
|
py
|
Python
|
11-iface-abc/tombola_runner.py
|
matteoshen/example-code
|
b54c22a1b8cee3fc53d1473cb38ca46eb179b4c3
|
[
"MIT"
] | 5,651
|
2015-01-06T21:58:46.000Z
|
2022-03-31T13:39:07.000Z
|
11-iface-abc/tombola_runner.py
|
matteoshen/example-code
|
b54c22a1b8cee3fc53d1473cb38ca46eb179b4c3
|
[
"MIT"
] | 42
|
2016-12-11T19:17:11.000Z
|
2021-11-23T19:41:16.000Z
|
11-iface-abc/tombola_runner.py
|
matteoshen/example-code
|
b54c22a1b8cee3fc53d1473cb38ca46eb179b4c3
|
[
"MIT"
] | 2,394
|
2015-01-18T10:57:38.000Z
|
2022-03-31T11:41:12.000Z
|
# BEGIN TOMBOLA_RUNNER
import doctest
from tombola import Tombola
# modules to test
import bingo, lotto, tombolist, drum # <1>
TEST_FILE = 'tombola_tests.rst'
TEST_MSG = '{0:16} {1.attempted:2} tests, {1.failed:2} failed - {2}'
def main(argv):
verbose = '-v' in argv
real_subclasses = Tombola.__subclasses__() # <2>
virtual_subclasses = list(Tombola._abc_registry) # <3>
for cls in real_subclasses + virtual_subclasses: # <4>
test(cls, verbose)
def test(cls, verbose=False):
res = doctest.testfile(
TEST_FILE,
globs={'ConcreteTombola': cls}, # <5>
verbose=verbose,
optionflags=doctest.REPORT_ONLY_FIRST_FAILURE)
tag = 'FAIL' if res.failed else 'OK'
print(TEST_MSG.format(cls.__name__, res, tag)) # <6>
if __name__ == '__main__':
import sys
main(sys.argv)
# END TOMBOLA_RUNNER
| 23.918919
| 68
| 0.647458
|
bfa19869db8292b1a1a1deba103dc63f902c1727
| 24,112
|
py
|
Python
|
RequestsGeneration/generateMediSynDataset.py
|
eman-ramadan/deepcache_netai2018
|
b13474dc677f4b13095498baa7b8be05f9c2b3d0
|
[
"BSD-3-Clause"
] | 2
|
2020-10-18T19:56:14.000Z
|
2021-01-08T09:28:49.000Z
|
RequestsGeneration/generateMediSynDataset.py
|
eman-ramadan/deepcache_netai2018
|
b13474dc677f4b13095498baa7b8be05f9c2b3d0
|
[
"BSD-3-Clause"
] | null | null | null |
RequestsGeneration/generateMediSynDataset.py
|
eman-ramadan/deepcache_netai2018
|
b13474dc677f4b13095498baa7b8be05f9c2b3d0
|
[
"BSD-3-Clause"
] | 1
|
2020-11-05T12:20:32.000Z
|
2020-11-05T12:20:32.000Z
|
"""
DeepCache
DeepCache is distributed under the following BSD 3-Clause License:
Copyright(c) 2019
University of Minensota - Twin Cities
Authors: Arvind Narayanan, Saurabh Verma, Eman Ramadan, Pariya Babaie, and Zhi-Li Zhang
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@author: Pariya Babaie (babai008@umn.edu) & Eman Ramadan (eman@cs.umn.edu)
DESCRIPTION:
The code is based on
Tang, Wenting, et al. "MediSyn: A synthetic streaming media service workload generator."
Proceedings of the 13th international workshop on Network and operating systems support
for digital audio and video. ACM, 2003.
https://dl.acm.org/citation.cfm?id=776327
    The variables are mostly based on the paper, but we made some changes to generate requests that fit our needs, as
    shown in the request analysis plots. In addition to object popularity and request interarrival distribution, this
dataset simulates real traffic by:
1- having new objects introduced at different times
2- objects have different types with variable lifespans
3- requests for each object are generated based on an hourly request ratio
INPUT:
The inputs are hard coded and initialized based on the paper.
You can modify the variables in the {initialization} section @Line 50
FLOW:
1- Load the diurnal ratios for hourly rates, and regenerate them if GENERATE_NEW_HOURLY_REQUEST_RATIO is True
2- Generate the days of introduction for objects, number of objects generated per day, and the
interarrival time between objects introduced each day.
3- Generate object frequencies.
4- Generate objects and their properties, lifespan, introduction-time, and end-time.
5- Generate and export the requests.
OUTPUT:
The output is generated in '../Datasets' directory with the name:
'mediSynDataset_x{'hourly_request_function_degree( i.e. 2)'}_O{'NUM_OF_OBJECTS'}.csv'
the requests file contains object_IDs to be requested and their request_time.
"""
from __future__ import print_function
import pandas as pd
import numpy as np
import random
import math
import csv
import sys
import os
""""########################### initialization ####################################################################"""
NUM_OF_OBJECTS = 3500 # number of objects to generate
lambdaFile = 'hourly_request_ratio.csv' # file with hourly request ratio
lambdas = [] # stores the diurnal ratios for non-homogeneous Poisson
curTime = [] # for each object shows the last timeStamp that it was requested
objectPopularities = [] # Contains object popularity
M = 178310 # max frequency for HPC dataset
traceType = 'HPC'
hourly_request_function_degree = 2 # the degree for the function that sets the objects per bin pattern, X^2
dayGaps = [] # interarrival between days
numOfObjectsIntroduced = [] # number of objects generated in each day
interArrivals = [] # generates the interarrival time between objects introduced in a day
lifeSpanType = [] # for each object it holds the type of its lifeSpan
ObjectsLifeSpan = [] # the length of lifeSpan value for each object
requestGenInfo = {} # for each object it holds the info about requests
startTimes = {} # sorted objects based on their introduction time
introductionOrder = [] # random order for introducing objects in a day
sortedOnIntoTime = []
requests = [] # generated requests
objectLengths = []
if sys.version_info[0] < 3:
maxEndDay = -sys.maxint - 1
else:
maxEndDay = -sys.maxsize - 1
WITH_INTRODUCTION = True # flag to allow objects to be introduced at any time
WITH_DAY_GAPS_INTRODUCTION = False # If True, introduce gaps between the objects introduction days,
# otherwise objects are introduced each day
GENERATE_NEW_HOURLY_REQUEST_RATIO = False # If True, a new 'hourly_request_ratio.csv' is generated
MIN_REQ_PER_DAY_THRESHOLD = 1500 # min number of requests to be generated for each object in a day
MIN_OBJ_INTRODUCED_PER_DAY_THRESHOLD = 0.0035 * NUM_OF_OBJECTS # min number of objects to be generated in a day
MAX_OBJ_INTRODUCED_PER_DAY_THRESHOLD = 0.0095 * NUM_OF_OBJECTS # max number of objects to be generated in a day
# Creating output directory if it does not exist
OUTPUTDIR = '../Datasets'
if not os.path.isdir(OUTPUTDIR):
os.mkdir(OUTPUTDIR)
# Checking the existence of hourly_request_ratio.csv file
if not os.path.isfile('hourly_request_ratio.csv'):
GENERATE_NEW_HOURLY_REQUEST_RATIO = True
if GENERATE_NEW_HOURLY_REQUEST_RATIO:
print('Generating hourly request ratio file ...')
rands = np.random.randint(1, 100, 24)
rands = rands/float(np.sum(rands))
index = np.arange(1, 25)
res = 'hourly_request_ratio.csv'
f = open(res, 'w+')
for i in range(len(index)):
if i != len(index)-1:
f.write(str(index[i]) + ',' + str(rands[i])+'\n')
else:
f.write(str(index[i]) + ',' + str(rands[i]))
f.close()
def initialize():
global curTime
loadDiurnalRatios()
print('Generating Objects for Dataset ...')
generateObjectsIntroductionInfo(traceType)
generatePopularities(traceType, int(NUM_OF_OBJECTS))
generateObjects()
print('Generating Requests for Dataset ...')
curTime = [0] * NUM_OF_OBJECTS
generateRequests()
"""################################ Load diurnal ratios #############################################################"""
def loadDiurnalRatios():
with open(lambdaFile, "r+") as fi:
for line in fi:
tmpLambdas = float(line.rstrip('\n').rstrip('\r').split(',')[1])
lambdas.append(tmpLambdas)
fi.close()
"""########################### Object Popularity ##################################################################"""
K = {'HPC': 30, 'HCL': 7}
def generatePopularities(traceType, N):
zipalpha = 0.8
k = K[traceType]
for i in range(1, N+1):
Mk = ((M-1)/k)+1
tmp = (((float(Mk)/(math.pow((float(i+k-1)/k), zipalpha)))-1)*k)+1
objectPopularities.append(tmp)
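# Note on the formula above: it appears to follow the MediSyn paper's
# k-transformed Zipf popularity, with M_k = (M - 1)/k + 1 and
#   freq(i) = (M_k / ((i + k - 1)/k)**zipalpha - 1) * k + 1
# so rank i = 1 recovers the maximum frequency M exactly and larger k flattens
# the tail (k = 30 for the HPC trace, 7 for HCL).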
"""######################## Object Type ###########################################################################"""
def getObjectType():
decision = random.uniform(0, 1)
if decision <= 0.1: # 10 % of objects are news
return 'news'
else:
return 'regular'
"""##################### generating random variates #################################################################"""
def generatePoissonVariate(rand, lambda_poisson):
"""
for diurnal access generation
"""
return -1 * (math.log(1-rand))/lambda_poisson
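# Note: despite the name, this is the inverse-CDF draw of an exponential
# variate, x = -ln(1 - u) / lambda, i.e. the interarrival time between events
# of a Poisson process with rate lambda_poisson.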
def generateParetoVariate(rand, alpha):
return math.pow(1/rand, 1/alpha)
def generateParetoScaledVariate(rand, alpha, beta):
""" F(x) = 1 - (b/x)^a, x >= b """
return beta / (math.pow((1 - rand), (1/alpha)))
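# Derivation sketch for the scaled Pareto above: from F(x) = 1 - (beta/x)**alpha
# for x >= beta, setting F(x) = u and solving gives x = beta / (1 - u)**(1/alpha),
# which is exactly the expression returned (u = rand).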
def generateNormalVariate(mu, sigma):
"""
RV generated using rejection method
"""
variateGenerated = False
while not variateGenerated:
u1 = random.uniform(0, 1)
u2 = random.uniform(0, 1)
x = -1*math.log(u1)
if u2 > math.exp(-1*math.pow((x-1), 2)/2):
continue
else:
u3 = random.uniform(0, 1)
if u3 > 0.5:
return mu+(sigma*x)
else:
return mu-(sigma*x)
def generateLogNormalVariate(mu, sigma):
"""
RV generated using rejection method
"""
variateGenerated = False
while not variateGenerated:
u1 = random.uniform(0, 1)
u2 = random.uniform(0, 1)
x = -1*math.log(u1)
if u2 > math.exp(-1*math.pow((x-1), 2)/2):
continue
else:
return math.exp(mu+(sigma*x))
def generateExponentialVariate(rand, a):
return -(1/a)*math.log(1-rand)
def generateRandVariate(dist, params, numOfVariates):
variates = []
    if dist == 'pareto':
alpha = params['alpha']
for i in range(numOfVariates):
rand = random.uniform(0, 1)
variates.append(generateParetoVariate(rand, alpha))
    if dist == 'paretoScaled':
alpha = params['alpha']
beta = params['beta']
for i in range(numOfVariates):
rand = random.uniform(0, 1)
variates.append(generateParetoScaledVariate(rand, alpha, beta))
    elif dist == 'normal':
mu = params['mu']
sigma = params['sigma']
for i in range(numOfVariates):
variates.append(generateNormalVariate(mu, sigma))
    elif dist == 'logNormal':
mu = params['mu']
sigma = params['sigma']
for i in range(numOfVariates):
variates.append(generateLogNormalVariate(mu, sigma))
    elif dist == 'exp':
mu = params['mu']
for i in range(numOfVariates):
rand = random.uniform(0, 1)
variates.append(generateExponentialVariate(rand, mu))
    elif dist == 'poisson':
mu = params['mu']
for i in range(numOfVariates):
rand = random.uniform(0, 1)
variates.append(generatePoissonVariate(rand, mu))
return variates
"""#################### Object Introduction Info ##################################################################"""
def generateObjectsIntroductionInfo(typeMode):
"""
generates gaps between introduction days based on either pareto or exponential distribution
"""
global NUM_OF_OBJECTS
global numOfObjectsIntroduced
tempNumOfObjectsIntroduced = []
while sum(tempNumOfObjectsIntroduced) < NUM_OF_OBJECTS:
        if typeMode == 'HPC':
if WITH_DAY_GAPS_INTRODUCTION:
pareto_alpha_objectIntro_hpc = 1.0164
object_intro_days_gap = generateRandVariate('pareto', {'alpha':pareto_alpha_objectIntro_hpc}, 1)[0]
if object_intro_days_gap > 20:
object_intro_days_gap = 20
dayGaps.append(object_intro_days_gap)
else:
dayGaps.append(1)
else:
exponential_mu_objectIntro_hpl = 4.2705
object_intro_days_gap = generateRandVariate('exp', {'mu': exponential_mu_objectIntro_hpl}, 1)[0]
dayGaps.append(object_intro_days_gap)
# number of new objects generated in each introduction day Pareto dist
pareto_alpha_numOfObjectsGeneration = 0.8
        pareto_beta_numOfObjectsGeneration = MIN_OBJ_INTRODUCED_PER_DAY_THRESHOLD
numOfObjects_intro_in_day = generateRandVariate('paretoScaled', {'alpha': pareto_alpha_numOfObjectsGeneration,
'beta': pareto_beta_numOfObjectsGeneration}, 1)[0]
        if numOfObjects_intro_in_day > MAX_OBJ_INTRODUCED_PER_DAY_THRESHOLD:
            numOfObjects_intro_in_day = MAX_OBJ_INTRODUCED_PER_DAY_THRESHOLD
tempNumOfObjectsIntroduced.append(numOfObjects_intro_in_day)
# sort generated items
tempNumOfObjectsIntroduced.sort()
extra_days = 0
if len(tempNumOfObjectsIntroduced) % 7 != 0:
extra_days = len(tempNumOfObjectsIntroduced) % 7
for i in range(extra_days):
# generate random int to add these objects to other introduction days to generate full weeks of data
added = False
while not added:
u = random.randint(extra_days+1, len(tempNumOfObjectsIntroduced) - 1)
            if tempNumOfObjectsIntroduced[i] + tempNumOfObjectsIntroduced[u] < MAX_OBJ_INTRODUCED_PER_DAY_THRESHOLD:
tempNumOfObjectsIntroduced[u] += tempNumOfObjectsIntroduced[i]
added = True
# Exclude the extra days after being added to other days
tempNumOfObjectsIntroduced = tempNumOfObjectsIntroduced[extra_days:]
tempNumOfObjectsIntroduced.sort()
    # Fill in the days by dividing the sorted data as follows.
    # This ensures that the most objects are introduced on Friday, then Saturday, and so on.
# The least number of objects are introduced on Tuesday.
# Fri 1, Sat 2, Sun 3, Thu 4, Wed 5, Mon 6, Tuesday 7
weeks = int(len(tempNumOfObjectsIntroduced) / 7)
FriIndex = weeks * 6
SatIndex = weeks * 5
SunIndex = weeks * 4
MonIndex = weeks * 1
TuesIndex = weeks * 0
WedIndex = weeks * 2
ThuIndex = weeks * 3
for i in range(weeks):
numOfObjectsIntroduced.append(tempNumOfObjectsIntroduced[MonIndex+i])
numOfObjectsIntroduced.append(tempNumOfObjectsIntroduced[TuesIndex + i])
numOfObjectsIntroduced.append(tempNumOfObjectsIntroduced[WedIndex + i])
numOfObjectsIntroduced.append(tempNumOfObjectsIntroduced[ThuIndex + i])
numOfObjectsIntroduced.append(tempNumOfObjectsIntroduced[FriIndex + i])
numOfObjectsIntroduced.append(tempNumOfObjectsIntroduced[SatIndex + i])
numOfObjectsIntroduced.append(tempNumOfObjectsIntroduced[SunIndex + i])
# interarrivalTime for objects introduction in a day
pareto_alpha_interArrival = 1.0073
numOfDays = len(numOfObjectsIntroduced)
for i in range(numOfDays):
objectsCountInDay = int(np.round(numOfObjectsIntroduced)[i])
if WITH_INTRODUCTION:
interArrivals.append(generateRandVariate('pareto', {'alpha': pareto_alpha_interArrival}, objectsCountInDay))
else:
interArrivals.append([0]*objectsCountInDay)
NUM_OF_OBJECTS = int(sum(np.round(numOfObjectsIntroduced)))
def generateObjectIntroductionOrder():
return np.random.permutation(range(len(objectPopularities)))+1
"""######################### Object lifespan ######################################################################"""
def generateLifeSpans(numOfObjects, objMode):
logNormal_mu_mean = 3.0935
logNormal_mu_std = 0.9612
logNormal_sigma_mean = 1.1417
logNormal_sigma_std = 0.3067
pareto_alpha_mean = 1.7023
pareto_alpha_std = 0.2092
lifeSpans = []
logNormalMu = generateRandVariate('normal', {'mu': logNormal_mu_mean, 'sigma': logNormal_mu_std}, 1)[0]
logNormalSigma = generateRandVariate('normal', {'mu': logNormal_sigma_mean, 'sigma': logNormal_sigma_std}, 1)[0]
paretoAlpha = generateRandVariate('normal', {'mu': pareto_alpha_mean, 'sigma': pareto_alpha_std}, 1)[0]
for i in range(numOfObjects):
        if objMode[i] == 'regular':
            tmpLifeSpan = generateRandVariate('logNormal', {'mu': logNormalMu, 'sigma': logNormalSigma}, 1)[0]
        elif objMode[i] == 'news':
tmpLifeSpan = generateRandVariate('pareto', {'alpha': paretoAlpha}, 1)[0]
if tmpLifeSpan > 80:
tmpLifeSpan = random.randint(2, 80)
lifeSpans.append((i+1, tmpLifeSpan))
return lifeSpans
"""######################### Object Generation ####################################################################"""
def normalizePopularities():
normalized = np.array(objectPopularities)/max(objectPopularities)
return normalized
def getBinInterval(time):
return (math.floor(time/float(3600)))/float(23)
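# Note: getBinInterval maps an intra-day time in seconds to a fraction of the
# day via its hour bin, floor(t / 3600) / 23, so hour 0 -> 0.0 and hour 23 -> 1.0;
# it is used when recording object introduction startTimes.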
def generateObjects():
global ObjectsLifeSpan
global introductionOrder
global sortedOnIntoTime
global maxEndDay
normalizedPop = normalizePopularities()
for i in range(len(normalizedPop)):
lifeSpanType.append(getObjectType())
# tuple (objID, LifeSpan), objID from 1 to N
ObjectsLifeSpan = generateLifeSpans(len(objectPopularities), lifeSpanType)
introductionOrder = generateObjectIntroductionOrder() # objectIntroductionOrder from 1 to N
for i in range(1, len(objectPopularities)+1):
requestGenInfo[i] = {'startDay': 0, 'lifeSpan': 0, 'endDay': 0, 'arrivalTime': 0, 'type': '', 'freq': 0,
'unitPerDay': 0} # From 1 to N
startTimes[i] = 0
objCnt = 0
dayCnt = 0
for i in range(len(numOfObjectsIntroduced)):
dayTime = 0
dayCnt = dayCnt+round(dayGaps[i])
for j in range(int(np.round(numOfObjectsIntroduced)[i])):
objIntroduced = introductionOrder[objCnt]
dayTime = dayTime+interArrivals[i][j]
requestGenInfo[objIntroduced]['startDay'] = dayCnt
requestGenInfo[objIntroduced]['arrivalTime'] = dayTime
requestGenInfo[objIntroduced]['lifeSpan'] = ObjectsLifeSpan[objIntroduced-1][1]
requestGenInfo[objIntroduced]['type'] = lifeSpanType[objIntroduced-1]
requestGenInfo[objIntroduced]['freq'] = objectPopularities[objIntroduced-1]
# Generating at least a minimum number of requests per day
if requestGenInfo[objIntroduced]['freq'] / requestGenInfo[objIntroduced]['lifeSpan'] \
< MIN_REQ_PER_DAY_THRESHOLD:
# generate a random number for which number to update
decision = random.uniform(0, 1)
if decision <= 0.5:
# update the object frequency
life_span = random.randint(10, 80)
requestGenInfo[objIntroduced]['freq'] = life_span * MIN_REQ_PER_DAY_THRESHOLD
requestGenInfo[objIntroduced]['lifeSpan'] = life_span
else:
# update the object life-span
freq = random.randint(MIN_REQ_PER_DAY_THRESHOLD, 80*MIN_REQ_PER_DAY_THRESHOLD)
requestGenInfo[objIntroduced]['freq'] = freq
requestGenInfo[objIntroduced]['lifeSpan'] = freq / MIN_REQ_PER_DAY_THRESHOLD
startTimes[objIntroduced] = dayCnt+getBinInterval(dayTime)
requestGenInfo[objIntroduced]['endDay'] = requestGenInfo[objIntroduced]['lifeSpan'] + \
requestGenInfo[objIntroduced]['startDay']
requestGenInfo[objIntroduced]['totalDens'] = math.pow(requestGenInfo[objIntroduced]['lifeSpan'],
hourly_request_function_degree)
objectLengths.append([objIntroduced, requestGenInfo[objIntroduced]['startDay'],
requestGenInfo[objIntroduced]['lifeSpan'], requestGenInfo[objIntroduced]['endDay'],
requestGenInfo[objIntroduced]['freq']])
if requestGenInfo[objIntroduced]['endDay'] > maxEndDay:
maxEndDay = requestGenInfo[objIntroduced]['endDay']
objCnt = objCnt+1
sortedOnIntoTime = sorted(startTimes, key=startTimes.get)
def generateDiurnalAccess(obj, diurnalRatio, dayCnt):
global requests
lifeTimeLeft = requestGenInfo[obj]['lifeSpan']
if lifeTimeLeft > 1:
lastDay = requestGenInfo[obj]['endDay']
objCount = abs(requestGenInfo[obj]['freq']*(((math.pow(dayCnt-lastDay, hourly_request_function_degree)
- math.pow(lastDay-dayCnt+1, hourly_request_function_degree)))/requestGenInfo[obj]['totalDens']))
requestGenInfo[obj]['lifeSpan'] = requestGenInfo[obj]['lifeSpan']-1
for i in range(len(diurnalRatio)):
tmpCount = int(np.round(objCount*diurnalRatio[i]))
if tmpCount != 0:
tmpLambda = (tmpCount/float(3600))
reqInterArrivals = generateRandVariate('exp', {'mu': tmpLambda}, tmpCount)
for tmpInter in reqInterArrivals:
requests.append((obj, (curTime[obj-1]+tmpInter)))
curTime[obj-1] = curTime[obj-1]+tmpInter
else:
lastDay = requestGenInfo[obj]['endDay']
objCount = abs(requestGenInfo[obj]['freq']*(((math.pow(lastDay-dayCnt, hourly_request_function_degree)
- math.pow(lastDay-(dayCnt+requestGenInfo[obj]['lifeSpan']), hourly_request_function_degree))) /
requestGenInfo[obj]['totalDens']))
spanToGenerate = int(math.floor(requestGenInfo[obj]['lifeSpan']*10))
requestGenInfo[obj]['lifeSpan'] = 0
for i in range(spanToGenerate):
tmpCount = int(np.round(objCount*diurnalRatio[i]))
if tmpCount != 0:
tmpLambda = (tmpCount/float(3600))
reqInterArrivals = generateRandVariate('exp', {'mu': tmpLambda}, tmpCount)
for tmpInter in reqInterArrivals:
requests.append((obj, (curTime[obj-1]+tmpInter)))
curTime[obj-1] = curTime[obj-1]+tmpInter
"""######################### Requests Generation ##################################################################"""
def generateRequests():
global requests
global curTime
OUTPUTFILENAME = '{0}/mediSynDataset_x{1}_O{2}.csv'.format(OUTPUTDIR, hourly_request_function_degree, NUM_OF_OBJECTS)
if not os.path.isfile(OUTPUTFILENAME):
fi = open(OUTPUTFILENAME, 'w')
fi.write('object_ID,request_time\n')
fi.close()
dayCount = requestGenInfo[sortedOnIntoTime[0]]['startDay']
reqGendf = pd.DataFrame.from_dict(requestGenInfo, orient='index')
reqGendf['objID'] = reqGendf.index
while dayCount <= maxEndDay:
objList = list(reqGendf[(reqGendf['startDay'] <= dayCount) & (reqGendf['endDay'] >= dayCount)]['objID'])
for obj in objList:
if curTime[obj-1] == 0:
curTime[obj-1] = (dayCount*86400) + requestGenInfo[obj]['arrivalTime']
generateDiurnalAccess(obj, lambdas, dayCount)
dayCount = dayCount + 1
if dayCount % 20 == 0:
requests = sorted(requests, key=lambda x: x[1])
saveRequestsToFile(OUTPUTFILENAME)
requests = []
print('{} Days Processed of {} Total Days'.format(dayCount, int(maxEndDay)))
print('MediSyn Dataset Saved to Output file: {}'.format(OUTPUTFILENAME))
def saveRequestsToFile(OUTPUTFILENAME):
with open(OUTPUTFILENAME, 'a') as resultFile:
wr = csv.writer(resultFile, dialect='excel')
wr.writerows(requests)
"""##################################################################################################################"""
def main():
initialize()
if __name__ == "__main__": main()
| 43.21147
| 121
| 0.623797
|
6e5b43b4277df4f548ca85c5df9163fdb9438cf2
| 1,761
|
py
|
Python
|
src/wgm/newhost.py
|
allhailthetail/wgm
|
6d8fbc84deaf3feeda22d6c5c58a0dc3d40e323b
|
[
"MIT"
] | null | null | null |
src/wgm/newhost.py
|
allhailthetail/wgm
|
6d8fbc84deaf3feeda22d6c5c58a0dc3d40e323b
|
[
"MIT"
] | null | null | null |
src/wgm/newhost.py
|
allhailthetail/wgm
|
6d8fbc84deaf3feeda22d6c5c58a0dc3d40e323b
|
[
"MIT"
] | null | null | null |
import wgkey
import os
def newhost(hostname, ip_cidr, listenport):
'''
Responsible for creating a new "host" interface,
writing the interface to /etc/wireguard/.conf
and backing up a copy to /etc/wireguard/hosts/.conf.bak
'''
# replace default values if passed a None type:
    if hostname is None:
        hostname = 'wg0'
    if ip_cidr is None:
        ip_cidr = '10.0.0.1/24'
    if listenport is None:
        listenport = 51820
# PLACEHOLDER VALUES, REPLACE LATER!!
# can a single firewall.d zone handle the whole thing? simple on and off?
PostUp = '#/etc/wireguard/PostUp.sh'
PostDown = '#/etc/wireguard/PostDown.sh'
    # call wgkey.genkeys() to fetch a unique key pair
KeyPair = wgkey.genkeys()
# create hostname.* files
os.mkdir(f'/etc/wireguard/{hostname}.d')
with open(f'/etc/wireguard/{hostname}.d/{hostname}.host.conf', 'w') as f: # hostfile in drop directory
f.writelines([
            f'#{hostname}.host.conf\n',
            f"#PublicKey = {KeyPair['pubkey']}\n\n",
'[Interface]\n',
f"PrivateKey = {KeyPair['privkey']}\n", # private key of host interface
f'Address = {ip_cidr}\n', # public-facing WAN address
f'ListenPort = {listenport}\n', # port wireguard listens for connections
f'PostUp = {PostUp}\n', # firewall script to run on initialization
f'PostDown = {PostDown}\n' # firewall script to run on shutdown
])
f.close()
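# Minimal usage sketch (illustrative assumption, not part of the original module): the
# surrounding CLI is expected to call newhost() with parsed arguments, falling back to
# the defaults above when a value is omitted.
#
#   newhost('wg0', '10.0.0.1/24', 51820)  # writes /etc/wireguard/wg0.d/wg0.host.conf
#   newhost(None, None, None)             # same result via the built-in defaults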
| 44.025
| 124
| 0.522998
|
3fc0a3c0da154fb343f6eda1d37bc6ce805f4c99
| 362
|
py
|
Python
|
lab12/IRtest1.py
|
chrijack/csap-rpi
|
afc8bc8b826285682855d044d54a87b7ab8ab2e3
|
[
"Unlicense"
] | null | null | null |
lab12/IRtest1.py
|
chrijack/csap-rpi
|
afc8bc8b826285682855d044d54a87b7ab8ab2e3
|
[
"Unlicense"
] | null | null | null |
lab12/IRtest1.py
|
chrijack/csap-rpi
|
afc8bc8b826285682855d044d54a87b7ab8ab2e3
|
[
"Unlicense"
] | null | null | null |
"""
If KEY_1 is pressed, this script will be executed and LED1 will turn on (or off).
LED1 connects to GPIO5 (BCM_GPIO 24).
"""
import RPi.GPIO as GPIO
PIN = 24
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(PIN, GPIO.IN)
GPIO.setup(PIN, GPIO.OUT)  # this OUT setup takes effect; the IN call above is redundant
# Toggle the LED by inverting the pin's current output state
if GPIO.input(PIN) == 0:
GPIO.output(PIN, GPIO.HIGH)
else:
GPIO.output(PIN, GPIO.LOW)
| 20.111111
| 81
| 0.679558
|
dc89874534623185904c22303858b5a30fe3155b
| 1,425
|
py
|
Python
|
Python3/0558-Logical-Or-of-Two-Binary-Grids-Represented-as-Quad-Trees/soln.py
|
wyaadarsh/LeetCode-Solutions
|
3719f5cb059eefd66b83eb8ae990652f4b7fd124
|
[
"MIT"
] | 5
|
2020-07-24T17:48:59.000Z
|
2020-12-21T05:56:00.000Z
|
Python3/0558-Logical-Or-of-Two-Binary-Grids-Represented-as-Quad-Trees/soln.py
|
zhangyaqi1989/LeetCode-Solutions
|
2655a1ffc8678ad1de6c24295071308a18c5dc6e
|
[
"MIT"
] | null | null | null |
Python3/0558-Logical-Or-of-Two-Binary-Grids-Represented-as-Quad-Trees/soln.py
|
zhangyaqi1989/LeetCode-Solutions
|
2655a1ffc8678ad1de6c24295071308a18c5dc6e
|
[
"MIT"
] | 2
|
2020-07-24T17:49:01.000Z
|
2020-08-31T19:57:35.000Z
|
"""
# Definition for a QuadTree node.
class Node:
def __init__(self, val, isLeaf, topLeft, topRight, bottomLeft, bottomRight):
self.val = val
self.isLeaf = isLeaf
self.topLeft = topLeft
self.topRight = topRight
self.bottomLeft = bottomLeft
self.bottomRight = bottomRight
"""
class Solution:
def intersect(self, quadTree1, quadTree2):
"""
:type quadTree1: Node
:type quadTree2: Node
:rtype: Node
"""
        if quadTree1.isLeaf:
            # A leaf of all 1s dominates the OR; a leaf of all 0s contributes nothing.
            return quadTree1 if quadTree1.val else quadTree2
        elif quadTree2.isLeaf:
            return quadTree2 if quadTree2.val else quadTree1
else:
topLeft = self.intersect(quadTree1.topLeft, quadTree2.topLeft)
topRight = self.intersect(quadTree1.topRight, quadTree2.topRight)
bottomLeft = self.intersect(quadTree1.bottomLeft, quadTree2.bottomLeft)
bottomRight = self.intersect(quadTree1.bottomRight, quadTree2.bottomRight)
children = [topLeft, topRight, bottomLeft, bottomRight]
leaves = [child.isLeaf for child in children]
values = [child.val for child in children]
if all(leaves) and (sum(values) in (0, 4)):
return Node(topLeft.val, True, None, None, None, None)
else:
return Node(False, False, topLeft, topRight, bottomLeft, bottomRight)
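# Clarifying note (added, not part of the original solution): despite the method name
# "intersect" from the problem's stub, the merge computes the logical OR of the two grids.
# Leaves short-circuit the recursion, and four leaf children holding the same value are
# collapsed back into a single leaf, so the result stays a canonical quad tree; each node
# is visited at most once.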
| 40.714286
| 86
| 0.625263
|
27614ce2996396a9c7b320562e9797e935f67056
| 761
|
py
|
Python
|
src/LDTPLibrary/utils/events/scope_event.py
|
wywincl/LDTPLibrary
|
2efe912216fddccb63aae15d58adce9334dd89bf
|
[
"ECL-2.0",
"Apache-2.0"
] | 10
|
2016-02-14T16:58:38.000Z
|
2022-01-17T00:50:05.000Z
|
src/LDTPLibrary/utils/events/scope_event.py
|
wywincl/LDTPLibrary
|
2efe912216fddccb63aae15d58adce9334dd89bf
|
[
"ECL-2.0",
"Apache-2.0"
] | 6
|
2016-01-12T14:07:09.000Z
|
2020-01-29T16:10:31.000Z
|
src/LDTPLibrary/utils/events/scope_event.py
|
wywincl/LDTPLibrary
|
2efe912216fddccb63aae15d58adce9334dd89bf
|
[
"ECL-2.0",
"Apache-2.0"
] | 9
|
2016-04-10T06:51:38.000Z
|
2020-01-21T15:12:00.000Z
|
from .event import Event
from robot.libraries.BuiltIn import BuiltIn
class ScopeEvent(Event):
def __init__(self, scope, action, *args, **kwargs):
self.scope = scope
self.action = action
self.action_args = args
self.action_kwargs = kwargs
if scope == 'current':
suite = BuiltIn().get_variable_value('${SUITE NAME}')
test = BuiltIn().get_variable_value('${TEST NAME}', '')
self.scope = suite + '.' + test if test != '' else suite
def trigger(self, *args, **kwargs):
if args[0] == self.scope:
self.action(*self.action_args, **self.action_kwargs)
class ScopeStart(ScopeEvent):
name = 'scope_start'
class ScopeEnd(ScopeEvent):
name = 'scope_end'
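# Illustrative usage sketch (an assumption, not from the original file): registering a
# ScopeEnd event whose action runs only when the matching suite/test scope finishes.
#
#   event = ScopeEnd('current', lambda: print('scope finished'))
#   ...
#   event.trigger('MySuite.MyTest')  # the action fires only if the name matches the scope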
| 27.178571
| 68
| 0.613666
|
97bc0aadd5bc0403f78eb0de635d00be72269da3
| 12,759
|
py
|
Python
|
alembic/versions/affc03cb46f5_game_data.py
|
andreasots/lrrbot
|
6564f641be3548fcdeb500337770cf2c03759428
|
[
"Apache-2.0"
] | 24
|
2015-02-06T10:58:44.000Z
|
2021-12-07T14:55:12.000Z
|
alembic/versions/affc03cb46f5_game_data.py
|
andreasots/lrrbot
|
6564f641be3548fcdeb500337770cf2c03759428
|
[
"Apache-2.0"
] | 343
|
2015-01-13T21:55:41.000Z
|
2022-03-31T16:32:39.000Z
|
alembic/versions/affc03cb46f5_game_data.py
|
andreasots/lrrbot
|
6564f641be3548fcdeb500337770cf2c03759428
|
[
"Apache-2.0"
] | 18
|
2015-03-07T02:10:48.000Z
|
2019-09-02T00:48:01.000Z
|
revision = 'affc03cb46f5'
down_revision = '988883a6be1d'
branch_labels = None
depends_on = None
import alembic
import sqlalchemy
import json
import itertools
import requests
import logging
import urllib.parse
log = logging.getLogger("affc03cb46f5_game_data")
def upgrade():
conn = alembic.context.get_context().bind
meta = sqlalchemy.MetaData(bind=conn)
meta.reflect()
users = meta.tables["users"]
all_users = dict(conn.execute(sqlalchemy.select([users.c.name, users.c.id])).fetchall())
shows = alembic.op.create_table(
"shows",
sqlalchemy.Column("id", sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column("string_id", sqlalchemy.Text, nullable=False, unique=True),
sqlalchemy.Column("name", sqlalchemy.Text, nullable=False),
)
alembic.op.execute(sqlalchemy.schema.CreateSequence(sqlalchemy.Sequence("games_id_seq", start=-1, increment=-1)))
games = alembic.op.create_table(
"games",
sqlalchemy.Column("id", sqlalchemy.Integer, sqlalchemy.Sequence("game_id_seq"), primary_key=True, server_default=sqlalchemy.func.nextval('games_id_seq')),
sqlalchemy.Column("name", sqlalchemy.Text, unique=True, nullable=False),
)
alembic.op.execute("ALTER SEQUENCE games_id_seq OWNED BY games.id")
game_per_show_data = alembic.op.create_table(
"game_per_show_data",
sqlalchemy.Column("game_id", sqlalchemy.Integer, sqlalchemy.ForeignKey("games.id", ondelete="CASCADE", onupdate="CASCADE"), nullable=False),
sqlalchemy.Column("show_id", sqlalchemy.Integer, sqlalchemy.ForeignKey("shows.id", ondelete="CASCADE", onupdate="CASCADE"), nullable=False),
sqlalchemy.Column("display_name", sqlalchemy.Text),
sqlalchemy.Column("verified", sqlalchemy.Boolean),
)
alembic.op.create_primary_key("game_per_show_data_pk", "game_per_show_data", ["game_id", "show_id"])
stats = alembic.op.create_table(
"stats",
sqlalchemy.Column("id", sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column("string_id", sqlalchemy.Text, nullable=False, unique=True),
sqlalchemy.Column("singular", sqlalchemy.Text),
sqlalchemy.Column("plural", sqlalchemy.Text),
sqlalchemy.Column("emote", sqlalchemy.Text),
)
game_stats = alembic.op.create_table(
"game_stats",
sqlalchemy.Column("game_id", sqlalchemy.Integer, sqlalchemy.ForeignKey("games.id", ondelete="CASCADE", onupdate="CASCADE"), nullable=False),
sqlalchemy.Column("show_id", sqlalchemy.Integer, sqlalchemy.ForeignKey("shows.id", ondelete="CASCADE", onupdate="CASCADE"), nullable=False),
		sqlalchemy.Column("stat_id", sqlalchemy.Integer, sqlalchemy.ForeignKey("stats.id", ondelete="CASCADE", onupdate="CASCADE"), nullable=False),
sqlalchemy.Column("count", sqlalchemy.Integer, nullable=False),
)
alembic.op.create_primary_key("game_stats_pk", "game_stats", ["game_id", "show_id", "stat_id"])
game_votes = alembic.op.create_table(
"game_votes",
sqlalchemy.Column("game_id", sqlalchemy.Integer, sqlalchemy.ForeignKey("games.id", ondelete="CASCADE", onupdate="CASCADE"), nullable=False),
sqlalchemy.Column("show_id", sqlalchemy.Integer, sqlalchemy.ForeignKey("shows.id", ondelete="CASCADE", onupdate="CASCADE"), nullable=False),
sqlalchemy.Column("user_id", sqlalchemy.Integer, sqlalchemy.ForeignKey("users.id", ondelete="CASCADE", onupdate="CASCADE"), nullable=False),
sqlalchemy.Column("vote", sqlalchemy.Boolean, nullable=False),
)
alembic.op.create_primary_key("game_votes_pk", "game_votes", ["game_id", "show_id", "user_id"])
disabled_stats = alembic.op.create_table(
"disabled_stats",
sqlalchemy.Column("show_id", sqlalchemy.Integer, sqlalchemy.ForeignKey("shows.id", ondelete="CASCADE", onupdate="CASCADE"), nullable=False),
sqlalchemy.Column("stat_id", sqlalchemy.Integer, sqlalchemy.ForeignKey("stats.id", ondelete="CASCADE", onupdate="CASCADE"), nullable=False),
)
alembic.op.create_primary_key("disabled_stats_pk", "disabled_stats", ["show_id", "stat_id"])
# Move data
datafile = alembic.context.config.get_section_option("lrrbot", "datafile", "data.json")
clientid = alembic.context.config.get_section_option("lrrbot", "twitch_clientid")
with open(datafile) as f:
data = json.load(f)
# stats
alembic.op.bulk_insert(stats, [{
"string_id": string_id,
"emote": values.get("emote"),
"plural": values.get("plural"),
"singular": values.get("singular"),
} for string_id, values in data.get("stats", {}).items()])
all_stats = dict(conn.execute(sqlalchemy.select([stats.c.string_id, stats.c.id])).fetchall())
# shows
alembic.op.bulk_insert(shows, [{
"string_id": show,
"name": values["name"],
} for show, values in data.get("shows", {}).items()])
all_shows = dict(conn.execute(sqlalchemy.select([shows.c.string_id, shows.c.id])).fetchall())
# games
def parse_id(id):
if id is None:
return None
try:
return int(id)
except ValueError:
return None
for show in data.get("shows", {}).values():
for game_id, game in show.get("games", {}).items():
game_id = parse_id(game_id) or parse_id(game.get("id"))
if game_id is None:
conn.execute("INSERT INTO games (name) VALUES (%(name)s) ON CONFLICT (name) DO NOTHING", {"name": game["name"]})
else:
conn.execute("""
INSERT INTO games (
id,
name
) VALUES (
%(id)s,
%(name)s
) ON CONFLICT (name) DO UPDATE SET
id = EXCLUDED.id
""", {"id": game_id, "name": game["name"]})
all_games = dict(conn.execute(sqlalchemy.select([games.c.name, games.c.id])).fetchall())
# game_per_show_data
display_names = []
for show_id, show in data.get("shows", {}).items():
for game in show.get("games", {}).values():
if "display" in game:
display_names.append({
"show_id": all_shows[show_id],
"game_id": parse_id(game.get("id")) or all_games[game["name"]],
"display_name": game["display"],
})
alembic.op.bulk_insert(game_per_show_data, display_names)
# game_stats
all_game_stats = []
for show_id, show in data.get("shows", {}).items():
for game in show.get("games", {}).values():
game_id = parse_id(game.get("id")) or all_games[game["name"]]
for stat, count in game.get("stats", {}).items():
all_game_stats.append({
"show_id": all_shows[show_id],
"game_id": game_id,
"stat_id": all_stats[stat],
"count": count,
})
alembic.op.bulk_insert(game_stats, all_game_stats)
# game_votes
all_votes = []
with requests.Session() as session:
for show_id, show in data.get("shows", {}).items():
for game in show.get("games", {}).values():
game_id = parse_id(game.get("id")) or all_games[game["name"]]
for nick, vote in game.get("votes", {}).items():
if nick not in all_users:
try:
req = session.get(
"https://api.twitch.tv/kraken/users?login=%s" % urllib.parse.quote(nick),
headers={'Client-ID': clientid, 'Accept': 'application/vnd.twitchtv.v5+json'})
req.raise_for_status()
user = req.json()['users'][0]
all_users[nick] = user["_id"]
alembic.op.bulk_insert(users, [{
"id": user["_id"],
"name": user["name"],
"display_name": user.get("display_name"),
}])
except Exception:
log.exception("Failed to fetch data for %r", nick)
all_users[nick] = None
if all_users[nick] is None:
continue
all_votes.append({
"show_id": all_shows[show_id],
"game_id": game_id,
"user_id": all_users[nick],
"vote": vote,
})
alembic.op.bulk_insert(game_votes, all_votes)
# disabled_stats
if "swiftlycam" in all_shows:
for_cameron = []
if "death" in all_stats:
for_cameron.append({
"show_id": all_shows["swiftlycam"],
"stat_id": all_stats["death"]
})
if "tilt" in all_stats:
for_cameron.append({
"show_id": all_shows["swiftlycam"],
"stat_id": all_stats["tilt"]
})
if "pave" in all_stats:
for_cameron.append({
"show_id": all_shows["swiftlycam"],
"stat_id": all_stats["pave"],
})
alembic.op.bulk_insert(disabled_stats, for_cameron)
alembic.op.add_column("quotes", sqlalchemy.Column("game_id", sqlalchemy.Integer, sqlalchemy.ForeignKey("games.id", ondelete="CASCADE", onupdate="CASCADE")))
alembic.op.add_column("quotes", sqlalchemy.Column("show_id", sqlalchemy.Integer, sqlalchemy.ForeignKey("shows.id", ondelete="CASCADE", onupdate="CASCADE")))
alembic.op.execute("""
UPDATE quotes
SET
show_id = shows.id
FROM shows
WHERE quotes.show = shows.name
""")
alembic.op.execute("""
UPDATE quotes
SET
game_id = game_per_show_data.game_id
FROM game_per_show_data
WHERE quotes.game = game_per_show_data.display_name AND game_per_show_data.show_id = quotes.show_id
""")
alembic.op.execute("""
UPDATE quotes
SET
game_id = games.id
FROM games
WHERE quotes.game = games.name
""")
alembic.op.drop_column("quotes", "game")
alembic.op.drop_column("quotes", "show")
data.pop("shows", None)
data.pop("stats", None)
with open(datafile, "w") as f:
json.dump(data, f, indent=2, sort_keys=True)
def downgrade():
conn = alembic.context.get_context().bind
meta = sqlalchemy.MetaData(bind=conn)
meta.reflect()
datafile = alembic.context.config.get_section_option("lrrbot", "datafile", "data.json")
with open(datafile) as f:
data = json.load(f)
data["stats"] = {}
stats = meta.tables["stats"]
for id, singular, plural, emote in conn.execute(sqlalchemy.select([stats.c.string_id, stats.c.singular, stats.c.plural, stats.c.emote])):
data["stats"][id] = {}
if singular is not None:
data["stats"][id]["singular"] = singular
if plural is not None:
data["stats"][id]["plural"] = plural
if emote is not None:
data["stats"][id]["emote"] = emote
data["shows"] = {}
shows = meta.tables["shows"]
games = meta.tables["games"]
game_per_show_data = meta.tables["game_per_show_data"]
game_votes = meta.tables["game_votes"]
game_stats = meta.tables["game_stats"]
users = meta.tables["users"]
for fkey, id, name in conn.execute(sqlalchemy.select([shows.c.id, shows.c.string_id, shows.c.name])).fetchall():
data["shows"][id] = {"name": name, "games": {}}
query = sqlalchemy.select([games.c.id, games.c.name, stats.c.string_id, game_stats.c.count])
query = query.select_from(
game_stats
.join(games, game_stats.c.game_id == games.c.id)
.join(stats, game_stats.c.stat_id == stats.c.id)
)
query = query.where(game_stats.c.show_id == fkey)
for game_id, name, stat_id, count in conn.execute(query).fetchall():
if game_id < 0:
game_id = name
else:
game_id = str(game_id)
data["shows"][id]["games"].setdefault(game_id, {"id": game_id, "name": name, "stats": {}, "votes": {}})["stats"][stat_id] = count
query = sqlalchemy.select([games.c.id, games.c.name, users.c.name, game_votes.c.vote])
query = query.select_from(
game_votes
.join(games, game_votes.c.game_id == games.c.id)
.join(users, game_votes.c.user_id == users.c.id)
)
query = query.where(game_votes.c.show_id == fkey)
for game_id, name, user, vote in conn.execute(query).fetchall():
if game_id < 0:
game_id = name
else:
game_id = str(game_id)
data["shows"][id]["games"].setdefault(game_id, {"id": game_id, "name": name, "stats": {}, "votes": {}})["votes"][user] = vote
query = sqlalchemy.select([games.c.id, games.c.name, game_per_show_data.c.display_name])
query = query.select_from(
game_per_show_data.join(games, game_per_show_data.c.game_id == games.c.id)
)
query = query.where(game_per_show_data.c.show_id == fkey)
for game_id, name, display_name in conn.execute(query).fetchall():
if game_id < 0:
game_id = name
else:
game_id = str(game_id)
if display_name is not None:
data["shows"][id]["games"].setdefault(game_id, {"id": game_id, "name": name, "stats": {}, "votes": {}})["display"] = display_name
alembic.op.add_column("quotes", sqlalchemy.Column("game", sqlalchemy.Text))
alembic.op.add_column("quotes", sqlalchemy.Column("show", sqlalchemy.Text))
alembic.op.execute("""
UPDATE quotes
SET
show = shows.name
FROM shows
WHERE quotes.show_id = shows.id
""")
alembic.op.execute("""
UPDATE quotes
SET
game = games.name
FROM games
WHERE quotes.game_id = games.id
""")
alembic.op.execute("""
UPDATE quotes
SET
game = game_per_show_data.display_name
FROM game_per_show_data
WHERE quotes.game_id = game_per_show_data.game_id AND game_per_show_data.show_id = quotes.show_id
""")
alembic.op.drop_column("quotes", "game_id")
alembic.op.drop_column("quotes", "show_id")
alembic.op.drop_table("disabled_stats")
alembic.op.drop_table("game_votes")
alembic.op.drop_table("game_stats")
alembic.op.drop_table("stats")
alembic.op.drop_table("game_per_show_data")
alembic.op.drop_table("games")
alembic.op.drop_table("shows")
with open(datafile, "w") as f:
json.dump(data, f, indent=2, sort_keys=True)
| 37.198251
| 157
| 0.695352
|
d75ffcf6a870d4d3ab46a783fe89cd8d15252e43
| 6,674
|
py
|
Python
|
tests/applicator-utils/test_applicators.py
|
marcgarreau/eth-utils
|
386863e6b7d95ac5acb11f6ba81619dc88ed6eb9
|
[
"MIT"
] | 1
|
2021-05-13T18:32:16.000Z
|
2021-05-13T18:32:16.000Z
|
tests/applicator-utils/test_applicators.py
|
marcgarreau/eth-utils
|
386863e6b7d95ac5acb11f6ba81619dc88ed6eb9
|
[
"MIT"
] | null | null | null |
tests/applicator-utils/test_applicators.py
|
marcgarreau/eth-utils
|
386863e6b7d95ac5acb11f6ba81619dc88ed6eb9
|
[
"MIT"
] | null | null | null |
import collections
import pytest
import eth_utils
from eth_utils.curried import (
apply_formatter_at_index,
apply_formatter_if,
apply_formatter_to_array,
apply_formatters_to_dict,
apply_formatters_to_sequence,
apply_key_map,
apply_one_of_formatters,
is_list_like,
is_string,
)
def i_put_my_thing_down_flip_it_and_reverse_it(lyric):
return "".join(reversed(lyric))
CONDITION_FORMATTER_PAIRS = (
(is_list_like, tuple),
(is_string, i_put_my_thing_down_flip_it_and_reverse_it),
)
def test_format_dict_error():
with pytest.raises(ValueError) as exc_info:
apply_formatters_to_dict({"myfield": int}, {"myfield": "a"})
with pytest.raises(ValueError) as exc_info:
eth_utils.apply_formatters_to_dict({"myfield": int}, {"myfield": "a"})
assert "myfield" in str(exc_info.value)
@pytest.mark.parametrize(
"formatter, value, expected",
(
(
{"should_be_int": int, "should_be_bool": bool},
{"should_be_int": 1.2, "should_be_bool": 3.4, "pass_through": 5.6},
{"should_be_int": 1, "should_be_bool": True, "pass_through": 5.6},
),
),
)
def test_apply_formatters_to_dict(formatter, value, expected):
assert eth_utils.apply_formatters_to_dict(formatter, value) == expected
mapper = apply_formatters_to_dict(formatter)
assert mapper(value) == expected
@pytest.mark.parametrize(
"formatter, value, expected",
(
(
{"black": "orange", "Internet": "Ethereum"},
{"black": 1.2, "Internet": 3.4, "pass_through": 5.6},
{"orange": 1.2, "Ethereum": 3.4, "pass_through": 5.6},
),
),
)
def test_apply_key_map(formatter, value, expected):
assert eth_utils.apply_key_map(formatter, value) == expected
mapper = apply_key_map(formatter)
assert mapper(value) == expected
@pytest.mark.parametrize(
"formatter, value",
(
({"a": "b"}, {"b": 3}),
({"a": "b"}, {"a": 2, "b": 3}),
({"a": "b"}, collections.OrderedDict((("a", 2), ("b", 3)))),
({"a": "b"}, collections.OrderedDict((("b", 3), ("a", 2)))),
),
)
def test_apply_key_map_with_key_conflicts_raises_exception(formatter, value):
with pytest.raises(KeyError):
eth_utils.apply_key_map(formatter, value)
@pytest.mark.parametrize(
"condition, formatter, value, expected",
(
(is_string, bool, 1, 1),
(is_string, bool, "1", True),
(is_string, bool, "", False),
),
)
def test_apply_formatter_if(condition, formatter, value, expected):
assert eth_utils.apply_formatter_if(condition, formatter, value) == expected
# must be able to curry
conditional_formatter = apply_formatter_if(condition, formatter)
assert conditional_formatter(value) == expected
@pytest.mark.parametrize(
"condition_formatters, value, expected",
(
(CONDITION_FORMATTER_PAIRS, "my thing", "gniht ym"),
(CONDITION_FORMATTER_PAIRS, [2, 3], (2, 3)),
(CONDITION_FORMATTER_PAIRS, 1, ValueError),
),
)
def test_apply_one_of_formatters(condition_formatters, value, expected):
if isinstance(expected, type) and issubclass(expected, Exception):
with pytest.raises(expected):
apply_one_of_formatters(condition_formatters, value)
with pytest.raises(expected):
eth_utils.apply_one_of_formatters(condition_formatters, value)
else:
assert (
eth_utils.apply_one_of_formatters(condition_formatters, value) == expected
)
# must be able to curry
apply_one = apply_one_of_formatters(condition_formatters)
assert apply_one(value) == expected
@pytest.mark.parametrize(
"formatter, index, value, expected",
(
(bool, 1, [1, 2, 3], [1, True, 3]),
(bool, 1, (1, 2, 3), (1, True, 3)),
(bool, 3, (1, 2, 3), IndexError),
),
)
def test_apply_formatter_at_index(formatter, index, value, expected):
if isinstance(expected, type) and issubclass(expected, Exception):
with pytest.raises(expected):
apply_formatter_at_index(formatter, index, value)
with pytest.raises(expected):
eth_utils.apply_formatter_at_index(formatter, index, value)
else:
assert eth_utils.apply_formatter_at_index(formatter, index, value) == expected
# must be able to curry
    targeted_formatter = apply_formatter_at_index(formatter, index)
    assert targeted_formatter(value) == expected
SEQUENCE_FORMATTER_PARAMETERS = (
([bool, int, str], (1.2, 3.4, 5.6), (True, 3, "5.6")),
([bool, int, str], [1.2, 3.4, 5.6], [True, 3, "5.6"]),
([bool, int, str, float], (1.2, 3.4, 5.6), IndexError),
)
LOOSE_SEQUENCE_FORMATTER_PARAMETERS = SEQUENCE_FORMATTER_PARAMETERS + (
([bool, int], (1.2, 3.4, 5.6), (True, 3, 5.6)),
)
@pytest.mark.parametrize(
"formatters, value, expected", LOOSE_SEQUENCE_FORMATTER_PARAMETERS
)
def test_combine_argument_formatters(formatters, value, expected):
list_formatter = eth_utils.combine_argument_formatters(*formatters)
if isinstance(expected, type) and issubclass(expected, Exception):
with pytest.raises(expected):
list_formatter(value)
else:
assert list_formatter(value) == expected
STRICT_SEQUENCE_FORMATTER_PARAMETERS = SEQUENCE_FORMATTER_PARAMETERS + (
([bool, int], (1.2, 3.4, 5.6), IndexError),
)
@pytest.mark.parametrize(
"formatters, value, expected", STRICT_SEQUENCE_FORMATTER_PARAMETERS
)
def test_apply_formatters_to_sequence_curried(formatters, value, expected):
list_formatter = apply_formatters_to_sequence(formatters)
if isinstance(expected, type) and issubclass(expected, Exception):
with pytest.raises(expected):
list_formatter(value)
else:
assert list_formatter(value) == expected
@pytest.mark.parametrize(
"formatters, value, expected", STRICT_SEQUENCE_FORMATTER_PARAMETERS
)
def test_apply_formatters_to_sequence(formatters, value, expected):
if isinstance(expected, type) and issubclass(expected, Exception):
with pytest.raises(expected):
eth_utils.apply_formatters_to_sequence(formatters, value)
else:
assert eth_utils.apply_formatters_to_sequence(formatters, value) == expected
@pytest.mark.parametrize(
"formatter, value, expected",
((int, [1.2, 3.4, 5.6], [1, 3, 5]), (int, (1.2, 3.4, 5.6), (1, 3, 5))),
)
def test_apply_formatter_to_array(formatter, value, expected):
assert eth_utils.apply_formatter_to_array(formatter, value) == expected
mapper = apply_formatter_to_array(formatter)
assert mapper(value) == expected
| 32.241546
| 86
| 0.67246
|
482acc1c0f224d7fa1f52025ce3e039426053d63
| 47,389
|
py
|
Python
|
Lib/test/test_urllib.py
|
deadsnakes/python3.1
|
88d77610a7873c5161bfc15cd69557fc7697b1a3
|
[
"PSF-2.0"
] | null | null | null |
Lib/test/test_urllib.py
|
deadsnakes/python3.1
|
88d77610a7873c5161bfc15cd69557fc7697b1a3
|
[
"PSF-2.0"
] | null | null | null |
Lib/test/test_urllib.py
|
deadsnakes/python3.1
|
88d77610a7873c5161bfc15cd69557fc7697b1a3
|
[
"PSF-2.0"
] | null | null | null |
"""Regression tests for urllib"""
import urllib.parse
import urllib.request
import urllib.error
import http.client
import email.message
import io
import unittest
from test import support
import os
import sys
import tempfile
import warnings
import collections
def hexescape(char):
"""Escape char as RFC 2396 specifies"""
hex_repr = hex(ord(char))[2:].upper()
if len(hex_repr) == 1:
hex_repr = "0%s" % hex_repr
return "%" + hex_repr
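# Worked example of the helper above (illustrative, not part of the test suite):
#   hexescape(' ')   ->  '%20'   (hex(ord(' '))[2:].upper() == '20')
#   hexescape('\n')  ->  '%0A'   (a single hex digit is zero-padded)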
# Shortcut for testing FancyURLopener
_urlopener = None
def urlopen(url, data=None, proxies=None):
"""urlopen(url [, data]) -> open file-like object"""
global _urlopener
if proxies is not None:
opener = urllib.request.FancyURLopener(proxies=proxies)
elif not _urlopener:
opener = urllib.request.FancyURLopener()
_urlopener = opener
else:
opener = _urlopener
if data is None:
return opener.open(url)
else:
return opener.open(url, data)
class urlopen_FileTests(unittest.TestCase):
"""Test urlopen() opening a temporary file.
Try to test as much functionality as possible so as to cut down on reliance
on connecting to the Net for testing.
"""
def setUp(self):
# Create a temp file to use for testing
self.text = bytes("test_urllib: %s\n" % self.__class__.__name__,
"ascii")
f = open(support.TESTFN, 'wb')
try:
f.write(self.text)
finally:
f.close()
self.pathname = support.TESTFN
self.returned_obj = urlopen("file:%s" % self.pathname)
def tearDown(self):
"""Shut down the open object"""
self.returned_obj.close()
os.remove(support.TESTFN)
def test_interface(self):
# Make sure object returned by urlopen() has the specified methods
for attr in ("read", "readline", "readlines", "fileno",
"close", "info", "geturl", "getcode", "__iter__"):
self.assertTrue(hasattr(self.returned_obj, attr),
"object returned by urlopen() lacks %s attribute" %
attr)
def test_read(self):
self.assertEqual(self.text, self.returned_obj.read())
def test_readline(self):
self.assertEqual(self.text, self.returned_obj.readline())
self.assertEqual(b'', self.returned_obj.readline(),
"calling readline() after exhausting the file did not"
" return an empty string")
def test_readlines(self):
lines_list = self.returned_obj.readlines()
self.assertEqual(len(lines_list), 1,
"readlines() returned the wrong number of lines")
self.assertEqual(lines_list[0], self.text,
"readlines() returned improper text")
def test_fileno(self):
file_num = self.returned_obj.fileno()
self.assertTrue(isinstance(file_num, int),
"fileno() did not return an int")
self.assertEqual(os.read(file_num, len(self.text)), self.text,
"Reading on the file descriptor returned by fileno() "
"did not return the expected text")
def test_close(self):
        # Test close() by calling it here and then having it be called again
# by the tearDown() method for the test
self.returned_obj.close()
def test_info(self):
self.assertTrue(isinstance(self.returned_obj.info(), email.message.Message))
def test_geturl(self):
self.assertEqual(self.returned_obj.geturl(), self.pathname)
def test_getcode(self):
self.assertIsNone(self.returned_obj.getcode())
def test_iter(self):
# Test iterator
# Don't need to count number of iterations since test would fail the
# instant it returned anything beyond the first line from the
# comparison
for line in self.returned_obj.__iter__():
self.assertEqual(line, self.text)
class ProxyTests(unittest.TestCase):
def setUp(self):
# Records changes to env vars
self.env = support.EnvironmentVarGuard()
# Delete all proxy related env vars
for k in list(os.environ):
if 'proxy' in k.lower():
self.env.unset(k)
def tearDown(self):
# Restore all proxy related env vars
self.env.__exit__()
del self.env
def test_getproxies_environment_keep_no_proxies(self):
self.env.set('NO_PROXY', 'localhost')
proxies = urllib.request.getproxies_environment()
        # getproxies_environment uses lowercased, truncated (no '_proxy') keys
self.assertEqual('localhost', proxies['no'])
class urlopen_HttpTests(unittest.TestCase):
"""Test urlopen() opening a fake http connection."""
def fakehttp(self, fakedata):
class FakeSocket(io.BytesIO):
def sendall(self, str): pass
def makefile(self, *args, **kwds):
return self
def read(self, amt=None):
if self.closed: return b""
return io.BytesIO.read(self, amt)
def readline(self, length=None):
if self.closed: return b""
return io.BytesIO.readline(self, length)
class FakeHTTPConnection(http.client.HTTPConnection):
def connect(self):
self.sock = FakeSocket(fakedata)
self._connection_class = http.client.HTTPConnection
http.client.HTTPConnection = FakeHTTPConnection
def unfakehttp(self):
http.client.HTTPConnection = self._connection_class
def test_read(self):
self.fakehttp(b"Hello!")
try:
fp = urlopen("http://python.org/")
self.assertEqual(fp.readline(), b"Hello!")
self.assertEqual(fp.readline(), b"")
self.assertEqual(fp.geturl(), 'http://python.org/')
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
def test_url_fragment(self):
# Issue #11703: geturl() omits fragments in the original URL.
url = 'http://docs.python.org/library/urllib.html#OK'
self.fakehttp(b'Hello!')
try:
fp = urllib.request.urlopen(url)
self.assertEqual(fp.geturl(), url)
finally:
self.unfakehttp()
def test_read_bogus(self):
# urlopen() should raise IOError for many error codes.
self.fakehttp(b'''HTTP/1.1 401 Authentication Required
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Type: text/html; charset=iso-8859-1
''')
try:
self.assertRaises(IOError, urlopen, "http://python.org/")
finally:
self.unfakehttp()
def test_invalid_redirect(self):
# urlopen() should raise IOError for many error codes.
self.fakehttp(b'''HTTP/1.1 302 Found
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Location: file://guidocomputer.athome.com:/python/license
Connection: close
Content-Type: text/html; charset=iso-8859-1
''')
try:
self.assertRaises(urllib.error.HTTPError, urlopen,
"http://python.org/")
finally:
self.unfakehttp()
def test_empty_socket(self):
# urlopen() raises IOError if the underlying socket does not send any
# data. (#1680230)
self.fakehttp(b'')
try:
self.assertRaises(IOError, urlopen, "http://something")
finally:
self.unfakehttp()
def test_userpass_inurl(self):
self.fakehttp(b"Hello!")
try:
fp = urlopen("http://user:pass@python.org/")
self.assertEqual(fp.readline(), b"Hello!")
self.assertEqual(fp.readline(), b"")
self.assertEqual(fp.geturl(), 'http://user:pass@python.org/')
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
class urlretrieve_FileTests(unittest.TestCase):
"""Test urllib.urlretrieve() on local files"""
def setUp(self):
# Create a list of temporary files. Each item in the list is a file
# name (absolute path or relative to the current working directory).
# All files in this list will be deleted in the tearDown method. Note,
        # this only helps to make sure temporary files get deleted, but it
# does nothing about trying to close files that may still be open. It
# is the responsibility of the developer to properly close files even
# when exceptional conditions occur.
self.tempFiles = []
# Create a temporary file.
self.registerFileForCleanUp(support.TESTFN)
self.text = b'testing urllib.urlretrieve'
try:
FILE = open(support.TESTFN, 'wb')
FILE.write(self.text)
FILE.close()
finally:
try: FILE.close()
except: pass
def tearDown(self):
# Delete the temporary files.
for each in self.tempFiles:
try: os.remove(each)
except: pass
def constructLocalFileUrl(self, filePath):
return "file://%s" % urllib.request.pathname2url(
os.path.abspath(filePath))
def createNewTempFile(self, data=b""):
"""Creates a new temporary file containing the specified data,
registers the file for deletion during the test fixture tear down, and
returns the absolute path of the file."""
newFd, newFilePath = tempfile.mkstemp()
try:
self.registerFileForCleanUp(newFilePath)
newFile = os.fdopen(newFd, "wb")
newFile.write(data)
newFile.close()
finally:
try: newFile.close()
except: pass
return newFilePath
def registerFileForCleanUp(self, fileName):
self.tempFiles.append(fileName)
def test_basic(self):
# Make sure that a local file just gets its own location returned and
# a headers value is returned.
result = urllib.request.urlretrieve("file:%s" % support.TESTFN)
self.assertEqual(result[0], support.TESTFN)
        self.assertTrue(isinstance(result[1], email.message.Message),
                        "did not get an email.message.Message instance "
"as second returned value")
def test_copy(self):
# Test that setting the filename argument works.
second_temp = "%s.2" % support.TESTFN
self.registerFileForCleanUp(second_temp)
result = urllib.request.urlretrieve(self.constructLocalFileUrl(
support.TESTFN), second_temp)
self.assertEqual(second_temp, result[0])
self.assertTrue(os.path.exists(second_temp), "copy of the file was not "
"made")
FILE = open(second_temp, 'rb')
try:
text = FILE.read()
FILE.close()
finally:
try: FILE.close()
except: pass
self.assertEqual(self.text, text)
def test_reporthook(self):
# Make sure that the reporthook works.
def hooktester(count, block_size, total_size, count_holder=[0]):
self.assertTrue(isinstance(count, int))
self.assertTrue(isinstance(block_size, int))
self.assertTrue(isinstance(total_size, int))
self.assertEqual(count, count_holder[0])
count_holder[0] = count_holder[0] + 1
second_temp = "%s.2" % support.TESTFN
self.registerFileForCleanUp(second_temp)
urllib.request.urlretrieve(
self.constructLocalFileUrl(support.TESTFN),
second_temp, hooktester)
def test_reporthook_0_bytes(self):
# Test on zero length file. Should call reporthook only 1 time.
report = []
def hooktester(count, block_size, total_size, _report=report):
_report.append((count, block_size, total_size))
srcFileName = self.createNewTempFile()
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 1)
self.assertEqual(report[0][2], 0)
def test_reporthook_5_bytes(self):
# Test on 5 byte file. Should call reporthook only 2 times (once when
# the "network connection" is established and once when the block is
# read). Since the block size is 8192 bytes, only one block read is
# required to read the entire file.
report = []
def hooktester(count, block_size, total_size, _report=report):
_report.append((count, block_size, total_size))
srcFileName = self.createNewTempFile(b"x" * 5)
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 2)
self.assertEqual(report[0][1], 8192)
self.assertEqual(report[0][2], 5)
def test_reporthook_8193_bytes(self):
# Test on 8193 byte file. Should call reporthook only 3 times (once
# when the "network connection" is established, once for the next 8192
# bytes, and once for the last byte).
report = []
def hooktester(count, block_size, total_size, _report=report):
_report.append((count, block_size, total_size))
srcFileName = self.createNewTempFile(b"x" * 8193)
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 3)
self.assertEqual(report[0][1], 8192)
self.assertEqual(report[0][2], 8193)
class QuotingTests(unittest.TestCase):
"""Tests for urllib.quote() and urllib.quote_plus()
According to RFC 2396 (Uniform Resource Identifiers), to escape a
character you write it as '%' + <2 character US-ASCII hex value>.
The Python code of ``'%' + hex(ord(<character>))[2:]`` escapes a
character properly. Case does not matter on the hex letters.
The various character sets specified are:
Reserved characters : ";/?:@&=+$,"
Have special meaning in URIs and must be escaped if not being used for
their special meaning
Data characters : letters, digits, and "-_.!~*'()"
Unreserved and do not need to be escaped; can be, though, if desired
Control characters : 0x00 - 0x1F, 0x7F
Have no use in URIs so must be escaped
space : 0x20
Must be escaped
Delimiters : '<>#%"'
Must be escaped
Unwise : "{}|\^[]`"
Must be escaped
"""
def test_never_quote(self):
        # Make sure quote() does not quote letters, digits, and "_.-"
do_not_quote = '' .join(["ABCDEFGHIJKLMNOPQRSTUVWXYZ",
"abcdefghijklmnopqrstuvwxyz",
"0123456789",
"_.-"])
result = urllib.parse.quote(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote(): %r != %r" % (do_not_quote, result))
result = urllib.parse.quote_plus(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote_plus(): %r != %r" % (do_not_quote, result))
def test_default_safe(self):
# Test '/' is default value for 'safe' parameter
self.assertEqual(urllib.parse.quote.__defaults__[0], '/')
def test_safe(self):
# Test setting 'safe' parameter does what it should do
quote_by_default = "<>"
result = urllib.parse.quote(quote_by_default, safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote(): %r != %r" % (quote_by_default, result))
result = urllib.parse.quote_plus(quote_by_default,
safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote_plus(): %r != %r" %
(quote_by_default, result))
# Safe expressed as bytes rather than str
result = urllib.parse.quote(quote_by_default, safe=b"<>")
self.assertEqual(quote_by_default, result,
"using quote(): %r != %r" % (quote_by_default, result))
# "Safe" non-ASCII characters should have no effect
# (Since URIs are not allowed to have non-ASCII characters)
result = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="\xfc")
expect = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="")
self.assertEqual(expect, result,
"using quote(): %r != %r" %
(expect, result))
# Same as above, but using a bytes rather than str
result = urllib.parse.quote("a\xfcb", encoding="latin-1", safe=b"\xfc")
expect = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="")
self.assertEqual(expect, result,
"using quote(): %r != %r" %
(expect, result))
def test_default_quoting(self):
# Make sure all characters that should be quoted are by default sans
# space (separate test for that).
should_quote = [chr(num) for num in range(32)] # For 0x00 - 0x1F
should_quote.append('<>#%"{}|\^[]`')
should_quote.append(chr(127)) # For 0x7F
should_quote = ''.join(should_quote)
for char in should_quote:
result = urllib.parse.quote(char)
self.assertEqual(hexescape(char), result,
"using quote(): "
"%s should be escaped to %s, not %s" %
(char, hexescape(char), result))
result = urllib.parse.quote_plus(char)
            self.assertEqual(hexescape(char), result,
                             "using quote_plus(): "
                             "%s should be escaped to %s, not %s" %
(char, hexescape(char), result))
del should_quote
partial_quote = "ab[]cd"
expected = "ab%5B%5Dcd"
result = urllib.parse.quote(partial_quote)
self.assertEqual(expected, result,
"using quote(): %r != %r" % (expected, result))
        result = urllib.parse.quote_plus(partial_quote)
        self.assertEqual(expected, result,
                         "using quote_plus(): %r != %r" % (expected, result))
def test_quoting_space(self):
# Make sure quote() and quote_plus() handle spaces as specified in
# their unique way
result = urllib.parse.quote(' ')
self.assertEqual(result, hexescape(' '),
"using quote(): %r != %r" % (result, hexescape(' ')))
result = urllib.parse.quote_plus(' ')
self.assertEqual(result, '+',
"using quote_plus(): %r != +" % result)
given = "a b cd e f"
expect = given.replace(' ', hexescape(' '))
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
expect = given.replace(' ', '+')
result = urllib.parse.quote_plus(given)
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
def test_quoting_plus(self):
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma'),
'alpha%2Bbeta+gamma')
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', '+'),
'alpha+beta+gamma')
# Test with bytes
self.assertEqual(urllib.parse.quote_plus(b'alpha+beta gamma'),
'alpha%2Bbeta+gamma')
# Test with safe bytes
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', b'+'),
'alpha+beta+gamma')
def test_quote_bytes(self):
# Bytes should quote directly to percent-encoded values
given = b"\xa2\xd8ab\xff"
expect = "%A2%D8ab%FF"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Encoding argument should raise type error on bytes input
self.assertRaises(TypeError, urllib.parse.quote, given,
encoding="latin-1")
# quote_from_bytes should work the same
result = urllib.parse.quote_from_bytes(given)
self.assertEqual(expect, result,
"using quote_from_bytes(): %r != %r"
% (expect, result))
def test_quote_with_unicode(self):
# Characters in Latin-1 range, encoded by default in UTF-8
given = "\xa2\xd8ab\xff"
expect = "%C2%A2%C3%98ab%C3%BF"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in Latin-1 range, encoded by with None (default)
result = urllib.parse.quote(given, encoding=None, errors=None)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in Latin-1 range, encoded with Latin-1
given = "\xa2\xd8ab\xff"
expect = "%A2%D8ab%FF"
result = urllib.parse.quote(given, encoding="latin-1")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, encoded by default in UTF-8
given = "\u6f22\u5b57" # "Kanji"
expect = "%E6%BC%A2%E5%AD%97"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, encoded with Latin-1
given = "\u6f22\u5b57"
self.assertRaises(UnicodeEncodeError, urllib.parse.quote, given,
encoding="latin-1")
# Characters in BMP, encoded with Latin-1, with replace error handling
given = "\u6f22\u5b57"
expect = "%3F%3F" # "??"
result = urllib.parse.quote(given, encoding="latin-1",
errors="replace")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, Latin-1, with xmlcharref error handling
given = "\u6f22\u5b57"
expect = "%26%2328450%3B%26%2323383%3B" # "漢字"
result = urllib.parse.quote(given, encoding="latin-1",
errors="xmlcharrefreplace")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
def test_quote_plus_with_unicode(self):
# Encoding (latin-1) test for quote_plus
given = "\xa2\xd8 \xff"
expect = "%A2%D8+%FF"
result = urllib.parse.quote_plus(given, encoding="latin-1")
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
# Errors test for quote_plus
given = "ab\u6f22\u5b57 cd"
expect = "ab%3F%3F+cd"
result = urllib.parse.quote_plus(given, encoding="latin-1",
errors="replace")
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
class UnquotingTests(unittest.TestCase):
"""Tests for unquote() and unquote_plus()
See the doc string for quoting_Tests for details on quoting and such.
"""
def test_unquoting(self):
# Make sure unquoting of all ASCII values works
escape_list = []
for num in range(128):
given = hexescape(chr(num))
expect = chr(num)
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" %
(expect, result))
escape_list.append(given)
escape_string = ''.join(escape_list)
del escape_list
result = urllib.parse.unquote(escape_string)
self.assertEqual(result.count('%'), 1,
"using unquote(): not all characters escaped: "
"%s" % result)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, None)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, ())
with warnings.catch_warnings():
warnings.simplefilter('ignore', BytesWarning)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, b'')
def test_unquoting_badpercent(self):
# Test unquoting on bad percent-escapes
given = '%xab'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
given = '%x'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
given = '%'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
# unquote_to_bytes
given = '%xab'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
given = '%x'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
given = '%'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, None)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, ())
def test_unquoting_mixed_case(self):
# Test unquoting on mixed-case hex digits in the percent-escapes
given = '%Ab%eA'
expect = b'\xab\xea'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
def test_unquoting_parts(self):
# Make sure unquoting works when have non-quoted characters
# interspersed
given = 'ab%sd' % hexescape('c')
expect = "abcd"
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" % (expect, result))
def test_unquoting_plus(self):
# Test difference between unquote() and unquote_plus()
given = "are+there+spaces..."
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
expect = given.replace('+', ' ')
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" % (expect, result))
def test_unquote_to_bytes(self):
given = 'br%C3%BCckner_sapporo_20050930.doc'
expect = b'br\xc3\xbcckner_sapporo_20050930.doc'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test on a string with unescaped non-ASCII characters
# (Technically an invalid URI; expect those characters to be UTF-8
# encoded).
result = urllib.parse.unquote_to_bytes("\u6f22%C3%BC")
expect = b'\xe6\xbc\xa2\xc3\xbc' # UTF-8 for "\u6f22\u00fc"
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test with a bytes as input
given = b'%A2%D8ab%FF'
expect = b'\xa2\xd8ab\xff'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test with a bytes as input, with unescaped non-ASCII bytes
# (Technically an invalid URI; expect those bytes to be preserved)
given = b'%A2\xd8ab%FF'
expect = b'\xa2\xd8ab\xff'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
def test_unquote_with_unicode(self):
# Characters in the Latin-1 range, encoded with UTF-8
given = 'br%C3%BCckner_sapporo_20050930.doc'
expect = 'br\u00fcckner_sapporo_20050930.doc'
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in the Latin-1 range, encoded with None (default)
result = urllib.parse.unquote(given, encoding=None, errors=None)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in the Latin-1 range, encoded with Latin-1
result = urllib.parse.unquote('br%FCckner_sapporo_20050930.doc',
encoding="latin-1")
expect = 'br\u00fcckner_sapporo_20050930.doc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in BMP, encoded with UTF-8
given = "%E6%BC%A2%E5%AD%97"
expect = "\u6f22\u5b57" # "Kanji"
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence
given = "%F3%B1"
expect = "\ufffd" # Replacement character
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence, replace errors
result = urllib.parse.unquote(given, errors="replace")
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence, ignoring errors
given = "%F3%B1"
expect = ""
result = urllib.parse.unquote(given, errors="ignore")
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# A mix of non-ASCII and percent-encoded characters, UTF-8
result = urllib.parse.unquote("\u6f22%C3%BC")
expect = '\u6f22\u00fc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# A mix of non-ASCII and percent-encoded characters, Latin-1
# (Note, the string contains non-Latin-1-representable characters)
result = urllib.parse.unquote("\u6f22%FC", encoding="latin-1")
expect = '\u6f22\u00fc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
class urlencode_Tests(unittest.TestCase):
"""Tests for urlencode()"""
def help_inputtype(self, given, test_type):
"""Helper method for testing different input types.
'given' must lead to only the pairs:
* 1st, 1
* 2nd, 2
* 3rd, 3
Test cannot assume anything about order. Docs make no guarantee and
have possible dictionary input.
"""
expect_somewhere = ["1st=1", "2nd=2", "3rd=3"]
result = urllib.parse.urlencode(given)
for expected in expect_somewhere:
self.assertIn(expected, result,
"testing %s: %s not found in %s" %
(test_type, expected, result))
self.assertEqual(result.count('&'), 2,
"testing %s: expected 2 '&'s; got %s" %
(test_type, result.count('&')))
amp_location = result.index('&')
on_amp_left = result[amp_location - 1]
on_amp_right = result[amp_location + 1]
self.assertTrue(on_amp_left.isdigit() and on_amp_right.isdigit(),
"testing %s: '&' not located in proper place in %s" %
(test_type, result))
self.assertEqual(len(result), (5 * 3) + 2, #5 chars per thing and amps
"testing %s: "
"unexpected number of characters: %s != %s" %
(test_type, len(result), (5 * 3) + 2))
def test_using_mapping(self):
# Test passing in a mapping object as an argument.
self.help_inputtype({"1st":'1', "2nd":'2', "3rd":'3'},
"using dict as input type")
def test_using_sequence(self):
# Test passing in a sequence of two-item sequences as an argument.
self.help_inputtype([('1st', '1'), ('2nd', '2'), ('3rd', '3')],
"using sequence of two-item tuples as input")
def test_quoting(self):
# Make sure keys and values are quoted using quote_plus()
given = {"&":"="}
expect = "%s=%s" % (hexescape('&'), hexescape('='))
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
given = {"key name":"A bunch of pluses"}
expect = "key+name=A+bunch+of+pluses"
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
def test_doseq(self):
# Test that passing True for 'doseq' parameter works correctly
given = {'sequence':['1', '2', '3']}
expect = "sequence=%s" % urllib.parse.quote_plus(str(['1', '2', '3']))
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
result = urllib.parse.urlencode(given, True)
for value in given["sequence"]:
expect = "sequence=%s" % value
self.assertIn(expect, result)
self.assertEqual(result.count('&'), 2,
"Expected 2 '&'s, got %s" % result.count('&'))
def test_empty_sequence(self):
self.assertEqual("", urllib.parse.urlencode({}))
self.assertEqual("", urllib.parse.urlencode([]))
def test_nonstring_values(self):
self.assertEqual("a=1", urllib.parse.urlencode({"a": 1}))
self.assertEqual("a=None", urllib.parse.urlencode({"a": None}))
def test_nonstring_seq_values(self):
self.assertEqual("a=1&a=2", urllib.parse.urlencode({"a": [1, 2]}, True))
self.assertEqual("a=None&a=a",
urllib.parse.urlencode({"a": [None, "a"]}, True))
data = collections.OrderedDict([("a", 1), ("b", 1)])
self.assertEqual("a=a&a=b",
urllib.parse.urlencode({"a": data}, True))
def test_urlencode_encoding(self):
        # ASCII encoding. Expect %3F with errors="replace"
given = (('\u00a0', '\u00c1'),)
expect = '%3F=%3F'
result = urllib.parse.urlencode(given, encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# Default is UTF-8 encoding.
given = (('\u00a0', '\u00c1'),)
expect = '%C2%A0=%C3%81'
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
# Latin-1 encoding.
given = (('\u00a0', '\u00c1'),)
expect = '%A0=%C1'
result = urllib.parse.urlencode(given, encoding="latin-1")
self.assertEqual(expect, result)
def test_urlencode_encoding_doseq(self):
# ASCII encoding. Expect %3F with errors="replace"
given = (('\u00a0', '\u00c1'),)
expect = '%3F=%3F'
result = urllib.parse.urlencode(given, doseq=True,
encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# ASCII Encoding. On a sequence of values.
given = (("\u00a0", (1, "\u00c1")),)
expect = '%3F=1&%3F=%3F'
result = urllib.parse.urlencode(given, True,
encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# Utf-8
given = (("\u00a0", "\u00c1"),)
expect = '%C2%A0=%C3%81'
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
given = (("\u00a0", (42, "\u00c1")),)
expect = '%C2%A0=42&%C2%A0=%C3%81'
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
# latin-1
given = (("\u00a0", "\u00c1"),)
expect = '%A0=%C1'
result = urllib.parse.urlencode(given, True, encoding="latin-1")
self.assertEqual(expect, result)
given = (("\u00a0", (42, "\u00c1")),)
expect = '%A0=42&%A0=%C1'
result = urllib.parse.urlencode(given, True, encoding="latin-1")
self.assertEqual(expect, result)
def test_urlencode_bytes(self):
given = ((b'\xa0\x24', b'\xc1\x24'),)
expect = '%A0%24=%C1%24'
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
# Sequence of values
given = ((b'\xa0\x24', (42, b'\xc1\x24')),)
expect = '%A0%24=42&%A0%24=%C1%24'
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
def test_urlencode_encoding_safe_parameter(self):
# Send '$' (\x24) as safe character
# Default utf-8 encoding
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, safe=":$")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, doseq=True, safe=":$")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
# Safe parameter in sequence
given = ((b'\xa0\x24', (b'\xc1\x24', 0xd, 42)),)
expect = '%A0$=%C1$&%A0$=13&%A0$=42'
result = urllib.parse.urlencode(given, True, safe=":$")
self.assertEqual(expect, result)
# Test all above in latin-1 encoding
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, safe=":$",
encoding="latin-1")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
given = ((b'\xa0\x24', b'\xc1\x24'),)
expect = '%A0$=%C1$'
result = urllib.parse.urlencode(given, doseq=True, safe=":$",
encoding="latin-1")
given = ((b'\xa0\x24', (b'\xc1\x24', 0xd, 42)),)
expect = '%A0$=%C1$&%A0$=13&%A0$=42'
result = urllib.parse.urlencode(given, True, safe=":$",
encoding="latin-1")
self.assertEqual(expect, result)
class Pathname_Tests(unittest.TestCase):
"""Test pathname2url() and url2pathname()"""
def test_basic(self):
# Make sure simple tests pass
expected_path = os.path.join("parts", "of", "a", "path")
expected_url = "parts/of/a/path"
result = urllib.request.pathname2url(expected_path)
self.assertEqual(expected_url, result,
"pathname2url() failed; %s != %s" %
(result, expected_url))
result = urllib.request.url2pathname(expected_url)
self.assertEqual(expected_path, result,
"url2pathame() failed; %s != %s" %
(result, expected_path))
def test_quoting(self):
# Test automatic quoting and unquoting works for pathname2url() and
# url2pathname() respectively
given = os.path.join("needs", "quot=ing", "here")
expect = "needs/%s/here" % urllib.parse.quote("quot=ing")
result = urllib.request.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
expect = given
result = urllib.request.url2pathname(result)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
given = os.path.join("make sure", "using_quote")
expect = "%s/using_quote" % urllib.parse.quote("make sure")
result = urllib.request.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
given = "make+sure/using_unquote"
expect = os.path.join("make+sure", "using_unquote")
result = urllib.request.url2pathname(given)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
@unittest.skipUnless(sys.platform == 'win32',
'test specific to the urllib.request.url2pathname function.')
def test_ntpath(self):
given = ('/C:/', '///C:/', '/C|//')
expect = 'C:\\'
for url in given:
result = urllib.request.url2pathname(url)
self.assertEqual(expect, result,
'urllib.request.url2pathname() failed; %s != %s' %
(expect, result))
given = '///C|/path'
expect = 'C:\\path'
result = urllib.request.url2pathname(given)
self.assertEqual(expect, result,
'urllib.request.url2pathname() failed; %s != %s' %
(expect, result))
class Utility_Tests(unittest.TestCase):
"""Testcase to test the various utility functions in the urllib."""
def test_splitpasswd(self):
"""Some of password examples are not sensible, but it is added to
confirming to RFC2617 and addressing issue4675.
"""
self.assertEqual(('user', 'ab'),urllib.parse.splitpasswd('user:ab'))
self.assertEqual(('user', 'a\nb'),urllib.parse.splitpasswd('user:a\nb'))
self.assertEqual(('user', 'a\tb'),urllib.parse.splitpasswd('user:a\tb'))
self.assertEqual(('user', 'a\rb'),urllib.parse.splitpasswd('user:a\rb'))
self.assertEqual(('user', 'a\fb'),urllib.parse.splitpasswd('user:a\fb'))
self.assertEqual(('user', 'a\vb'),urllib.parse.splitpasswd('user:a\vb'))
self.assertEqual(('user', 'a:b'),urllib.parse.splitpasswd('user:a:b'))
class URLopener_Tests(unittest.TestCase):
"""Testcase to test the open method of URLopener class."""
def test_quoted_open(self):
class DummyURLopener(urllib.request.URLopener):
def open_spam(self, url):
return url
self.assertEqual(DummyURLopener().open(
'spam://example/ /'),'//example/%20/')
# test the safe characters are not quoted by urlopen
self.assertEqual(DummyURLopener().open(
"spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/"),
"//c:|windows%/:=&?~#+!$,;'@()*[]|/path/")
# The FTP wrapper tests below are commented out.
# It is not clear why they keep failing on Windows and SPARC; everywhere
# else they pass, but on those machines sometimes one test fails and
# sometimes another. On Linux the tests run fine.
# If anybody has one of the problematic environments, please help!
# . Facundo
#
# def server(evt):
# import socket, time
# serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# serv.settimeout(3)
# serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# serv.bind(("", 9093))
# serv.listen(5)
# try:
# conn, addr = serv.accept()
# conn.send("1 Hola mundo\n")
# cantdata = 0
# while cantdata < 13:
# data = conn.recv(13-cantdata)
# cantdata += len(data)
# time.sleep(.3)
# conn.send("2 No more lines\n")
# conn.close()
# except socket.timeout:
# pass
# finally:
# serv.close()
# evt.set()
#
# class FTPWrapperTests(unittest.TestCase):
#
# def setUp(self):
# import ftplib, time, threading
# ftplib.FTP.port = 9093
# self.evt = threading.Event()
# threading.Thread(target=server, args=(self.evt,)).start()
# time.sleep(.1)
#
# def tearDown(self):
# self.evt.wait()
#
# def testBasic(self):
# # connects
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# ftp.close()
#
# def testTimeoutNone(self):
# # global default timeout is ignored
# import socket
# self.assertTrue(socket.getdefaulttimeout() is None)
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutDefault(self):
# # global default timeout is used
# import socket
# self.assertTrue(socket.getdefaulttimeout() is None)
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutValue(self):
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [],
# timeout=30)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
def test_main():
support.run_unittest(
urlopen_FileTests,
urlopen_HttpTests,
urlretrieve_FileTests,
ProxyTests,
QuotingTests,
UnquotingTests,
urlencode_Tests,
Pathname_Tests,
Utility_Tests,
URLopener_Tests,
#FTPWrapperTests,
)
if __name__ == '__main__':
test_main()
| 41.24369 | 91 | 0.571019 |
b06db2ac41f7767ec9c2757ad02626dc6527f0fa | 1,173 | py | Python | setup.py | martinfleis/contextily | fc00861c9756821f62de27bb06ef6771abc517d6 | ["BSD-3-Clause"] | 182 | 2020-04-08T15:56:50.000Z | 2022-03-24T15:02:19.000Z | setup.py | martinfleis/contextily | fc00861c9756821f62de27bb06ef6771abc517d6 | ["BSD-3-Clause"] | 66 | 2020-04-09T06:23:50.000Z | 2022-02-20T19:04:38.000Z | setup.py | martinfleis/contextily | fc00861c9756821f62de27bb06ef6771abc517d6 | ["BSD-3-Clause"] | 28 | 2020-04-08T12:53:44.000Z | 2021-12-18T01:05:58.000Z |
from setuptools import setup
# Dependencies.
with open("requirements.txt") as f:
tests_require = f.readlines()
install_requires = [t.strip() for t in tests_require]
with open("README.md") as f:
long_description = f.read()
setup(
name="contextily",
version="1.2.0",
description="Context geo-tiles in Python",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/darribas/contextily",
author="Dani Arribas-Bel",
author_email="daniel.arribas.bel@gmail.com",
license="3-Clause BSD",
packages=["contextily"],
package_data={"": ["requirements.txt"]},
classifiers=[
"License :: OSI Approved :: BSD License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: Implementation :: CPython",
"Framework :: Matplotlib",
],
python_requires=">=3.6",
install_requires=install_requires,
zip_safe=False,
)
| 31.702703 | 70 | 0.647059 |
02ae70349d69e9c130d58228ed8fc722ea65d891 | 687 | py | Python | setup.py | nowindxdw/dingdingwebhook | 305f2faf6bf003e5fc19cdadd3ddef9723335fd9 | ["MIT"] | null | null | null | setup.py | nowindxdw/dingdingwebhook | 305f2faf6bf003e5fc19cdadd3ddef9723335fd9 | ["MIT"] | null | null | null | setup.py | nowindxdw/dingdingwebhook | 305f2faf6bf003e5fc19cdadd3ddef9723335fd9 | ["MIT"] | 1 | 2021-03-10T07:28:16.000Z | 2021-03-10T07:28:16.000Z |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="dingding_webhook", # Replace with your own username
version="0.0.3",
author="nowindxdw",
author_email="nowindxdw@126.com",
description="A simple api for dingding robot",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/nowindxdw/dingdingwebhook",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 2",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=2.7',
)
| 31.227273 | 61 | 0.6754 |
6ab5a8199d25487b6699dffa62b6497b8ebdcd83 | 20,051 | py | Python | configs/example/read_config.py | taomiao/gem5 | 4effe34f94b599add133357473e1b120b54719ab | ["BSD-3-Clause"] | 1 | 2021-08-31T13:49:29.000Z | 2021-08-31T13:49:29.000Z | configs/example/read_config.py | taomiao/gem5 | 4effe34f94b599add133357473e1b120b54719ab | ["BSD-3-Clause"] | 1 | 2019-06-10T21:33:54.000Z | 2019-06-10T21:33:54.000Z | configs/example/read_config.py | taomiao/gem5 | 4effe34f94b599add133357473e1b120b54719ab | ["BSD-3-Clause"] | 1 | 2019-04-13T20:24:43.000Z | 2019-04-13T20:24:43.000Z |
# Copyright (c) 2014 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author: Andrew Bardsley
# This script allows .ini and .json system config files generated from a
# previous gem5 run to be read in and instantiated.
#
# This may be useful as a way of allowing variant run scripts (say,
# with more complicated than usual checkpointing/stats dumping/
# simulation control) to read pre-described systems from config scripts
# with better system-description capabilities. Splitting scripts
# between system construction and run control may allow better
# debugging.
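#
# A minimal invocation sketch (the gem5 binary path and the config file
# locations below are assumptions, not something this script prescribes):
#
#   ./build/ARM/gem5.opt configs/example/read_config.py m5out/config.ini
#   ./build/ARM/gem5.opt configs/example/read_config.py \
#       --checkpoint-dir m5out/cpt.1000000 m5out/config.json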
from __future__ import print_function
from __future__ import absolute_import
import argparse
import ConfigParser
import inspect
import json
import re
import sys
import m5
import m5.ticks as ticks
sim_object_classes_by_name = {
cls.__name__: cls for cls in m5.objects.__dict__.itervalues()
if inspect.isclass(cls) and issubclass(cls, m5.objects.SimObject) }
# Add some parsing functions to Param classes to handle reading in .ini
# file elements. This could be moved into src/python/m5/params.py if
# reading .ini files from Python proves to be useful
def no_parser(cls, flags, param):
raise Exception('Can\'t parse string: %s for parameter'
' class: %s' % (str(param), cls.__name__))
def simple_parser(suffix='', cast=lambda i: i):
def body(cls, flags, param):
return cls(cast(param + suffix))
return body
# def tick_parser(cast=m5.objects.Latency): # lambda i: i):
def tick_parser(cast=lambda i: i):
def body(cls, flags, param):
old_param = param
ret = cls(cast(str(param) + 't'))
return ret
return body
def addr_range_parser(cls, flags, param):
sys.stdout.flush()
(low, high, intlv_high_bit, xor_high_bit,
intlv_bits, intlv_match) = param.split(':')
return m5.objects.AddrRange(
start=long(low), end=long(high),
intlvHighBit=long(intlv_high_bit), xorHighBit=long(xor_high_bit),
intlvBits=long(intlv_bits), intlvMatch=long(intlv_match))
def memory_bandwidth_parser(cls, flags, param):
# The string will be in tick/byte
# Convert to byte/tick
value = 1.0 / float(param)
# Convert to byte/s
value = ticks.fromSeconds(value)
return cls('%fB/s' % value)
# These parameters have trickier parsing from .ini files than might be
# expected
param_parsers = {
'Bool': simple_parser(),
'ParamValue': no_parser,
'NumericParamValue': simple_parser(cast=long),
'TickParamValue': tick_parser(),
'Frequency': tick_parser(cast=m5.objects.Latency),
'Current': simple_parser(suffix='A'),
'Voltage': simple_parser(suffix='V'),
'Enum': simple_parser(),
'MemorySize': simple_parser(suffix='B'),
'MemorySize32': simple_parser(suffix='B'),
'AddrRange': addr_range_parser,
'String': simple_parser(),
'MemoryBandwidth': memory_bandwidth_parser,
'Time': simple_parser(),
'EthernetAddr': simple_parser()
}
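# For example (a hypothetical .ini fragment; the parameter name and raw value
# are made up), a line such as
#
#   latency=30000
#
# for a tick-valued parameter reaches tick_parser(), which appends 't' so the
# bare integer is read as a tick count, while a MemoryBandwidth value stored
# as ticks/byte is inverted and converted to bytes/second by
# memory_bandwidth_parser().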
for name, parser in param_parsers.iteritems():
setattr(m5.params.__dict__[name], 'parse_ini', classmethod(parser))
class PortConnection(object):
"""This class is similar to m5.params.PortRef but with just enough
information for ConfigManager"""
def __init__(self, object_name, port_name, index):
self.object_name = object_name
self.port_name = port_name
self.index = index
@classmethod
def from_string(cls, str):
m = re.match('(.*)\.([^.\[]+)(\[(\d+)\])?', str)
object_name, port_name, whole_index, index = m.groups()
if index is not None:
index = int(index)
else:
index = 0
return PortConnection(object_name, port_name, index)
def __str__(self):
return '%s.%s[%d]' % (self.object_name, self.port_name, self.index)
def __cmp__(self, right):
return cmp((self.object_name, self.port_name, self.index),
(right.object_name, right.port_name, right.index))
def to_list(v):
"""Convert any non list to a singleton list"""
if isinstance(v, list):
return v
else:
return [v]
class ConfigManager(object):
"""Manager for parsing a Root configuration from a config file"""
def __init__(self, config):
self.config = config
self.objects_by_name = {}
self.flags = config.get_flags()
def find_object(self, object_name):
"""Find and configure (with just non-SimObject parameters)
a single object"""
if object_name == 'Null':
return NULL
if object_name in self.objects_by_name:
return self.objects_by_name[object_name]
object_type = self.config.get_param(object_name, 'type')
if object_type not in sim_object_classes_by_name:
raise Exception('No SimObject type %s is available to'
' build: %s' % (object_type, object_name))
object_class = sim_object_classes_by_name[object_type]
parsed_params = {}
for param_name, param in object_class._params.iteritems():
if issubclass(param.ptype, m5.params.ParamValue):
if isinstance(param, m5.params.VectorParamDesc):
param_values = self.config.get_param_vector(object_name,
param_name)
param_value = [ param.ptype.parse_ini(self.flags, value)
for value in param_values ]
else:
param_value = param.ptype.parse_ini(
self.flags, self.config.get_param(object_name,
param_name))
parsed_params[param_name] = param_value
obj = object_class(**parsed_params)
self.objects_by_name[object_name] = obj
return obj
def fill_in_simobj_parameters(self, object_name, obj):
"""Fill in all references to other SimObjects in an objects
parameters. This relies on all referenced objects having been
created"""
if object_name == 'Null':
return NULL
for param_name, param in obj.__class__._params.iteritems():
if issubclass(param.ptype, m5.objects.SimObject):
if isinstance(param, m5.params.VectorParamDesc):
param_values = self.config.get_param_vector(object_name,
param_name)
setattr(obj, param_name,
[ self.objects_by_name[name]
if name != 'Null' else m5.params.NULL
for name in param_values ])
else:
param_value = self.config.get_param(object_name,
param_name)
if param_value != 'Null':
setattr(obj, param_name, self.objects_by_name[
param_value])
return obj
def fill_in_children(self, object_name, obj):
"""Fill in the children of this object. This relies on all the
referenced objects having been created"""
children = self.config.get_object_children(object_name)
for child_name, child_paths in children:
param = obj.__class__._params.get(child_name, None)
if child_name == 'Null':
continue
if isinstance(child_paths, list):
child_list = [ self.objects_by_name[path]
for path in child_paths ]
else:
child_list = self.objects_by_name[child_paths]
obj.add_child(child_name, child_list)
for path in to_list(child_paths):
self.fill_in_children(path, self.objects_by_name[path])
return obj
def parse_port_name(self, port):
"""Parse the name of a port"""
m = re.match('(.*)\.([^.\[]+)(\[(\d+)\])?', port)
peer, peer_port, whole_index, index = m.groups()
if index is not None:
index = int(index)
else:
index = 0
return (peer, self.objects_by_name[peer], peer_port, index)
def gather_port_connections(self, object_name, obj):
"""Gather all the port-to-port connections from the named object.
Returns a list of (PortConnection, PortConnection) with unordered
(wrt. master/slave) connection information"""
if object_name == 'Null':
return NULL
parsed_ports = []
for port_name, port in obj.__class__._ports.iteritems():
# Assume that unnamed ports are unconnected
peers = self.config.get_port_peers(object_name, port_name)
for index, peer in zip(range(0, len(peers)), peers):
parsed_ports.append((
PortConnection(object_name, port.name, index),
PortConnection.from_string(peer)))
return parsed_ports
def bind_ports(self, connections):
"""Bind all ports from the given connection list. Note that the
connection list *must* list all connections with both (slave,master)
and (master,slave) orderings"""
# Markup a dict of how many connections are made to each port.
# This will be used to check that the next-to-be-made connection
# has a suitable port index
port_bind_indices = {}
for from_port, to_port in connections:
port_bind_indices[
(from_port.object_name, from_port.port_name)] = 0
def port_has_correct_index(port):
return port_bind_indices[
(port.object_name, port.port_name)] == port.index
def increment_port_index(port):
port_bind_indices[
(port.object_name, port.port_name)] += 1
# Step through the sorted connections. Exactly one of
# each (slave,master) and (master,slave) pairs will be
# bindable because the connections are sorted.
# For example: port_bind_indices
# left right left right
# a.b[0] -> d.f[1] 0 0 X
# a.b[1] -> e.g 0 0 BIND!
# e.g -> a.b[1] 1 X 0
# d.f[0] -> f.h 0 0 BIND!
# d.f[1] -> a.b[0] 1 0 BIND!
connections_to_make = []
for connection in sorted(connections):
from_port, to_port = connection
if (port_has_correct_index(from_port) and
port_has_correct_index(to_port)):
connections_to_make.append((from_port, to_port))
increment_port_index(from_port)
increment_port_index(to_port)
# Exactly half of the connections (ie. all of them, one per
# direction) must now have been made
if (len(connections_to_make) * 2) != len(connections):
raise Exception('Port bindings can\'t be ordered')
# Actually do the binding
for from_port, to_port in connections_to_make:
from_object = self.objects_by_name[from_port.object_name]
to_object = self.objects_by_name[to_port.object_name]
setattr(from_object, from_port.port_name,
getattr(to_object, to_port.port_name))
def find_all_objects(self):
"""Find and build all SimObjects from the config file and connect
their ports together as described. Does not instantiate system"""
# Build SimObjects for all sections of the config file
# populating not-SimObject-valued parameters
for object_name in self.config.get_all_object_names():
self.find_object(object_name)
# Add children to objects in the hierarchy from root
self.fill_in_children('root', self.find_object('root'))
# Now fill in SimObject-valued parameters in the knowledge that
# this won't be interpreted as becoming the parent of objects
# which are already in the root hierarchy
for name, obj in self.objects_by_name.iteritems():
self.fill_in_simobj_parameters(name, obj)
# Gather a list of all port-to-port connections
connections = []
for name, obj in self.objects_by_name.iteritems():
connections += self.gather_port_connections(name, obj)
# Find an acceptable order to bind those port connections and
# bind them
self.bind_ports(connections)
class ConfigFile(object):
def get_flags(self):
return set()
def load(self, config_file):
"""Load the named config file"""
pass
def get_all_object_names(self):
"""Get a list of all the SimObject paths in the configuration"""
pass
def get_param(self, object_name, param_name):
"""Get a single param or SimObject reference from the configuration
as a string"""
pass
def get_param_vector(self, object_name, param_name):
"""Get a vector param or vector of SimObject references from the
configuration as a list of strings"""
pass
def get_object_children(self, object_name):
"""Get a list of (name, paths) for each child of this object.
paths is either a single string object path or a list of object
paths"""
pass
def get_port_peers(self, object_name, port_name):
"""Get the list of connected port names (in the string form
object.port(\[index\])?) of the port object_name.port_name"""
pass
class ConfigIniFile(ConfigFile):
def __init__(self):
self.parser = ConfigParser.ConfigParser()
def load(self, config_file):
self.parser.read(config_file)
def get_all_object_names(self):
return self.parser.sections()
def get_param(self, object_name, param_name):
return self.parser.get(object_name, param_name)
def get_param_vector(self, object_name, param_name):
return self.parser.get(object_name, param_name).split()
def get_object_children(self, object_name):
if self.parser.has_option(object_name, 'children'):
children = self.parser.get(object_name, 'children')
child_names = children.split()
else:
child_names = []
def make_path(child_name):
if object_name == 'root':
return child_name
else:
return '%s.%s' % (object_name, child_name)
return [ (name, make_path(name)) for name in child_names ]
def get_port_peers(self, object_name, port_name):
if self.parser.has_option(object_name, port_name):
peer_string = self.parser.get(object_name, port_name)
return peer_string.split()
else:
return []
class ConfigJsonFile(ConfigFile):
def __init__(self):
pass
def is_sim_object(self, node):
return isinstance(node, dict) and 'path' in node
def find_all_objects(self, node):
if self.is_sim_object(node):
self.object_dicts[node['path']] = node
if isinstance(node, list):
for elem in node:
self.find_all_objects(elem)
elif isinstance(node, dict):
for elem in node.itervalues():
self.find_all_objects(elem)
def load(self, config_file):
root = json.load(open(config_file, 'r'))
self.object_dicts = {}
self.find_all_objects(root)
def get_all_object_names(self):
return sorted(self.object_dicts.keys())
def parse_param_string(self, node):
if node is None:
return "Null"
elif self.is_sim_object(node):
return node['path']
else:
return str(node)
def get_param(self, object_name, param_name):
obj = self.object_dicts[object_name]
return self.parse_param_string(obj[param_name])
def get_param_vector(self, object_name, param_name):
obj = self.object_dicts[object_name]
return [ self.parse_param_string(p) for p in obj[param_name] ]
def get_object_children(self, object_name):
"""It is difficult to tell which elements are children in the
JSON file as there is no explicit 'children' node. Take any
element which is a full SimObject description or a list of
SimObject descriptions. This will not work with a mixed list of
references and descriptions but that's a scenario that isn't
possible (very likely?) with gem5's binding/naming rules"""
obj = self.object_dicts[object_name]
children = []
for name, node in obj.iteritems():
if self.is_sim_object(node):
children.append((name, node['path']))
elif isinstance(node, list) and node != [] and all([
self.is_sim_object(e) for e in node ]):
children.append((name, [ e['path'] for e in node ]))
return children
def get_port_peers(self, object_name, port_name):
"""Get the 'peer' element of any node with 'peer' and 'role'
elements"""
obj = self.object_dicts[object_name]
peers = []
if port_name in obj and 'peer' in obj[port_name] and \
'role' in obj[port_name]:
peers = to_list(obj[port_name]['peer'])
return peers
parser = argparse.ArgumentParser()
parser.add_argument('config_file', metavar='config-file.ini',
help='.ini configuration file to load and run')
parser.add_argument('--checkpoint-dir', type=str, default=None,
help='A checkpoint directory to restore from when starting '
'the simulation')
args = parser.parse_args(sys.argv[1:])
if args.config_file.endswith('.ini'):
config = ConfigIniFile()
config.load(args.config_file)
else:
config = ConfigJsonFile()
config.load(args.config_file)
ticks.fixGlobalFrequency()
mgr = ConfigManager(config)
mgr.find_all_objects()
m5.instantiate(args.checkpoint_dir)
exit_event = m5.simulate()
print('Exiting @ tick %i because %s' % (m5.curTick(), exit_event.getCause()))
| 36.656307 | 78 | 0.644407 |
72e3c3a8597283916762d92dcc9f5e95c6261d2f | 1,406 | py | Python | demos/automatic_video_subtitiles/recognize.py | JiehangXie/PaddleSpeech | 60090b49ec27437127ab62358026dd5bb95fccc7 | ["Apache-2.0"] | 1,540 | 2017-11-14T13:26:33.000Z | 2021-11-09T14:05:08.000Z | demos/automatic_video_subtitiles/recognize.py | JiehangXie/PaddleSpeech | 60090b49ec27437127ab62358026dd5bb95fccc7 | ["Apache-2.0"] | 599 | 2017-11-14T13:19:12.000Z | 2021-11-09T01:58:26.000Z | demos/automatic_video_subtitiles/recognize.py | JiehangXie/PaddleSpeech | 60090b49ec27437127ab62358026dd5bb95fccc7 | ["Apache-2.0"] | 449 | 2017-11-14T12:48:46.000Z | 2021-11-06T09:34:33.000Z |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import paddle
from paddlespeech.cli import ASRExecutor
from paddlespeech.cli import TextExecutor
# yapf: disable
parser = argparse.ArgumentParser(__doc__)
parser.add_argument("--input", type=str, required=True)
parser.add_argument("--device", type=str, default=paddle.get_device())
args = parser.parse_args()
# yapf: enable
if __name__ == "__main__":
asr_executor = ASRExecutor()
text_executor = TextExecutor()
text = asr_executor(
audio_file=os.path.abspath(os.path.expanduser(args.input)),
device=args.device)
result = text_executor(
text=text,
task='punc',
model='ernie_linear_p3_wudao',
device=args.device)
print('ASR Result: \n{}'.format(text))
print('Text Result: \n{}'.format(result))
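# A minimal invocation sketch (the audio path is a placeholder):
#
#   python recognize.py --input ./input.wav --device cpu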
| 31.954545 | 74 | 0.726174 |
f69422b19f218e9df4b97398a1f962c6247ed0f8 | 5,506 | py | Python | python/seldon_core/wrapper.py | SandhyaaGopchandani/seldon-core | 00360bd7c90e85da980730c34e55318997907d44 | ["Apache-2.0"] | null | null | null | python/seldon_core/wrapper.py | SandhyaaGopchandani/seldon-core | 00360bd7c90e85da980730c34e55318997907d44 | ["Apache-2.0"] | null | null | null | python/seldon_core/wrapper.py | SandhyaaGopchandani/seldon-core | 00360bd7c90e85da980730c34e55318997907d44 | ["Apache-2.0"] | null | null | null |
import grpc
from concurrent import futures
from flask import jsonify, Flask, send_from_directory, request
from flask_cors import CORS
import logging
from seldon_core.utils import json_to_seldon_message, seldon_message_to_json, json_to_feedback, json_to_seldon_messages
from seldon_core.flask_utils import get_request
import seldon_core.seldon_methods
from seldon_core.flask_utils import SeldonMicroserviceException, ANNOTATION_GRPC_MAX_MSG_SIZE
from seldon_core.proto import prediction_pb2_grpc
import os
logger = logging.getLogger(__name__)
PRED_UNIT_ID = os.environ.get("PREDICTIVE_UNIT_ID", "0")
def get_rest_microservice(user_model):
app = Flask(__name__, static_url_path='')
CORS(app)
if hasattr(user_model, 'model_error_handler'):
logger.info('Registering the custom error handler...')
app.register_blueprint(user_model.model_error_handler)
@app.errorhandler(SeldonMicroserviceException)
def handle_invalid_usage(error):
response = jsonify(error.to_dict())
logger.error("%s", error.to_dict())
response.status_code = error.status_code
return response
@app.route("/seldon.json", methods=["GET"])
def openAPI():
return send_from_directory('', "openapi/seldon.json")
@app.route("/predict", methods=["GET", "POST"])
def Predict():
requestJson = get_request()
logger.debug("REST Request: %s", request)
response = seldon_core.seldon_methods.predict(user_model, requestJson)
logger.debug("REST Response: %s", response)
return jsonify(response)
@app.route("/send-feedback", methods=["GET", "POST"])
def SendFeedback():
requestJson = get_request()
logger.debug("REST Request: %s", request)
requestProto = json_to_feedback(requestJson)
logger.debug("Proto Request: %s", requestProto)
responseProto = seldon_core.seldon_methods.send_feedback(user_model, requestProto, PRED_UNIT_ID)
jsonDict = seldon_message_to_json(responseProto)
return jsonify(jsonDict)
@app.route("/transform-input", methods=["GET", "POST"])
def TransformInput():
requestJson = get_request()
logger.debug("REST Request: %s", request)
response = seldon_core.seldon_methods.transform_input(
user_model, requestJson)
logger.debug("REST Response: %s", response)
return jsonify(response)
@app.route("/transform-output", methods=["GET", "POST"])
def TransformOutput():
requestJson = get_request()
logger.debug("REST Request: %s", request)
response = seldon_core.seldon_methods.transform_output(
user_model, requestJson)
logger.debug("REST Response: %s", response)
return jsonify(response)
@app.route("/route", methods=["GET", "POST"])
def Route():
requestJson = get_request()
logger.debug("REST Request: %s", request)
response = seldon_core.seldon_methods.route(
user_model, requestJson)
logger.debug("REST Response: %s", response)
return jsonify(response)
@app.route("/aggregate", methods=["GET", "POST"])
def Aggregate():
requestJson = get_request()
logger.debug("REST Request: %s", request)
response = seldon_core.seldon_methods.aggregate(
user_model, requestJson)
logger.debug("REST Response: %s", response)
return jsonify(response)
return app
# ----------------------------
# GRPC
# ----------------------------
class SeldonModelGRPC(object):
def __init__(self, user_model):
self.user_model = user_model
def Predict(self, request_grpc, context):
return seldon_core.seldon_methods.predict(self.user_model, request_grpc)
def SendFeedback(self, feedback_grpc, context):
return seldon_core.seldon_methods.send_feedback(self.user_model, feedback_grpc, PRED_UNIT_ID)
def TransformInput(self, request_grpc, context):
return seldon_core.seldon_methods.transform_input(self.user_model, request_grpc)
def TransformOutput(self, request_grpc, context):
return seldon_core.seldon_methods.transform_output(self.user_model, request_grpc)
def Route(self, request_grpc, context):
return seldon_core.seldon_methods.route(self.user_model, request_grpc)
def Aggregate(self, request_grpc, context):
return seldon_core.seldon_methods.aggregate(self.user_model, request_grpc)
def get_grpc_server(user_model, annotations={}, trace_interceptor=None):
seldon_model = SeldonModelGRPC(user_model)
options = []
if ANNOTATION_GRPC_MAX_MSG_SIZE in annotations:
max_msg = int(annotations[ANNOTATION_GRPC_MAX_MSG_SIZE])
logger.info(
"Setting grpc max message and receive length to %d", max_msg)
options.append(('grpc.max_message_length', max_msg))
options.append(('grpc.max_send_message_length', max_msg))
options.append(('grpc.max_receive_message_length', max_msg))
server = grpc.server(futures.ThreadPoolExecutor(
max_workers=10), options=options)
if trace_interceptor:
from grpc_opentracing.grpcext import intercept_server
server = intercept_server(server, trace_interceptor)
prediction_pb2_grpc.add_GenericServicer_to_server(seldon_model, server)
prediction_pb2_grpc.add_ModelServicer_to_server(seldon_model, server)
prediction_pb2_grpc.add_TransformerServicer_to_server(seldon_model, server)
return server
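# A minimal usage sketch (UserModel is a hypothetical user-supplied class with
# the methods that seldon_core.seldon_methods dispatches to, e.g. predict();
# the ports are placeholders):
#
#   app = get_rest_microservice(UserModel())
#   app.run(host="0.0.0.0", port=5000)
#
#   server = get_grpc_server(UserModel())
#   server.add_insecure_port("0.0.0.0:5001")
#   server.start()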
| 38.236111 | 119 | 0.705412 |
f1358ed113b5c99cdfd30c4e161d15d16dc68015 | 1,916 | py | Python | writeDB.py | kosovojs/pywikibot-scripts | 4a9bf5177ebcfbba719970f9f3b48fbd51831818 | ["MIT"] | 3 | 2021-10-03T17:27:43.000Z | 2021-10-05T12:27:06.000Z | writeDB.py | kosovojs/pywikibot-scripts | 4a9bf5177ebcfbba719970f9f3b48fbd51831818 | ["MIT"] | null | null | null | writeDB.py | kosovojs/pywikibot-scripts | 4a9bf5177ebcfbba719970f9f3b48fbd51831818 | ["MIT"] | null | null | null |
import pywikibot, re, json, os, time, requests, sys, toolforge
from datetime import datetime
import sqlite3 as lite
connLabs = toolforge.connect_tools('s53143__meta_p')
cursor1 = connLabs.cursor()
def encode_if_necessary(b):
if type(b) is bytes:
return b.decode('utf8')
return b
def run_query(query,connection = connLabs):
#query = query.encode('utf-8')
#print(query)
try:
cursor = connection.cursor()
cursor.execute(query)
rows = cursor.fetchall()
except KeyboardInterrupt:
sys.exit()
return rows
#
def get_info_from_db():
query = 'select id from ciemi where teksts="" limit 1'
query_res = run_query(query,connLabs)
if len(query_res)>0:
return query_res[0][0]
else:
return False
#print(data)
#
def do_one(nr):
url = "http://vietvardi.lgia.gov.lv/vv/to_www_obj.objekts?p_id={}".format(nr)
url2= requests.get(url)
url2.encoding = 'Windows-1257'
top250_source = str(url2.text)
#titulo_unicode = top250_source.decode('utf8')
#pywikibot.output(top250_source)
nr = str(nr)
query = "UPDATE `ciemi` SET teksts=%s where id=%s"
cursor1.execute(query, (top250_source,nr))
connLabs.commit()
#
def main():
hasmoredata = True
counter = 0
begintime = time.time()
print(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
while hasmoredata:
try:
infodata = get_info_from_db()
if not infodata:
hasmoredata = False
break
counter += 1
if counter % 50 == 0:
print('\t\t'+str(counter))
sys.stdout.flush()
#if counter==2:
# hasmoredata = False
item = infodata
do_one(item)
time.sleep(2)
except:
print('final except')
hasmoredata = False
#
#con.close()
print('Done!')
endtime = time.time()
print('it took: {}'.format((endtime-begintime)))
print(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
#
main()
| 22.541176 | 79 | 0.648747 |
c70ef7e8424069f1dd06e85d744ea2cccd1f715e | 415 | py | Python | grascii/grammars/__init__.py | chanicpanic/grascii | 654d24529dd8373d9df35f07b06323bb17ed7ffb | ["MIT"] | 3 | 2020-10-02T11:45:47.000Z | 2021-06-27T01:16:08.000Z | grascii/grammars/__init__.py | chanicpanic/grascii | 654d24529dd8373d9df35f07b06323bb17ed7ffb | ["MIT"] | 2 | 2021-07-03T23:09:00.000Z | 2021-07-06T17:46:02.000Z | grascii/grammars/__init__.py | grascii/grascii | 654d24529dd8373d9df35f07b06323bb17ed7ffb | ["MIT"] | null | null | null |
"""Contains grammars used by grascii."""
import io
from pathlib import Path
from pkg_resources import resource_string
def get_grammar(name: str) -> str:
"""Get a grammar string.
:param name: The name of the grammar resource.
:returns: A grammar string.
"""
return Path(__file__).with_name(name + ".lark").read_text(encoding="utf-8")
# return resource_string("grascii.grammars", name + ".lark").decode("utf-8")
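# A minimal usage sketch (assumes a grammar file such as "grascii.lark" sits
# next to this module and that the returned text is parsed with lark, which is
# not imported here):
#
#   from lark import Lark
#   parser = Lark(get_grammar("grascii"))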
| 24.411765 | 80 | 0.681928 |
bdf54311411f9515092539aad8b3596e4cc0f6cc | 31,433 | py | Python | nova/conf/network.py | j-griffith/nova | 8d9325b525034e6e967818bc8431e91585d8abb7 | ["Apache-2.0"] | null | null | null | nova/conf/network.py | j-griffith/nova | 8d9325b525034e6e967818bc8431e91585d8abb7 | ["Apache-2.0"] | 11 | 2017-06-19T01:28:55.000Z | 2017-06-23T02:01:47.000Z | nova/conf/network.py | j-griffith/nova | 8d9325b525034e6e967818bc8431e91585d8abb7 | ["Apache-2.0"] | 1 | 2020-07-22T21:08:25.000Z | 2020-07-22T21:08:25.000Z |
# Copyright 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.conf import paths
network_opts = [
# NOTE(sfinucan): Don't move this option to a group as it will be
# deprecated in a future release.
cfg.BoolOpt("flat_injected",
default=False,
help="""
This option determines whether the network setup information is injected into
the VM before it is booted. While it was originally designed to be used only by
nova-network, it is also used by the vmware and xenapi virt drivers to control
whether network information is injected into a VM.
"""),
cfg.StrOpt("flat_network_bridge",
deprecated_for_removal=True,
deprecated_since='15.0.0',
deprecated_reason="""
nova-network is deprecated, as are any related configuration options.
""",
help="""
This option determines the bridge used for simple network interfaces when no
bridge is specified in the VM creation request.
Please note that this option is only used when using nova-network instead of
Neutron in your deployment.
Possible values:
Any string representing a valid network bridge, such as 'br100'
Related options:
``use_neutron``
"""),
cfg.StrOpt("flat_network_dns",
default="8.8.4.4",
deprecated_for_removal=True,
deprecated_since='15.0.0',
deprecated_reason="""
nova-network is deprecated, as are any related configuration options.
""",
help="""
This is the address of the DNS server for a simple network. If this option is
not specified, the default of '8.8.4.4' is used.
Please note that this option is only used when using nova-network instead of
Neutron in your deployment.
Possible values:
Any valid IP address.
Related options:
``use_neutron``
"""),
cfg.StrOpt("flat_interface",
deprecated_for_removal=True,
deprecated_since='15.0.0',
deprecated_reason="""
nova-network is deprecated, as are any related configuration options.
""",
help="""
This option is the name of the virtual interface of the VM on which the bridge
will be built. While it was originally designed to be used only by
nova-network, it is also used by libvirt for the bridge interface name.
Possible values:
Any valid virtual interface name, such as 'eth0'
"""),
cfg.IntOpt("vlan_start",
default=100,
min=1,
max=4094,
deprecated_for_removal=True,
deprecated_since='15.0.0',
deprecated_reason="""
nova-network is deprecated, as are any related configuration options.
""",
help="""
This is the VLAN number used for private networks. Note that the when creating
the networks, if the specified number has already been assigned, nova-network
will increment this number until it finds an available VLAN.
Please note that this option is only used when using nova-network instead of
Neutron in your deployment. It also will be ignored if the configuration option
for `network_manager` is not set to the default of
'nova.network.manager.VlanManager'.
Possible values:
Any integer between 1 and 4094. Values outside of that range will raise a
ValueError exception. Default = 100.
Related options:
``network_manager``, ``use_neutron``
"""),
cfg.StrOpt("vlan_interface",
deprecated_for_removal=True,
deprecated_since='15.0.0',
deprecated_reason="""
nova-network is deprecated, as are any related configuration options. While
this option has an effect when using neutron, it incorrectly overrides the value
provided by neutron and should therefore not be used.
""",
help="""
This option is the name of the virtual interface of the VM on which the VLAN
bridge will be built. While it was originally designed to be used only by
nova-network, it is also used by libvirt and xenapi for the bridge interface
name.
Please note that this setting will be ignored in nova-network if the
configuration option for `network_manager` is not set to the default of
'nova.network.manager.VlanManager'.
Possible values:
Any valid virtual interface name, such as 'eth0'
"""),
cfg.IntOpt("num_networks",
deprecated_for_removal=True,
deprecated_since='15.0.0',
deprecated_reason="""
nova-network is deprecated, as are any related configuration options.
""",
default=1,
min=1,
help="""
This option represents the number of networks to create if not explicitly
specified when the network is created. The only time this is used is if a CIDR
is specified, but an explicit network_size is not. In that case, the subnets
are created by dividing the IP address space of the CIDR by num_networks. The
resulting subnet sizes cannot be larger than the configuration option
`network_size`; in that event, they are reduced to `network_size`, and a
warning is logged.
Please note that this option is only used when using nova-network instead of
Neutron in your deployment.
Possible values:
Any positive integer is technically valid, although there are practical
limits based upon available IP address space and virtual interfaces. The
default is 1.
Related options:
``use_neutron``, ``network_size``
"""),
cfg.StrOpt("vpn_ip",
default="$my_ip",
deprecated_for_removal=True,
deprecated_since='15.0.0',
deprecated_reason="""
nova-network is deprecated, as are any related configuration options.
""",
help="""
This is the public IP address for the cloudpipe VPN servers. It defaults to the
IP address of the host.
Please note that this option is only used when using nova-network instead of
Neutron in your deployment. It also will be ignored if the configuration option
for `network_manager` is not set to the default of
'nova.network.manager.VlanManager'.
Possible values:
Any valid IP address. The default is $my_ip, the IP address of the host.
Related options:
``network_manager``, ``use_neutron``, ``vpn_start``
"""),
cfg.PortOpt("vpn_start",
default=1000,
deprecated_for_removal=True,
deprecated_since='15.0.0',
deprecated_reason="""
nova-network is deprecated, as are any related configuration options.
""",
help="""
This is the port number to use as the first VPN port for private networks.
Please note that this option is only used when using nova-network instead of
Neutron in your deployment. It also will be ignored if the configuration option
for `network_manager` is not set to the default of
'nova.network.manager.VlanManager', or if you specify a value the 'vpn_start'
parameter when creating a network.
Possible values:
Any integer representing a valid port number. The default is 1000.
Related options:
``use_neutron``, ``vpn_ip``, ``network_manager``
"""),
cfg.IntOpt("network_size",
default=256,
min=1,
deprecated_for_removal=True,
deprecated_since='15.0.0',
deprecated_reason="""
nova-network is deprecated, as are any related configuration options.
""",
help="""
This option determines the number of addresses in each private subnet.
Please note that this option is only used when using nova-network instead of
Neutron in your deployment.
Possible values:
Any positive integer that is less than or equal to the available network
size. Note that if you are creating multiple networks, they must all fit in
the available IP address space. The default is 256.
Related options:
``use_neutron``, ``num_networks``
"""),
cfg.StrOpt("fixed_range_v6",
default="fd00::/48",
deprecated_for_removal=True,
deprecated_since='15.0.0',
deprecated_reason="""
nova-network is deprecated, as are any related configuration options.
""",
help="""
This option determines the fixed IPv6 address block when creating a network.
Please note that this option is only used when using nova-network instead of
Neutron in your deployment.
Possible values:
Any valid IPv6 CIDR. The default value is "fd00::/48".
Related options:
``use_neutron``
"""),
cfg.StrOpt("gateway",
deprecated_for_removal=True,
deprecated_since='15.0.0',
deprecated_reason="""
nova-network is deprecated, as are any related configuration options.
""",
help="""
This is the default IPv4 gateway. It is used only in the testing suite.
Please note that this option is only used when using nova-network instead of
Neutron in your deployment.
Possible values:
Any valid IP address.
Related options:
``use_neutron``, ``gateway_v6``
"""),
cfg.StrOpt("gateway_v6",
deprecated_for_removal=True,
deprecated_since='15.0.0',
deprecated_reason="""
nova-network is deprecated, as are any related configuration options.
""",
help="""
This is the default IPv6 gateway. It is used only in the testing suite.
Please note that this option is only used when using nova-network instead of
Neutron in your deployment.
Possible values:
Any valid IP address.
Related options:
``use_neutron``, ``gateway``
"""),
cfg.IntOpt("cnt_vpn_clients",
default=0,
min=0,
deprecated_for_removal=True,
deprecated_since='15.0.0',
deprecated_reason="""
nova-network is deprecated, as are any related configuration options.
""",
help="""
This option represents the number of IP addresses to reserve at the top of the
address range for VPN clients. It also will be ignored if the configuration
option for `network_manager` is not set to the default of
'nova.network.manager.VlanManager'.
Possible values:
Any integer, 0 or greater. The default is 0.
Related options:
``use_neutron``, ``network_manager``
"""),
cfg.IntOpt("fixed_ip_disassociate_timeout",
default=600,
min=0,
deprecated_for_removal=True,
deprecated_since='15.0.0',
deprecated_reason="""
nova-network is deprecated, as are any related configuration options.
""",
help="""
This is the number of seconds to wait before disassociating a deallocated fixed
IP address. This is only used with the nova-network service, and has no effect
when using neutron for networking.
Possible values:
Any integer, zero or greater. The default is 600 (10 minutes).
Related options:
``use_neutron``
"""),
cfg.IntOpt("create_unique_mac_address_attempts",
default=5,
min=1,
deprecated_for_removal=True,
deprecated_since='15.0.0',
deprecated_reason="""
nova-network is deprecated, as are any related configuration options.
""",
help="""
This option determines how many times nova-network will attempt to create a
unique MAC address before giving up and raising a
`VirtualInterfaceMacAddressException` error.
Possible values:
Any positive integer. The default is 5.
Related options:
``use_neutron``
"""),
cfg.BoolOpt("teardown_unused_network_gateway",
default=False,
deprecated_for_removal=True,
deprecated_since='15.0.0',
deprecated_reason="""
nova-network is deprecated, as are any related configuration options.
""",
help="""
Determines whether unused gateway devices, both VLAN and bridge, are deleted if
the network is in nova-network VLAN mode and is multi-hosted.
Related options:
``use_neutron``, ``vpn_ip``, ``fake_network``
"""),
cfg.BoolOpt("force_dhcp_release",
default=True,
deprecated_for_removal=True,
deprecated_since='15.0.0',
deprecated_reason="""
nova-network is deprecated, as are any related configuration options.
""",
help="""
When this option is True, a call is made to release the DHCP for the instance
when that instance is terminated.
Related options:
``use_neutron``
"""),
cfg.BoolOpt("update_dns_entries",
default=False,
deprecated_for_removal=True,
deprecated_since='15.0.0',
deprecated_reason="""
nova-network is deprecated, as are any related configuration options.
""",
help="""
When this option is True, whenever a DNS entry must be updated, a fanout cast
message is sent to all network hosts to update their DNS entries in multi-host
mode.
Related options:
``use_neutron``
"""),
cfg.IntOpt("dns_update_periodic_interval",
default=-1,
min=-1,
deprecated_for_removal=True,
deprecated_since='15.0.0',
deprecated_reason="""
nova-network is deprecated, as are any related configuration options.
""",
help="""
This option determines the time, in seconds, to wait between refreshing DNS
entries for the network.
Possible values:
Either -1 (default), or any positive integer. A negative value will disable
the updates.
Related options:
``use_neutron``
"""),
cfg.StrOpt("dhcp_domain",
default="novalocal",
deprecated_for_removal=True,
deprecated_since='15.0.0',
deprecated_reason="""
nova-network is deprecated, as are any related configuration options.
""",
help="""
This option allows you to specify the domain for the DHCP server.
Possible values:
Any string that is a valid domain name.
Related options:
``use_neutron``
"""),
cfg.StrOpt("l3_lib",
default="nova.network.l3.LinuxNetL3",
deprecated_for_removal=True,
deprecated_since='15.0.0',
deprecated_reason="""
nova-network is deprecated, as are any related configuration options.
""",
help="""
This option allows you to specify the L3 management library to be used.
Possible values:
Any dot-separated string that represents the import path to an L3
networking library.
Related options:
``use_neutron``
"""),
cfg.BoolOpt("share_dhcp_address",
default=False,
deprecated_for_removal=True,
deprecated_since='2014.2',
help="""
THIS VALUE SHOULD BE SET WHEN CREATING THE NETWORK.
If True in multi_host mode, all compute hosts share the same dhcp address. The
same IP address used for DHCP will be added on each nova-network node which is
only visible to the VMs on the same host.
The use of this configuration has been deprecated and may be removed in any
release after Mitaka. It is recommended that instead of relying on this option,
an explicit value should be passed to 'create_networks()' as a keyword argument
with the name 'share_address'.
"""),
# NOTE(stephenfin): This should move to True for a cycle before being
# removed.
cfg.BoolOpt('use_neutron',
default=True,
deprecated_for_removal=True,
deprecated_since='15.0.0',
deprecated_reason="""
nova-network is deprecated, as are any related configuration options.
""",
help="Whether to use Neutron or Nova Network as the back end "
"for networking. Defaults to False (indicating Nova "
"network).Set to True to use neutron.")
]
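# A hypothetical nova.conf snippet exercising a few of the nova-network
# options documented above (the values are placeholders, not recommendations):
#
#   [DEFAULT]
#   use_neutron = False
#   flat_network_bridge = br100
#   flat_interface = eth1
#   network_size = 256
#   fixed_range_v6 = fd00::/48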
linux_net_opts = [
cfg.MultiStrOpt('dhcpbridge_flagfile',
default=['/etc/nova/nova-dhcpbridge.conf'],
help="""
This option is a list of full paths to one or more configuration files for
dhcpbridge. In most cases the default path of '/etc/nova/nova-dhcpbridge.conf'
should be sufficient, but if you have special needs for configuring dhcpbridge,
you can change or add to this list.
Possible values
A list of strings, where each string is the full path to a dhcpbridge
configuration file.
"""),
cfg.StrOpt('networks_path',
default=paths.state_path_def('networks'),
help="""
The location where the network configuration files will be kept. The default is
the 'networks' directory off of the location where nova's Python module is
installed.
Possible values
A string containing the full path to the desired configuration directory
"""),
cfg.StrOpt('public_interface',
default='eth0',
help="""
This is the name of the network interface for public IP addresses. The default
is 'eth0'.
Possible values:
Any string representing a network interface name
"""),
cfg.StrOpt('dhcpbridge',
default=paths.bindir_def('nova-dhcpbridge'),
help="""
The location of the binary nova-dhcpbridge. By default it is the binary named
'nova-dhcpbridge' that is installed with all the other nova binaries.
Possible values:
Any string representing the full path to the binary for dhcpbridge
"""),
cfg.StrOpt('routing_source_ip',
default='$my_ip',
help="""
This is the public IP address of the network host. It is used when creating a
SNAT rule.
Possible values:
Any valid IP address
Related options:
force_snat_range
"""),
cfg.IntOpt('dhcp_lease_time',
default=86400,
min=1,
help="""
The lifetime of a DHCP lease, in seconds. The default is 86400 (one day).
Possible values:
Any positive integer value.
"""),
cfg.MultiStrOpt("dns_server",
default=[],
help="""
Despite the singular form of the name of this option, it is actually a list of
zero or more server addresses that dnsmasq will use for DNS nameservers. If
this is not empty, dnsmasq will not read /etc/resolv.conf, but will only use
the servers specified in this option. If the option use_network_dns_servers is
True, the dns1 and dns2 servers from the network will be appended to this list,
and will be used as DNS servers, too.
Possible values:
A list of strings, where each string is either an IP address or a FQDN.
Related options:
use_network_dns_servers
"""),
cfg.BoolOpt("use_network_dns_servers",
default=False,
help="""
When this option is set to True, the dns1 and dns2 servers for the network
specified by the user on boot will be used for DNS, as well as any specified in
the `dns_server` option.
Related options:
dns_server
"""),
cfg.ListOpt("dmz_cidr",
default=[],
help="""
This option is a list of zero or more IP address ranges in your network's DMZ
that should be accepted.
Possible values:
A list of strings, each of which should be a valid CIDR.
"""),
cfg.MultiStrOpt("force_snat_range",
default=[],
help="""
This is a list of zero or more IP ranges that traffic from the
`routing_source_ip` will be SNATted to. If the list is empty, then no SNAT
rules are created.
Possible values:
A list of strings, each of which should be a valid CIDR.
Related options:
routing_source_ip
"""),
cfg.StrOpt("dnsmasq_config_file",
default="",
help="""
The path to the custom dnsmasq configuration file, if any.
Possible values:
The full path to the configuration file, or an empty string if there is no
custom dnsmasq configuration file.
"""),
cfg.StrOpt("linuxnet_interface_driver",
default="nova.network.linux_net.LinuxBridgeInterfaceDriver",
help="""
This is the class used as the ethernet device driver for linuxnet bridge
operations. The default value should be all you need for most cases, but if you
wish to use a customized class, set this option to the full dot-separated
import path for that class.
Possible values:
Any string representing a dot-separated class path that Nova can import.
"""),
cfg.StrOpt("linuxnet_ovs_integration_bridge",
default="br-int",
help="""
The name of the Open vSwitch bridge that is used with linuxnet when connecting
with Open vSwitch.
Possible values:
Any string representing a valid bridge name.
"""),
cfg.BoolOpt("send_arp_for_ha",
default=False,
help="""
When True, ARP messages will be sent when a device starts up and when floating
IP addresses are bound, to ensure that the ARP caches on the compute hosts are
up to date.
Related options:
send_arp_for_ha_count
"""),
cfg.IntOpt("send_arp_for_ha_count",
default=3,
help="""
When arp messages are configured to be sent, they will be sent with the count
set to the value of this option. Of course, if this is set to zero, no arp
messages will be sent.
Possible values:
Any integer greater than or equal to 0
Related options:
send_arp_for_ha
"""),
cfg.BoolOpt("use_single_default_gateway",
default=False,
help="""
When set to True, only the first NIC of a VM will get its default gateway from
the DHCP server.
"""),
cfg.MultiStrOpt("forward_bridge_interface",
default=["all"],
help="""
One or more interfaces that bridges can forward traffic to. If any of the items
in this list is the special keyword 'all', then all traffic will be forwarded.
Possible values:
A list of zero or more interface names, or the word 'all'.
"""),
cfg.StrOpt("metadata_host",
default="$my_ip",
help="""
This option determines the IP address for the network metadata API server.
Possible values:
* Any valid IP address. The default is the address of the Nova API server.
Related options:
* metadata_port
"""),
cfg.PortOpt("metadata_port",
default=8775,
help="""
This option determines the port used for the metadata API server.
Related options:
* metadata_host
"""),
cfg.StrOpt("iptables_top_regex",
default="",
help="""
This expression, if defined, will select any matching iptables rules and place
them at the top when applying metadata changes to the rules.
Possible values:
* Any string representing a valid regular expression, or an empty string
Related options:
* iptables_bottom_regex
"""),
cfg.StrOpt("iptables_bottom_regex",
default="",
help="""
This expression, if defined, will select any matching iptables rules and place
them at the bottom when applying metadata changes to the rules.
Possible values:
* Any string representing a valid regular expression, or an empty string
Related options:
* iptables_top_regex
"""),
cfg.StrOpt("iptables_drop_action",
default="DROP",
help="""
By default, packets that do not pass the firewall are DROPped. In many cases,
though, an operator may find it more useful to change this from DROP to REJECT,
so that the user issuing those packets may have a better idea as to what's
going on, or LOGDROP in order to record the blocked traffic before DROPping.
Possible values:
* A string representing an iptables chain. The default is DROP.
"""),
cfg.IntOpt("ovs_vsctl_timeout",
default=120,
min=0,
help="""
This option represents the period of time, in seconds, that the ovs_vsctl calls
will wait for a response from the database before timing out. A setting of 0
means that the utility should wait forever for a response.
Possible values:
* Any positive integer if a limited timeout is desired, or zero if the
calls should wait forever for a response.
"""),
cfg.BoolOpt("fake_network",
default=False,
help="""
This option is used mainly in testing to avoid calls to the underlying network
utilities.
"""),
cfg.IntOpt("ebtables_exec_attempts",
default=3,
min=1,
help="""
This option determines the number of times to retry ebtables commands before
giving up. The minimum number of retries is 1.
Possible values:
* Any positive integer
Related options:
* ebtables_retry_interval
"""),
cfg.FloatOpt("ebtables_retry_interval",
default=1.0,
help="""
This option determines the time, in seconds, that the system will sleep in
between ebtables retries. Note that each successive retry waits a multiple of
this value, so for example, if this is set to the default of 1.0 seconds, and
ebtables_exec_attempts is 4, after the first failure, the system will sleep for
1 * 1.0 seconds, after the second failure it will sleep 2 * 1.0 seconds, and
after the third failure it will sleep 3 * 1.0 seconds.
Possible values:
* Any non-negative float or integer. Setting this to zero will result in no
waiting between attempts.
Related options:
* ebtables_exec_attempts
"""),
]
ldap_dns_opts = [
# TODO(siva_krishnan): Validate URL scheme once that feature is added
# in oslo_config
cfg.URIOpt('ldap_dns_url',
default='ldap://ldap.example.com:389',
help="""
URL for LDAP server which will store DNS entries
Possible values:
* A valid LDAP URL representing the server
"""),
cfg.StrOpt('ldap_dns_user',
default='uid=admin,ou=people,dc=example,dc=org',
help='Bind user for LDAP server'),
cfg.StrOpt('ldap_dns_password',
default='password',
secret=True,
help="Bind user's password for LDAP server"),
cfg.StrOpt('ldap_dns_soa_hostmaster',
default='hostmaster@example.org',
help="""
Hostmaster for the LDAP DNS driver Start of Authority (SOA) record
Possible values:
* Any valid string representing LDAP DNS hostmaster.
"""),
# TODO(sfinucan): This should be converted to a ListOpt. Perhaps when the
# option is moved to a group?
cfg.MultiStrOpt('ldap_dns_servers',
default=['dns.example.org'],
help="""
DNS Servers for LDAP DNS driver
Possible values:
* A valid URL representing a DNS server
"""),
cfg.StrOpt('ldap_dns_base_dn',
default='ou=hosts,dc=example,dc=org',
help="""
Base distinguished name for the LDAP search query
This option helps to decide where to look up the host in LDAP.
"""),
# TODO(sfinucan): Add a min parameter to this and the below options
cfg.IntOpt('ldap_dns_soa_refresh',
default=1800,
help="""
Refresh interval (in seconds) for LDAP DNS driver Start of Authority
This is the interval that a secondary/slave DNS server waits before requesting
the primary DNS server's current SOA record. If the records differ, the
secondary DNS server requests a zone transfer from the primary.
NOTE: Lower values would cause more traffic.
"""),
cfg.IntOpt('ldap_dns_soa_retry',
default=3600,
help="""
Retry interval (in seconds) for LDAP DNS driver Start of Authority
This is the interval that a secondary/slave DNS server should wait if an
attempt to transfer the zone failed during the previous refresh interval.
"""),
cfg.IntOpt('ldap_dns_soa_expiry',
default=86400,
help="""
Expiry interval (in seconds) for LDAP DNS driver Start of Authority
This is the interval for which a secondary/slave DNS server holds the zone
information before it is no longer considered authoritative.
"""),
cfg.IntOpt('ldap_dns_soa_minimum',
default=7200,
help="""
Minimum interval (in seconds) for LDAP DNS driver Start of Authority
This is the minimum time-to-live that applies to all resource records in the
zone file. The value is supplied to other servers to indicate how long they
should keep the data in their caches.
"""),
]
driver_opts = [
cfg.StrOpt('network_driver',
default='nova.network.linux_net',
deprecated_for_removal=True,
deprecated_since='15.0.0',
deprecated_reason="""
nova-network is deprecated, as are any related configuration options.
""",
help="""
Driver to use for network creation.
Network driver initializes (creates bridges and so on) only when the
first VM lands on a host node. All network managers configure the
network using network drivers. The driver is not tied to any particular
network manager.
The default Linux driver implements vlans, bridges, and iptables rules
using linux utilities.
Note that this option is only used when using nova-network instead
of Neutron in your deployment.
Related options:
* use_neutron
"""),
cfg.StrOpt('firewall_driver',
help="""
Firewall driver to use with ``nova-network`` service.
This option only applies when using the ``nova-network`` service. When using
other networking services, such as Neutron, this should be set to
``nova.virt.firewall.NoopFirewallDriver``.
If unset (the default), this will default to the hypervisor-specified
default driver.
Possible values:
* nova.virt.firewall.IptablesFirewallDriver
* nova.virt.firewall.NoopFirewallDriver
* nova.virt.libvirt.firewall.IptablesFirewallDriver
* [...]
Related options:
* ``use_neutron``: This must be set to ``False`` to enable ``nova-network``
networking
"""),
cfg.BoolOpt('allow_same_net_traffic',
default=True,
help="""
Determine whether to allow network traffic from same network.
When set to true, hosts on the same subnet are not filtered and are allowed
to pass all types of traffic between them. On a flat network, this allows
all instances from all projects to communicate without filtering. With VLAN
networking, this allows access between instances within the same project.
This option only applies when using the ``nova-network`` service. When using
other networking services, such as Neutron, security groups or other
approaches should be used.
Possible values:
* True: Network traffic should be allowed to pass between all instances on the
same network, regardless of their tenant and security policies
* False: Network traffic should not be allowed to pass between instances unless
it is unblocked in a security group
Related options:
* ``use_neutron``: This must be set to ``False`` to enable ``nova-network``
networking
* ``firewall_driver``: This must be set to
``nova.virt.libvirt.firewall.IptablesFirewallDriver`` to ensure the
libvirt firewall driver is enabled.
"""),
]
rpcapi_opts = [
cfg.StrOpt('network_topic',
default='network',
deprecated_for_removal=True,
deprecated_since='15.0.0',
deprecated_reason="""
There is no need to let users choose the RPC topic for all services - there
is little gain from this. Furthermore, it makes it really easy to break Nova
by using this option.
""",
help='The topic network nodes listen on'),
cfg.BoolOpt('multi_host',
default=False,
deprecated_for_removal=True,
deprecated_since='15.0.0',
deprecated_reason="""
nova-network is deprecated, as are any related configuration options.
""",
help="""
Default value for multi_host in networks.
nova-network service can operate in a multi-host or single-host mode.
In multi-host mode each compute node runs a copy of nova-network and the
instances on that compute node use the compute node as a gateway to the
Internet. Whereas in single-host mode, a central server runs the nova-network
service. All compute nodes forward traffic from the instances to the
cloud controller which then forwards traffic to the Internet.
If this option is set to true, some RPC network calls will be sent directly
to the host.
Note that this option is only used when using nova-network instead of
Neutron in your deployment.
Related options:
* use_neutron
""")
]
ALL_DEFAULT_OPTS = (linux_net_opts + network_opts + ldap_dns_opts
+ rpcapi_opts + driver_opts)
def register_opts(conf):
conf.register_opts(ALL_DEFAULT_OPTS)
def list_opts():
return {"DEFAULT": ALL_DEFAULT_OPTS}
| 30.547133
| 79
| 0.71285
|
627681bd796daa7a19310fc7d6f540d4487a0968
| 1,540
|
py
|
Python
|
tests/python/pants_test/backend/jvm/targets/test_jvm_target.py
|
WamBamBoozle/pants
|
98cadfa1a5d337146903eb66548cfe955f2627b3
|
[
"Apache-2.0"
] | null | null | null |
tests/python/pants_test/backend/jvm/targets/test_jvm_target.py
|
WamBamBoozle/pants
|
98cadfa1a5d337146903eb66548cfe955f2627b3
|
[
"Apache-2.0"
] | null | null | null |
tests/python/pants_test/backend/jvm/targets/test_jvm_target.py
|
WamBamBoozle/pants
|
98cadfa1a5d337146903eb66548cfe955f2627b3
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from textwrap import dedent
from pants.backend.core.register import build_file_aliases as register_core
from pants.backend.jvm.targets.jvm_target import JvmTarget
from pants.base.address import BuildFileAddress, SyntheticAddress
from pants.base.build_file_aliases import BuildFileAliases
from pants_test.base_test import BaseTest
class JvmTargetTest(BaseTest):
@property
def alias_groups(self):
return register_core().merge(BuildFileAliases.create(
targets={
# We don't usually have an alias for 'jvm_target' in BUILD files. It's being added here
# to make it easier to write a test.
'jvm_target': JvmTarget,
}))
def test_traversable_dependency_specs(self):
build_file = self.add_to_build_file('BUILD', dedent('''
jvm_target(name='foo',
resources=[':resource_target'],
)
resources(name='resource_target',
sources=['foo.txt'],
)
'''))
self.build_graph.inject_address_closure(BuildFileAddress(build_file, 'foo'))
target = self.build_graph.get_target(SyntheticAddress.parse('//:foo'))
self.assertSequenceEqual([], list(target.traversable_specs))
self.assertSequenceEqual([':resource_target'], list(target.traversable_dependency_specs))
| 35.813953
| 95
| 0.74026
|
691dac29df6f8981d524643f603bd4ae5226ae0b
| 9,091
|
py
|
Python
|
latent_programmer/tasks/robust_fill/dataset/write_data.py
|
dshirron/google-research
|
0c0f6ce46af61250e0604bb5a6aa481d849e66dc
|
[
"Apache-2.0"
] | 1
|
2022-01-19T23:35:59.000Z
|
2022-01-19T23:35:59.000Z
|
latent_programmer/tasks/robust_fill/dataset/write_data.py
|
dshirron/google-research
|
0c0f6ce46af61250e0604bb5a6aa481d849e66dc
|
[
"Apache-2.0"
] | null | null | null |
latent_programmer/tasks/robust_fill/dataset/write_data.py
|
dshirron/google-research
|
0c0f6ce46af61250e0604bb5a6aa481d849e66dc
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""Write supervised training tasks to TFRecord dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import random
import sys
from absl import app
from absl import flags
import numpy as np
import tensorflow.compat.v2 as tf
from latent_programmer.tasks.robust_fill import dsl
from latent_programmer.tasks.robust_fill import sample_random
from latent_programmer.tasks.robust_fill import tokens as dsl_tokens
from latent_programmer.tasks.robust_fill.dataset import experiment as exp_module
sys.path.append('../../../../')
gfile = tf.io.gfile
FLAGS = flags.FLAGS
flags.DEFINE_integer('num_work_units', 1, 'Total number of work units.')
flags.DEFINE_integer('seed', None, 'Fixed random seed.')
flags.DEFINE_integer('num_tasks', 100000, 'Number of tasks to write.')
flags.DEFINE_integer('num_strings_per_task', 4,
'Number of input/output strings per task.')
flags.DEFINE_integer('max_expressions', 10,
'Maximum number of expressions in program.')
flags.DEFINE_integer('min_expressions', 1,
'Minimum number of expressions in program.')
flags.DEFINE_integer('max_input_length', 20,
'Maximum number of characters in input strings.')
flags.DEFINE_string('save_dir', '/tmp/decomposition',
'Directory to save results to.')
flags.DEFINE_boolean('split_program', False,
'Whether to split program by partial program.')
flags.DEFINE_boolean('split_outputs', False,
'Whether to split outputs by partial program.')
flags.DEFINE_enum('split', None, ['train', 'valid', 'test', 'finetune'],
'Which split of the dataset to generate.')
flags.DEFINE_enum('experiment', 'NONE', [e.name for e in exp_module.Experiment],
'Kind of experiment (see document for descriptions).')
def _bytes_feature(value):
"""Returns a bytes_list from a string / byte."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def serialize_example(task, token_id_table):
"""Creates a tf.Example message to be written to a file."""
# Create a dictionary mapping the feature name to the tf.Example-compatible
# data type.
io_string = ''
if FLAGS.split_outputs:
for inp in task.inputs:
io_string += inp + '<'
for expr in task.program.expressions:
io_string += expr(inp) + '|'
io_string = io_string[:-1] + '>'
io_string = io_string[:-1]
else:
for inp, out in zip(task.inputs, task.outputs):
io_string += inp + '<' + out + '>'
io_string = io_string[:-1]
program_string = ''
if FLAGS.split_program:
for expr in task.program.expressions:
program_string += ' '.join(map(str, expr.encode(token_id_table)))
program_string += '|'
program_string = program_string[:-1]
else:
program_string = ' '.join(
map(str, task.program.encode(token_id_table)[:-1]))
feature = {
'i/o': _bytes_feature(str.encode(io_string)),
'program_encoding': _bytes_feature(str.encode(program_string)),
}
# Create a Features message using tf.train.Example.
example_proto = tf.train.Example(features=tf.train.Features(feature=feature))
return example_proto.SerializeToString()
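# --- Illustrative note (added for clarity; not part of the original script).
# With --split_outputs and --split_program disabled, a task with hypothetical
# inputs ['ab', 'cd'] and outputs ['AB', 'CD'] would serialize its 'i/o'
# feature as the single string 'ab<AB>cd<CD' (the trailing '>' is stripped),
# while 'program_encoding' holds the space-separated program token ids.
# Only the delimiter scheme ('<', '>', '|') comes from the code above; the
# concrete example strings are made up for illustration.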
def generate_task_for_experiment(experiment, is_train):
"""Generates a random task for a given experiment and dataset split."""
if experiment == exp_module.Experiment.SWITCH_CONCEPT_ORDER.name:
# Handle this case separately because it's the most different from the rest.
return sample_random.random_task_switch_concept_order(
max_k=3,
max_input_tokens=5,
max_input_length=FLAGS.max_input_length,
num_examples=FLAGS.num_strings_per_task,
min_expressions=2,
max_expressions=6,
is_train=is_train)
# Still pass in max_expressions, min_expressions, sampler_pool,
# valid_num_expressions_fn, and keep_fn.
random_task_partial = functools.partial(
sample_random.random_task,
max_k=3,
max_input_tokens=5,
max_input_length=FLAGS.max_input_length,
num_examples=FLAGS.num_strings_per_task)
valid_num_expressions_fn = None
keep_fn = None
if experiment == exp_module.Experiment.LENGTH_1_6_TO_7_10.name:
min_expressions = 1 if is_train else 7
max_expressions = 6 if is_train else 10
sampler_pool = sample_random.SAMPLER_POOL_ALL
elif experiment == exp_module.Experiment.LENGTH_6_TO_1_10.name:
min_expressions = 6 if is_train else 1
max_expressions = 6 if is_train else 10
sampler_pool = sample_random.SAMPLER_POOL_ALL
if not is_train:
valid_num_expressions_fn = lambda n: n != 6
elif experiment == exp_module.Experiment.LENGTH_1_TO_2_6.name:
min_expressions = 1 if is_train else 2
max_expressions = 1 if is_train else 6
sampler_pool = sample_random.SAMPLER_POOL_ALL
elif experiment == exp_module.Experiment.COMPOSE_DIFFERENT_CONCEPTS.name:
min_expressions = 2
max_expressions = 6
if is_train:
sampler_pool = random.choice([sample_random.ALL_SUBSTRING,
sample_random.SAMPLER_POOL_MODIFY_OR_CONST])
else:
sampler_pool = [sample_random.ALL_SUBSTRING,
sample_random.SAMPLER_POOL_MODIFY_OR_CONST]
keep_fn = lambda c: ( # pylint: disable=g-long-lambda
any(isinstance(e, dsl.Substring) for e in c.expressions) and
any(isinstance(e, (dsl.Modification, dsl.ConstStr))
for e in c.expressions))
elif experiment == exp_module.Experiment.COMPOSE_NEW_OP.name:
if is_train:
if random.random() < 0.25:
min_expressions = 1
max_expressions = 1
sampler_pool = sample_random.SAMPLER_POOL_ONLY_COMPOSE
else:
min_expressions = 2
max_expressions = 6
sampler_pool = sample_random.SAMPLER_POOL_NO_COMPOSE
else:
min_expressions = 2
max_expressions = 6
sampler_pool = sample_random.SAMPLER_POOL_ALL
keep_fn = lambda c: any(isinstance(e, dsl.Compose) for e in c.expressions)
elif experiment == exp_module.Experiment.EXTEND_OP_FUNCTIONALITY.name:
min_expressions = 1
max_expressions = 6
sampler_pool = (sample_random.SAMPLER_POOL_NO_COMPOSE_SUBSTRING if is_train
else sample_random.SAMPLER_POOL_ALL)
if not is_train:
keep_fn = lambda c: any( # pylint: disable=g-long-lambda
isinstance(e, dsl.Compose) and
isinstance(e.modification_or_substring, dsl.Substring)
for e in c.expressions)
else:
raise ValueError('Unhandled experiment name: {}'.format(experiment))
if is_train:
# These are only used for test.
assert valid_num_expressions_fn is None and keep_fn is None
return random_task_partial(
max_expressions=max_expressions,
min_expressions=min_expressions,
sampler_pool=sampler_pool,
valid_num_expressions_fn=valid_num_expressions_fn,
keep_fn=keep_fn)
def main(_):
tf.enable_v2_behavior()
if FLAGS.seed is not None:
tf.random.set_seed(FLAGS.seed)
np.random.seed(FLAGS.seed)
random.seed(FLAGS.seed)
_, token_id_table = dsl_tokens.build_token_tables()
if not gfile.isdir(FLAGS.save_dir):
gfile.makedirs(FLAGS.save_dir)
worker_fname = os.path.join(
FLAGS.save_dir,
'program_tasks_{}.tf_records-00000-of-00001'.format(FLAGS.split))
# Write the `tf.Example` observations to the file.
with tf.io.TFRecordWriter(worker_fname) as writer:
for i in range(FLAGS.num_tasks):
if FLAGS.experiment == exp_module.Experiment.NONE.name:
task = sample_random.random_task(
max_expressions=FLAGS.max_expressions,
min_expressions=FLAGS.min_expressions,
max_k=3,
max_input_tokens=5,
max_input_length=FLAGS.max_input_length,
num_examples=FLAGS.num_strings_per_task)
else:
if FLAGS.split in ['train', 'valid']:
is_train = True
elif FLAGS.split == 'test':
is_train = False
elif FLAGS.split == 'finetune':
is_train = bool(i % 2)
else:
raise ValueError('Unhandled split: {}'.format(FLAGS.split))
task = generate_task_for_experiment(FLAGS.experiment, is_train)
example = serialize_example(task, token_id_table)
writer.write(example)
if __name__ == '__main__':
app.run(main)
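# --- Illustrative invocation (added for clarity; not part of the original
# script). The flag values are hypothetical; only the flag names come from
# the DEFINE_* declarations above.
#
#   python write_data.py --save_dir=/tmp/decomposition --split=train \
#       --experiment=LENGTH_1_6_TO_7_10 --num_tasks=1000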
| 35.373541
| 80
| 0.701243
|
09d60c1573e1009122d66de088204d02f73f0563
| 2,906
|
py
|
Python
|
src/py42/modules/alerts.py
|
kboghdady/py42
|
c91486d98a177c774b14991d89b44c7501f491ff
|
[
"MIT"
] | 1
|
2020-08-18T22:00:22.000Z
|
2020-08-18T22:00:22.000Z
|
src/py42/modules/alerts.py
|
kboghdady/py42
|
c91486d98a177c774b14991d89b44c7501f491ff
|
[
"MIT"
] | null | null | null |
src/py42/modules/alerts.py
|
kboghdady/py42
|
c91486d98a177c774b14991d89b44c7501f491ff
|
[
"MIT"
] | 1
|
2021-05-10T23:33:34.000Z
|
2021-05-10T23:33:34.000Z
|
from py42.modules.alertrules import AlertRulesModule
class AlertsModule(object):
def __init__(self, microservice_client_factory, alert_rules_module=None):
self._microservice_client_factory = microservice_client_factory
self._alert_rules_module = alert_rules_module or AlertRulesModule(
self._microservice_client_factory
)
@property
def rules(self):
"""A collection of methods for managing alert rules.
Returns:
:class:`py42.modules.alertrules.AlertRulesModule`
"""
return self._alert_rules_module
def search(self, query):
"""Searches alerts using the given :class:`py42.sdk.queries.alerts.alert_query.AlertQuery`.
Args:
query (:class:`py42.sdk.queries.alerts.alert_query.AlertQuery`): An alert query.
See the :ref:`Executing Searches User Guide <anchor_search_alerts>` to learn more
about how to construct a query.
Returns:
:class:`py42.response.Py42Response`: A response containing the alerts that match the given
query.
"""
alert_client = self._microservice_client_factory.get_alerts_client()
return alert_client.search(query)
def get_details(self, alert_ids):
"""Gets the details for the alerts with the given IDs, including the file event query that,
when passed into a search, would result in events that could have triggered the alerts.
Args:
alert_ids (iter[str]): The identification numbers of the alerts for which you
want details.
Returns:
:class:`py42.response.Py42Response`: A response containing the alert details.
"""
alert_client = self._microservice_client_factory.get_alerts_client()
return alert_client.get_details(alert_ids)
def resolve(self, alert_ids, reason=None):
"""Resolves the alerts with the given IDs.
Args:
alert_ids (iter[str]): The identification numbers for the alerts to resolve.
reason (str, optional): The reason the alerts are now resolved. Defaults to None.
Returns:
:class:`py42.response.Py42Response`
"""
alert_client = self._microservice_client_factory.get_alerts_client()
return alert_client.resolve(alert_ids, reason=reason)
def reopen(self, alert_ids, reason=None):
"""Reopens the resolved alerts with the given IDs.
Args:
alert_ids (iter[str]): The identification numbers for the alerts to reopen.
reason (str, optional): The reason the alerts are reopened. Defaults to None.
Returns:
:class:`py42.response.Py42Response`
"""
alert_client = self._microservice_client_factory.get_alerts_client()
return alert_client.reopen(alert_ids, reason=reason)
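# --- Illustrative usage sketch (added for clarity; not part of the original
# module). It assumes an already-initialised SDK object that exposes this
# module as `sdk.alerts` and a pre-built AlertQuery named `query`; those
# names, and the response field names, are assumptions.
#
#   response = sdk.alerts.search(query)
#   alert_ids = [alert["id"] for alert in response["alerts"]]
#   sdk.alerts.resolve(alert_ids, reason="triaged as benign")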
| 39.27027
| 102
| 0.668961
|
c5a94cdd097e78b5e8e5daf47dcd88e43dc98a72
| 697
|
py
|
Python
|
wien2kparser/__init__.py
|
ondracka/nomad-parser-wien2k
|
f108ea41f49b78e9494423204eebb385b450da2a
|
[
"Apache-2.0"
] | null | null | null |
wien2kparser/__init__.py
|
ondracka/nomad-parser-wien2k
|
f108ea41f49b78e9494423204eebb385b450da2a
|
[
"Apache-2.0"
] | 3
|
2021-05-18T19:00:39.000Z
|
2021-07-14T15:17:17.000Z
|
wien2kparser/__init__.py
|
ondracka/nomad-parser-wien2k
|
f108ea41f49b78e9494423204eebb385b450da2a
|
[
"Apache-2.0"
] | 1
|
2021-05-19T19:32:34.000Z
|
2021-05-19T19:32:34.000Z
|
#
# Copyright The NOMAD Authors.
#
# This file is part of NOMAD. See https://nomad-lab.eu for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .wien2k_parser import Wien2kParser
| 34.85
| 74
| 0.757532
|
9e3f7d6c223ed7fb01a29ba05dd862b065379481
| 12,743
|
py
|
Python
|
src/twisted/test/test_stdio.py
|
apjanke/twisted
|
22f949f7ce187513f0c218b73186c8a73baa00b4
|
[
"Unlicense",
"MIT"
] | 1
|
2021-01-03T01:54:14.000Z
|
2021-01-03T01:54:14.000Z
|
src/twisted/test/test_stdio.py
|
zerospam/twisted
|
e23b5e2040a4d643bc6a43785621358569886a0d
|
[
"MIT",
"Unlicense"
] | null | null | null |
src/twisted/test/test_stdio.py
|
zerospam/twisted
|
e23b5e2040a4d643bc6a43785621358569886a0d
|
[
"MIT",
"Unlicense"
] | null | null | null |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.stdio}.
@var properEnv: A copy of L{os.environ} which has L{bytes} keys/values on POSIX
platforms and native L{str} keys/values on Windows.
"""
import os
import sys
import itertools
from unittest import skipIf
from twisted.trial.unittest import SkipTest, TestCase
from twisted.python import filepath, log
from twisted.python.reflect import requireModule
from twisted.python.runtime import platform
from twisted.internet import error, defer, protocol, stdio, reactor
from twisted.test.test_tcp import ConnectionLostNotifyingProtocol
# A short string which is intended to appear here and nowhere else,
particularly not in any random garbage output CPython unavoidably
# generates (such as in warning text and so forth). This is searched
# for in the output from stdio_test_lastwrite and if it is found at
# the end, the functionality works.
UNIQUE_LAST_WRITE_STRING = b"xyz123abc Twisted is great!"
properEnv = dict(os.environ)
properEnv["PYTHONPATH"] = os.pathsep.join(sys.path)
class StandardIOTestProcessProtocol(protocol.ProcessProtocol):
"""
Test helper for collecting output from a child process and notifying
something when it exits.
@ivar onConnection: A L{defer.Deferred} which will be called back with
L{None} when the connection to the child process is established.
@ivar onCompletion: A L{defer.Deferred} which will be errbacked with the
failure associated with the child process exiting when it exits.
@ivar onDataReceived: A L{defer.Deferred} which will be called back with
this instance whenever C{childDataReceived} is called, or L{None} to
suppress these callbacks.
@ivar data: A C{dict} mapping file descriptors to strings containing all
bytes received from the child process on each file descriptor.
"""
onDataReceived = None
def __init__(self):
self.onConnection = defer.Deferred()
self.onCompletion = defer.Deferred()
self.data = {}
def connectionMade(self):
self.onConnection.callback(None)
def childDataReceived(self, name, bytes):
"""
Record all bytes received from the child process in the C{data}
dictionary. Fire C{onDataReceived} if it is not L{None}.
"""
self.data[name] = self.data.get(name, b"") + bytes
if self.onDataReceived is not None:
d, self.onDataReceived = self.onDataReceived, None
d.callback(self)
def processEnded(self, reason):
self.onCompletion.callback(reason)
class StandardInputOutputTests(TestCase):
if platform.isWindows() and requireModule("win32process") is None:
skip = (
"On windows, spawnProcess is not available in the "
"absence of win32process."
)
def _spawnProcess(self, proto, sibling, *args, **kw):
"""
Launch a child Python process and communicate with it using the
given ProcessProtocol.
@param proto: A L{ProcessProtocol} instance which will be connected
to the child process.
@param sibling: The basename of a file containing the Python program
to run in the child process.
@param *args: strings which will be passed to the child process on
the command line as C{argv[2:]}.
@param **kw: additional arguments to pass to L{reactor.spawnProcess}.
@return: The L{IProcessTransport} provider for the spawned process.
"""
args = [
sys.executable,
b"-m",
b"twisted.test." + sibling,
reactor.__class__.__module__,
] + list(args)
return reactor.spawnProcess(proto, sys.executable, args, env=properEnv, **kw)
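# Note (added for clarity): for sibling=b"stdio_test_write" the argv built
# above is roughly [sys.executable, b'-m', b'twisted.test.stdio_test_write',
# '<module name of the parent's reactor class>']; the trailing argument tells
# the child script which reactor the parent process is using.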
def _requireFailure(self, d, callback):
def cb(result):
self.fail("Process terminated with non-Failure: %r" % (result,))
def eb(err):
return callback(err)
return d.addCallbacks(cb, eb)
def test_loseConnection(self):
"""
Verify that a protocol connected to L{StandardIO} can disconnect
itself using C{transport.loseConnection}.
"""
errorLogFile = self.mktemp()
log.msg("Child process logging to " + errorLogFile)
p = StandardIOTestProcessProtocol()
d = p.onCompletion
self._spawnProcess(p, b"stdio_test_loseconn", errorLogFile)
def processEnded(reason):
# Copy the child's log to ours so it's more visible.
with open(errorLogFile, "r") as f:
for line in f:
log.msg("Child logged: " + line.rstrip())
self.failIfIn(1, p.data)
reason.trap(error.ProcessDone)
return self._requireFailure(d, processEnded)
def test_readConnectionLost(self):
"""
When stdin is closed and the protocol connected to it implements
L{IHalfCloseableProtocol}, the protocol's C{readConnectionLost} method
is called.
"""
errorLogFile = self.mktemp()
log.msg("Child process logging to " + errorLogFile)
p = StandardIOTestProcessProtocol()
p.onDataReceived = defer.Deferred()
def cbBytes(ignored):
d = p.onCompletion
p.transport.closeStdin()
return d
p.onDataReceived.addCallback(cbBytes)
def processEnded(reason):
reason.trap(error.ProcessDone)
d = self._requireFailure(p.onDataReceived, processEnded)
self._spawnProcess(p, b"stdio_test_halfclose", errorLogFile)
return d
def test_lastWriteReceived(self):
"""
Verify that a write made directly to stdout using L{os.write}
after StandardIO has finished is reliably received by the
process reading that stdout.
"""
p = StandardIOTestProcessProtocol()
# Note: the macOS bug which prompted the addition of this test
# is an apparent race condition involving non-blocking PTYs.
# Delaying the parent process significantly increases the
# likelihood of the race going the wrong way. If you need to
# fiddle with this code at all, uncommenting the next line
# will likely make your life much easier. It is commented out
# because it makes the test quite slow.
# p.onConnection.addCallback(lambda ign: __import__('time').sleep(5))
try:
self._spawnProcess(
p, b"stdio_test_lastwrite", UNIQUE_LAST_WRITE_STRING, usePTY=True
)
except ValueError as e:
# Some platforms don't work with usePTY=True
raise SkipTest(str(e))
def processEnded(reason):
"""
Asserts that the parent received the bytes written by the child
immediately after the child starts.
"""
self.assertTrue(
p.data[1].endswith(UNIQUE_LAST_WRITE_STRING),
"Received %r from child, did not find expected bytes." % (p.data,),
)
reason.trap(error.ProcessDone)
return self._requireFailure(p.onCompletion, processEnded)
def test_hostAndPeer(self):
"""
Verify that the transport of a protocol connected to L{StandardIO}
has C{getHost} and C{getPeer} methods.
"""
p = StandardIOTestProcessProtocol()
d = p.onCompletion
self._spawnProcess(p, b"stdio_test_hostpeer")
def processEnded(reason):
host, peer = p.data[1].splitlines()
self.assertTrue(host)
self.assertTrue(peer)
reason.trap(error.ProcessDone)
return self._requireFailure(d, processEnded)
def test_write(self):
"""
Verify that the C{write} method of the transport of a protocol
connected to L{StandardIO} sends bytes to standard out.
"""
p = StandardIOTestProcessProtocol()
d = p.onCompletion
self._spawnProcess(p, b"stdio_test_write")
def processEnded(reason):
self.assertEqual(p.data[1], b"ok!")
reason.trap(error.ProcessDone)
return self._requireFailure(d, processEnded)
def test_writeSequence(self):
"""
Verify that the C{writeSequence} method of the transport of a
protocol connected to L{StandardIO} sends bytes to standard out.
"""
p = StandardIOTestProcessProtocol()
d = p.onCompletion
self._spawnProcess(p, b"stdio_test_writeseq")
def processEnded(reason):
self.assertEqual(p.data[1], b"ok!")
reason.trap(error.ProcessDone)
return self._requireFailure(d, processEnded)
def _junkPath(self):
junkPath = self.mktemp()
with open(junkPath, "wb") as junkFile:
for i in range(1024):
junkFile.write(b"%d\n" % (i,))
return junkPath
def test_producer(self):
"""
Verify that the transport of a protocol connected to L{StandardIO}
is a working L{IProducer} provider.
"""
p = StandardIOTestProcessProtocol()
d = p.onCompletion
written = []
toWrite = list(range(100))
def connectionMade(ign):
if toWrite:
written.append(b"%d\n" % (toWrite.pop(),))
proc.write(written[-1])
reactor.callLater(0.01, connectionMade, None)
proc = self._spawnProcess(p, b"stdio_test_producer")
p.onConnection.addCallback(connectionMade)
def processEnded(reason):
self.assertEqual(p.data[1], b"".join(written))
self.assertFalse(
toWrite, "Connection lost with %d writes left to go." % (len(toWrite),)
)
reason.trap(error.ProcessDone)
return self._requireFailure(d, processEnded)
def test_consumer(self):
"""
Verify that the transport of a protocol connected to L{StandardIO}
is a working L{IConsumer} provider.
"""
p = StandardIOTestProcessProtocol()
d = p.onCompletion
junkPath = self._junkPath()
self._spawnProcess(p, b"stdio_test_consumer", junkPath)
def processEnded(reason):
with open(junkPath, "rb") as f:
self.assertEqual(p.data[1], f.read())
reason.trap(error.ProcessDone)
return self._requireFailure(d, processEnded)
@skipIf(
platform.isWindows(),
"StandardIO does not accept stdout as an argument to Windows. "
"Testing redirection to a file is therefore harder.",
)
def test_normalFileStandardOut(self):
"""
If L{StandardIO} is created with a file descriptor which refers to a
normal file (ie, a file from the filesystem), L{StandardIO.write}
writes bytes to that file. In particular, it does not immediately
consider the file closed or call its protocol's C{connectionLost}
method.
"""
onConnLost = defer.Deferred()
proto = ConnectionLostNotifyingProtocol(onConnLost)
path = filepath.FilePath(self.mktemp())
self.normal = normal = path.open("wb")
self.addCleanup(normal.close)
kwargs = dict(stdout=normal.fileno())
if not platform.isWindows():
# Make a fake stdin so that StandardIO doesn't mess with the *real*
# stdin.
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
kwargs["stdin"] = r
connection = stdio.StandardIO(proto, **kwargs)
# The reactor needs to spin a bit before it might have incorrectly
# decided stdout is closed. Use this counter to keep track of how
# much we've let it spin. If it closes before we expected, this
# counter will have a value that's too small and we'll know.
howMany = 5
count = itertools.count()
def spin():
for value in count:
if value == howMany:
connection.loseConnection()
return
connection.write(b"%d" % (value,))
break
reactor.callLater(0, spin)
reactor.callLater(0, spin)
# Once the connection is lost, make sure the counter is at the
# appropriate value.
def cbLost(reason):
self.assertEqual(next(count), howMany + 1)
self.assertEqual(
path.getContent(), b"".join(b"%d" % (i,) for i in range(howMany))
)
onConnLost.addCallback(cbLost)
return onConnLost
| 34.255376
| 87
| 0.626697
|
4bf265cae449fdd5292591073dc10436ae47c90e
| 24,084
|
py
|
Python
|
HICO-DET_Benchmark/Generate_HICO_detection_nis.py
|
enlighten0707/Transferable-Interactiveness-Network
|
5ffbf1d0779702225bac37d6cc5d3ddf9b17a1cd
|
[
"MIT"
] | 227
|
2019-03-23T16:17:29.000Z
|
2022-03-29T03:01:36.000Z
|
HICO-DET_Benchmark/Generate_HICO_detection_nis.py
|
enlighten0707/Transferable-Interactiveness-Network
|
5ffbf1d0779702225bac37d6cc5d3ddf9b17a1cd
|
[
"MIT"
] | 68
|
2019-03-27T06:28:55.000Z
|
2021-09-24T13:37:35.000Z
|
HICO-DET_Benchmark/Generate_HICO_detection_nis.py
|
enlighten0707/Transferable-Interactiveness-Network
|
5ffbf1d0779702225bac37d6cc5d3ddf9b17a1cd
|
[
"MIT"
] | 51
|
2019-03-23T12:29:15.000Z
|
2022-03-23T01:37:04.000Z
|
# --------------------------------------------------------
# Tensorflow TIN
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
"""
Change the HICO-DET detection results to the right format.
input args: python Generate_HICO_detection_nis.py (1:pkl_path) (2:hico_dir) (3:thres_no_inter) (4:thres_inter)
"""
import pickle
import shutil
import numpy as np
import scipy.io as sio
import os
import sys
import matplotlib
import matplotlib.pyplot as plth
import random
import HICO_Benchmark_Binary as rank
# all the no-interaction HOI indices in the HICO dataset
hoi_no_inter_all = [10,24,31,46,54,65,76,86,92,96,107,111,129,146,160,170,174,186,194,198,208,214,224,232,235,239,243,247,252,257,264,273,283,290,295,305,313,325,330,336,342,348,352,356,363,368,376,383,389,393,397,407,414,418,429,434,438,445,449,453,463,474,483,488,502,506,516,528,533,538,546,550,558,562,567,576,584,588,595,600]
# HOI index ranges corresponding to each object id in the HICO dataset
hoi_range = [(161, 170), (11, 24), (66, 76), (147, 160), (1, 10), (55, 65), (187, 194), (568, 576), (32, 46), (563, 567), (326, 330), (503, 506), (415, 418), (244, 247), (25, 31), (77, 86), (112, 129), (130, 146), (175, 186), (97, 107), (314, 325), (236, 239), (596, 600), (343, 348), (209, 214), (577, 584), (353, 356), (539, 546), (507, 516), (337, 342), (464, 474), (475, 483), (489, 502), (369, 376), (225, 232), (233, 235), (454, 463), (517, 528), (534, 538), (47, 54), (589, 595), (296, 305), (331, 336), (377, 383), (484, 488), (253, 257), (215, 224), (199, 208), (439, 445), (398, 407), (258, 264), (274, 283), (357, 363), (419, 429), (306, 313), (265, 273), (87, 92), (93, 96), (171, 174), (240, 243), (108, 111), (551, 558), (195, 198), (384, 389), (394, 397), (435, 438), (364, 368), (284, 290), (390, 393), (408, 414), (547, 550), (450, 453), (430, 434), (248, 252), (291, 295), (585, 588), (446, 449), (529, 533), (349, 352), (559, 562)]
# all image indices in the test set without any pair
all_remaining = set([20, 25, 54, 60, 66, 71, 74, 94, 154, 155, 184, 200, 229, 235, 242, 249, 273, 280, 289, 292, 315, 323, 328, 376, 400, 421, 432, 436, 461, 551, 554, 578, 613, 626, 639, 641, 642, 704, 705, 768, 773, 776, 796, 809, 827, 845, 850, 855, 862, 886, 901, 947, 957, 963, 965, 1003, 1011, 1014, 1028, 1042, 1044, 1057, 1090, 1092, 1097, 1099, 1119, 1171, 1180, 1231, 1241, 1250, 1346, 1359, 1360, 1391, 1420, 1450, 1467, 1495, 1498, 1545, 1560, 1603, 1605, 1624, 1644, 1659, 1673, 1674, 1677, 1709, 1756, 1808, 1845, 1847, 1849, 1859, 1872, 1881, 1907, 1910, 1912, 1914, 1953, 1968, 1979, 2039, 2069, 2106, 2108, 2116, 2126, 2142, 2145, 2146, 2154, 2175, 2184, 2218, 2232, 2269, 2306, 2308, 2316, 2323, 2329, 2390, 2397, 2406, 2425, 2463, 2475, 2483, 2494, 2520, 2576, 2582, 2591, 2615, 2624, 2642, 2646, 2677, 2703, 2707, 2712, 2717, 2763, 2780, 2781, 2818, 2830, 2833, 2850, 2864, 2873, 2913, 2961, 2983, 3021, 3040, 3042, 3049, 3057, 3066, 3082, 3083, 3111, 3112, 3122, 3157, 3200, 3204, 3229, 3293, 3309, 3328, 3341, 3373, 3393, 3423, 3439, 3449, 3471, 3516, 3525, 3537, 3555, 3616, 3636, 3653, 3668, 3681, 3709, 3718, 3719, 3733, 3737, 3744, 3756, 3762, 3772, 3780, 3784, 3816, 3817, 3824, 3855, 3865, 3885, 3891, 3910, 3916, 3918, 3919, 3933, 3949, 3980, 4009, 4049, 4066, 4089, 4112, 4143, 4154, 4200, 4222, 4243, 4254, 4257, 4259, 4266, 4269, 4273, 4308, 4315, 4320, 4331, 4343, 4352, 4356, 4369, 4384, 4399, 4411, 4424, 4428, 4445, 4447, 4466, 4477, 4482, 4492, 4529, 4534, 4550, 4566, 4596, 4605, 4606, 4620, 4648, 4710, 4718, 4734, 4771, 4773, 4774, 4801, 4807, 4811, 4842, 4845, 4849, 4874, 4886, 4887, 4907, 4926, 4932, 4948, 4960, 4969, 5000, 5039, 5042, 5105, 5113, 5159, 5161, 5174, 5183, 5197, 5214, 5215, 5216, 5221, 5264, 5273, 5292, 5293, 5353, 5438, 5447, 5452, 5465, 5468, 5492, 5498, 5520, 5543, 5551, 5575, 5581, 5605, 5617, 5623, 5671, 5728, 5759, 5766, 5777, 5799, 5840, 5853, 5875, 5883, 5886, 5898, 5919, 5922, 5941, 5948, 5960, 5962, 5964, 6034, 6041, 6058, 6080, 6103, 6117, 6134, 6137, 6138, 6163, 6196, 6206, 6210, 6223, 6228, 6232, 6247, 6272, 6273, 6281, 6376, 6409, 6430, 6438, 6473, 6496, 6595, 6608, 6635, 6678, 6687, 6692, 6695, 6704, 6712, 6724, 6757, 6796, 6799, 6815, 6851, 6903, 6908, 6914, 6948, 6957, 7065, 7071, 7073, 7089, 7099, 7102, 7114, 7147, 7169, 7185, 7219, 7226, 7232, 7271, 7285, 7315, 7323, 7341, 7378, 7420, 7433, 7437, 7467, 7489, 7501, 7513, 7514, 7523, 7534, 7572, 7580, 7614, 7619, 7625, 7658, 7667, 7706, 7719, 7727, 7752, 7813, 7826, 7829, 7868, 7872, 7887, 7897, 7902, 7911, 7936, 7942, 7945, 8032, 8034, 8042, 8044, 8092, 8101, 8156, 8167, 8175, 8176, 8205, 8234, 8237, 8244, 8301, 8316, 8326, 8350, 8362, 8385, 8441, 8463, 8479, 8534, 8565, 8610, 8623, 8651, 8671, 8678, 8689, 8707, 8735, 8761, 8763, 8770, 8779, 8800, 8822, 8835, 8923, 8942, 8962, 8970, 8984, 9010, 9037, 9041, 9122, 9136, 9140, 9147, 9164, 9165, 9166, 9170, 9173, 9174, 9175, 9185, 9186, 9200, 9210, 9211, 9217, 9218, 9246, 9248, 9249, 9250, 9254, 9307, 9332, 9337, 9348, 9364, 9371, 9376, 9379, 9389, 9404, 9405, 9408, 9415, 9416, 9417, 9418, 9419, 9421, 9424, 9433, 9434, 9493, 9501, 9505, 9519, 9520, 9521, 9522, 9526, 9529, 9531, 9637, 9654, 9655, 9664, 9686, 9688, 9701, 9706, 9709, 9712, 9716, 9717, 9718, 9731, 9746, 9747, 9748, 9753, 9765])
pair_total_num = 999999
binary_score_nointer, binary_score_inter, a_pair, b_pair, c_pair = rank.cal_rank_600()
pair_is_del = np.zeros(pair_total_num, dtype = 'float32')
pair_in_the_result = np.zeros(9999, dtype = 'float32')
def getSigmoid(b,c,d,x,a=6):
e = 2.718281828459
return a/(1+e**(b-c*x))+d
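# --- Illustrative note (added for clarity; not part of the original script).
# getSigmoid evaluates a / (1 + e**(b - c*x)) + d, a scaled logistic curve in
# the detection score x. With the default parameters (10, 1.4, 0) used below,
# the factor grows monotonically with x, so confident human/object detections
# are weighted more heavily than weak ones (values are approximate):
#   getSigmoid(10, 1.4, 0, 0.1)  ->  ~3.1e-4
#   getSigmoid(10, 1.4, 0, 0.9)  ->  ~9.6e-4   (about 3x larger)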
def save_HICO(HICO, HICO_dir, thres_no_inter, thres_inter, classid, begin, finish):
all_boxes = []
possible_hoi_range = hoi_range[classid - 1]
num_delete_pair_a = 0
num_delete_pair_b = 0
num_delete_pair_c = 0
for i in range(finish - begin + 1): # for every verb, iteration all the pkl file
total = []
score = []
pair_id = 0
for key, value in HICO.iteritems():
for element in value:
if element[2] == classid:
temp = []
temp.append(element[0].tolist()) # Human box
temp.append(element[1].tolist()) # Object box
temp.append(int(key)) # image id
temp.append(int(i)) # action id (0-599)
human_score = element[4]
object_score = element[5]
d_score = binary_score_inter[pair_id]
d_score_noi = binary_score_nointer[pair_id]
# you could change the parameter of NIS (sigmoid function) here
# use (10, 1.4, 0) as the default
score_old = element[3][begin - 1 + i] * getSigmoid(10,1.4,0,element[4]) * getSigmoid(10,1.4,0,element[5])
hoi_num = begin - 1 + i
score_new = score_old
if classid == 63:
thres_no_inter = 0.95
thres_inter = 0.15
elif classid == 43:
thres_no_inter = 0.85
thres_inter = 0.1
elif classid == 57:
thres_no_inter = 0.85
thres_inter = 0.2
elif classid == 48:
thres_no_inter = 0.85
thres_inter = 0.2
elif classid == 41:
thres_no_inter = 0.85
thres_inter = 0.15
elif classid == 2:
thres_inter = 0.2
thres_no_inter = 0.85
elif classid == 4:
thres_inter = 0.15
thres_no_inter = 0.85
elif classid == 31:
thres_inter = 0.1
thres_no_inter = 0.85
elif classid == 19:
thres_inter = 0.2
thres_no_inter = 0.85
elif classid == 1:
thres_inter = 0.05
thres_no_inter = 0.85
elif classid == 11:
thres_inter = 0.15
thres_no_inter = 0.85
# if the no-interaction score D[0] > thres_no_inter and the interaction score D[1] < thres_inter, suppress this pair (NIS)
if (d_score_noi > thres_no_inter) and (d_score < thres_inter) and not(int(key) in all_remaining):
if not((hoi_num + 1) in hoi_no_inter_all): # only suppress the 520 interaction HOI scores; no-interaction HOIs are kept
if (a_pair[pair_id] == 1) and (pair_is_del[pair_id] == 0):
num_delete_pair_a += 1
pair_is_del[pair_id] = 1
elif (b_pair[pair_id] == 1) and (pair_is_del[pair_id] == 0):
num_delete_pair_b += 1
pair_is_del[pair_id] = 1
elif (c_pair[pair_id] == 1) and (pair_is_del[pair_id] == 0):
num_delete_pair_c += 1
pair_is_del[pair_id] = 1
pair_id += 1
continue
temp.append(score_new)
total.append(temp)
score.append(score_new)
if not(int(key) in all_remaining):
pair_id += 1
idx = np.argsort(score, axis=0)[::-1]
for i_idx in range(min(len(idx),19999)):
all_boxes.append(total[idx[i_idx]])
# save the detection result in .mat file
savefile = os.path.join(HICO_dir, 'detections_' + str(classid).zfill(2) + '.mat')
if os.path.exists(savefile):
os.remove(savefile)
sio.savemat(savefile, {'all_boxes':all_boxes})
print('class',classid,'finished')
num_delete_inter = num_delete_pair_a + num_delete_pair_b
return num_delete_inter, num_delete_pair_c
def Generate_HICO_detection(output_file, HICO_dir, thres_no_inter,thres_inter):
if not os.path.exists(HICO_dir):
os.makedirs(HICO_dir)
HICO = pickle.load( open( output_file, "rb" ) )
# del_i and del_ni
del_i = 0
del_ni = 0
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 1 ,161, 170)
del_i += num_del_i
del_ni += num_del_no_i
# 1 person
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 2 ,11, 24 )
del_i += num_del_i
del_ni += num_del_no_i
# 2 bicycle
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 3 ,66, 76 )
del_i += num_del_i
del_ni += num_del_no_i
# 3 car
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 4 ,147, 160)
del_i += num_del_i
del_ni += num_del_no_i
# 4 motorcycle
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 5 ,1, 10 )
del_i += num_del_i
del_ni += num_del_no_i
# 5 airplane
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 6 ,55, 65 )
del_i += num_del_i
del_ni += num_del_no_i
# 6 bus
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 7 ,187, 194)
del_i += num_del_i
del_ni += num_del_no_i
# 7 train
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 8 ,568, 576)
del_i += num_del_i
del_ni += num_del_no_i
# 8 truck
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 9 ,32, 46 )
del_i += num_del_i
del_ni += num_del_no_i
# 9 boat
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 10,563, 567)
del_i += num_del_i
del_ni += num_del_no_i
# 10 traffic light
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 11,326,330)
del_i += num_del_i
del_ni += num_del_no_i
# 11 fire_hydrant
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 12,503,506)
del_i += num_del_i
del_ni += num_del_no_i
# 12 stop_sign
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 13,415,418)
del_i += num_del_i
del_ni += num_del_no_i
# 13 parking_meter
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 14,244,247)
del_i += num_del_i
del_ni += num_del_no_i
# 14 bench
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 15,25, 31)
del_i += num_del_i
del_ni += num_del_no_i
# 15 bird
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 16,77, 86)
del_i += num_del_i
del_ni += num_del_no_i
# 16 cat
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 17,112,129)
del_i += num_del_i
del_ni += num_del_no_i
# 17 dog
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 18,130,146)
del_i += num_del_i
del_ni += num_del_no_i
# 18 horse
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 19,175,186)
del_i += num_del_i
del_ni += num_del_no_i
# 19 sheep
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 20,97,107)
del_i += num_del_i
del_ni += num_del_no_i
# 20 cow
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 21,314,325)
del_i += num_del_i
del_ni += num_del_no_i
# 21 elephant
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 22,236,239)
del_i += num_del_i
del_ni += num_del_no_i
# 22 bear
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 23,596,600)
del_i += num_del_i
del_ni += num_del_no_i
# 23 zebra
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 24,343,348)
del_i += num_del_i
del_ni += num_del_no_i
# 24 giraffe
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 25,209,214)
del_i += num_del_i
del_ni += num_del_no_i
# 25 backpack
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 26,577,584)
del_i += num_del_i
del_ni += num_del_no_i
# 26 umbrella
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 27,353,356)
del_i += num_del_i
del_ni += num_del_no_i
# 27 handbag
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 28,539,546)
del_i += num_del_i
del_ni += num_del_no_i
# 28 tie
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 29,507,516)
del_i += num_del_i
del_ni += num_del_no_i
# 29 suitcase
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 30,337,342)
del_i += num_del_i
del_ni += num_del_no_i
# 30 Frisbee
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 31,464,474)
del_i += num_del_i
del_ni += num_del_no_i
# 31 skis
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 32,475,483)
del_i += num_del_i
del_ni += num_del_no_i
# 32 snowboard
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 33,489,502)
del_i += num_del_i
del_ni += num_del_no_i
# 33 sports_ball
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 34,369,376)
del_i += num_del_i
del_ni += num_del_no_i
# 34 kite
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 35,225,232)
del_i += num_del_i
del_ni += num_del_no_i
# 35 baseball_bat
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 36,233,235)
del_i += num_del_i
del_ni += num_del_no_i
# 36 baseball_glove
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 37,454,463)
del_i += num_del_i
del_ni += num_del_no_i
# 37 skateboard
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 38,517,528)
del_i += num_del_i
del_ni += num_del_no_i
# 38 surfboard
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 39,534,538)
del_i += num_del_i
del_ni += num_del_no_i
# 39 tennis_racket
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 40,47,54)
del_i += num_del_i
del_ni += num_del_no_i
# 40 bottle
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 41,589,595)
del_i += num_del_i
del_ni += num_del_no_i
# 41 wine_glass
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 42,296,305)
del_i += num_del_i
del_ni += num_del_no_i
# 42 cup
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 43,331,336)
del_i += num_del_i
del_ni += num_del_no_i
# 43 fork
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 44,377,383)
del_i += num_del_i
del_ni += num_del_no_i
# 44 knife
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 45,484,488)
del_i += num_del_i
del_ni += num_del_no_i
# 45 spoon
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 46,253,257)
del_i += num_del_i
del_ni += num_del_no_i
# 46 bowl
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 47,215,224)
del_i += num_del_i
del_ni += num_del_no_i
# 47 banana
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 48,199,208)
del_i += num_del_i
del_ni += num_del_no_i
# 48 apple
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 49,439,445)
del_i += num_del_i
del_ni += num_del_no_i
# 49 sandwich
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 50,398,407)
del_i += num_del_i
del_ni += num_del_no_i
# 50 orange
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 51,258,264)
del_i += num_del_i
del_ni += num_del_no_i
# 51 broccoli
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 52,274,283)
del_i += num_del_i
del_ni += num_del_no_i
# 52 carrot
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 53,357,363)
del_i += num_del_i
del_ni += num_del_no_i
# 53 hot_dog
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 54,419,429)
del_i += num_del_i
del_ni += num_del_no_i
# 54 pizza
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 55,306,313)
del_i += num_del_i
del_ni += num_del_no_i
# 55 donut
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 56,265,273)
del_i += num_del_i
del_ni += num_del_no_i
# 56 cake
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 57,87,92)
del_i += num_del_i
del_ni += num_del_no_i
# 57 chair
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 58,93,96)
del_i += num_del_i
del_ni += num_del_no_i
# 58 couch
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 59,171,174)
del_i += num_del_i
del_ni += num_del_no_i
# 59 potted_plant
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 60,240,243)
del_i += num_del_i
del_ni += num_del_no_i
#60 bed
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 61,108,111)
del_i += num_del_i
del_ni += num_del_no_i
#61 dining_table
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 62,551,558)
del_i += num_del_i
del_ni += num_del_no_i
#62 toilet
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 63,195,198)
del_i += num_del_i
del_ni += num_del_no_i
#63 TV
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 64,384,389)
del_i += num_del_i
del_ni += num_del_no_i
#64 laptop
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 65,394,397)
del_i += num_del_i
del_ni += num_del_no_i
#65 mouse
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 66,435,438)
del_i += num_del_i
del_ni += num_del_no_i
#66 remote
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 67,364,368)
del_i += num_del_i
del_ni += num_del_no_i
#67 keyboard
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 68,284,290)
del_i += num_del_i
del_ni += num_del_no_i
#68 cell_phone
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 69,390,393)
del_i += num_del_i
del_ni += num_del_no_i
#69 microwave
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 70,408,414)
del_i += num_del_i
del_ni += num_del_no_i
#70 oven
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 71,547,550)
del_i += num_del_i
del_ni += num_del_no_i
#71 toaster
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 72,450,453)
del_i += num_del_i
del_ni += num_del_no_i
#72 sink
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 73,430,434)
del_i += num_del_i
del_ni += num_del_no_i
#73 refrigerator
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 74,248,252)
del_i += num_del_i
del_ni += num_del_no_i
#74 book
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 75,291,295)
del_i += num_del_i
del_ni += num_del_no_i
#75 clock
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 76,585,588)
del_i += num_del_i
del_ni += num_del_no_i
#76 vase
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 77,446,449)
del_i += num_del_i
del_ni += num_del_no_i
#77 scissors
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 78,529,533)
del_i += num_del_i
del_ni += num_del_no_i
#78 teddy_bear
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 79,349,352)
del_i += num_del_i
del_ni += num_del_no_i
#79 hair_drier
num_del_i, num_del_no_i = save_HICO(HICO, HICO_dir, thres_no_inter,thres_inter, 80,559,562)
del_i += num_del_i
del_ni += num_del_no_i
#80 toothbrush
print('num_del_inter',del_i,'num_del_no_inter',del_ni)
def main():
output_file = sys.argv[1]
HICO_dir = sys.argv[2]
thres_no_inter = float(sys.argv[3])
thres_inter = float(sys.argv[4])
print("the output file is",output_file)
print("the threshold of no interaction score is",thres_no_inter)
print("the threshold of interaction score is",thres_inter)
Generate_HICO_detection(output_file, HICO_dir, thres_no_inter,thres_inter)
if __name__ == '__main__':
main()
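# Usage sketch (script/file names and thresholds are illustrative, not taken from
# the original repository):
#
#     python generate_hico_detection.py output.pkl ./HICO_dets/ 0.1 0.9
#
# argv[1] is the detection output file, argv[2] the directory written by
# save_HICO, argv[3]/argv[4] the no-interaction and interaction score thresholds.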
| 47.316306
| 3,294
| 0.639263
|
7530a0ef5892d980f098bf65fadf53123d3e6efa
| 3,367
|
py
|
Python
|
tests/st/bgp/test_single_route_reflector.py
|
EdSchouten/calico-containers
|
e4e3c1ba2b883395de4b340bb0c2b64e8595f49d
|
[
"Apache-2.0"
] | null | null | null |
tests/st/bgp/test_single_route_reflector.py
|
EdSchouten/calico-containers
|
e4e3c1ba2b883395de4b340bb0c2b64e8595f49d
|
[
"Apache-2.0"
] | null | null | null |
tests/st/bgp/test_single_route_reflector.py
|
EdSchouten/calico-containers
|
e4e3c1ba2b883395de4b340bb0c2b64e8595f49d
|
[
"Apache-2.0"
] | 1
|
2021-08-18T09:47:40.000Z
|
2021-08-18T09:47:40.000Z
|
# Copyright (c) 2015-2016 Tigera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nose.plugins.attrib import attr
from tests.st.test_base import TestBase
from tests.st.utils.constants import DEFAULT_IPV4_ADDR_1, DEFAULT_IPV4_ADDR_2
from tests.st.utils.docker_host import DockerHost
from tests.st.utils.route_reflector import RouteReflectorCluster
class TestSingleRouteReflector(TestBase):
@attr('slow')
def test_single_route_reflector(self):
"""
Run a multi-host test using a single route reflector and global
peering.
"""
with DockerHost('host1') as host1, \
DockerHost('host2') as host2, \
RouteReflectorCluster(1, 1) as rrc:
# Turn off the node-to-node mesh (do this from any host), and
# change the default AS Number (arbitrary choice).
host1.calicoctl("bgp default-node-as 64514")
host1.calicoctl("bgp node-mesh off")
# Create a profile to associate with both workloads
host1.calicoctl("profile add TEST_GROUP")
workload_host1 = host1.create_workload("workload1")
workload_host2 = host2.create_workload("workload2")
# Add containers to Calico
host1.calicoctl("container add %s %s" % (workload_host1,
DEFAULT_IPV4_ADDR_1))
host2.calicoctl("container add %s %s" % (workload_host2,
DEFAULT_IPV4_ADDR_2))
# Now add the profiles - one using set and one using append
host1.calicoctl("container %s profile set TEST_GROUP" % workload_host1)
host2.calicoctl("container %s profile append TEST_GROUP" % workload_host2)
# Allow network to converge (which it won't)
try:
workload_host1.assert_can_ping(DEFAULT_IPV4_ADDR_2, retries=5)
except AssertionError:
pass
else:
raise AssertionError("Hosts can ping each other")
# Set global config telling all calico nodes to peer with the
# route reflector. This can be run from either host.
rg = rrc.get_redundancy_group()
assert len(rg) == 1
host1.calicoctl("bgp peer add %s as 64514" % rg[0].ip)
# Allow network to converge (which it now will).
workload_host1.assert_can_ping(DEFAULT_IPV4_ADDR_2, retries=10)
# And check connectivity in both directions.
self.assert_ip_connectivity(workload_list=[workload_host1,
workload_host2],
ip_pass_list=[DEFAULT_IPV4_ADDR_1,
DEFAULT_IPV4_ADDR_2])
| 43.727273
| 86
| 0.621622
|
d66ead32d60d31339862c95511e50af8b55d6252
| 233
|
py
|
Python
|
src/movie_service/app.py
|
kenesparta/movie-service-api
|
9b7e132e9729af516dfa9f2f9bc7613e392624f8
|
[
"MIT"
] | 4
|
2021-05-03T19:37:43.000Z
|
2021-05-03T22:40:51.000Z
|
src/movie_service/app.py
|
kenesparta/movie-service-api
|
9b7e132e9729af516dfa9f2f9bc7613e392624f8
|
[
"MIT"
] | null | null | null |
src/movie_service/app.py
|
kenesparta/movie-service-api
|
9b7e132e9729af516dfa9f2f9bc7613e392624f8
|
[
"MIT"
] | null | null | null |
from flask import Flask
import config
from api.routes import general, movie
app = Flask(__name__)
app.config.update()
def register_routes():
app.register_blueprint(general.general)
app.register_blueprint(movie.movie_bp)
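# Note (added): the blueprints above are only attached after register_routes()
# runs; this module never calls it, so the entrypoint is assumed to do so, e.g.:
#
#     register_routes()
#     if __name__ == "__main__":
#         app.run(debug=True)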
| 16.642857
| 43
| 0.776824
|
c29f78e989f128af997edddfb73391eb32d36bc0
| 588
|
py
|
Python
|
net_interp.py
|
dmitrykroo/deepmux_super_resolution
|
4b0a6635d15aa2c2f214edd3f84d83699e4f2327
|
[
"Apache-2.0"
] | 1
|
2021-04-05T14:24:44.000Z
|
2021-04-05T14:24:44.000Z
|
net_interp.py
|
dmitrykroo/deepmux_super_resolution
|
4b0a6635d15aa2c2f214edd3f84d83699e4f2327
|
[
"Apache-2.0"
] | null | null | null |
net_interp.py
|
dmitrykroo/deepmux_super_resolution
|
4b0a6635d15aa2c2f214edd3f84d83699e4f2327
|
[
"Apache-2.0"
] | null | null | null |
import sys
import torch
from collections import OrderedDict
alpha = float(sys.argv[1])
net_PSNR_path = './models/RRDB_PSNR_x4.pth'
net_ESRGAN_path = './models/RRDB_ESRGAN_x4.pth'
net_interp_path = './models/interp_{:02d}.pth'.format(int(alpha*10))
net_PSNR = torch.load(net_PSNR_path)
net_ESRGAN = torch.load(net_ESRGAN_path)
net_interp = OrderedDict()
print('Interpolating with alpha = ', alpha)
for k, v_PSNR in net_PSNR.items():
v_ESRGAN = net_ESRGAN[k]
net_interp[k] = (1 - alpha) * v_PSNR + alpha * v_ESRGAN
torch.save(net_interp, net_interp_path)
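# Usage sketch (assumes the two checkpoints above are present under ./models):
#
#     python net_interp.py 0.8
#
# writes ./models/interp_08.pth containing 0.2 * PSNR + 0.8 * ESRGAN weights;
# alpha is expected to lie in [0, 1].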
| 26.727273
| 69
| 0.719388
|
afc85a0b8bd1818dcb784053665050a96174bfce
| 182
|
py
|
Python
|
harpoon/src/plugins/Config/commands/DisablePersist.py
|
xezzz/Harpoon
|
4a0bdea10a6e961013738598726a4205a5cc4c91
|
[
"MIT"
] | null | null | null |
harpoon/src/plugins/Config/commands/DisablePersist.py
|
xezzz/Harpoon
|
4a0bdea10a6e961013738598726a4205a5cc4c91
|
[
"MIT"
] | null | null | null |
harpoon/src/plugins/Config/commands/DisablePersist.py
|
xezzz/Harpoon
|
4a0bdea10a6e961013738598726a4205a5cc4c91
|
[
"MIT"
] | null | null | null |
async def run(plugin, ctx):
plugin.db.configs.update(ctx.guild.id, "persist", False)
await ctx.send(plugin.t(ctx.guild, "disabled_module", _emote="YES", module="Persist"))
| 30.333333
| 90
| 0.697802
|
9755e0556c0c1e52b4f1c523ddf495cf1d5a9819
| 1,104
|
py
|
Python
|
examples/miniapps/api_client/main.py
|
vlad-ghita/python-dependency-injector
|
5cf5bdda24851dd97cfa0f5054f4a8c35ddac014
|
[
"BSD-3-Clause"
] | null | null | null |
examples/miniapps/api_client/main.py
|
vlad-ghita/python-dependency-injector
|
5cf5bdda24851dd97cfa0f5054f4a8c35ddac014
|
[
"BSD-3-Clause"
] | null | null | null |
examples/miniapps/api_client/main.py
|
vlad-ghita/python-dependency-injector
|
5cf5bdda24851dd97cfa0f5054f4a8c35ddac014
|
[
"BSD-3-Clause"
] | null | null | null |
"""TBD."""
from dependency_injector import providers
import api
import models
# Creating ApiClient and User providers:
api_client = providers.Singleton(api.ApiClient,
host='production.com',
api_key='PROD_API_KEY')
user_factory = providers.Factory(models.User,
api_client=api_client)
if __name__ == '__main__':
# Creating several users and register them:
user1 = user_factory(1)
user1.register()
# API call [production.com:PROD_API_KEY], method - register, data -
# {'id': 1}
user2 = user_factory(2)
user2.register()
# API call [production.com:PROD_API_KEY], method - register, data -
# {'id': 2}
# Overriding of ApiClient on dev environment:
api_client.override(providers.Singleton(api.ApiClient,
host='localhost',
api_key='DEV_API_KEY'))
user3 = user_factory(3)
user3.register()
# API call [localhost:DEV_API_KEY], method - register, data - {'id': 3}
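    # Added sketch: undo the dev override so later factories use the production
    # client again (assumes the installed dependency_injector exposes
    # reset_override() on providers).
    api_client.reset_override()
    user4 = user_factory(4)
    user4.register()
    # API call [production.com:PROD_API_KEY], method - register, data - {'id': 4}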
| 29.837838
| 75
| 0.584239
|
a63e4e1392b35165d7e139025c2570fa50e197a0
| 2,240
|
py
|
Python
|
blog/blog_app/models.py
|
MadMaxINDIAN/thePythoneer.tech-v2.0
|
74d988c1f04095557ed43daa2aabf232faf3d159
|
[
"MIT"
] | null | null | null |
blog/blog_app/models.py
|
MadMaxINDIAN/thePythoneer.tech-v2.0
|
74d988c1f04095557ed43daa2aabf232faf3d159
|
[
"MIT"
] | null | null | null |
blog/blog_app/models.py
|
MadMaxINDIAN/thePythoneer.tech-v2.0
|
74d988c1f04095557ed43daa2aabf232faf3d159
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
class Tag(models.Model):
name = models.CharField(max_length=20)
def __str__(self):
return self.name
class Author(models.Model):
name = models.CharField(max_length=50)
description = models.CharField(max_length=250)
content = models.TextField()
facebook = models.CharField(max_length=100, null=True, blank=True)
twitter = models.CharField(max_length=30, null=True, blank=True)
github = models.CharField(max_length=60, null=True, blank=True)
linkedin = models.CharField(max_length=60, null=True, blank=True)
personal_website = models.CharField(max_length=200, null=True, blank=True)
def __str__(self):
return self.name
class BlogPostComment(models.Model):
comment = models.TextField()
def __str__(self):
return self.comment
class BlogPost(models.Model):
id = models.BigIntegerField(primary_key=True, unique=True)
image = models.ImageField()
title = models.CharField(max_length=100)
description = models.CharField(max_length=500)
# author = models.CharField(max_length=50)
author = models.ForeignKey(Author, on_delete=models.CASCADE)
date = models.DateField(auto_now_add=True)
tags = models.ManyToManyField(Tag)
content = models.TextField()
published = models.BooleanField(null=False, blank=False)
# team = models.ManyToManyField(Author)
whatsapp_message = models.CharField(max_length=1000,null=True)
facebook_link = models.CharField(max_length=100,null=True)
comments = models.ManyToManyField(BlogPostComment,blank=True)
# TODO : ADD COMMENTS
# TODO : ADD FEEDBACK
feedback_upvote = models.IntegerField(default=0)
feedback_loved = models.IntegerField(default=0)
feedback_amazed = models.IntegerField(default=0)
def __str__(self):
return self.title
class EmailIPLocator(models.Model):
email = models.EmailField()
    city = models.TextField()
    country = models.TextField()
    asname = models.TextField()
    isp = models.TextField()
lat = models.FloatField()
lon = models.FloatField()
    # GenericIPAddressField replaces IPAddressField, which was removed in Django 1.9
    ip = models.GenericIPAddressField()
zip_code = models.IntegerField()
def __str__(self):
return self.email
| 33.939394
| 78
| 0.717411
|
52eb652bc2d41cfcfae93f6aafe49b22bcce3244
| 21,949
|
py
|
Python
|
ax/models/model_utils.py
|
trsvchn/Ax
|
0b430641c6b33920757dd09ae4318ea487fb4136
|
[
"MIT"
] | 1,803
|
2019-05-01T16:04:15.000Z
|
2022-03-31T16:01:29.000Z
|
ax/models/model_utils.py
|
trsvchn/Ax
|
0b430641c6b33920757dd09ae4318ea487fb4136
|
[
"MIT"
] | 810
|
2019-05-01T07:17:47.000Z
|
2022-03-31T23:58:46.000Z
|
ax/models/model_utils.py
|
trsvchn/Ax
|
0b430641c6b33920757dd09ae4318ea487fb4136
|
[
"MIT"
] | 220
|
2019-05-01T05:37:22.000Z
|
2022-03-29T04:30:45.000Z
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import itertools
import warnings
from collections import defaultdict
from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Set, Tuple, Union
import numpy as np
import torch
from ax.core.search_space import SearchSpaceDigest
from ax.core.types import TConfig, TParamCounter
from ax.exceptions.core import SearchSpaceExhausted
from ax.models.numpy_base import NumpyModel
from ax.models.torch_base import TorchModel
if TYPE_CHECKING:
# import as module to make sphinx-autodoc-typehints happy
from ax import models # noqa F401 # pragma: no cover
Tensoray = Union[torch.Tensor, np.ndarray]
DEFAULT_MAX_RS_DRAWS = 10000
def rejection_sample(
gen_unconstrained: Callable[
[int, int, np.ndarray, Optional[Dict[int, float]]], np.ndarray
],
n: int,
d: int,
tunable_feature_indices: np.ndarray,
linear_constraints: Optional[Tuple[np.ndarray, np.ndarray]] = None,
deduplicate: bool = False,
max_draws: Optional[int] = None,
fixed_features: Optional[Dict[int, float]] = None,
rounding_func: Optional[Callable[[np.ndarray], np.ndarray]] = None,
existing_points: Optional[np.ndarray] = None,
) -> Tuple[np.ndarray, int]:
"""Rejection sample in parameter space.
Models must implement a `gen_unconstrained` method in order to support
rejection sampling via this utility.
"""
# We need to perform the round trip transformation on our generated point
# in order to deduplicate in the original search space.
# The transformation is applied above.
if deduplicate and rounding_func is None:
raise ValueError(
"Rounding function must be provided for deduplication." # pragma: no cover
)
failed_constraint_dict: TParamCounter = defaultdict(lambda: 0)
# Rejection sample with parameter constraints.
points = np.zeros((n, d))
attempted_draws = 0
successful_draws = 0
if max_draws is None:
max_draws = DEFAULT_MAX_RS_DRAWS
while successful_draws < n and attempted_draws <= max_draws:
# _gen_unconstrained returns points including fixed features.
# pyre-ignore: Anonymous function w/ named args.
point = gen_unconstrained(
n=1,
d=d,
tunable_feature_indices=tunable_feature_indices,
fixed_features=fixed_features,
)[0]
# Note: this implementation may not be performant, if the feasible volume
# is small, since applying the rounding_func is relatively expensive.
# If sampling in spaces with low feasible volume is slow, this function
# could be applied after checking the linear constraints.
if rounding_func is not None:
point = rounding_func(point)
# Check parameter constraints, always in raw transformed space.
if linear_constraints is not None:
all_constraints_satisfied, violators = check_param_constraints(
linear_constraints=linear_constraints, point=point
)
for violator in violators:
failed_constraint_dict[violator] += 1
else:
all_constraints_satisfied = True
violators = np.array([])
# Deduplicate: don't add the same point twice.
duplicate = False
if deduplicate:
if existing_points is not None:
prev_points = np.vstack([points[:successful_draws, :], existing_points])
else:
prev_points = points[:successful_draws, :]
duplicate = check_duplicate(point=point, points=prev_points)
# Add point if valid.
if all_constraints_satisfied and not duplicate:
points[successful_draws] = point
successful_draws += 1
attempted_draws += 1
if successful_draws < n:
# Only possible if attempted_draws >= max_draws.
raise SearchSpaceExhausted(
f"Rejection sampling error (specified maximum draws ({max_draws}) exhausted"
f", without finding sufficiently many ({n}) candidates). This likely means "
"that there are no new points left in the search space."
)
else:
return (points, attempted_draws)
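# Minimal sketch of a compatible `gen_unconstrained` callable (illustrative only,
# not part of this module): a uniform sampler over the tunable dimensions.
#
#     def _uniform_unconstrained(n, d, tunable_feature_indices, fixed_features=None):
#         tunable = np.random.rand(n, len(tunable_feature_indices))
#         return add_fixed_features(tunable, d, fixed_features, tunable_feature_indices)
#
#     points, draws = rejection_sample(
#         gen_unconstrained=_uniform_unconstrained, n=5, d=2,
#         tunable_feature_indices=np.arange(2),
#     )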
def check_duplicate(point: np.ndarray, points: np.ndarray) -> bool:
"""Check if a point exists in another array.
Args:
point: Newly generated point to check.
points: Points previously generated.
Returns:
True if the point is contained in points, else False
"""
for p in points:
if np.array_equal(p, point):
return True
return False
def add_fixed_features(
tunable_points: np.ndarray,
d: int,
fixed_features: Optional[Dict[int, float]],
tunable_feature_indices: np.ndarray,
) -> np.ndarray:
"""Add fixed features to points in tunable space.
Args:
tunable_points: Points in tunable space.
d: Dimension of parameter space.
fixed_features: A map {feature_index: value} for features that
should be fixed to a particular value during generation.
tunable_feature_indices: Parameter indices (in d) which are tunable.
Returns:
points: Points in the full d-dimensional space, defined by bounds.
"""
n = np.shape(tunable_points)[0]
points = np.zeros((n, d))
points[:, tunable_feature_indices] = tunable_points
if fixed_features:
fixed_feature_indices = np.array(list(fixed_features.keys()))
fixed_values = np.tile(list(fixed_features.values()), (n, 1))
points[:, fixed_feature_indices] = fixed_values
return points
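# Worked example (values assumed): with d=3, tunable indices [0, 2] and
# fixed_features={1: 0.5}, the tunable point [0.1, 0.2] maps to [0.1, 0.5, 0.2]:
#
#     add_fixed_features(np.array([[0.1, 0.2]]), 3, {1: 0.5}, np.array([0, 2]))
#     # -> array([[0.1, 0.5, 0.2]])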
def check_param_constraints(
linear_constraints: Tuple[np.ndarray, np.ndarray], point: np.ndarray
) -> Tuple[bool, np.ndarray]:
"""Check if a point satisfies parameter constraints.
Args:
linear_constraints: A tuple of (A, b). For k linear constraints on
d-dimensional x, A is (k x d) and b is (k x 1) such that
A x <= b.
point: A candidate point in d-dimensional space, as a (1 x d) matrix.
Returns:
2-element tuple containing
- Flag that is True if all constraints are satisfied by the point.
- Indices of constraints which are violated by the point.
"""
constraints_satisfied = (
linear_constraints[0] @ np.expand_dims(point, axis=1) <= linear_constraints[1]
)
if np.all(constraints_satisfied):
return True, np.array([])
else:
return (False, np.where(constraints_satisfied == False)[0]) # noqa: E712
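# Worked example (values assumed): a single constraint x0 + x1 <= 1 in 2-d.
#
#     ok, violators = check_param_constraints(
#         linear_constraints=(np.array([[1.0, 1.0]]), np.array([[1.0]])),
#         point=np.array([0.4, 0.3]),
#     )
#     # ok is True and violators is empty; a point such as [0.8, 0.9] would fail.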
def tunable_feature_indices(
bounds: List[Tuple[float, float]], fixed_features: Optional[Dict[int, float]] = None
) -> np.ndarray:
"""Get the feature indices of tunable features.
Args:
bounds: A list of (lower, upper) tuples for each column of X.
fixed_features: A map {feature_index: value} for features that
should be fixed to a particular value during generation.
Returns:
The indices of tunable features.
"""
fixed_feature_indices = list(fixed_features.keys()) if fixed_features else []
feature_indices = np.arange(len(bounds))
return np.delete(feature_indices, fixed_feature_indices)
def validate_bounds(
bounds: List[Tuple[float, float]], fixed_feature_indices: np.ndarray
) -> None:
"""Ensure the requested space is [0,1]^d.
Args:
bounds: A list of d (lower, upper) tuples for each column of X.
fixed_feature_indices: Indices of features which are fixed at a
particular value.
"""
for feature_idx, bound in enumerate(bounds):
# Bounds for fixed features are not unit-transformed.
if feature_idx in fixed_feature_indices:
continue
if bound[0] != 0 or bound[1] != 1:
raise ValueError(
"This generator operates on [0,1]^d. Please make use "
"of the UnitX transform in the ModelBridge, and ensure "
"task features are fixed."
)
def best_observed_point(
model: Union[NumpyModel, TorchModel],
bounds: List[Tuple[float, float]],
objective_weights: Optional[Tensoray],
outcome_constraints: Optional[Tuple[Tensoray, Tensoray]] = None,
linear_constraints: Optional[Tuple[Tensoray, Tensoray]] = None,
fixed_features: Optional[Dict[int, float]] = None,
options: Optional[TConfig] = None,
) -> Optional[Tensoray]:
"""Select the best point that has been observed.
Implements two approaches to selecting the best point.
For both approaches, only points that satisfy parameter space constraints
(bounds, linear_constraints, fixed_features) will be returned. Points must
also be observed for all objective and constraint outcomes. Returned
points may violate outcome constraints, depending on the method below.
1: Select the point that maximizes the expected utility
(objective_weights^T posterior_objective_means - baseline) * Prob(feasible)
Here baseline should be selected so that at least one point has positive
utility. It can be specified in the options dict, otherwise
min (objective_weights^T posterior_objective_means)
will be used, where the min is over observed points.
2: Select the best-objective point that is feasible with at least
probability p.
The following quantities may be specified in the options dict:
- best_point_method: 'max_utility' (default) or 'feasible_threshold'
to select between the two approaches described above.
- utility_baseline: Value for the baseline used in max_utility approach. If
not provided, defaults to min objective value.
- probability_threshold: Threshold for the feasible_threshold approach.
Defaults to p=0.95.
- feasibility_mc_samples: Number of MC samples used for estimating the
probability of feasibility (defaults 10k).
Args:
model: Numpy or Torch model.
bounds: A list of (lower, upper) tuples for each feature.
objective_weights: The objective is to maximize a weighted sum of
the columns of f(x). These are the weights.
outcome_constraints: A tuple of (A, b). For k outcome constraints
and m outputs at f(x), A is (k x m) and b is (k x 1) such that
A f(x) <= b.
linear_constraints: A tuple of (A, b). For k linear constraints on
d-dimensional x, A is (k x d) and b is (k x 1) such that
A x <= b.
fixed_features: A map {feature_index: value} for features that
should be fixed to a particular value in the best point.
options: A config dictionary with settings described above.
Returns:
A d-array of the best point, or None if no feasible point exists.
"""
if not hasattr(model, "Xs"):
raise ValueError(f"Model must store training data Xs, but {model} does not.")
best_point_and_value = best_in_sample_point(
Xs=model.Xs, # pyre-ignore[16]: Presence of attr. checked above.
model=model,
bounds=bounds,
objective_weights=objective_weights,
outcome_constraints=outcome_constraints,
linear_constraints=linear_constraints,
fixed_features=fixed_features,
options=options,
)
return None if best_point_and_value is None else best_point_and_value[0]
def best_in_sample_point(
Xs: Union[List[torch.Tensor], List[np.ndarray]],
model: Union[NumpyModel, TorchModel],
bounds: List[Tuple[float, float]],
objective_weights: Optional[Tensoray],
outcome_constraints: Optional[Tuple[Tensoray, Tensoray]] = None,
linear_constraints: Optional[Tuple[Tensoray, Tensoray]] = None,
fixed_features: Optional[Dict[int, float]] = None,
options: Optional[TConfig] = None,
) -> Optional[Tuple[Tensoray, float]]:
"""Select the best point that has been observed.
Implements two approaches to selecting the best point.
For both approaches, only points that satisfy parameter space constraints
(bounds, linear_constraints, fixed_features) will be returned. Points must
also be observed for all objective and constraint outcomes. Returned
points may violate outcome constraints, depending on the method below.
1: Select the point that maximizes the expected utility
(objective_weights^T posterior_objective_means - baseline) * Prob(feasible)
Here baseline should be selected so that at least one point has positive
utility. It can be specified in the options dict, otherwise
min (objective_weights^T posterior_objective_means)
will be used, where the min is over observed points.
2: Select the best-objective point that is feasible with at least
probability p.
The following quantities may be specified in the options dict:
- best_point_method: 'max_utility' (default) or 'feasible_threshold'
to select between the two approaches described above.
- utility_baseline: Value for the baseline used in max_utility approach. If
not provided, defaults to min objective value.
- probability_threshold: Threshold for the feasible_threshold approach.
Defaults to p=0.95.
- feasibility_mc_samples: Number of MC samples used for estimating the
probability of feasibility (defaults 10k).
Args:
Xs: Training data for the points, among which to select the best.
model: Numpy or Torch model.
bounds: A list of (lower, upper) tuples for each feature.
objective_weights: The objective is to maximize a weighted sum of
the columns of f(x). These are the weights.
outcome_constraints: A tuple of (A, b). For k outcome constraints
and m outputs at f(x), A is (k x m) and b is (k x 1) such that
A f(x) <= b.
linear_constraints: A tuple of (A, b). For k linear constraints on
d-dimensional x, A is (k x d) and b is (k x 1) such that
A x <= b.
fixed_features: A map {feature_index: value} for features that
should be fixed to a particular value in the best point.
options: A config dictionary with settings described above.
Returns:
        A two-element tuple or None if no feasible point exists. In tuple:
- d-array of the best point,
- utility at the best point.
"""
# Parse options
if options is None:
options = {}
method: str = options.get("best_point_method", "max_utility")
B: Optional[float] = options.get("utility_baseline", None)
threshold: float = options.get("probability_threshold", 0.95)
nsamp: int = options.get("feasibility_mc_samples", 10000)
# Get points observed for all objective and constraint outcomes
if objective_weights is None:
return None # pragma: no cover
objective_weights_np = as_array(objective_weights)
X_obs = get_observed(
Xs=Xs,
objective_weights=objective_weights,
outcome_constraints=outcome_constraints,
)
# Filter to those that satisfy constraints.
X_obs = filter_constraints_and_fixed_features(
X=X_obs,
bounds=bounds,
linear_constraints=linear_constraints,
fixed_features=fixed_features,
)
if len(X_obs) == 0:
# No feasible points
return None
# Predict objective and P(feas) at these points for Torch models.
if isinstance(Xs[0], torch.Tensor):
X_obs = X_obs.detach().clone()
f, cov = as_array(model.predict(X_obs))
obj = objective_weights_np @ f.transpose() # pyre-ignore
pfeas = np.ones_like(obj)
if outcome_constraints is not None:
A, b = as_array(outcome_constraints) # (m x j) and (m x 1)
# Use Monte Carlo to compute pfeas, to properly handle covariance
# across outcomes.
for i, _ in enumerate(X_obs):
z = np.random.multivariate_normal(
mean=f[i, :], cov=cov[i, :, :], size=nsamp
) # (nsamp x j)
pfeas[i] = (A @ z.transpose() <= b).all(axis=0).mean()
# Identify best point
if method == "feasible_threshold":
utility = obj
utility[pfeas < threshold] = -np.Inf
elif method == "max_utility":
if B is None:
B = obj.min()
utility = (obj - B) * pfeas
# pyre-fixme[61]: `utility` may not be initialized here.
i = np.argmax(utility)
if utility[i] == -np.Inf:
return None
else:
return X_obs[i, :], utility[i]
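# Example options dict (values assumed) selecting the thresholded approach
# described in the docstring above:
#
#     options = {
#         "best_point_method": "feasible_threshold",
#         "probability_threshold": 0.9,
#         "feasibility_mc_samples": 5000,
#     }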
def as_array(
x: Union[Tensoray, Tuple[Tensoray, ...]]
) -> Union[np.ndarray, Tuple[np.ndarray, ...]]:
"""Convert every item in a tuple of tensors/arrays into an array.
Args:
x: A tensor, array, or a tuple of potentially mixed tensors and arrays.
Returns:
x, with everything converted to array.
"""
if isinstance(x, tuple):
return tuple(as_array(x_i) for x_i in x) # pyre-ignore
elif isinstance(x, np.ndarray):
return x
elif torch.is_tensor(x):
return x.detach().cpu().double().numpy()
else:
raise ValueError(
"Input to as_array must be numpy array or torch tensor"
) # pragma: no cover
def get_observed(
Xs: Union[List[torch.Tensor], List[np.ndarray]],
objective_weights: Tensoray,
outcome_constraints: Optional[Tuple[Tensoray, Tensoray]] = None,
) -> Tensoray:
"""Filter points to those that are observed for objective outcomes and outcomes
that show up in outcome_constraints (if there are any).
Args:
Xs: A list of m (k_i x d) feature matrices X. Number of rows k_i
can vary from i=1,...,m.
objective_weights: The objective is to maximize a weighted sum of
the columns of f(x). These are the weights.
outcome_constraints: A tuple of (A, b). For k outcome constraints
and m outputs at f(x), A is (k x m) and b is (k x 1) such that
A f(x) <= b.
Returns:
Points observed for all objective outcomes and outcome constraints.
"""
objective_weights_np = as_array(objective_weights)
used_outcomes: Set[int] = set(np.where(objective_weights_np != 0)[0])
if len(used_outcomes) == 0:
raise ValueError("At least one objective weight must be non-zero")
if outcome_constraints is not None:
used_outcomes = used_outcomes.union(
np.where(as_array(outcome_constraints)[0] != 0)[1]
)
outcome_list = list(used_outcomes)
X_obs_set = {tuple(float(x_i) for x_i in x) for x in Xs[outcome_list[0]]}
for _, idx in enumerate(outcome_list, start=1):
X_obs_set = X_obs_set.intersection(
{tuple(float(x_i) for x_i in x) for x in Xs[idx]}
)
if isinstance(Xs[0], np.ndarray):
return np.array(list(X_obs_set), dtype=Xs[0].dtype) # (n x d)
if isinstance(Xs[0], torch.Tensor):
# pyre-fixme[7]: Expected `Union[np.ndarray, torch.Tensor]` but got implicit
# return value of `None`.
return torch.tensor(list(X_obs_set), device=Xs[0].device, dtype=Xs[0].dtype)
def filter_constraints_and_fixed_features(
X: Tensoray,
bounds: List[Tuple[float, float]],
linear_constraints: Optional[Tuple[Tensoray, Tensoray]] = None,
fixed_features: Optional[Dict[int, float]] = None,
) -> Tensoray:
"""Filter points to those that satisfy bounds, linear_constraints, and
fixed_features.
Args:
        X: A tensor or array of points.
bounds: A list of (lower, upper) tuples for each feature.
linear_constraints: A tuple of (A, b). For k linear constraints on
d-dimensional x, A is (k x d) and b is (k x 1) such that
A x <= b.
fixed_features: A map {feature_index: value} for features that
should be fixed to a particular value in the best point.
Returns:
Feasible points.
"""
if len(X) == 0: # if there are no points, nothing to filter
return X
X_np = X
if isinstance(X, torch.Tensor):
X_np = X.cpu().numpy()
feas = np.ones(X_np.shape[0], dtype=bool) # (n)
for i, b in enumerate(bounds):
feas &= (X_np[:, i] >= b[0]) & (X_np[:, i] <= b[1])
if linear_constraints is not None:
A, b = as_array(linear_constraints) # (m x d) and (m x 1)
feas &= (A @ X_np.transpose() <= b).all(axis=0)
if fixed_features is not None:
for idx, val in fixed_features.items():
feas &= X_np[:, idx] == val
X_feas = X_np[feas, :]
if isinstance(X, torch.Tensor):
return torch.from_numpy(X_feas).to(device=X.device, dtype=X.dtype)
else:
return X_feas
def mk_discrete_choices(
ssd: SearchSpaceDigest,
fixed_features: Optional[Dict[int, float]] = None,
) -> Dict[int, List[Union[int, float]]]:
discrete_choices = ssd.discrete_choices
# Add in fixed features.
if fixed_features is not None:
# Note: if any discrete features are fixed we won't enumerate those.
discrete_choices = {
**discrete_choices,
**{k: [v] for k, v in fixed_features.items()},
}
return discrete_choices
def enumerate_discrete_combinations(
discrete_choices: Dict[int, List[Union[int, float]]],
) -> List[Dict[int, Union[float, int]]]:
n_combos = np.prod([len(v) for v in discrete_choices.values()])
if n_combos > 50:
warnings.warn(
f"Enumerating {n_combos} combinations of discrete parameter values "
"while optimizing over a mixed search space. This can be very slow."
)
fixed_features_list = [
dict(zip(discrete_choices.keys(), c))
for c in itertools.product(*discrete_choices.values())
]
return fixed_features_list
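# Worked example (values assumed):
#
#     enumerate_discrete_combinations({0: [1, 2], 3: [5.0]})
#     # -> [{0: 1, 3: 5.0}, {0: 2, 3: 5.0}]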
| 39.194643
| 88
| 0.664449
|
29371591412a402583c88893ee3c188e29730b10
| 9,155
|
py
|
Python
|
curiefense/curietasker/curietasker/tasks_list_updates.py
|
bartavelle/curiefense
|
9c8d466453880ce6c1d51e59bb695773b09d2b2c
|
[
"Apache-2.0"
] | 388
|
2020-11-08T07:53:51.000Z
|
2022-03-31T19:04:32.000Z
|
curiefense/curietasker/curietasker/tasks_list_updates.py
|
bartavelle/curiefense
|
9c8d466453880ce6c1d51e59bb695773b09d2b2c
|
[
"Apache-2.0"
] | 617
|
2020-11-08T11:33:10.000Z
|
2022-03-31T17:04:40.000Z
|
curiefense/curietasker/curietasker/tasks_list_updates.py
|
bartavelle/curiefense
|
9c8d466453880ce6c1d51e59bb695773b09d2b2c
|
[
"Apache-2.0"
] | 67
|
2020-11-09T07:34:53.000Z
|
2022-03-22T15:02:13.000Z
|
import time
import datetime
import requests
import json
import re
from jsonschema import validate, ValidationError
from .task import Task
SCHEMAFILE = "/global-filters.schema"
@Task.register("update")
class TaskUpdate(Task):
parsers = {
# "ip": re.compile("^(?P<val>(([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})|(([0-9a-f]*:+){1,8}))(/[0-9]{1,2})) *([;#] *(?P<comment>.*$))?", re.IGNORECASE),
# "asn": re.compile(r"^as(?P<val>[0-9]{3,6}) *([#;//?] *(?P<comment>.*$))?", re.IGNORECASE),
"ip": re.compile(
r"^[^;#](([0-9a-f]{1,}\:+){1,7}[0-9a-f]{1,}([:]+)?(/\d{1,3})?|(\d{1,3}\.){3}\d{1,3}(/\d{1,2})?)((\s+)?([#;//?].+))?",
re.IGNORECASE,
),
"asn": re.compile(r"(AS\d{3,6})((\s+)?([#;//?].+))?", re.IGNORECASE),
}
def check_args(self, list_ids, branches):
assert (
type(list_ids) is list or list_ids == "*"
), f"Unrecognized list ids: {list_ids!r}"
assert (
type(branches) is list or branches == "*"
), f"Unrecognized branch list: {branches!r}"
self.list_ids = list_ids
self.branches = branches
def validate_schema(self, data):
with open(SCHEMAFILE) as json_file:
schema = json.load(json_file)
try:
validate(instance=data, schema=schema)
return True
except Exception as err:
self.log.error(f"Exception while parsing schema {err!r}")
return False
def parse_native(self, data):
if self.validate_schema(data):
# return entire document
return data
def parse_re(self, data):
lines = data.splitlines()
if len(lines) > 0:
midlist = int(len(lines) / 2)
## first,last and one from the middle. at least one must match.
if any(
(
self.parsers["ip"].match(lines[0]),
self.parsers["ip"].match(lines[-1]),
self.parsers["ip"].match(lines[midlist]),
)
):
for line in lines:
match = self.parsers["ip"].match(line)
if match:
g = match.groups()
if g:
yield ["ip", g[0], g[-1] and g[-1][:128]]
elif any(
(
self.parsers["asn"].match(lines[0]),
self.parsers["asn"].match(lines[-1]),
self.parsers["asn"].match(lines[midlist]),
)
):
for line in lines:
match = self.parsers["asn"].match(line)
if match:
g = match.groups()
if g:
yield ["asn", g[0], g[-1] and g[-1][:128]]
else:
yield None
def iterate_object(self, obj):
typename = type(obj).__name__
if typename == "list":
return obj
elif typename == "dict":
return obj.values()
def parse_object(self, obj):
got = self.iterate_object(obj)
for element in got:
typename = type(element).__name__
if typename in ["dict", "list"]:
for j in self.parse_object(element):
yield j
else:
match = self.parsers["ip"].match(element)
if match:
g = match.groups()
if g:
yield ["ip", g[0], g[-1] and g[-1][:128]]
else:
match = self.parsers["asn"].match(element)
if match:
g = match.groups()
if g:
yield ["asn", g[0], g[-1] and g[-1][:128]]
def readurl(self, url):
try:
data = requests.get(url)
data.raise_for_status()
if "application/json" in data.headers.get(
"Content-Type", data.headers.get("content-type")
):
self.log.info(f"readurl got JSON")
return data.json()
else:
self.log.info(f"readurl got text")
return data.text
except:
return None
def parse(self, lst):
url = lst.get("source")
data = self.readurl(url)
if data:
typename = type(data).__name__
self.log.info(f"parse results data type {typename}")
if typename not in ("dict", "list"):
entries = list(self.parse_re(data))
if len(entries) > 0 and entries[0]:
lst["entries"] = list(entries)
lst["mdate"] = datetime.datetime.now().isoformat()
else:
native_format = self.parse_native(data)
if native_format:
self.log.info(f"native format found")
# native format, update the whole entry
lst = native_format
else:
entries = list(self.parse_object(data))
if len(entries) > 0 and entries[0]:
self.log.info(f"parseobject found entries")
lst["entries"] = list(entries)
lst["mdate"] = datetime.datetime.now().isoformat()
return lst
self.log.error(f"Could not fetch data from: {url}")
return False
def action(self):
branches = self.branches
if branches == "*":
l = self.confserver.configs.list().body
branches = [b["id"] for b in l]
self.log.info(f"Working on all branches: {branches!r}")
for branch in branches:
lstids = self.list_ids
if lstids == "*":
lstids = self.confserver.entries.list(branch, "profilinglists").body
self.log.info(f"Working on lists: {lstids!r}")
for lstid in lstids:
self.log.info(f"Downloading {lstid} in branch {branch}")
try:
lst = self.confserver.entries.get(
branch, "profilinglists", lstid
).body
except Exception as e:
self.log.error(
f"Could not download {lstid} in branch {branch}: {e}"
)
continue
source = lst.get("source")
if not source:
self.log.error(
f"Profiling list {lstid} is missing 'source' attribute or attribute is empty"
)
continue
if source == "self-managed":
self.log.info(f"List {lstid} is self-managed")
continue
self.log.info(f"Downloading update from {source}")
try:
lst = self.parse(lst)
if lst:
self.confserver.entries.update(
branch, "profilinglists", lstid, body=lst
)
self.log.info(f"Updated {lstid} in branch {branch}")
except Exception as e:
self.log.error(
f"Could not download url [{source}] for list {lstid}"
)
continue
@Task.register("publish")
class TaskPublish(Task):
def check_args(self, branches):
assert (
type(branches) is list or branches == "*"
), f"Unrecognized branch list: {branches!r}"
self.branches = branches
def action(self):
sysdb = self.confserver.db.get("system").body
branches = self.branches
if branches == "*":
l = self.confserver.configs.list().body
branches = [b["id"] for b in l]
self.log.info(f"Working on all branches: {branches!r}")
for branch in branches:
for brbuck in sysdb["branch_buckets"]:
if brbuck["name"] == branch:
buckets = [
buck
for buck in sysdb["buckets"]
if buck["name"] in brbuck["buckets"]
]
self.log.info(
f"Publishing branch [{branch}] to buckets {buckets!r}"
)
res = self.confserver.tools.publish(branch, body=buckets).body
if res["ok"]:
self.log.info(f"Publish status: {res!r}")
else:
self.log.error(f"Publish status: {res!r}")
@Task.register("update_and_publish")
class TaskUpdateAndPublish(TaskUpdate, TaskPublish):
def check_args(self, list_ids, branches):
TaskUpdate.check_args(self, list_ids, branches)
TaskPublish.check_args(self, branches)
def action(self):
TaskUpdate.action(self)
TaskPublish.action(self)
| 36.043307
| 167
| 0.462698
|
93e4b79ad03083bd209909804bdb3586447d4b10
| 1,273
|
py
|
Python
|
LoliBot/cogs/joinleave.py
|
Aiyumii/KawaiiSoup
|
929f1d58183e01993ca9f7a4647433231e65c3ad
|
[
"MIT"
] | null | null | null |
LoliBot/cogs/joinleave.py
|
Aiyumii/KawaiiSoup
|
929f1d58183e01993ca9f7a4647433231e65c3ad
|
[
"MIT"
] | null | null | null |
LoliBot/cogs/joinleave.py
|
Aiyumii/KawaiiSoup
|
929f1d58183e01993ca9f7a4647433231e65c3ad
|
[
"MIT"
] | null | null | null |
import discord
from LoliBot.settings import guild_settings
class JoinLeave():
def __init__(self, Bot):
self.bot = Bot
async def on_member_join(self, member):
"""
Greets users when they join a server.
"""
settings = guild_settings.get(member.guild)
if not settings.greets["enabled"]:
return
if settings.greets["custom-message"]:
message = settings.greets["custom-message"]
else:
message = settings.greets["default-message"]
em = discord.Embed(
title="Welcome to {}!".format(member.guild),
description='Hey {}! Welcome to **{}**! {}'.format(member.mention, member.guild, message),
colour=0xDEADBF)
em.set_thumbnail(url=member.avatar_url)
channel = self.bot.get_channel(settings.greets["welcome-channel"])
return await channel.send(embed=em)
async def on_member_remove(self, member):
"""
The same but the opposite
"""
settings = guild_settings.get(member.guild)
channel = settings.goodbyes["goodbye-channel"]
if not settings.goodbyes["enabled"]:
return
else:
channel = self.bot.get_channel(channel)
return await channel.send(embed=discord.Embed(
description="{}#{} has left or been beaned.".format(member.name, member.discriminator), colour=0xDEADBF))
def setup(Bot):
Bot.add_cog(JoinLeave(Bot))
| 28.288889
| 109
| 0.714847
|
9475c1e8de56dbf4c96ea1356aa016e6c86b7bb9
| 579
|
py
|
Python
|
vbdiar/utils/__init__.py
|
VarunSrivastava19/VBDiarization
|
2a460b4fc11b3a5ff73d0534cadb182be1a9d882
|
[
"Apache-2.0"
] | 101
|
2017-12-19T21:55:59.000Z
|
2022-03-15T06:56:06.000Z
|
vbdiar/utils/__init__.py
|
VarunSrivastava19/VBDiarization
|
2a460b4fc11b3a5ff73d0534cadb182be1a9d882
|
[
"Apache-2.0"
] | 27
|
2017-07-20T06:10:42.000Z
|
2020-11-22T14:15:16.000Z
|
vbdiar/utils/__init__.py
|
VarunSrivastava19/VBDiarization
|
2a460b4fc11b3a5ff73d0534cadb182be1a9d882
|
[
"Apache-2.0"
] | 30
|
2017-07-17T08:53:44.000Z
|
2021-05-18T07:37:46.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2018 Brno University of Technology FIT
# Author: Jan Profant <xprofa00@stud.fit.vutbr.cz>
# All Rights Reserved
import os
import errno
def mkdir_p(path):
""" Behaviour similar to mkdir -p in shell.
Args:
path (string_types): path to create
"""
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise ValueError('Can not create directory {}.'.format(path))
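# Usage sketch (path is illustrative):
#
#     mkdir_p('/tmp/vbdiar/exp/embeddings')
#
# On Python 3 the same behaviour is available via os.makedirs(path, exist_ok=True).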
| 20.678571
| 73
| 0.613126
|
70712dcdb2ba4f69ce77c456cd2190fb02ad52eb
| 714
|
py
|
Python
|
examples/message.py
|
Conchbot-Development/EpikCord.py
|
d81f4075ee9621edec05c8ab0ccbb4a6c4ec46c6
|
[
"MIT"
] | null | null | null |
examples/message.py
|
Conchbot-Development/EpikCord.py
|
d81f4075ee9621edec05c8ab0ccbb4a6c4ec46c6
|
[
"MIT"
] | 5
|
2022-02-03T00:35:57.000Z
|
2022-03-18T22:06:03.000Z
|
examples/message.py
|
Conchbot-Development/EpikCord.py
|
d81f4075ee9621edec05c8ab0ccbb4a6c4ec46c6
|
[
"MIT"
] | 1
|
2022-03-04T22:57:14.000Z
|
2022-03-04T22:57:14.000Z
|
"""
Before you implement this in your bot, please note that its just for testing,
If you have a test bot and are professional with your code, you can experiment
with different features and report the bugs in an issue
"""
from EpikCord import Client,Intents,Messageable, Embed
intents = Intents().guilds.guild_members.guild_messages.direct_messages
client = Client("your_token", intents)
@client.event
async def message_create(message):
if message.author.id == client.user.id:
return
if message.content == "example test":
message.channel = Messageable(client, message.channel_id)
await message.channel.send(content="hello, chat testing")
client.login()
| 31.043478
| 80
| 0.722689
|
64cf07eb2dab3161535ababd1b260bc63a3513aa
| 1,683
|
py
|
Python
|
google-cloud-secret_manager/synth.py
|
trambui09/google-cloud-ruby
|
9c5f5fc27cbfbb4c4fc55d1171f450d1af3226aa
|
[
"Apache-2.0"
] | 542
|
2018-09-19T18:52:38.000Z
|
2022-03-17T10:49:38.000Z
|
google-cloud-secret_manager/synth.py
|
trambui09/google-cloud-ruby
|
9c5f5fc27cbfbb4c4fc55d1171f450d1af3226aa
|
[
"Apache-2.0"
] | 6,226
|
2018-09-19T17:31:37.000Z
|
2022-03-31T23:08:01.000Z
|
google-cloud-secret_manager/synth.py
|
trambui09/google-cloud-ruby
|
9c5f5fc27cbfbb4c4fc55d1171f450d1af3226aa
|
[
"Apache-2.0"
] | 310
|
2018-09-20T02:19:43.000Z
|
2022-03-18T12:31:05.000Z
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp
import synthtool.languages.ruby as ruby
import logging
AUTOSYNTH_MULTIPLE_COMMITS = True
logging.basicConfig(level=logging.DEBUG)
gapic = gcp.GAPICMicrogenerator()
library = gapic.ruby_library(
"secretmanager", "v1",
generator_args={
"ruby-cloud-gem-name": "google-cloud-secret_manager",
"ruby-cloud-title": "Secret Manager",
"ruby-cloud-description": "Secret Manager is a secure and convenient storage system for API keys, passwords, certificates, and other sensitive data. Secret Manager provides a central place and single source of truth to manage, access, and audit secrets across Google Cloud.",
"ruby-cloud-env-prefix": "SECRET_MANAGER",
"ruby-cloud-wrapper-of": "v1:0.1;v1beta1:0.3",
"ruby-cloud-product-url": "https://cloud.google.com/secret-manager",
"ruby-cloud-api-id": "secretmanager.googleapis.com",
"ruby-cloud-api-shortname": "secretmanager",
}
)
s.copy(library, merge=ruby.global_merge)
| 40.071429
| 283
| 0.734997
|
cfc4a5861ff55df32c6208ddb9d4ddff47b9704c
| 1,669
|
py
|
Python
|
cohesity_management_sdk/models/reducers_wrapper.py
|
nick6655/management-sdk-python
|
88e792cb83e5c24a22af495b220c145d0c45841d
|
[
"Apache-2.0"
] | 18
|
2019-09-24T17:35:53.000Z
|
2022-03-25T08:08:47.000Z
|
cohesity_management_sdk/models/reducers_wrapper.py
|
nick6655/management-sdk-python
|
88e792cb83e5c24a22af495b220c145d0c45841d
|
[
"Apache-2.0"
] | 18
|
2019-03-29T19:32:29.000Z
|
2022-01-03T23:16:45.000Z
|
cohesity_management_sdk/models/reducers_wrapper.py
|
nick6655/management-sdk-python
|
88e792cb83e5c24a22af495b220c145d0c45841d
|
[
"Apache-2.0"
] | 16
|
2019-02-27T06:54:12.000Z
|
2021-11-16T18:10:24.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
import cohesity_management_sdk.models.reducer_info
class ReducersWrapper(object):
"""Implementation of the 'ReducersWrapper' model.
ReducersWrapper is the struct to define the list of reducers.
Attributes:
reducers (list of ReducerInfo): Reducers specifies the list of
available reducers in analytics workbench.
"""
# Create a mapping from Model property names to API property names
_names = {
"reducers":'reducers'
}
def __init__(self,
reducers=None):
"""Constructor for the ReducersWrapper class"""
# Initialize members of the class
self.reducers = reducers
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
reducer_list = None
if dictionary.get('reducers', None) != None:
reducer_list = list()
for reducer in dictionary.get('reducers'):
reducer_list.append(cohesity_management_sdk.models.reducer_info.ReducerInfo.from_dictionary(reducer))
# Return an object of this model
return cls(reducer_list)
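# Usage sketch (payload shape assumed from the attribute docs above; ReducerInfo
# field names are not shown in this file):
#
#     wrapper = ReducersWrapper.from_dictionary({'reducers': [{...}, {...}]})
#     wrapper.reducers  # -> list of ReducerInfo objects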
| 28.775862
| 117
| 0.638107
|
b95ef2588e6dcbec4f4dd3fbe0d8f1192a90aa49
| 2,938
|
py
|
Python
|
sphinx/ext/imgconverter.py
|
choldgraf/sphinx
|
97d2f9fbf8eab478908af981c1a36aed1d75a4ce
|
[
"BSD-2-Clause"
] | null | null | null |
sphinx/ext/imgconverter.py
|
choldgraf/sphinx
|
97d2f9fbf8eab478908af981c1a36aed1d75a4ce
|
[
"BSD-2-Clause"
] | null | null | null |
sphinx/ext/imgconverter.py
|
choldgraf/sphinx
|
97d2f9fbf8eab478908af981c1a36aed1d75a4ce
|
[
"BSD-2-Clause"
] | null | null | null |
"""
sphinx.ext.imgconverter
~~~~~~~~~~~~~~~~~~~~~~~
Image converter extension for Sphinx
:copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import subprocess
from subprocess import CalledProcessError, PIPE
from typing import Any, Dict
from sphinx.application import Sphinx
from sphinx.errors import ExtensionError
from sphinx.locale import __
from sphinx.transforms.post_transforms.images import ImageConverter
from sphinx.util import logging
logger = logging.getLogger(__name__)
class ImagemagickConverter(ImageConverter):
conversion_rules = [
('image/svg+xml', 'image/png'),
('image/gif', 'image/png'),
('application/pdf', 'image/png'),
]
def is_available(self) -> bool:
"""Confirms the converter is available or not."""
try:
args = [self.config.image_converter, '-version']
logger.debug('Invoking %r ...', args)
subprocess.run(args, stdout=PIPE, stderr=PIPE, check=True)
return True
except OSError:
            logger.warning(__('convert command %r cannot be run, '
                              'check the image_converter setting'),
self.config.image_converter)
return False
except CalledProcessError as exc:
logger.warning(__('convert exited with error:\n'
'[stderr]\n%r\n[stdout]\n%r'),
exc.stderr, exc.stdout)
return False
def convert(self, _from: str, _to: str) -> bool:
"""Converts the image to expected one."""
try:
# append an index 0 to source filename to pick up the first frame
# (or first page) of image (ex. Animation GIF, PDF)
_from += '[0]'
args = ([self.config.image_converter] +
self.config.image_converter_args +
[_from, _to])
logger.debug('Invoking %r ...', args)
subprocess.run(args, stdout=PIPE, stderr=PIPE, check=True)
return True
except OSError:
            logger.warning(__('convert command %r cannot be run, '
                              'check the image_converter setting'),
self.config.image_converter)
return False
except CalledProcessError as exc:
raise ExtensionError(__('convert exited with error:\n'
'[stderr]\n%r\n[stdout]\n%r') %
(exc.stderr, exc.stdout))
def setup(app: Sphinx) -> Dict[str, Any]:
app.add_post_transform(ImagemagickConverter)
app.add_config_value('image_converter', 'convert', 'env')
app.add_config_value('image_converter_args', [], 'env')
return {
'version': 'builtin',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
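# Typical conf.py wiring for this extension (sketch; the converter binary depends
# on the local ImageMagick install):
#
#     extensions = ['sphinx.ext.imgconverter']
#     # image_converter = 'magick'           # e.g. ImageMagick 7 on Windows
#     # image_converter_args = ['convert']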
| 34.97619
| 77
| 0.578285
|
ec01f6c45dc99423f3791eb799351c0ad33f117e
| 2,565
|
py
|
Python
|
scripts/build.py
|
Shagroth/tutorial
|
96cc36b2b66a5003c4b745b45fa6578b70c7993c
|
[
"Apache-2.0"
] | null | null | null |
scripts/build.py
|
Shagroth/tutorial
|
96cc36b2b66a5003c4b745b45fa6578b70c7993c
|
[
"Apache-2.0"
] | 4
|
2021-09-02T19:57:51.000Z
|
2022-02-27T11:41:09.000Z
|
scripts/build.py
|
Shagroth/tutorial
|
96cc36b2b66a5003c4b745b45fa6578b70c7993c
|
[
"Apache-2.0"
] | null | null | null |
import functools
import json
import os
import re
import semver
import subprocess as sp
BASE_URL_ENV = 'TOUR_BASE_URL'
def exec(*argv):
print("$>"," ".join(argv))
res = sp.run(argv, stdout=sp.PIPE, stderr=sp.PIPE)
if not res.returncode == 0:
print('Error running', argv)
print('StdOut:\n', res.stdout)
print('StdErr:\n', res.stderr)
raise Exception('Failed to exec ' + " ".join(argv))
return res
def runHugo(outSuffix=""):
baseUrl = "https://tour.dgraph.io/"
if BASE_URL_ENV in os.environ:
baseUrl = os.environ[BASE_URL_ENV]
if baseUrl[-1] != '/':
baseUrl += '/'
baseUrl += outSuffix
return exec(
"hugo",
"--destination=public/" + outSuffix,
"--baseURL",
baseUrl,
"--config",
"config.toml,releases.json",
)
def getReleases():
gitBranches = exec("git", "branch")
branches = gitBranches.stdout.decode('utf8')
branches = branches.split('\n')
res = []
for b in branches:
match = re.compile(r"[ *]+dgraph-([0-9.]+)").match(b)
if match:
res.append(match.group(1))
print('Found release versions', res)
res.sort(key=functools.cmp_to_key(semver.compare), reverse=True)
res = ["master"] + res
print('Order on the webpage: ', res)
return res
def buildBranch(branch, dest, jsonData):
print("Building", branch, "to public/" + dest)
res = exec("git", "checkout", branch)
with open('releases.json', 'w') as f:
f.write(json.dumps(jsonData))
runHugo(dest)
def buildAll(releases):
latestRelease = releases[1]
print('Latest Release (recommended to users): ', latestRelease)
def jsonFor(version, latestRelease, releases):
return {
"params": {
"latestRelease": latestRelease,
"tourReleases": releases,
"thisRelease": version,
},
}
buildBranch(
"dgraph-" + latestRelease,
"",
jsonFor(latestRelease, latestRelease, releases))
for r in releases:
path = r if r == "master" else "dgraph-" + r
buildBranch(path, path, jsonFor(r, latestRelease, releases))
def main():
releases = getReleases()
exec("rm", "-rf", "public")
exec("mkdir", "public")
buildAll(releases)
exec("git", "checkout", "master")
exec("rm", "-rf", "published")
exec("mv", "public", "published")
exec("git", "add", "published")
exec("git", "commit", "-m", "Hugo rebuild all branches")
main()
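# Usage sketch (URL is illustrative): requires hugo and git on PATH plus the
# `semver` package, and rebuilds every release branch into ./published.
#
#     TOUR_BASE_URL=https://tour.example.org/ python scripts/build.py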
| 24.663462
| 68
| 0.578947
|
f341c44f834f5e8bbeea2fba0ed1107ddaf5c7cf
| 41,317
|
py
|
Python
|
test/run_test.py
|
mrTsjolder/pytorch
|
778f9eab6c036964d6dbca06549fb138f1e21c67
|
[
"Intel"
] | 1
|
2022-01-19T15:28:17.000Z
|
2022-01-19T15:28:17.000Z
|
test/run_test.py
|
mrTsjolder/pytorch
|
778f9eab6c036964d6dbca06549fb138f1e21c67
|
[
"Intel"
] | null | null | null |
test/run_test.py
|
mrTsjolder/pytorch
|
778f9eab6c036964d6dbca06549fb138f1e21c67
|
[
"Intel"
] | null | null | null |
#!/usr/bin/env python
import argparse
import copy
from datetime import datetime
import json
import modulefinder
import os
import shutil
import signal
import subprocess
import sys
import tempfile
import torch
from torch.utils import cpp_extension
from torch.testing._internal.common_utils import TEST_WITH_ROCM, shell, set_cwd, FILE_SCHEMA
from torch.testing._internal.framework_utils import calculate_shards
import torch.distributed as dist
from typing import Dict, Optional, Tuple, List, Any
from typing_extensions import TypedDict
try:
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), ".."))
from tools.stats_utils.s3_stat_parser import (get_previous_reports_for_branch, Report, HAVE_BOTO3)
except ImportError:
print("Unable to import s3_stat_parser from tools. Running without S3 stats...")
HAVE_BOTO3 = False
TESTS = [
'test_public_bindings',
'test_type_hints',
'test_autograd',
'benchmark_utils/test_benchmark_utils',
'test_binary_ufuncs',
'test_bundled_inputs',
'test_complex',
'test_cpp_api_parity',
'test_cpp_extensions_aot_no_ninja',
'test_cpp_extensions_aot_ninja',
'test_cpp_extensions_jit',
'distributed/test_c10d',
'distributed/test_jit_c10d',
'distributed/test_c10d_spawn',
'test_cuda',
'test_jit_cuda_fuser',
'test_cuda_primary_ctx',
'test_dataloader',
'test_datapipe',
'distributed/test_data_parallel',
'distributed/test_distributed_fork',
'distributed/test_distributed_spawn',
'distributions/test_constraints',
'distributions/test_distributions',
'test_dispatch',
'test_expecttest',
'test_foreach',
'test_indexing',
'test_jit',
'test_linalg',
'test_logging',
'test_mkldnn',
'test_multiprocessing',
'test_multiprocessing_spawn',
'distributed/test_nccl',
'test_native_functions',
'test_numba_integration',
'test_nn',
'test_ops',
'test_optim',
'test_pytree',
'test_mobile_optimizer',
'test_set_default_mobile_cpu_allocator',
'test_xnnpack_integration',
'test_vulkan',
'test_sparse',
'test_quantization',
'test_pruning_op',
'test_spectral_ops',
'test_serialization',
'test_shape_ops',
'test_show_pickle',
'test_sort_and_select',
'test_tensor_creation_ops',
'test_testing',
'test_torch',
'test_type_info',
'test_unary_ufuncs',
'test_utils',
'test_view_ops',
'test_vmap',
'test_namedtuple_return_api',
'test_numpy_interop',
'test_jit_profiling',
'test_jit_legacy',
'test_jit_fuser_legacy',
'test_tensorboard',
'test_namedtensor',
'test_reductions',
'test_type_promotion',
'test_jit_disabled',
'test_function_schema',
'test_op_aliases',
'test_overrides',
'test_jit_fuser_te',
'test_tensorexpr',
'test_tensorexpr_pybind',
'test_openmp',
'test_profiler',
'distributed/nn/jit/test_instantiator',
'distributed/rpc/test_faulty_agent',
'distributed/rpc/test_process_group_agent',
'distributed/rpc/test_tensorpipe_agent',
'test_jit_py3',
'test_determination',
'test_futures',
'test_fx',
'test_fx_experimental',
'test_functional_autograd_benchmark',
'test_package',
'test_license',
'distributed/pipeline/sync/skip/test_api',
'distributed/pipeline/sync/skip/test_gpipe',
'distributed/pipeline/sync/skip/test_inspect_skip_layout',
'distributed/pipeline/sync/skip/test_leak',
'distributed/pipeline/sync/skip/test_portal',
'distributed/pipeline/sync/skip/test_stash_pop',
'distributed/pipeline/sync/skip/test_tracker',
'distributed/pipeline/sync/skip/test_verify_skippables',
'distributed/pipeline/sync/test_balance',
'distributed/pipeline/sync/test_bugs',
'distributed/pipeline/sync/test_checkpoint',
'distributed/pipeline/sync/test_copy',
'distributed/pipeline/sync/test_deferred_batch_norm',
'distributed/pipeline/sync/test_dependency',
'distributed/pipeline/sync/test_inplace',
'distributed/pipeline/sync/test_microbatch',
'distributed/pipeline/sync/test_phony',
'distributed/pipeline/sync/test_pipe',
'distributed/pipeline/sync/test_pipeline',
'distributed/pipeline/sync/test_stream',
'distributed/pipeline/sync/test_transparency',
'distributed/pipeline/sync/test_worker',
'distributed/optim/test_zero_redundancy_optimizer',
]
# Tests need to be run with pytest.
USE_PYTEST_LIST = [
'distributed/pipeline/sync/skip/test_api',
'distributed/pipeline/sync/skip/test_gpipe',
'distributed/pipeline/sync/skip/test_inspect_skip_layout',
'distributed/pipeline/sync/skip/test_leak',
'distributed/pipeline/sync/skip/test_portal',
'distributed/pipeline/sync/skip/test_stash_pop',
'distributed/pipeline/sync/skip/test_tracker',
'distributed/pipeline/sync/skip/test_verify_skippables',
'distributed/pipeline/sync/test_balance',
'distributed/pipeline/sync/test_bugs',
'distributed/pipeline/sync/test_checkpoint',
'distributed/pipeline/sync/test_copy',
'distributed/pipeline/sync/test_deferred_batch_norm',
'distributed/pipeline/sync/test_dependency',
'distributed/pipeline/sync/test_inplace',
'distributed/pipeline/sync/test_microbatch',
'distributed/pipeline/sync/test_phony',
'distributed/pipeline/sync/test_pipe',
'distributed/pipeline/sync/test_pipeline',
'distributed/pipeline/sync/test_stream',
'distributed/pipeline/sync/test_transparency',
'distributed/pipeline/sync/test_worker',
'distributions/test_constraints',
'distributions/test_transforms',
'distributions/test_utils',
'test_typing',
"distributed/elastic/events/lib_test",
]
WINDOWS_BLOCKLIST = [
'distributed/nn/jit/test_instantiator',
'distributed/rpc/test_faulty_agent',
'distributed/rpc/test_process_group_agent',
'distributed/rpc/test_tensorpipe_agent',
'distributed/test_distributed_fork',
'distributed/pipeline/sync/skip/test_api',
'distributed/pipeline/sync/skip/test_gpipe',
'distributed/pipeline/sync/skip/test_inspect_skip_layout',
'distributed/pipeline/sync/skip/test_leak',
'distributed/pipeline/sync/skip/test_portal',
'distributed/pipeline/sync/skip/test_stash_pop',
'distributed/pipeline/sync/skip/test_tracker',
'distributed/pipeline/sync/skip/test_verify_skippables',
'distributed/pipeline/sync/test_balance',
'distributed/pipeline/sync/test_bugs',
'distributed/pipeline/sync/test_checkpoint',
'distributed/pipeline/sync/test_copy',
'distributed/pipeline/sync/test_deferred_batch_norm',
'distributed/pipeline/sync/test_dependency',
'distributed/pipeline/sync/test_inplace',
'distributed/pipeline/sync/test_microbatch',
'distributed/pipeline/sync/test_phony',
'distributed/pipeline/sync/test_pipe',
'distributed/pipeline/sync/test_pipeline',
'distributed/pipeline/sync/test_stream',
'distributed/pipeline/sync/test_transparency',
'distributed/pipeline/sync/test_worker',
'distributed/optim/test_zero_redundancy_optimizer',
]
ROCM_BLOCKLIST = [
'distributed/nn/jit/test_instantiator',
'distributed/rpc/test_faulty_agent',
'distributed/rpc/test_process_group_agent',
'distributed/rpc/test_tensorpipe_agent',
'test_determination',
'test_multiprocessing',
'test_jit_legacy',
'test_type_hints',
'test_openmp',
]
RUN_PARALLEL_BLOCKLIST = [
'test_cpp_extensions_jit',
'test_expecttest',
'test_jit_disabled',
'test_mobile_optimizer',
'test_multiprocessing',
'test_multiprocessing_spawn',
'test_namedtuple_return_api',
'test_overrides',
'test_show_pickle',
'test_tensorexpr',
'test_cuda_primary_ctx',
] + [test for test in TESTS if test.startswith('distributed/')]
WINDOWS_COVERAGE_BLOCKLIST = [
]
# These tests are slow enough that it's worth calculating whether the patch
# touched any related files first. This list was manually generated, but for every
# run with --determine-from, we use another generated list based on this one and the
# previous test stats.
TARGET_DET_LIST = [
'distributions/test_distributions',
'test_nn',
'test_autograd',
'test_cpp_extensions_jit',
'test_jit_legacy',
'test_dataloader',
'test_overrides',
'test_linalg',
'test_jit',
'test_jit_profiling',
'test_torch',
    'test_binary_ufuncs',
'test_numpy_interop',
'test_reductions',
'test_shape_ops',
'test_sort_and_select',
'test_testing',
'test_view_ops',
'distributed/nn/jit/test_instantiator',
'distributed/test_distributed_fork',
'distributed/rpc/test_process_group_agent',
'distributed/rpc/test_tensorpipe_agent',
'distributed/algorithms/ddp_comm_hooks/test_ddp_hooks',
'distributed/test_distributed_spawn',
'test_cuda',
'test_cuda_primary_ctx',
'test_cpp_extensions_aot_ninja',
'test_cpp_extensions_aot_no_ninja',
'test_serialization',
'test_optim',
'test_utils',
'test_multiprocessing',
'test_tensorboard',
'distributed/test_c10d',
'distributed/test_jit_c10d',
'distributed/test_c10d_spawn',
'test_quantization',
'test_pruning_op',
'test_determination',
'test_futures',
'distributed/pipeline/sync/skip/test_api',
'distributed/pipeline/sync/skip/test_gpipe',
'distributed/pipeline/sync/skip/test_inspect_skip_layout',
'distributed/pipeline/sync/skip/test_leak',
'distributed/pipeline/sync/skip/test_portal',
'distributed/pipeline/sync/skip/test_stash_pop',
'distributed/pipeline/sync/skip/test_tracker',
'distributed/pipeline/sync/skip/test_verify_skippables',
'distributed/pipeline/sync/test_balance',
'distributed/pipeline/sync/test_bugs',
'distributed/pipeline/sync/test_checkpoint',
'distributed/pipeline/sync/test_copy',
'distributed/pipeline/sync/test_deferred_batch_norm',
'distributed/pipeline/sync/test_dependency',
'distributed/pipeline/sync/test_inplace',
'distributed/pipeline/sync/test_microbatch',
'distributed/pipeline/sync/test_phony',
'distributed/pipeline/sync/test_pipe',
'distributed/pipeline/sync/test_pipeline',
'distributed/pipeline/sync/test_stream',
'distributed/pipeline/sync/test_transparency',
'distributed/pipeline/sync/test_worker',
]
# the JSON file to store the S3 test stats
TEST_TIMES_FILE = '.pytorch-test-times'
# if a test file takes longer than 5 min, we add it to TARGET_DET_LIST
SLOW_TEST_THRESHOLD = 300
_DEP_MODULES_CACHE: Dict[str, set] = {}
DISTRIBUTED_TESTS_CONFIG = {}
if dist.is_available():
DISTRIBUTED_TESTS_CONFIG['test'] = {
'WORLD_SIZE': '1'
}
if not TEST_WITH_ROCM and dist.is_mpi_available():
DISTRIBUTED_TESTS_CONFIG['mpi'] = {
'WORLD_SIZE': '3',
'TEST_REPORT_SOURCE_OVERRIDE': 'dist-mpi'
}
if dist.is_nccl_available():
DISTRIBUTED_TESTS_CONFIG['nccl'] = {
'WORLD_SIZE': '2' if torch.cuda.device_count() == 2 else '3',
'TEST_REPORT_SOURCE_OVERRIDE': 'dist-nccl'
}
if dist.is_gloo_available():
DISTRIBUTED_TESTS_CONFIG['gloo'] = {
'WORLD_SIZE': '2' if torch.cuda.device_count() == 2 else '3',
'TEST_REPORT_SOURCE_OVERRIDE': 'dist-gloo'
}
# https://stackoverflow.com/questions/2549939/get-signal-names-from-numbers-in-python
SIGNALS_TO_NAMES_DICT = {getattr(signal, n): n for n in dir(signal)
if n.startswith('SIG') and '_' not in n}
CPP_EXTENSIONS_ERROR = """
Ninja (https://ninja-build.org) is required for some of the C++ extensions
tests, but it could not be found. Install ninja with `pip install ninja`
or `conda install ninja`. Alternatively, disable said tests with
`run_test.py --exclude test_cpp_extensions_aot_ninja test_cpp_extensions_jit`.
"""
PYTORCH_COLLECT_COVERAGE = bool(os.environ.get("PYTORCH_COLLECT_COVERAGE"))
JIT_EXECUTOR_TESTS = [
'test_jit_cuda_fuser',
'test_jit_profiling',
'test_jit_legacy',
'test_jit_fuser_legacy',
]
def print_to_stderr(message):
print(message, file=sys.stderr)
# Convert something like pytorch_windows_vs2019_py36_cuda10.1_build to pytorch_windows_vs2019_py36_cuda10.1
def get_stripped_CI_job() -> str:
job = os.environ.get("CIRCLE_JOB", "").rstrip('0123456789')
if job.endswith('_slow_test'):
job = job[:len(job) - len('_slow_test')]
elif job.endswith('_test'):
job = job[:len(job) - len('_test')]
elif job.endswith('_build'):
job = job[:len(job) - len('_build')]
return job
def calculate_job_times(reports: List[Report]) -> Dict[str, float]:
# an entry will be like ("test_file_name" -> (current_avg, # values))
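    # The running mean is updated incrementally:
    #   new_avg = (curr_avg * curr_count + total_seconds) / (curr_count + 1)
    # e.g. averaging 100s and 140s over two reports gives (100 * 1 + 140) / 2 = 120s.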
jobs_to_times: Dict[str, Tuple[float, int]] = dict()
for report in reports:
assert report.get('format_version') == 2, "S3 format currently handled is version 2 only"
files: Dict[str, Any] = report['files']
for name, test_file in files.items():
if name not in jobs_to_times:
jobs_to_times[name] = (test_file['total_seconds'], 1)
else:
curr_avg, curr_count = jobs_to_times[name]
new_count = curr_count + 1
new_avg = (curr_avg * curr_count + test_file['total_seconds']) / new_count
jobs_to_times[name] = (new_avg, new_count)
# if there's 'test_cpp_extensions_aot' entry in jobs_to_times, add 'test_cpp_extensions_aot_ninja'
# and 'test_cpp_extensions_aot_no_ninja' duplicate entries to ease future computation since
# test_cpp_extensions_aot_no_ninja and test_cpp_extensions_aot_ninja are Python test jobs that
# both use the test_cpp_extensions_aot.py file.
if 'test_cpp_extensions_aot' in jobs_to_times:
jobs_to_times['test_cpp_extensions_aot_ninja'] = jobs_to_times['test_cpp_extensions_aot']
jobs_to_times['test_cpp_extensions_aot_no_ninja'] = jobs_to_times['test_cpp_extensions_aot']
return {job: time for job, (time, _) in jobs_to_times.items()}
def pull_job_times_from_S3() -> Dict[str, float]:
if HAVE_BOTO3:
ci_job_prefix = get_stripped_CI_job()
s3_reports: List[Report] = get_previous_reports_for_branch('origin/nightly', ci_job_prefix)
else:
print('Uh oh, boto3 is not found. Either it is not installed or we failed to import s3_stat_parser.')
print('If not installed, please install boto3 for automatic sharding and test categorization.')
s3_reports = []
if len(s3_reports) == 0:
        print('Gathered no reports from S3. Proceeding without them.')
return dict()
return calculate_job_times(s3_reports)
def get_past_job_times() -> Dict[str, float]:
if os.path.exists(TEST_TIMES_FILE):
with open(TEST_TIMES_FILE) as file:
test_times_json: JobTimeJSON = json.load(file)
curr_commit = subprocess.check_output(['git', 'rev-parse', 'HEAD'], encoding="ascii").strip()
file_commit = test_times_json.get('commit', '')
curr_ci_job = get_stripped_CI_job()
file_ci_job = test_times_json.get('CIRCLE_JOB', 'N/A')
if curr_commit != file_commit:
print(f'Current test times file is from different commit {file_commit}.')
elif curr_ci_job != file_ci_job:
print(f'Current test times file is for different CI job {file_ci_job}.')
else:
print(f'Found stats for current commit: {curr_commit} and job: {curr_ci_job}. Proceeding with those values.')
return test_times_json.get('job_times', {})
# Found file, but commit or CI job in JSON doesn't match
print(f'Overwriting current file with stats based on current commit: {curr_commit} and CI job: {curr_ci_job}')
job_times = pull_job_times_from_S3()
print(f'Exporting S3 test stats to {TEST_TIMES_FILE}.')
export_S3_test_times(TEST_TIMES_FILE, job_times)
return job_times
class JobTimeJSON(TypedDict):
commit: str
job_times: Dict[str, float]
def get_job_times_json(job_times: Dict[str, float]) -> JobTimeJSON:
return {
'commit': subprocess.check_output(['git', 'rev-parse', 'HEAD'], encoding="ascii").strip(),
'CIRCLE_JOB': get_stripped_CI_job(),
'job_times': job_times,
}
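# The exported JSON therefore looks roughly like (illustrative values):
#   {"commit": "<sha>", "CIRCLE_JOB": "pytorch_linux_...", "job_times": {"test_nn": 123.4}}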
def get_shard(which_shard: int, num_shards: int, tests: List[str]) -> List[str]:
jobs_to_times = get_past_job_times()
# Got no stats from S3, returning early to save runtime
if len(jobs_to_times) == 0:
print('Gathered no stats from S3. Proceeding with default sharding plan.')
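        # The default plan is a positional round-robin slice, e.g. shard 2 of 3
        # over [a, b, c, d, e] selects [b, e].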
return tests[which_shard - 1 :: num_shards]
shards = calculate_shards(num_shards, tests, jobs_to_times)
_, tests_from_shard = shards[which_shard - 1]
return tests_from_shard
def get_slow_tests_based_on_S3() -> List[str]:
jobs_to_times: Dict[str, float] = get_past_job_times()
# Got no stats from S3, returning early to save runtime
if len(jobs_to_times) == 0:
print('Gathered no stats from S3. No new slow tests calculated.')
return []
slow_tests: List[str] = []
for test in TESTS:
if test in jobs_to_times and test not in TARGET_DET_LIST:
if jobs_to_times[test] > SLOW_TEST_THRESHOLD:
slow_tests.append(test)
return slow_tests
def get_executable_command(options, allow_pytest, disable_coverage=False):
if options.coverage and not disable_coverage:
executable = ['coverage', 'run', '--parallel-mode', '--source=torch']
else:
executable = [sys.executable]
if options.pytest:
if allow_pytest:
executable += ['-m', 'pytest']
else:
print_to_stderr('Pytest cannot be used for this test. Falling back to unittest.')
return executable
def run_test(test_module, test_directory, options, launcher_cmd=None, extra_unittest_args=None):
unittest_args = options.additional_unittest_args.copy()
if options.verbose:
unittest_args.append(f'-{"v"*options.verbose}') # in case of pytest
if test_module in RUN_PARALLEL_BLOCKLIST:
unittest_args = [arg for arg in unittest_args if not arg.startswith('--run-parallel')]
if extra_unittest_args:
assert isinstance(extra_unittest_args, list)
unittest_args.extend(extra_unittest_args)
# If using pytest, replace -f with equivalent -x
if options.pytest:
unittest_args = [arg if arg != '-f' else '-x' for arg in unittest_args]
# Can't call `python -m unittest test_*` here because it doesn't run code
# in `if __name__ == '__main__': `. So call `python test_*.py` instead.
argv = [test_module + '.py'] + unittest_args
# Multiprocessing related tests cannot run with coverage.
# Tracking issue: https://github.com/pytorch/pytorch/issues/50661
disable_coverage = sys.platform == 'win32' and test_module in WINDOWS_COVERAGE_BLOCKLIST
# Extra arguments are not supported with pytest
executable = get_executable_command(options, allow_pytest=not extra_unittest_args,
disable_coverage=disable_coverage)
command = (launcher_cmd or []) + executable + argv
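    # e.g. with --pytest and -v this becomes something like
    # ['python', '-m', 'pytest', 'test_nn.py', '-v'] (illustrative); a launcher
    # prefix such as mpiexec is prepended for distributed MPI runs.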
print_to_stderr('Executing {} ... [{}]'.format(command, datetime.now()))
return shell(command, test_directory)
def test_cuda_primary_ctx(test_module, test_directory, options):
return run_test(test_module, test_directory, options, extra_unittest_args=['--subprocess'])
def _test_cpp_extensions_aot(test_module, test_directory, options, use_ninja):
if use_ninja:
try:
cpp_extension.verify_ninja_availability()
except RuntimeError:
print(CPP_EXTENSIONS_ERROR)
return 1
# Wipe the build folder, if it exists already
cpp_extensions_test_dir = os.path.join(test_directory, 'cpp_extensions')
cpp_extensions_test_build_dir = os.path.join(cpp_extensions_test_dir, 'build')
if os.path.exists(cpp_extensions_test_build_dir):
shutil.rmtree(cpp_extensions_test_build_dir)
# Build the test cpp extensions modules
shell_env = os.environ.copy()
shell_env['USE_NINJA'] = str(1 if use_ninja else 0)
cmd = [sys.executable, 'setup.py', 'install', '--root', './install']
return_code = shell(cmd, cwd=cpp_extensions_test_dir, env=shell_env)
if return_code != 0:
return return_code
if sys.platform != 'win32':
return_code = shell(cmd,
cwd=os.path.join(cpp_extensions_test_dir, 'no_python_abi_suffix_test'),
env=shell_env)
if return_code != 0:
return return_code
# "install" the test modules and run tests
python_path = os.environ.get('PYTHONPATH', '')
try:
cpp_extensions = os.path.join(test_directory, 'cpp_extensions')
install_directory = ''
# install directory is the one that is named site-packages
for root, directories, _ in os.walk(os.path.join(cpp_extensions, 'install')):
for directory in directories:
if '-packages' in directory:
install_directory = os.path.join(root, directory)
assert install_directory, 'install_directory must not be empty'
os.environ['PYTHONPATH'] = os.pathsep.join([install_directory, python_path])
return run_test(test_module, test_directory, options)
finally:
os.environ['PYTHONPATH'] = python_path
def test_cpp_extensions_aot_ninja(test_module, test_directory, options):
return _test_cpp_extensions_aot('test_cpp_extensions_aot', test_directory,
options, use_ninja=True)
def test_cpp_extensions_aot_no_ninja(test_module, test_directory, options):
return _test_cpp_extensions_aot('test_cpp_extensions_aot',
test_directory, options, use_ninja=False)
def test_distributed(test_module, test_directory, options):
mpi_available = subprocess.call('command -v mpiexec', shell=True) == 0
if options.verbose and not mpi_available:
print_to_stderr(
'MPI not available -- MPI backend tests will be skipped')
config = DISTRIBUTED_TESTS_CONFIG
for backend, env_vars in config.items():
if sys.platform == 'win32' and backend != 'gloo':
continue
if backend == 'mpi' and not mpi_available:
continue
for with_init_file in {True, False}:
if sys.platform == 'win32' and not with_init_file:
continue
tmp_dir = tempfile.mkdtemp()
if options.verbose:
init_str = "with {} init_method"
with_init = init_str.format("file" if with_init_file else "env")
print_to_stderr(
'Running distributed tests for the {} backend {}'.format(
backend, with_init))
os.environ['TEMP_DIR'] = tmp_dir
os.environ['BACKEND'] = backend
os.environ['INIT_METHOD'] = 'env://'
os.environ.update(env_vars)
if with_init_file:
if test_module in ["test_distributed_fork", "test_distributed_spawn"]:
init_method = f'{FILE_SCHEMA}{tmp_dir}/'
else:
init_method = f'{FILE_SCHEMA}{tmp_dir}/shared_init_file'
os.environ['INIT_METHOD'] = init_method
try:
os.mkdir(os.path.join(tmp_dir, 'barrier'))
os.mkdir(os.path.join(tmp_dir, 'test_dir'))
if backend == 'mpi':
# test mpiexec for --noprefix option
with open(os.devnull, 'w') as devnull:
allowrunasroot_opt = '--allow-run-as-root' if subprocess.call(
'mpiexec --allow-run-as-root -n 1 bash -c ""', shell=True,
stdout=devnull, stderr=subprocess.STDOUT) == 0 else ''
noprefix_opt = '--noprefix' if subprocess.call(
f'mpiexec {allowrunasroot_opt} -n 1 --noprefix bash -c ""', shell=True,
stdout=devnull, stderr=subprocess.STDOUT) == 0 else ''
mpiexec = ['mpiexec', '-n', '3', noprefix_opt, allowrunasroot_opt]
return_code = run_test(test_module, test_directory, options,
launcher_cmd=mpiexec)
else:
return_code = run_test(test_module, test_directory, options)
if return_code != 0:
return return_code
finally:
shutil.rmtree(tmp_dir)
return 0
CUSTOM_HANDLERS = {
'test_cuda_primary_ctx': test_cuda_primary_ctx,
'test_cpp_extensions_aot_no_ninja': test_cpp_extensions_aot_no_ninja,
'test_cpp_extensions_aot_ninja': test_cpp_extensions_aot_ninja,
'distributed/test_distributed_fork': test_distributed,
'distributed/test_distributed_spawn': test_distributed,
}
def parse_test_module(test):
return test.split('.')[0]
class TestChoices(list):
def __init__(self, *args, **kwargs):
super(TestChoices, self).__init__(args[0])
def __contains__(self, item):
return list.__contains__(self, parse_test_module(item))
def parse_args():
parser = argparse.ArgumentParser(
description='Run the PyTorch unit test suite',
epilog='where TESTS is any of: {}'.format(', '.join(TESTS)))
parser.add_argument(
'-v',
'--verbose',
action='count',
default=0,
help='print verbose information and test-by-test results')
parser.add_argument(
        '--jit',
action='store_true',
help='run all jit tests')
parser.add_argument(
'-pt', '--pytest', action='store_true',
help='If true, use `pytest` to execute the tests. E.g., this runs '
'TestTorch with pytest in verbose and coverage mode: '
'python run_test.py -vci torch -pt')
parser.add_argument(
'-c', '--coverage', action='store_true', help='enable coverage',
default=PYTORCH_COLLECT_COVERAGE)
parser.add_argument(
'-i',
'--include',
nargs='+',
choices=TestChoices(TESTS),
default=TESTS,
metavar='TESTS',
help='select a set of tests to include (defaults to ALL tests).'
' tests can be specified with module name, module.TestClass'
' or module.TestClass.test_method')
parser.add_argument(
'-x',
'--exclude',
nargs='+',
choices=TESTS,
metavar='TESTS',
default=[],
help='select a set of tests to exclude')
parser.add_argument(
'-f',
'--first',
choices=TESTS,
metavar='TESTS',
help='select the test to start from (excludes previous tests)')
parser.add_argument(
'-l',
'--last',
choices=TESTS,
metavar='TESTS',
help='select the last test to run (excludes following tests)')
parser.add_argument(
'--bring-to-front',
nargs='+',
choices=TestChoices(TESTS),
default=[],
metavar='TESTS',
help='select a set of tests to run first. This can be used in situations'
' where you want to run all tests, but care more about some set, '
'e.g. after making a change to a specific component')
parser.add_argument(
'--ignore-win-blocklist',
action='store_true',
help='always run blocklisted windows tests')
parser.add_argument(
'--determine-from',
help='File of affected source filenames to determine which tests to run.')
parser.add_argument(
'--continue-through-error',
action='store_true',
help='Runs the full test suite despite one of the tests failing')
parser.add_argument(
'additional_unittest_args',
nargs='*',
help='additional arguments passed through to unittest, e.g., '
'python run_test.py -i sparse -- TestSparse.test_factory_size_check')
parser.add_argument(
'--export-past-test-times',
nargs='?',
type=str,
const=TEST_TIMES_FILE,
help='dumps test times from previous S3 stats into a file, format JSON',
)
parser.add_argument(
'--shard',
nargs=2,
type=int,
help='runs a shard of the tests (taking into account other selections), e.g., '
'--shard 2 3 will break up the selected tests into 3 shards and run the tests '
'in the 2nd shard (the first number should not exceed the second)',
)
parser.add_argument(
'--exclude-jit-executor',
action='store_true',
help='exclude tests that are run for a specific jit config'
)
return parser.parse_args()
def find_test_index(test, selected_tests, find_last_index=False):
"""Find the index of the first or last occurrence of a given test/test module in the list of selected tests.
This function is used to determine the indices when slicing the list of selected tests when
``options.first``(:attr:`find_last_index`=False) and/or ``options.last``(:attr:`find_last_index`=True) are used.
    :attr:`selected_tests` can be a list that contains multiple consecutive occurrences of tests
as part of the same test module, e.g.:
```
selected_tests = ['autograd', 'cuda', **'torch.TestTorch.test_acos',
'torch.TestTorch.test_tan', 'torch.TestTorch.test_add'**, 'utils']
```
If :attr:`test`='torch' and :attr:`find_last_index`=False, result should be **2**.
If :attr:`test`='torch' and :attr:`find_last_index`=True, result should be **4**.
Args:
test (str): Name of test to lookup
selected_tests (list): List of tests
        find_last_index (bool, optional): should we look up the index of the first or last
            occurrence (first is default)
Returns:
index of the first or last occurrence of the given test
"""
idx = 0
found_idx = -1
for t in selected_tests:
if t.startswith(test):
found_idx = idx
if not find_last_index:
break
idx += 1
return found_idx
def exclude_tests(exclude_list, selected_tests, exclude_message=None):
for exclude_test in exclude_list:
tests_copy = selected_tests[:]
for test in tests_copy:
if test.startswith(exclude_test):
if exclude_message is not None:
print_to_stderr('Excluding {} {}'.format(test, exclude_message))
selected_tests.remove(test)
return selected_tests
def get_selected_tests(options):
selected_tests = options.include
if options.bring_to_front:
to_front = set(options.bring_to_front)
selected_tests = options.bring_to_front + list(filter(lambda name: name not in to_front,
selected_tests))
if options.first:
first_index = find_test_index(options.first, selected_tests)
selected_tests = selected_tests[first_index:]
if options.last:
last_index = find_test_index(options.last, selected_tests, find_last_index=True)
selected_tests = selected_tests[:last_index + 1]
if options.shard:
assert len(options.shard) == 2, "Unexpected shard format"
assert min(options.shard) > 0, "Shards must be positive numbers"
which_shard, num_shards = options.shard
        assert which_shard <= num_shards, "Selected shard must be less than or equal to the total number of shards"
        assert num_shards <= len(selected_tests), f"Number of shards must not exceed the number of selected tests ({len(selected_tests)})"
selected_tests = get_shard(which_shard, num_shards, selected_tests)
if options.exclude_jit_executor:
options.exclude.extend(JIT_EXECUTOR_TESTS)
selected_tests = exclude_tests(options.exclude, selected_tests)
if sys.platform == 'win32' and not options.ignore_win_blocklist:
target_arch = os.environ.get('VSCMD_ARG_TGT_ARCH')
if target_arch != 'x64':
WINDOWS_BLOCKLIST.append('cpp_extensions_aot_no_ninja')
WINDOWS_BLOCKLIST.append('cpp_extensions_aot_ninja')
WINDOWS_BLOCKLIST.append('cpp_extensions_jit')
WINDOWS_BLOCKLIST.append('jit')
WINDOWS_BLOCKLIST.append('jit_fuser')
selected_tests = exclude_tests(WINDOWS_BLOCKLIST, selected_tests, 'on Windows')
elif TEST_WITH_ROCM:
selected_tests = exclude_tests(ROCM_BLOCKLIST, selected_tests, 'on ROCm')
return selected_tests
def test_impact_of_file(filename):
"""Determine what class of impact this file has on test runs.
Possible values:
TORCH - torch python code
CAFFE2 - caffe2 python code
TEST - torch test code
UNKNOWN - may affect all tests
NONE - known to have no effect on test outcome
CI - CI configuration files
"""
parts = filename.split(os.sep)
if parts[0] in ['.jenkins', '.circleci']:
return 'CI'
if parts[0] in ['docs', 'scripts', 'CODEOWNERS', 'README.md']:
return 'NONE'
elif parts[0] == 'torch':
if parts[-1].endswith('.py') or parts[-1].endswith('.pyi'):
return 'TORCH'
elif parts[0] == 'caffe2':
if parts[-1].endswith('.py') or parts[-1].endswith('.pyi'):
return 'CAFFE2'
elif parts[0] == 'test':
if parts[-1].endswith('.py') or parts[-1].endswith('.pyi'):
return 'TEST'
return 'UNKNOWN'
def log_test_reason(file_type, filename, test, options):
if options.verbose:
print_to_stderr(
'Determination found {} file {} -- running {}'.format(
file_type,
filename,
test,
)
)
def get_dep_modules(test):
# Cache results in case of repetition
if test in _DEP_MODULES_CACHE:
return _DEP_MODULES_CACHE[test]
repo_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
test_location = os.path.join(repo_root, 'test', test + '.py')
finder = modulefinder.ModuleFinder(
# Ideally exclude all third party modules, to speed up calculation.
excludes=[
'scipy',
'numpy',
'numba',
'multiprocessing',
'sklearn',
'setuptools',
'hypothesis',
'llvmlite',
'joblib',
'email',
'importlib',
'unittest',
'urllib',
'json',
'collections',
# Modules below are excluded because they are hitting https://bugs.python.org/issue40350
# Trigger AttributeError: 'NoneType' object has no attribute 'is_package'
'mpl_toolkits',
'google',
'onnx',
# Triggers RecursionError
'mypy'
],
)
# HACK: some platforms default to ascii, so we can't just run_script :(
with open(test_location, 'r', encoding='utf-8') as fp:
finder.load_module('__main__', fp, test_location, ('', 'r', 1))
dep_modules = set(finder.modules.keys())
_DEP_MODULES_CACHE[test] = dep_modules
return dep_modules
def determine_target(target_det_list, test, touched_files, options):
test = parse_test_module(test)
# Some tests are faster to execute than to determine.
if test not in target_det_list:
if options.verbose:
print_to_stderr(f'Running {test} without determination')
return True
# HACK: "no_ninja" is not a real module
if test.endswith('_no_ninja'):
test = test[:(-1 * len('_no_ninja'))]
if test.endswith('_ninja'):
test = test[:(-1 * len('_ninja'))]
dep_modules = get_dep_modules(test)
for touched_file in touched_files:
file_type = test_impact_of_file(touched_file)
if file_type == 'NONE':
continue
elif file_type == 'CI':
# Force all tests to run if any change is made to the CI
# configurations.
log_test_reason(file_type, touched_file, test, options)
return True
elif file_type == 'UNKNOWN':
# Assume uncategorized source files can affect every test.
log_test_reason(file_type, touched_file, test, options)
return True
elif file_type in ['TORCH', 'CAFFE2', 'TEST']:
parts = os.path.splitext(touched_file)[0].split(os.sep)
touched_module = ".".join(parts)
# test/ path does not have a "test." namespace
if touched_module.startswith('test.'):
touched_module = touched_module.split('test.')[1]
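            # e.g. 'torch/nn/functional.py' -> 'torch.nn.functional',
            # 'test/test_nn.py' -> 'test_nn'.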
if (
touched_module in dep_modules
or touched_module == test.replace('/', '.')
):
log_test_reason(file_type, touched_file, test, options)
return True
    # If nothing has determined that the test should run, don't run it.
if options.verbose:
print_to_stderr(f'Determination is skipping {test}')
return False
def run_test_module(test: str, test_directory: str, options) -> Optional[str]:
test_module = parse_test_module(test)
# Printing the date here can help diagnose which tests are slow
print_to_stderr('Running {} ... [{}]'.format(test, datetime.now()))
handler = CUSTOM_HANDLERS.get(test_module, run_test)
return_code = handler(test_module, test_directory, options)
assert isinstance(return_code, int) and not isinstance(
return_code, bool), 'Return code should be an integer'
if return_code == 0:
return None
message = f'{test} failed!'
if return_code < 0:
# subprocess.Popen returns the child process' exit signal as
# return code -N, where N is the signal number.
signal_name = SIGNALS_TO_NAMES_DICT[-return_code]
message += f' Received signal: {signal_name}'
return message
def export_S3_test_times(test_times_filename: str, test_times: Dict[str, float]) -> None:
if os.path.exists(test_times_filename):
        print(f'Overwriting existing file: {test_times_filename}')
with open(test_times_filename, 'w+') as file:
job_times_json = get_job_times_json(test_times)
json.dump(job_times_json, file, indent=' ', separators=(',', ': '))
file.write('\n')
def main():
options = parse_args()
test_times_filename = options.export_past_test_times
if test_times_filename:
print(f'Exporting past test times from S3 to {test_times_filename}, no tests will be run.')
export_S3_test_times(test_times_filename, pull_job_times_from_S3())
return
test_directory = os.path.dirname(os.path.abspath(__file__))
selected_tests = get_selected_tests(options)
if options.verbose:
print_to_stderr('Selected tests: {}'.format(', '.join(selected_tests)))
if options.coverage and not PYTORCH_COLLECT_COVERAGE:
shell(['coverage', 'erase'])
if options.jit:
selected_tests = filter(lambda test_name: "jit" in test_name, TESTS)
if options.determine_from is not None and os.path.exists(options.determine_from):
slow_tests = get_slow_tests_based_on_S3()
print('Added the following tests to target_det tests as calculated based on S3:')
print(slow_tests)
with open(options.determine_from, 'r') as fh:
touched_files = [
os.path.normpath(name.strip()) for name in fh.read().split('\n')
if len(name.strip()) > 0
]
# HACK: Ensure the 'test' paths can be traversed by Modulefinder
sys.path.append('test')
selected_tests = [
test for test in selected_tests
if determine_target(TARGET_DET_LIST + slow_tests, test, touched_files, options)
]
sys.path.remove('test')
has_failed = False
failure_messages = []
try:
for test in selected_tests:
options_clone = copy.deepcopy(options)
if test in USE_PYTEST_LIST:
options_clone.pytest = True
err_message = run_test_module(test, test_directory, options_clone)
if err_message is None:
continue
has_failed = True
failure_messages.append(err_message)
if not options_clone.continue_through_error:
raise RuntimeError(err_message)
print_to_stderr(err_message)
finally:
if options.coverage:
from coverage import Coverage
test_dir = os.path.dirname(os.path.abspath(__file__))
with set_cwd(test_dir):
cov = Coverage()
if PYTORCH_COLLECT_COVERAGE:
cov.load()
cov.combine(strict=False)
cov.save()
if not PYTORCH_COLLECT_COVERAGE:
cov.html_report()
if options.continue_through_error and has_failed:
for err in failure_messages:
print_to_stderr(err)
sys.exit(1)
if __name__ == '__main__':
main()
| 37.73242
| 121
| 0.662367
|
1c88b59ee3f98e230f9c75c3fcde53b039199721
| 1,536
|
py
|
Python
|
tests/image/test_image.py
|
rajatscibi/chitra
|
1543805f1401c571e516e47ab1c8a83b93dd657c
|
[
"Apache-2.0"
] | 158
|
2020-01-27T05:35:35.000Z
|
2021-12-24T16:15:23.000Z
|
tests/image/test_image.py
|
rajatscibi/chitra
|
1543805f1401c571e516e47ab1c8a83b93dd657c
|
[
"Apache-2.0"
] | 112
|
2020-02-15T15:12:38.000Z
|
2021-12-22T13:18:14.000Z
|
tests/image/test_image.py
|
rajatscibi/chitra
|
1543805f1401c571e516e47ab1c8a83b93dd657c
|
[
"Apache-2.0"
] | 33
|
2020-09-25T13:49:17.000Z
|
2021-12-01T13:05:23.000Z
|
from unittest.mock import MagicMock
import numpy as np
from PIL import Image
from chitra.image.image import Chitra, _cache_image
url = (
"https://raw.githubusercontent.com/aniketmaurya/chitra/master/docs/assets/logo.png"
)
image = Chitra(url, cache=True)
def test__load_image():
url = "https://raw.githubusercontent.com/aniketmaurya/chitra/master/docs/assets/logo.png"
image = Chitra(url, cache=True)
assert isinstance(image.image, Image.Image)
def test_numpy():
assert isinstance(image.numpy(), np.ndarray)
def test_to_tensor():
assert True
def test_shape():
assert len(image.shape) == 3
def test_size():
assert len(image.size) == 2
def test_imshow():
assert True
def test_draw_boxes():
assert True
def test_resize_image_with_bbox():
box = [10, 20, 30, 40]
label = ["chitra"]
dummy = np.random.randn(100, 100, 3).astype("uint8")
image = Chitra(dummy, bboxes=box, labels=label)
image.resize_image_with_bbox((10, 10))
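    # Resizing the 100x100 dummy image to 10x10 scales box coordinates by 0.1,
    # so [10, 20, 30, 40] becomes approximately [1, 2, 3, 4].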
rescaled_bounding_box = image.bboxes[0]
assert np.isclose(rescaled_bounding_box.x1, 1)
assert np.isclose(rescaled_bounding_box.y1, 2)
assert np.isclose(rescaled_bounding_box.x2, 3)
assert np.isclose(rescaled_bounding_box.y2, 4)
def test__cache_image():
image = MagicMock()
image.save = MagicMock()
_cache_image(image, "test_image.jpg")
image.save.assert_called_once()
def test_image_resize():
image = Chitra(url, cache=True)
image.resize((224, 224))
assert image.shape[:2] == (224, 224)
| 22.26087
| 93
| 0.703125
|
09128e50839791f582a05c82dd558d9c4759e33e
| 4,445
|
py
|
Python
|
avalanche/models/helper_method.py
|
AlexTo/avalanche
|
e5562e3f3e1aaf7e9623698f7fd493ff97b4bf64
|
[
"MIT"
] | null | null | null |
avalanche/models/helper_method.py
|
AlexTo/avalanche
|
e5562e3f3e1aaf7e9623698f7fd493ff97b4bf64
|
[
"MIT"
] | null | null | null |
avalanche/models/helper_method.py
|
AlexTo/avalanche
|
e5562e3f3e1aaf7e9623698f7fd493ff97b4bf64
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import torch
import torch.nn as nn
from avalanche.models.dynamic_modules import (
MultiTaskModule,
MultiHeadClassifier,
)
class MultiTaskDecorator(MultiTaskModule):
"""
Encapsulates an existing nn.Module to make it subclass MultiTaskModule,
the user should still be able to interact with the encapsulated module
as if it was the module itself.
    The only things that change are the following: the classifier from the
given model will be replaced by a MultiHeadClassifier, and the forward()
implementation will be overwritten by one that accepts task labels.
The encapsulated module will then be automatically extended to
fit new classes during calls to model.adaptation()
"""
def __init__(self, model: nn.Module, classifier_name: str):
"""
:param model: pytorch nn.Module that does not support multitask
:param classifier_name: attribute name of the existing classification
layer inside the module
"""
self.__dict__["_initialized"] = False
super().__init__()
self.model = model
self.classifier_name = classifier_name
old_classifier = getattr(model, classifier_name)
if isinstance(old_classifier, nn.Linear):
in_size = old_classifier.in_features
out_size = old_classifier.out_features
old_params = [
torch.clone(p.data) for p in old_classifier.parameters()
]
# Replace old classifier by empty block
setattr(self.model, classifier_name, nn.Sequential())
elif isinstance(old_classifier, nn.Sequential):
in_size = old_classifier[-1].in_features
out_size = old_classifier[-1].out_features
old_params = [
torch.clone(p.data) for p in old_classifier[-1].parameters()
]
del old_classifier[-1]
else:
            raise NotImplementedError(
                "Cannot handle the following type of classification layer: "
                f"{type(old_classifier)}"
            )
# Set new classifier and initialize to previous param values
setattr(self, classifier_name, MultiHeadClassifier(in_size, out_size))
for param, param_old in zip(
getattr(self, classifier_name).parameters(), old_params
):
param.data = param_old
self.max_class_label = max(self.max_class_label, out_size)
self._initialized = True
def forward_single_task(self, x: torch.Tensor, task_label: int):
out = self.model(x)
return getattr(self, self.classifier_name)(
out.view(out.size(0), -1), task_labels=task_label
)
def __getattr__(self, name):
        # Override the PyTorch implementation from nn.Module.
        # It is a bit particular since nn.Module does not keep some attributes
        # in the usual way in self.__dict__; instead it puts them into the
        # _parameters, _buffers and _modules attributes. We add these lines to
        # avoid infinite recursion.
if name == "model":
return self.__dict__["_modules"]["model"]
if name == self.classifier_name:
return self.__dict__["_modules"][self.classifier_name]
# If its a different attribute, return the one from the model
return getattr(self.model, name)
def __setattr__(self, name, value):
# During initialization, use pytorch routine
if not self.__dict__["_initialized"] or name in self.__dict__:
super().__setattr__(name, value)
else:
return setattr(self.model, name, value)
def as_multitask(model: nn.Module, classifier_name: str) -> MultiTaskModule:
"""
Wraps around a model to make it a multitask model
:param model: model to be converted into MultiTaskModule
:param classifier_name: the name of the attribute containing
the classification layer (nn.Linear). It can also
be an instance of nn.Sequential containing multiple
layers as long as the classification layer is the
last layer.
    :return: the decorated model, now subclassing MultiTaskModule, and
accepting task_labels as forward() method argument
"""
return MultiTaskDecorator(model, classifier_name)
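# Illustrative usage (assuming a torchvision-style model whose final nn.Linear
# is stored in an attribute named 'fc'):
#   mt_model = as_multitask(resnet18(), 'fc')
#   out = mt_model(x, task_labels=0)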
__all__ = ["as_multitask"]
| 38.652174
| 79
| 0.648819
|
4ebb4bf3ce7ace152263d6338f41f09d97e0ab6b
| 1,152
|
py
|
Python
|
test.py
|
looput/Rotated_ROIAlign
|
edf2bcfbd8580b90dcaae3cb92546b719c995a35
|
[
"MIT"
] | 14
|
2019-10-18T11:50:08.000Z
|
2022-03-22T12:20:07.000Z
|
test.py
|
looput/Rotated_ROIAlign
|
edf2bcfbd8580b90dcaae3cb92546b719c995a35
|
[
"MIT"
] | 3
|
2020-06-11T11:09:03.000Z
|
2022-02-25T12:23:49.000Z
|
test.py
|
looput/Rotated_ROIAlign
|
edf2bcfbd8580b90dcaae3cb92546b719c995a35
|
[
"MIT"
] | 2
|
2019-11-18T23:46:28.000Z
|
2021-09-28T09:12:32.000Z
|
import torch
import numpy as np
from matplotlib import pyplot as plt
import cv2
import torchvision.utils as vutils
from roi_align_rotate import ROIAlignRotated
pooler_rotated=ROIAlignRotated((32,192), spatial_scale = (1.), sampling_ratio = 0)
image=cv2.imread('IMG_0451.jpg')
with open('IMG_0451.gt') as f:
lines=f.readlines()
rectes=[]
for line in lines:
line=line.split(' ',7)[2:]
rectes.append([float(num.strip('\n')) for num in line])
device=torch.device('cuda')
rectes=torch.from_numpy(np.array(rectes)).to(device=device,dtype=torch.float32)
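# The ground-truth boxes are read as [x, y, w, h, angle(rad)] with a top-left
# origin; the next two lines shift (x, y) to the box centre and convert the
# angle to degrees with flipped sign, which appears to be the format expected
# by the rotated ROIAlign pooler.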
rectes[:,:2]=rectes[:,:2]+rectes[:,2:4]/2
rectes[:,-1]=-1*rectes[:,-1]*180/np.pi
image_tensor=torch.from_numpy(image[np.newaxis,:,:,:]).to(device=device)
ids=torch.full((rectes.shape[0], 1), 0, dtype=torch.float32, device=device)
rois=torch.cat([ids,rectes],dim=1)
image_tensor=image_tensor.transpose(1,3).transpose(2,3).to(torch.float32)
image_roi_bbox=pooler_rotated(image_tensor,rois)
image_show=vutils.make_grid(image_roi_bbox[:,...], normalize=True, scale_each=True,nrow=2)
image_show=image_show.detach().permute(1,2,0).cpu().numpy()
plt.imshow(image_show)
plt.show()
| 28.8
| 90
| 0.733507
|
6325d47c4ccddd3c677dc4be2bc4c0b6a0539d9c
| 626
|
py
|
Python
|
src/003_aggregation/06/answer_a.py
|
negi524/awesomebook
|
42eedc88fb43276044debc604044badb0e8bcaf0
|
[
"BSD-3-Clause"
] | null | null | null |
src/003_aggregation/06/answer_a.py
|
negi524/awesomebook
|
42eedc88fb43276044debc604044badb0e8bcaf0
|
[
"BSD-3-Clause"
] | null | null | null |
src/003_aggregation/06/answer_a.py
|
negi524/awesomebook
|
42eedc88fb43276044debc604044badb0e8bcaf0
|
[
"BSD-3-Clause"
] | null | null | null |
from pandas._libs.tslibs.timestamps import Timestamp
from preprocess.load_data.data_loader import load_hotel_reserve
import pandas as pd
def main():
"""順位の算出
顧客ごとに予約日時の順位を古い順につける
同じ予約日時の場合はデータ行の読み込み順に小さな順位をつける
"""
customer_tb, hotel_tb, reserve_tb = load_hotel_reserve()
    # Convert the reservation datetime column to a datetime dtype
reserve_tb['reserve_datetime'] = pd.to_datetime(reserve_tb['reserve_datetime'])
reserve_tb['log_no'] = reserve_tb.groupby('customer_id')['reserve_datetime'] \
        .rank(ascending=True, method='first')  # ascending; ties ranked in row-read order
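    # e.g. reservation datetimes [Jan 1, Jan 3, Jan 1] for one customer yield
    # log_no values [1.0, 3.0, 2.0]: the tie is broken by row order.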
print(reserve_tb.head(20))
if __name__ == '__main__':
main()
| 26.083333
| 83
| 0.731629
|
f45e09d414ca816cdb039de5d2201b602d1b912b
| 65,345
|
py
|
Python
|
pysnmp/CISCO-ATM-CONN-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 11
|
2021-02-02T16:27:16.000Z
|
2021-08-31T06:22:49.000Z
|
pysnmp/CISCO-ATM-CONN-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 75
|
2021-02-24T17:30:31.000Z
|
2021-12-08T00:01:18.000Z
|
pysnmp/CISCO-ATM-CONN-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module CISCO-ATM-CONN-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-ATM-CONN-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:33:08 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
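# Note: `mibBuilder` below is supplied at load time by pysnmp's MIB builder when
# this generated module is imported/executed; it is not defined in this file.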
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ConstraintsIntersection, SingleValueConstraint, ValueSizeConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ConstraintsIntersection", "SingleValueConstraint", "ValueSizeConstraint", "ValueRangeConstraint")
atmVplVpi, AtmTrafficDescrParamIndex, atmVclVpi, atmVclVci = mibBuilder.importSymbols("ATM-MIB", "atmVplVpi", "AtmTrafficDescrParamIndex", "atmVclVpi", "atmVclVci")
NsapAtmAddr, = mibBuilder.importSymbols("CISCO-ATM-IF-MIB", "NsapAtmAddr")
LsPerVcqThresholdGroup, = mibBuilder.importSymbols("CISCO-ATM-RM-MIB", "LsPerVcqThresholdGroup")
ciscoExperiment, = mibBuilder.importSymbols("CISCO-SMI", "ciscoExperiment")
ifIndex, InterfaceIndexOrZero = mibBuilder.importSymbols("IF-MIB", "ifIndex", "InterfaceIndexOrZero")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
NotificationGroup, ModuleCompliance, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance", "ObjectGroup")
ObjectIdentity, Bits, Counter64, IpAddress, Unsigned32, MibScalar, MibTable, MibTableRow, MibTableColumn, Integer32, Gauge32, ModuleIdentity, Counter32, TimeTicks, MibIdentifier, iso, NotificationType = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "Bits", "Counter64", "IpAddress", "Unsigned32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Integer32", "Gauge32", "ModuleIdentity", "Counter32", "TimeTicks", "MibIdentifier", "iso", "NotificationType")
TimeStamp, RowStatus, DisplayString, TruthValue, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "TimeStamp", "RowStatus", "DisplayString", "TruthValue", "TextualConvention")
ciscoAtmConnMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 10, 13))
ciscoAtmConnMIB.setRevisions(('2002-07-12 00:00', '2001-10-30 00:00', '2001-10-10 00:00', '2001-08-06 00:00', '2001-01-29 00:00', '1998-10-02 00:00', '1997-05-26 00:00', '1996-11-01 00:00', '1998-07-26 00:00',))
if mibBuilder.loadTexts: ciscoAtmConnMIB.setLastUpdated('200207120000Z')
if mibBuilder.loadTexts: ciscoAtmConnMIB.setOrganization('Cisco Systems, Inc.')
ciscoAtmConnMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 13, 1))
class CastType(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3))
namedValues = NamedValues(("pointToPoint", 1), ("pointToMultiPointRoot", 2), ("pointToMultiPointLeaf", 3))
class ConfigType(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))
namedValues = NamedValues(("other", 1), ("permanent", 2), ("switch", 3), ("soft", 4), ("softPassive", 5))
class SpanType(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))
namedValues = NamedValues(("unknown", 1), ("transit", 2), ("terminate", 3), ("both", 4))
class EnableStatus(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("enable", 1), ("disable", 2))
class UpcStatus(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))
namedValues = NamedValues(("passing", 1), ("tagging", 2), ("dropping", 3), ("localShaping", 4))
class ConnState(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))
namedValues = NamedValues(("setup", 1), ("release", 2), ("notInstalled", 3), ("down", 4), ("up", 5))
class Location(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3))
namedValues = NamedValues(("unknown", 1), ("calling", 2), ("called", 3))
class Direction(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))
namedValues = NamedValues(("unknown", 1), ("p2pCallingSide", 2), ("p2pCalledSide", 3), ("p2mpRoot", 4), ("p2mpLeaf", 5))
class SnoopDirType(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("transmit", 1), ("receive", 2))
ciscoAtmVpl = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1))
ciscoAtmVplTable = MibTable((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1), )
if mibBuilder.loadTexts: ciscoAtmVplTable.setStatus('current')
ciscoAtmVplEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "ATM-MIB", "atmVplVpi"))
if mibBuilder.loadTexts: ciscoAtmVplEntry.setStatus('current')
ciscoAtmVplCastType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 1), CastType().clone('pointToPoint')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmVplCastType.setStatus('current')
ciscoAtmVplSpanType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 2), SpanType().clone('transit')).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVplSpanType.setStatus('current')
ciscoAtmVplConfigType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 3), ConfigType().clone('permanent')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmVplConfigType.setStatus('current')
ciscoAtmVplRxUpcMode = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 4), UpcStatus().clone('passing')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmVplRxUpcMode.setStatus('current')
ciscoAtmVplConnState = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 5), ConnState()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmVplConnState.setStatus('current')
ciscoAtmVplOamLoopbkTxInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(5, 60)).clone(5)).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmVplOamLoopbkTxInterval.setStatus('current')
ciscoAtmVplOamSegmentLoopback = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 7), EnableStatus().clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmVplOamSegmentLoopback.setStatus('current')
ciscoAtmVplOamEndLoopback = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 8), EnableStatus().clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmVplOamEndLoopback.setStatus('current')
ciscoAtmVplOamAisEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 9), EnableStatus().clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmVplOamAisEnable.setStatus('current')
ciscoAtmVplOamRdiEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 10), EnableStatus().clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmVplOamRdiEnable.setStatus('current')
ciscoAtmVplInstallTime = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 11), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVplInstallTime.setStatus('current')
ciscoAtmVplInCells = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVplInCells.setStatus('current')
ciscoAtmVplOutCells = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVplOutCells.setStatus('current')
ciscoAtmVplCrossIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 14), InterfaceIndexOrZero()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmVplCrossIfIndex.setStatus('current')
ciscoAtmVplCrossVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 15), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4095))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmVplCrossVpi.setStatus('current')
ciscoAtmVplNextLeafIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 16), InterfaceIndexOrZero()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVplNextLeafIfIndex.setStatus('current')
ciscoAtmVplNextLeafVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 17), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4095))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVplNextLeafVpi.setStatus('current')
ciscoAtmVplRemoteAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 18), NsapAtmAddr()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmVplRemoteAddr.setStatus('current')
ciscoAtmVplRemoteVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 19), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4095))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmVplRemoteVpi.setStatus('current')
ciscoAtmVplLocation = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 20), Location()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVplLocation.setStatus('current')
ciscoAtmVplSlowRetryIntv = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 21), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmVplSlowRetryIntv.setStatus('current')
ciscoAtmVplNumAttempts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 22), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVplNumAttempts.setStatus('current')
ciscoAtmVplLastReleaseCause = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 23), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 127))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVplLastReleaseCause.setStatus('current')
ciscoAtmVplLogicalPortDef = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 24), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("notLogicalIf", 1), ("isLogicalIf", 2))).clone('notLogicalIf')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmVplLogicalPortDef.setStatus('current')
ciscoAtmVplLogicalPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 25), InterfaceIndexOrZero()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVplLogicalPortIndex.setStatus('current')
ciscoAtmVplUpcViolations = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 26), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVplUpcViolations.setStatus('current')
ciscoAtmVplEpdTpdCellDrops = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 27), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVplEpdTpdCellDrops.setStatus('obsolete')
ciscoAtmVplEpdTpdPacketDrops = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 28), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVplEpdTpdPacketDrops.setStatus('obsolete')
ciscoAtmVplEpdTpdPacketsIn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 29), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVplEpdTpdPacketsIn.setStatus('obsolete')
ciscoAtmVplClp1Drops = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 30), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVplClp1Drops.setStatus('obsolete')
ciscoAtmVplDefaultRxUpcTolerance = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 31), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVplDefaultRxUpcTolerance.setStatus('current')
ciscoAtmVplDefaultRxUpcVbrCdvt = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 32), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVplDefaultRxUpcVbrCdvt.setStatus('current')
ciscoAtmVplLsPerVcqWrrWeight = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 33), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 15))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmVplLsPerVcqWrrWeight.setStatus('current')
ciscoAtmVplLsPerVcqTunnelIsShaped = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 34), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmVplLsPerVcqTunnelIsShaped.setStatus('current')
ciscoAtmVplLsPerVcqXmtQueuedCells = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 35), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVplLsPerVcqXmtQueuedCells.setStatus('current')
ciscoAtmVplLsPerVcQThreshGrp = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 36), LsPerVcqThresholdGroup()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVplLsPerVcQThreshGrp.setStatus('current')
ciscoAtmVplInClp0Cells = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 37), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVplInClp0Cells.setStatus('current')
ciscoAtmVplInClp1Cells = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 38), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVplInClp1Cells.setStatus('current')
ciscoAtmVplOutClp0Cells = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 39), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVplOutClp0Cells.setStatus('current')
ciscoAtmVplOutClp1Cells = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 40), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVplOutClp1Cells.setStatus('current')
ciscoAtmVplCellDrops = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 41), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVplCellDrops.setStatus('current')
ciscoAtmVplClp0VcqFullCellDrops = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 43), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVplClp0VcqFullCellDrops.setStatus('current')
ciscoAtmVplVcqClpThreshCellDrops = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 44), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVplVcqClpThreshCellDrops.setStatus('current')
ciscoAtmVplLsPerVcqTunnelIsHierarchical = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 45), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmVplLsPerVcqTunnelIsHierarchical.setStatus('current')
ciscoAtmVplRxNegTraffDescrIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 46), AtmTrafficDescrParamIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVplRxNegTraffDescrIndex.setStatus('current')
ciscoAtmVplTxNegTraffDescrIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 47), AtmTrafficDescrParamIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVplTxNegTraffDescrIndex.setStatus('current')
ciscoAtmVplSwFabOutCells = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 48), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVplSwFabOutCells.setStatus('current')
ciscoAtmVplSwFabOutClp0Cells = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 49), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVplSwFabOutClp0Cells.setStatus('current')
ciscoAtmVplSwFabOutClp1Cells = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 50), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVplSwFabOutClp1Cells.setStatus('current')
ciscoAtmVplConnName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 51), SnmpAdminString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmVplConnName.setStatus('current')
ciscoAtmVplConnType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 1, 1, 1, 52), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("local", 1), ("endToEnd", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmVplConnType.setStatus('current')
ciscoAtmVcl = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2))
ciscoAtmVclTable = MibTable((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1), )
if mibBuilder.loadTexts: ciscoAtmVclTable.setStatus('current')
ciscoAtmVclEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "ATM-MIB", "atmVclVpi"), (0, "ATM-MIB", "atmVclVci"))
if mibBuilder.loadTexts: ciscoAtmVclEntry.setStatus('current')
ciscoAtmVclCastType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 1), CastType().clone('pointToPoint')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmVclCastType.setStatus('current')
ciscoAtmVclSpanType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 2), SpanType().clone('transit')).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVclSpanType.setStatus('current')
ciscoAtmVclConfigType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 3), ConfigType().clone('permanent')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmVclConfigType.setStatus('current')
ciscoAtmVclRxUpcMode = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 4), UpcStatus().clone('passing')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmVclRxUpcMode.setStatus('current')
ciscoAtmVclEpdEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2), ("useTrafficDescr", 3))).clone('useTrafficDescr')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmVclEpdEnable.setStatus('current')
ciscoAtmVclConnState = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 6), ConnState()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmVclConnState.setStatus('current')
ciscoAtmVclOamLoopbkTxInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(5, 60)).clone(5)).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmVclOamLoopbkTxInterval.setStatus('current')
ciscoAtmVclOamSegmentLoopback = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 8), EnableStatus().clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmVclOamSegmentLoopback.setStatus('current')
ciscoAtmVclOamEndLoopback = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 9), EnableStatus().clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmVclOamEndLoopback.setStatus('current')
ciscoAtmVclOamAisEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 10), EnableStatus().clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmVclOamAisEnable.setStatus('current')
ciscoAtmVclOamRdiEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 11), EnableStatus().clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmVclOamRdiEnable.setStatus('current')
ciscoAtmVclInstallTime = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 12), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVclInstallTime.setStatus('current')
ciscoAtmVclInCells = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVclInCells.setStatus('current')
ciscoAtmVclOutCells = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVclOutCells.setStatus('current')
ciscoAtmVclCrossIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 15), InterfaceIndexOrZero()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmVclCrossIfIndex.setStatus('current')
ciscoAtmVclCrossVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 16), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4095))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmVclCrossVpi.setStatus('current')
ciscoAtmVclCrossVci = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 17), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmVclCrossVci.setStatus('current')
ciscoAtmVclNextLeafIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 18), InterfaceIndexOrZero()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVclNextLeafIfIndex.setStatus('current')
ciscoAtmVclNextLeafVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 19), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4095))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVclNextLeafVpi.setStatus('current')
ciscoAtmVclNextLeafVci = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 20), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVclNextLeafVci.setStatus('current')
ciscoAtmVclAalEncapFlag = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 21), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))).clone(namedValues=NamedValues(("other", 1), ("aal5Snap", 2), ("aal5Nlpid", 3), ("aal5FrNlpid", 4), ("aal5Mux", 5), ("aal34Smds", 6), ("aalQsAal", 7), ("aal5Ilmi", 8), ("aal5Lane", 9), ("aal5Pnni", 10)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmVclAalEncapFlag.setStatus('current')
ciscoAtmVclAalEncapProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 22), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9))).clone(namedValues=NamedValues(("other", 1), ("ip", 2), ("xns", 3), ("appletalk", 4), ("clns", 5), ("decnet", 6), ("novell", 7), ("apollo", 8), ("vines", 9)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmVclAalEncapProtocol.setStatus('current')
ciscoAtmVclAalUserVcType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 23), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14))).clone(namedValues=NamedValues(("other", 1), ("boxConfigure", 2), ("busForward", 3), ("busSend", 4), ("clientConfigure", 5), ("clientData", 6), ("clientDirect", 7), ("clientDistribute", 8), ("clientForward", 9), ("clientSend", 10), ("configure", 11), ("serverConfigure", 12), ("serverDirect", 13), ("serverDistribute", 14)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmVclAalUserVcType.setStatus('current')
ciscoAtmVclAtmInArpInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 24), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 60))).setUnits('minutes').setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmVclAtmInArpInterval.setStatus('current')
ciscoAtmVclRemoteAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 25), NsapAtmAddr()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmVclRemoteAddr.setStatus('current')
ciscoAtmVclRemoteVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 26), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4095))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmVclRemoteVpi.setStatus('current')
ciscoAtmVclRemoteVci = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 27), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmVclRemoteVci.setStatus('current')
ciscoAtmVclLocation = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 28), Location()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVclLocation.setStatus('current')
ciscoAtmVclSlowRetryIntv = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 29), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmVclSlowRetryIntv.setStatus('current')
ciscoAtmVclNumAttempts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 30), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVclNumAttempts.setStatus('current')
ciscoAtmVclLastReleaseCause = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 31), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 127))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVclLastReleaseCause.setStatus('current')
ciscoAtmVclUpcViolations = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 32), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVclUpcViolations.setStatus('current')
ciscoAtmVclEpdTpdCellDrops = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 33), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVclEpdTpdCellDrops.setStatus('obsolete')
ciscoAtmVclEpdTpdPacketDrops = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 34), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVclEpdTpdPacketDrops.setStatus('current')
ciscoAtmVclEpdTpdPacketsIn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 35), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVclEpdTpdPacketsIn.setStatus('current')
ciscoAtmVclClp1Drops = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 37), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVclClp1Drops.setStatus('obsolete')
ciscoAtmVclDefaultRxUpcTolerance = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 38), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVclDefaultRxUpcTolerance.setStatus('current')
ciscoAtmVclDefaultRxUpcVbrCdvt = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 39), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVclDefaultRxUpcVbrCdvt.setStatus('current')
ciscoAtmVclLsPerVcqWrrWeight = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 40), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 15))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmVclLsPerVcqWrrWeight.setStatus('current')
ciscoAtmVclLsPerVcqXmtQueuedCells = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 41), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVclLsPerVcqXmtQueuedCells.setStatus('current')
ciscoAtmVclLsPerVcQThreshGrp = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 42), LsPerVcqThresholdGroup()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVclLsPerVcQThreshGrp.setStatus('current')
ciscoAtmVclInClp0Cells = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 43), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVclInClp0Cells.setStatus('current')
ciscoAtmVclInClp1Cells = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 44), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVclInClp1Cells.setStatus('current')
ciscoAtmVclOutClp0Cells = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 45), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVclOutClp0Cells.setStatus('current')
ciscoAtmVclOutClp1Cells = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 46), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVclOutClp1Cells.setStatus('current')
ciscoAtmVclCellDrops = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 47), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVclCellDrops.setStatus('current')
ciscoAtmVclClp0VcqFullCellDrops = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 48), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVclClp0VcqFullCellDrops.setStatus('current')
ciscoAtmVclVcqClpThreshCellDrops = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 49), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVclVcqClpThreshCellDrops.setStatus('current')
ciscoAtmVclRxNegTraffDescrIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 50), AtmTrafficDescrParamIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVclRxNegTraffDescrIndex.setStatus('current')
ciscoAtmVclTxNegTraffDescrIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 51), AtmTrafficDescrParamIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVclTxNegTraffDescrIndex.setStatus('current')
ciscoAtmVclSwFabOutCells = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 52), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVclSwFabOutCells.setStatus('current')
ciscoAtmVclSwFabOutClp0Cells = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 53), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVclSwFabOutClp0Cells.setStatus('current')
ciscoAtmVclSwFabOutClp1Cells = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 54), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmVclSwFabOutClp1Cells.setStatus('current')
ciscoAtmVclConnName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 55), SnmpAdminString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmVclConnName.setStatus('current')
ciscoAtmVclConnType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 56), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("local", 1), ("endToEnd", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmVclConnType.setStatus('current')
ciscoAtmSvp = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 3))
ciscoAtmSvpAddrTable = MibTable((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 3, 1), )
if mibBuilder.loadTexts: ciscoAtmSvpAddrTable.setStatus('current')
ciscoAtmSvpAddrEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 3, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "CISCO-ATM-CONN-MIB", "ciscoAtmSvpAddr"), (0, "CISCO-ATM-CONN-MIB", "ciscoAtmSvpVpi"))
if mibBuilder.loadTexts: ciscoAtmSvpAddrEntry.setStatus('current')
ciscoAtmSvpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 3, 1, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(20, 20)).setFixedLength(20))
if mibBuilder.loadTexts: ciscoAtmSvpAddr.setStatus('current')
ciscoAtmSvpVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 3, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4095)))
if mibBuilder.loadTexts: ciscoAtmSvpVpi.setStatus('current')
ciscoAtmSvpDirection = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 3, 1, 1, 3), Direction()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmSvpDirection.setStatus('current')
ciscoAtmSvc = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 4))
ciscoAtmSvcAddrTable = MibTable((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 4, 1), )
if mibBuilder.loadTexts: ciscoAtmSvcAddrTable.setStatus('current')
ciscoAtmSvcAddrEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 4, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "CISCO-ATM-CONN-MIB", "ciscoAtmSvcAddr"), (0, "CISCO-ATM-CONN-MIB", "ciscoAtmSvcVpi"), (0, "CISCO-ATM-CONN-MIB", "ciscoAtmSvcVci"))
if mibBuilder.loadTexts: ciscoAtmSvcAddrEntry.setStatus('current')
ciscoAtmSvcAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 4, 1, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(20, 20)).setFixedLength(20))
if mibBuilder.loadTexts: ciscoAtmSvcAddr.setStatus('current')
ciscoAtmSvcVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 4, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4095)))
if mibBuilder.loadTexts: ciscoAtmSvcVpi.setStatus('current')
ciscoAtmSvcVci = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 4, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535)))
if mibBuilder.loadTexts: ciscoAtmSvcVci.setStatus('current')
ciscoAtmSvcDirection = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 4, 1, 1, 4), Direction()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmSvcDirection.setStatus('current')
ciscoAtmSnoopVc = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 5))
ciscoAtmSnoopVcTable = MibTable((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 5, 1), )
if mibBuilder.loadTexts: ciscoAtmSnoopVcTable.setStatus('current')
ciscoAtmSnoopVcEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 5, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "ATM-MIB", "atmVclVpi"), (0, "ATM-MIB", "atmVclVci"))
if mibBuilder.loadTexts: ciscoAtmSnoopVcEntry.setStatus('current')
ciscoAtmSnoopVcSnoopedIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 5, 1, 1, 1), InterfaceIndexOrZero()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ciscoAtmSnoopVcSnoopedIfIndex.setStatus('current')
ciscoAtmSnoopVcSnoopedVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 5, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4095))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ciscoAtmSnoopVcSnoopedVpi.setStatus('current')
ciscoAtmSnoopVcSnoopedVci = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 5, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ciscoAtmSnoopVcSnoopedVci.setStatus('current')
ciscoAtmSnoopVcDir = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 5, 1, 1, 4), SnoopDirType().clone('receive')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ciscoAtmSnoopVcDir.setStatus('current')
ciscoAtmSnoopVcState = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 5, 1, 1, 5), ConnState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmSnoopVcState.setStatus('current')
ciscoAtmSnoopVcRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 5, 1, 1, 6), RowStatus().clone('active')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ciscoAtmSnoopVcRowStatus.setStatus('current')
ciscoAtmSnoopVp = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 6))
ciscoAtmSnoopVpTable = MibTable((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 6, 1), )
if mibBuilder.loadTexts: ciscoAtmSnoopVpTable.setStatus('current')
ciscoAtmSnoopVpEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 6, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "ATM-MIB", "atmVplVpi"))
if mibBuilder.loadTexts: ciscoAtmSnoopVpEntry.setStatus('current')
ciscoAtmSnoopVpSnoopedIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 6, 1, 1, 1), InterfaceIndexOrZero()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ciscoAtmSnoopVpSnoopedIfIndex.setStatus('current')
ciscoAtmSnoopVpSnoopedVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 6, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4095))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ciscoAtmSnoopVpSnoopedVpi.setStatus('current')
ciscoAtmSnoopVpDir = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 6, 1, 1, 3), SnoopDirType().clone('receive')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ciscoAtmSnoopVpDir.setStatus('current')
ciscoAtmSnoopVpState = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 6, 1, 1, 4), ConnState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ciscoAtmSnoopVpState.setStatus('current')
ciscoAtmSnoopVpRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 6, 1, 1, 5), RowStatus().clone('active')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ciscoAtmSnoopVpRowStatus.setStatus('current')
ciscoAtmSvcFrameDiscardUsesAal5Ie = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 4, 2), TruthValue().clone('true')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ciscoAtmSvcFrameDiscardUsesAal5Ie.setStatus('current')
ciscoAtmConnMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 13, 3))
ciscoAtmConnMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 13, 3, 1))
ciscoAtmConnMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 13, 3, 2))
ciscoAtmConnMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 10, 13, 3, 1, 1)).setObjects(("CISCO-ATM-CONN-MIB", "ciscoAtmConnMIBGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoAtmConnMIBCompliance = ciscoAtmConnMIBCompliance.setStatus('obsolete')
ciscoAtmConnMIBCompliance2 = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 10, 13, 3, 1, 2)).setObjects(("CISCO-ATM-CONN-MIB", "ciscoAtmConnMIBGroup"), ("CISCO-ATM-CONN-MIB", "ciscoAtmConnMIBGroup2"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoAtmConnMIBCompliance2 = ciscoAtmConnMIBCompliance2.setStatus('obsolete')
ciscoAtmConnMIBCompliance3 = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 10, 13, 3, 1, 3)).setObjects(("CISCO-ATM-CONN-MIB", "ciscoAtmConnMIBGroup"), ("CISCO-ATM-CONN-MIB", "ciscoAtmConnMIBGroup3"), ("CISCO-ATM-CONN-MIB", "ciscoAtmConnMIBlsPerVcqGroup"), ("CISCO-ATM-CONN-MIB", "ciscoAtmConnMIBlsFcPfqGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoAtmConnMIBCompliance3 = ciscoAtmConnMIBCompliance3.setStatus('obsolete')
ciscoAtmConnMIBCompliance4 = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 10, 13, 3, 1, 4)).setObjects(("CISCO-ATM-CONN-MIB", "ciscoAtmConnMIBGroup"), ("CISCO-ATM-CONN-MIB", "ciscoAtmConnMIBGroup3"), ("CISCO-ATM-CONN-MIB", "ciscoAtmConnMIBlsPerVcqGroup2"), ("CISCO-ATM-CONN-MIB", "ciscoAtmConnMIBlsFcPfqGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoAtmConnMIBCompliance4 = ciscoAtmConnMIBCompliance4.setStatus('obsolete')
ciscoAtmConnMIBCompliance5 = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 10, 13, 3, 1, 5)).setObjects(("CISCO-ATM-CONN-MIB", "ciscoAtmConnMIBGroup"), ("CISCO-ATM-CONN-MIB", "ciscoAtmConnMIBGroup3"), ("CISCO-ATM-CONN-MIB", "ciscoAtmConnMIBlsPerVcqGroup2"), ("CISCO-ATM-CONN-MIB", "ciscoAtmConnMIBlsFcPfqGroup"), ("CISCO-ATM-CONN-MIB", "ciscoAtmConnMIBNegTraffGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoAtmConnMIBCompliance5 = ciscoAtmConnMIBCompliance5.setStatus('deprecated')
ciscoAtmConnMIBCompliance6 = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 10, 13, 3, 1, 6)).setObjects(("CISCO-ATM-CONN-MIB", "ciscoAtmConnMIBGroup"), ("CISCO-ATM-CONN-MIB", "ciscoAtmConnMIBGroup3"), ("CISCO-ATM-CONN-MIB", "ciscoAtmConnMIBlsPerVcqGroup2"), ("CISCO-ATM-CONN-MIB", "ciscoAtmConnMIBlsFcPfqGroup1"), ("CISCO-ATM-CONN-MIB", "ciscoAtmConnMIBNegTraffGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoAtmConnMIBCompliance6 = ciscoAtmConnMIBCompliance6.setStatus('deprecated')
ciscoAtmConnMIBCompliance7 = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 10, 13, 3, 1, 7)).setObjects(("CISCO-ATM-CONN-MIB", "ciscoAtmConnMIBGroup1"), ("CISCO-ATM-CONN-MIB", "ciscoAtmConnMIBGroup3"), ("CISCO-ATM-CONN-MIB", "ciscoAtmConnMIBlsPerVcqGroup2"), ("CISCO-ATM-CONN-MIB", "ciscoAtmConnMIBlsFcPfqGroup1"), ("CISCO-ATM-CONN-MIB", "ciscoAtmConnMIBNegTraffGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoAtmConnMIBCompliance7 = ciscoAtmConnMIBCompliance7.setStatus('deprecated')
ciscoAtmConnMIBCompliance8 = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 10, 13, 3, 1, 8)).setObjects(("CISCO-ATM-CONN-MIB", "ciscoAtmConnMIBGroup4"), ("CISCO-ATM-CONN-MIB", "ciscoAtmConnMIBGroup3"), ("CISCO-ATM-CONN-MIB", "ciscoAtmConnMIBlsPerVcqGroup2"), ("CISCO-ATM-CONN-MIB", "ciscoAtmConnMIBlsFcPfqGroup1"), ("CISCO-ATM-CONN-MIB", "ciscoAtmConnMIBNegTraffGroup"), ("CISCO-ATM-CONN-MIB", "ciscoAtmConnNmsGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoAtmConnMIBCompliance8 = ciscoAtmConnMIBCompliance8.setStatus('current')
ciscoAtmConnMIBGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 10, 13, 3, 2, 1)).setObjects(("CISCO-ATM-CONN-MIB", "ciscoAtmVplCastType"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplSpanType"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplConfigType"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplRxUpcMode"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplConnState"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplOamLoopbkTxInterval"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplOamSegmentLoopback"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplOamEndLoopback"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplOamAisEnable"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplOamRdiEnable"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplInstallTime"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplInCells"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplOutCells"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplCrossIfIndex"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplCrossVpi"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplNextLeafIfIndex"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplNextLeafVpi"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplRemoteAddr"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplRemoteVpi"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplLocation"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplSlowRetryIntv"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplNumAttempts"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplLastReleaseCause"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplLogicalPortDef"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplLogicalPortIndex"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclCastType"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclSpanType"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclConfigType"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclRxUpcMode"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclEpdEnable"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclConnState"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclOamLoopbkTxInterval"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclOamSegmentLoopback"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclOamEndLoopback"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclOamAisEnable"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclOamRdiEnable"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclInstallTime"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclInCells"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclOutCells"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclCrossIfIndex"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclCrossVpi"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclCrossVci"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclNextLeafIfIndex"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclNextLeafVpi"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclNextLeafVci"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclAalEncapFlag"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclAalEncapProtocol"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclAalUserVcType"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclAtmInArpInterval"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclRemoteAddr"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclRemoteVpi"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclRemoteVci"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclLocation"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclSlowRetryIntv"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclNumAttempts"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclLastReleaseCause"), ("CISCO-ATM-CONN-MIB", "ciscoAtmSvpDirection"), ("CISCO-ATM-CONN-MIB", "ciscoAtmSvcDirection"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoAtmConnMIBGroup = ciscoAtmConnMIBGroup.setStatus('deprecated')
ciscoAtmConnMIBGroup2 = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 10, 13, 3, 2, 2)).setObjects(("CISCO-ATM-CONN-MIB", "ciscoAtmVplUpcViolations"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplEpdTpdCellDrops"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplEpdTpdPacketDrops"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplEpdTpdPacketsIn"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplClp1Drops"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplDefaultRxUpcTolerance"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclUpcViolations"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclEpdTpdCellDrops"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclEpdTpdPacketDrops"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclEpdTpdPacketsIn"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclClp1Drops"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclDefaultRxUpcTolerance"), ("CISCO-ATM-CONN-MIB", "ciscoAtmSvcFrameDiscardUsesAal5Ie"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoAtmConnMIBGroup2 = ciscoAtmConnMIBGroup2.setStatus('obsolete')
ciscoAtmConnMIBGroup3 = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 10, 13, 3, 2, 3)).setObjects(("CISCO-ATM-CONN-MIB", "ciscoAtmVplDefaultRxUpcTolerance"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclDefaultRxUpcTolerance"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplDefaultRxUpcVbrCdvt"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclDefaultRxUpcVbrCdvt"), ("CISCO-ATM-CONN-MIB", "ciscoAtmSvcFrameDiscardUsesAal5Ie"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoAtmConnMIBGroup3 = ciscoAtmConnMIBGroup3.setStatus('current')
ciscoAtmConnMIBlsPerVcqGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 10, 13, 3, 2, 4)).setObjects(("CISCO-ATM-CONN-MIB", "ciscoAtmVplLsPerVcqWrrWeight"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplLsPerVcqTunnelIsShaped"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplLsPerVcqXmtQueuedCells"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplLsPerVcQThreshGrp"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclLsPerVcqWrrWeight"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclLsPerVcqXmtQueuedCells"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclLsPerVcQThreshGrp"), ("CISCO-ATM-CONN-MIB", "ciscoAtmSnoopVcSnoopedIfIndex"), ("CISCO-ATM-CONN-MIB", "ciscoAtmSnoopVcSnoopedVpi"), ("CISCO-ATM-CONN-MIB", "ciscoAtmSnoopVcSnoopedVci"), ("CISCO-ATM-CONN-MIB", "ciscoAtmSnoopVcDir"), ("CISCO-ATM-CONN-MIB", "ciscoAtmSnoopVcState"), ("CISCO-ATM-CONN-MIB", "ciscoAtmSnoopVcRowStatus"), ("CISCO-ATM-CONN-MIB", "ciscoAtmSnoopVpSnoopedIfIndex"), ("CISCO-ATM-CONN-MIB", "ciscoAtmSnoopVpSnoopedVpi"), ("CISCO-ATM-CONN-MIB", "ciscoAtmSnoopVpDir"), ("CISCO-ATM-CONN-MIB", "ciscoAtmSnoopVpState"), ("CISCO-ATM-CONN-MIB", "ciscoAtmSnoopVpRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoAtmConnMIBlsPerVcqGroup = ciscoAtmConnMIBlsPerVcqGroup.setStatus('obsolete')
ciscoAtmConnMIBlsFcPfqGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 10, 13, 3, 2, 5)).setObjects(("CISCO-ATM-CONN-MIB", "ciscoAtmVplUpcViolations"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclUpcViolations"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclEpdTpdPacketDrops"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclEpdTpdPacketsIn"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplInClp0Cells"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplInClp1Cells"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplOutClp0Cells"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplOutClp1Cells"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplCellDrops"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplClp0VcqFullCellDrops"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplVcqClpThreshCellDrops"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclInClp0Cells"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclInClp1Cells"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclOutClp0Cells"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclOutClp1Cells"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclCellDrops"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclClp0VcqFullCellDrops"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclVcqClpThreshCellDrops"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoAtmConnMIBlsFcPfqGroup = ciscoAtmConnMIBlsFcPfqGroup.setStatus('deprecated')
ciscoAtmConnMIBlsPerVcqGroup2 = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 10, 13, 3, 2, 6)).setObjects(("CISCO-ATM-CONN-MIB", "ciscoAtmVplLsPerVcqWrrWeight"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplLsPerVcqTunnelIsShaped"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplLsPerVcqTunnelIsHierarchical"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplLsPerVcqXmtQueuedCells"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplLsPerVcQThreshGrp"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclLsPerVcqWrrWeight"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclLsPerVcqXmtQueuedCells"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclLsPerVcQThreshGrp"), ("CISCO-ATM-CONN-MIB", "ciscoAtmSnoopVcSnoopedIfIndex"), ("CISCO-ATM-CONN-MIB", "ciscoAtmSnoopVcSnoopedVpi"), ("CISCO-ATM-CONN-MIB", "ciscoAtmSnoopVcSnoopedVci"), ("CISCO-ATM-CONN-MIB", "ciscoAtmSnoopVcDir"), ("CISCO-ATM-CONN-MIB", "ciscoAtmSnoopVcState"), ("CISCO-ATM-CONN-MIB", "ciscoAtmSnoopVcRowStatus"), ("CISCO-ATM-CONN-MIB", "ciscoAtmSnoopVpSnoopedIfIndex"), ("CISCO-ATM-CONN-MIB", "ciscoAtmSnoopVpSnoopedVpi"), ("CISCO-ATM-CONN-MIB", "ciscoAtmSnoopVpDir"), ("CISCO-ATM-CONN-MIB", "ciscoAtmSnoopVpState"), ("CISCO-ATM-CONN-MIB", "ciscoAtmSnoopVpRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoAtmConnMIBlsPerVcqGroup2 = ciscoAtmConnMIBlsPerVcqGroup2.setStatus('current')
ciscoAtmConnMIBNegTraffGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 10, 13, 3, 2, 7)).setObjects(("CISCO-ATM-CONN-MIB", "ciscoAtmVplRxNegTraffDescrIndex"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplTxNegTraffDescrIndex"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclRxNegTraffDescrIndex"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclTxNegTraffDescrIndex"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoAtmConnMIBNegTraffGroup = ciscoAtmConnMIBNegTraffGroup.setStatus('current')
ciscoAtmConnMIBlsFcPfqGroup1 = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 10, 13, 3, 2, 8)).setObjects(("CISCO-ATM-CONN-MIB", "ciscoAtmVplUpcViolations"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclUpcViolations"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclEpdTpdPacketDrops"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclEpdTpdPacketsIn"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplInClp0Cells"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplInClp1Cells"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplOutClp0Cells"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplOutClp1Cells"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplCellDrops"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplClp0VcqFullCellDrops"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplVcqClpThreshCellDrops"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplSwFabOutCells"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplSwFabOutClp0Cells"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplSwFabOutClp1Cells"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclInClp0Cells"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclInClp1Cells"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclOutClp0Cells"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclOutClp1Cells"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclCellDrops"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclClp0VcqFullCellDrops"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclVcqClpThreshCellDrops"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclSwFabOutCells"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclSwFabOutClp0Cells"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclSwFabOutClp1Cells"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoAtmConnMIBlsFcPfqGroup1 = ciscoAtmConnMIBlsFcPfqGroup1.setStatus('current')
ciscoAtmConnMIBGroup1 = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 10, 13, 3, 2, 9)).setObjects(("CISCO-ATM-CONN-MIB", "ciscoAtmVplCastType"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplSpanType"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplConfigType"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplRxUpcMode"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplConnState"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplOamLoopbkTxInterval"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplOamSegmentLoopback"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplOamEndLoopback"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplOamAisEnable"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplOamRdiEnable"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplInstallTime"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplInCells"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplOutCells"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplCrossIfIndex"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplCrossVpi"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplNextLeafIfIndex"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplNextLeafVpi"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplRemoteAddr"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplRemoteVpi"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplLocation"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplSlowRetryIntv"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplNumAttempts"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplLastReleaseCause"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplLogicalPortDef"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplLogicalPortIndex"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplConnName"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclCastType"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclSpanType"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclConfigType"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclRxUpcMode"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclEpdEnable"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclConnState"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclOamLoopbkTxInterval"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclOamSegmentLoopback"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclOamEndLoopback"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclOamAisEnable"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclOamRdiEnable"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclInstallTime"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclInCells"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclOutCells"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclCrossIfIndex"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclCrossVpi"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclCrossVci"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclNextLeafIfIndex"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclNextLeafVpi"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclNextLeafVci"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclAalEncapFlag"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclAalEncapProtocol"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclAalUserVcType"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclAtmInArpInterval"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclRemoteAddr"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclRemoteVpi"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclRemoteVci"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclLocation"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclSlowRetryIntv"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclNumAttempts"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclLastReleaseCause"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclConnName"), ("CISCO-ATM-CONN-MIB", "ciscoAtmSvpDirection"), ("CISCO-ATM-CONN-MIB", "ciscoAtmSvcDirection"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoAtmConnMIBGroup1 = ciscoAtmConnMIBGroup1.setStatus('deprecated')
ciscoAtmConnMIBGroup4 = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 10, 13, 3, 2, 10)).setObjects(("CISCO-ATM-CONN-MIB", "ciscoAtmVplCastType"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplSpanType"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplConfigType"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplRxUpcMode"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplConnState"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplOamLoopbkTxInterval"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplOamSegmentLoopback"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplOamEndLoopback"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplOamAisEnable"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplOamRdiEnable"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplInstallTime"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplInCells"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplOutCells"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplCrossIfIndex"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplCrossVpi"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplNextLeafIfIndex"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplNextLeafVpi"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplRemoteAddr"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplRemoteVpi"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplLocation"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplSlowRetryIntv"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplNumAttempts"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplLastReleaseCause"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplLogicalPortDef"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplLogicalPortIndex"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclCastType"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclSpanType"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclConfigType"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclRxUpcMode"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclEpdEnable"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclConnState"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclOamLoopbkTxInterval"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclOamSegmentLoopback"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclOamEndLoopback"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclOamAisEnable"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclOamRdiEnable"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclInstallTime"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclInCells"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclOutCells"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclCrossIfIndex"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclCrossVpi"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclCrossVci"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclNextLeafIfIndex"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclNextLeafVpi"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclNextLeafVci"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclAalEncapFlag"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclAalEncapProtocol"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclAalUserVcType"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclAtmInArpInterval"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclRemoteAddr"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclRemoteVpi"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclRemoteVci"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclLocation"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclSlowRetryIntv"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclNumAttempts"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclLastReleaseCause"), ("CISCO-ATM-CONN-MIB", "ciscoAtmSvpDirection"), ("CISCO-ATM-CONN-MIB", "ciscoAtmSvcDirection"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoAtmConnMIBGroup4 = ciscoAtmConnMIBGroup4.setStatus('current')
ciscoAtmConnNmsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 10, 13, 3, 2, 11)).setObjects(("CISCO-ATM-CONN-MIB", "ciscoAtmVplConnName"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVplConnType"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclConnName"), ("CISCO-ATM-CONN-MIB", "ciscoAtmVclConnType"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoAtmConnNmsGroup = ciscoAtmConnNmsGroup.setStatus('current')
mibBuilder.exportSymbols("CISCO-ATM-CONN-MIB", ciscoAtmVplInClp1Cells=ciscoAtmVplInClp1Cells, ciscoAtmVplCellDrops=ciscoAtmVplCellDrops, ciscoAtmVclEpdTpdPacketDrops=ciscoAtmVclEpdTpdPacketDrops, ciscoAtmVclLsPerVcQThreshGrp=ciscoAtmVclLsPerVcQThreshGrp, ciscoAtmVclDefaultRxUpcVbrCdvt=ciscoAtmVclDefaultRxUpcVbrCdvt, ciscoAtmSnoopVpSnoopedVpi=ciscoAtmSnoopVpSnoopedVpi, ciscoAtmVplSwFabOutCells=ciscoAtmVplSwFabOutCells, ciscoAtmVclLastReleaseCause=ciscoAtmVclLastReleaseCause, ConnState=ConnState, ciscoAtmVclCastType=ciscoAtmVclCastType, ciscoAtmVclConnState=ciscoAtmVclConnState, ciscoAtmVclVcqClpThreshCellDrops=ciscoAtmVclVcqClpThreshCellDrops, ciscoAtmSvcFrameDiscardUsesAal5Ie=ciscoAtmSvcFrameDiscardUsesAal5Ie, ciscoAtmVplOutCells=ciscoAtmVplOutCells, ciscoAtmVplConnName=ciscoAtmVplConnName, ciscoAtmVclEpdEnable=ciscoAtmVclEpdEnable, ciscoAtmConnMIBCompliance7=ciscoAtmConnMIBCompliance7, ciscoAtmVplOamEndLoopback=ciscoAtmVplOamEndLoopback, ciscoAtmVplTxNegTraffDescrIndex=ciscoAtmVplTxNegTraffDescrIndex, ciscoAtmVplCrossIfIndex=ciscoAtmVplCrossIfIndex, ciscoAtmVclConnName=ciscoAtmVclConnName, ciscoAtmVclUpcViolations=ciscoAtmVclUpcViolations, ciscoAtmConnMIBGroup2=ciscoAtmConnMIBGroup2, ciscoAtmVclSpanType=ciscoAtmVclSpanType, ciscoAtmVplCastType=ciscoAtmVplCastType, ciscoAtmVclOutCells=ciscoAtmVclOutCells, ciscoAtmSnoopVpTable=ciscoAtmSnoopVpTable, ciscoAtmVclAalEncapFlag=ciscoAtmVclAalEncapFlag, ciscoAtmSnoopVcDir=ciscoAtmSnoopVcDir, ciscoAtmVclNumAttempts=ciscoAtmVclNumAttempts, ciscoAtmVplUpcViolations=ciscoAtmVplUpcViolations, ciscoAtmVplOutClp0Cells=ciscoAtmVplOutClp0Cells, ciscoAtmVplNextLeafVpi=ciscoAtmVplNextLeafVpi, ciscoAtmVclOamEndLoopback=ciscoAtmVclOamEndLoopback, ciscoAtmVclEpdTpdCellDrops=ciscoAtmVclEpdTpdCellDrops, ciscoAtmSvpDirection=ciscoAtmSvpDirection, ciscoAtmVplConfigType=ciscoAtmVplConfigType, ciscoAtmVclLsPerVcqWrrWeight=ciscoAtmVclLsPerVcqWrrWeight, ciscoAtmVplDefaultRxUpcTolerance=ciscoAtmVplDefaultRxUpcTolerance, ciscoAtmVclInstallTime=ciscoAtmVclInstallTime, ciscoAtmVplLocation=ciscoAtmVplLocation, ciscoAtmSnoopVcState=ciscoAtmSnoopVcState, ciscoAtmVclOutClp0Cells=ciscoAtmVclOutClp0Cells, ciscoAtmSvcAddrTable=ciscoAtmSvcAddrTable, ciscoAtmVclAalEncapProtocol=ciscoAtmVclAalEncapProtocol, ciscoAtmVplSpanType=ciscoAtmVplSpanType, ciscoAtmSnoopVcEntry=ciscoAtmSnoopVcEntry, ciscoAtmConnMIBGroups=ciscoAtmConnMIBGroups, ciscoAtmConnMIBCompliance4=ciscoAtmConnMIBCompliance4, ciscoAtmVclSwFabOutCells=ciscoAtmVclSwFabOutCells, ciscoAtmSvp=ciscoAtmSvp, CastType=CastType, ciscoAtmConnMIBlsFcPfqGroup=ciscoAtmConnMIBlsFcPfqGroup, ciscoAtmSvpVpi=ciscoAtmSvpVpi, ciscoAtmSnoopVc=ciscoAtmSnoopVc, ciscoAtmVclOamSegmentLoopback=ciscoAtmVclOamSegmentLoopback, ciscoAtmVclInClp1Cells=ciscoAtmVclInClp1Cells, ciscoAtmVclOutClp1Cells=ciscoAtmVclOutClp1Cells, Location=Location, ciscoAtmVplEntry=ciscoAtmVplEntry, ciscoAtmVplRxNegTraffDescrIndex=ciscoAtmVplRxNegTraffDescrIndex, ciscoAtmVplInstallTime=ciscoAtmVplInstallTime, EnableStatus=EnableStatus, ciscoAtmConnMIBCompliances=ciscoAtmConnMIBCompliances, ciscoAtmVclRxNegTraffDescrIndex=ciscoAtmVclRxNegTraffDescrIndex, ciscoAtmVclAtmInArpInterval=ciscoAtmVclAtmInArpInterval, ciscoAtmVplOamAisEnable=ciscoAtmVplOamAisEnable, Direction=Direction, PYSNMP_MODULE_ID=ciscoAtmConnMIB, ciscoAtmVclCrossVpi=ciscoAtmVclCrossVpi, ciscoAtmConnNmsGroup=ciscoAtmConnNmsGroup, ciscoAtmConnMIBCompliance8=ciscoAtmConnMIBCompliance8, ciscoAtmVclNextLeafIfIndex=ciscoAtmVclNextLeafIfIndex, ciscoAtmVclClp0VcqFullCellDrops=ciscoAtmVclClp0VcqFullCellDrops, 
ciscoAtmVplNumAttempts=ciscoAtmVplNumAttempts, ciscoAtmVplConnState=ciscoAtmVplConnState, ciscoAtmVplOamLoopbkTxInterval=ciscoAtmVplOamLoopbkTxInterval, ciscoAtmVclSlowRetryIntv=ciscoAtmVclSlowRetryIntv, ciscoAtmVplEpdTpdPacketsIn=ciscoAtmVplEpdTpdPacketsIn, ciscoAtmVclRemoteVci=ciscoAtmVclRemoteVci, ciscoAtmVplRxUpcMode=ciscoAtmVplRxUpcMode, ciscoAtmConnMIBCompliance2=ciscoAtmConnMIBCompliance2, ciscoAtmVpl=ciscoAtmVpl, ciscoAtmVclTable=ciscoAtmVclTable, ciscoAtmVplRemoteVpi=ciscoAtmVplRemoteVpi, ciscoAtmVplConnType=ciscoAtmVplConnType, ciscoAtmConnMIBlsFcPfqGroup1=ciscoAtmConnMIBlsFcPfqGroup1, ciscoAtmVclSwFabOutClp1Cells=ciscoAtmVclSwFabOutClp1Cells, ciscoAtmVclClp1Drops=ciscoAtmVclClp1Drops, ciscoAtmVplNextLeafIfIndex=ciscoAtmVplNextLeafIfIndex, ciscoAtmVplInClp0Cells=ciscoAtmVplInClp0Cells, ciscoAtmSnoopVcSnoopedVci=ciscoAtmSnoopVcSnoopedVci, ciscoAtmVclConfigType=ciscoAtmVclConfigType, ciscoAtmVplLastReleaseCause=ciscoAtmVplLastReleaseCause, ciscoAtmVclAalUserVcType=ciscoAtmVclAalUserVcType, ciscoAtmVplOamSegmentLoopback=ciscoAtmVplOamSegmentLoopback, ciscoAtmVplLsPerVcqXmtQueuedCells=ciscoAtmVplLsPerVcqXmtQueuedCells, ciscoAtmSvcAddr=ciscoAtmSvcAddr, ciscoAtmSnoopVpRowStatus=ciscoAtmSnoopVpRowStatus, ciscoAtmVclEpdTpdPacketsIn=ciscoAtmVclEpdTpdPacketsIn, ciscoAtmConnMIB=ciscoAtmConnMIB, ciscoAtmVplLsPerVcqTunnelIsShaped=ciscoAtmVplLsPerVcqTunnelIsShaped, ciscoAtmVclNextLeafVci=ciscoAtmVclNextLeafVci, ciscoAtmVclConnType=ciscoAtmVclConnType, ciscoAtmVclCellDrops=ciscoAtmVclCellDrops, UpcStatus=UpcStatus, ciscoAtmConnMIBCompliance3=ciscoAtmConnMIBCompliance3, ciscoAtmConnMIBGroup1=ciscoAtmConnMIBGroup1, ciscoAtmVcl=ciscoAtmVcl, ciscoAtmVclOamAisEnable=ciscoAtmVclOamAisEnable, SnoopDirType=SnoopDirType, ciscoAtmVclInClp0Cells=ciscoAtmVclInClp0Cells, ciscoAtmConnMIBCompliance5=ciscoAtmConnMIBCompliance5, ciscoAtmVplEpdTpdCellDrops=ciscoAtmVplEpdTpdCellDrops, ciscoAtmSvcVpi=ciscoAtmSvcVpi, ciscoAtmVplSwFabOutClp1Cells=ciscoAtmVplSwFabOutClp1Cells, ciscoAtmVplCrossVpi=ciscoAtmVplCrossVpi, ciscoAtmConnMIBObjects=ciscoAtmConnMIBObjects, ciscoAtmVclNextLeafVpi=ciscoAtmVclNextLeafVpi, ciscoAtmVplDefaultRxUpcVbrCdvt=ciscoAtmVplDefaultRxUpcVbrCdvt, ciscoAtmVplClp0VcqFullCellDrops=ciscoAtmVplClp0VcqFullCellDrops, ciscoAtmSvpAddrTable=ciscoAtmSvpAddrTable, ciscoAtmVclDefaultRxUpcTolerance=ciscoAtmVclDefaultRxUpcTolerance, SpanType=SpanType, ciscoAtmSvcVci=ciscoAtmSvcVci, ciscoAtmSnoopVpDir=ciscoAtmSnoopVpDir, ciscoAtmConnMIBConformance=ciscoAtmConnMIBConformance, ciscoAtmSnoopVcSnoopedVpi=ciscoAtmSnoopVcSnoopedVpi, ciscoAtmConnMIBlsPerVcqGroup2=ciscoAtmConnMIBlsPerVcqGroup2, ciscoAtmConnMIBlsPerVcqGroup=ciscoAtmConnMIBlsPerVcqGroup, ciscoAtmSnoopVcRowStatus=ciscoAtmSnoopVcRowStatus, ciscoAtmVclInCells=ciscoAtmVclInCells, ciscoAtmVplSwFabOutClp0Cells=ciscoAtmVplSwFabOutClp0Cells, ciscoAtmVplOutClp1Cells=ciscoAtmVplOutClp1Cells, ciscoAtmSnoopVpEntry=ciscoAtmSnoopVpEntry, ciscoAtmConnMIBGroup3=ciscoAtmConnMIBGroup3, ciscoAtmSvpAddrEntry=ciscoAtmSvpAddrEntry, ciscoAtmVclLocation=ciscoAtmVclLocation, ciscoAtmConnMIBCompliance6=ciscoAtmConnMIBCompliance6, ciscoAtmSvcAddrEntry=ciscoAtmSvcAddrEntry, ciscoAtmSnoopVpSnoopedIfIndex=ciscoAtmSnoopVpSnoopedIfIndex, ciscoAtmVclRemoteAddr=ciscoAtmVclRemoteAddr, ciscoAtmVplLsPerVcqTunnelIsHierarchical=ciscoAtmVplLsPerVcqTunnelIsHierarchical, ciscoAtmVclEntry=ciscoAtmVclEntry, ConfigType=ConfigType, ciscoAtmVclTxNegTraffDescrIndex=ciscoAtmVclTxNegTraffDescrIndex, ciscoAtmVclCrossIfIndex=ciscoAtmVclCrossIfIndex, ciscoAtmVplLogicalPortDef=ciscoAtmVplLogicalPortDef, 
ciscoAtmSnoopVcSnoopedIfIndex=ciscoAtmSnoopVcSnoopedIfIndex, ciscoAtmSvc=ciscoAtmSvc, ciscoAtmVclOamLoopbkTxInterval=ciscoAtmVclOamLoopbkTxInterval, ciscoAtmVclOamRdiEnable=ciscoAtmVclOamRdiEnable, ciscoAtmSvcDirection=ciscoAtmSvcDirection, ciscoAtmConnMIBGroup=ciscoAtmConnMIBGroup, ciscoAtmVplVcqClpThreshCellDrops=ciscoAtmVplVcqClpThreshCellDrops, ciscoAtmSvpAddr=ciscoAtmSvpAddr, ciscoAtmVclSwFabOutClp0Cells=ciscoAtmVclSwFabOutClp0Cells, ciscoAtmVplEpdTpdPacketDrops=ciscoAtmVplEpdTpdPacketDrops, ciscoAtmVplClp1Drops=ciscoAtmVplClp1Drops, ciscoAtmConnMIBGroup4=ciscoAtmConnMIBGroup4, ciscoAtmVplInCells=ciscoAtmVplInCells, ciscoAtmVplLsPerVcqWrrWeight=ciscoAtmVplLsPerVcqWrrWeight, ciscoAtmVplRemoteAddr=ciscoAtmVplRemoteAddr, ciscoAtmVplTable=ciscoAtmVplTable, ciscoAtmVplOamRdiEnable=ciscoAtmVplOamRdiEnable, ciscoAtmVplLogicalPortIndex=ciscoAtmVplLogicalPortIndex, ciscoAtmConnMIBCompliance=ciscoAtmConnMIBCompliance, ciscoAtmConnMIBNegTraffGroup=ciscoAtmConnMIBNegTraffGroup, ciscoAtmSnoopVcTable=ciscoAtmSnoopVcTable, ciscoAtmSnoopVpState=ciscoAtmSnoopVpState, ciscoAtmVclRemoteVpi=ciscoAtmVclRemoteVpi, ciscoAtmVplLsPerVcQThreshGrp=ciscoAtmVplLsPerVcQThreshGrp, ciscoAtmVclLsPerVcqXmtQueuedCells=ciscoAtmVclLsPerVcqXmtQueuedCells, ciscoAtmVclRxUpcMode=ciscoAtmVclRxUpcMode, ciscoAtmVclCrossVci=ciscoAtmVclCrossVci, ciscoAtmSnoopVp=ciscoAtmSnoopVp, ciscoAtmVplSlowRetryIntv=ciscoAtmVplSlowRetryIntv)
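# --- Illustrative usage sketch (an assumption, not part of the generated module) ---
# Loading this compiled MIB with pysnmp so the symbols exported above become
# resolvable by name; the source directory is a placeholder, adjust to your setup.
#
#   from pysnmp.smi import builder, view
#   mibBuilder = builder.MibBuilder()
#   mibBuilder.addMibSources(builder.DirMibSource('.'))  # directory containing this module
#   mibBuilder.loadModules('CISCO-ATM-CONN-MIB')
#   mibView = view.MibViewController(mibBuilder)
#   print(mibView.getNodeName((1, 3, 6, 1, 4, 1, 9, 10, 13, 1, 2, 1, 1, 13)))  # ciscoAtmVclInCells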
| 155.954654
| 8,508
| 0.759829
|
391b4e0dbf38208e3126be094ef8a3614b2790c1
| 2,753
|
py
|
Python
|
21.Car-insurance-claim(Challenges-in-Machine-Learning)/code.py
|
swapnilbpatil/ga-learner-dsmp-repo
|
fdea54e911dae514f1a73fd429c33799102dc45b
|
[
"MIT"
] | null | null | null |
21.Car-insurance-claim(Challenges-in-Machine-Learning)/code.py
|
swapnilbpatil/ga-learner-dsmp-repo
|
fdea54e911dae514f1a73fd429c33799102dc45b
|
[
"MIT"
] | null | null | null |
21.Car-insurance-claim(Challenges-in-Machine-Learning)/code.py
|
swapnilbpatil/ga-learner-dsmp-repo
|
fdea54e911dae514f1a73fd429c33799102dc45b
|
[
"MIT"
] | null | null | null |
# --------------
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
# Code starts here
df = pd.read_csv(path)   # `path` to the dataset is assumed to be provided by the environment
print(df.head())
#Remove the $ and , from columns (regex=False so '$' is treated as a literal character)
for col in ['INCOME','HOME_VAL','BLUEBOOK','OLDCLAIM','CLM_AMT']:
    df[col] = df[col].str.replace("$", '', regex=False)
    df[col] = df[col].str.replace(",", '', regex=False)
print(df.head())
#The features
X = df.iloc[:,:-1]
#The target variable
y = df.iloc[:,-1]
#Calculate the value counts of target variable
count = y.value_counts()
#Split the dataframe
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.3, random_state = 6)
# Code ends here
# --------------
# Code starts here
for col in ['INCOME','HOME_VAL','BLUEBOOK','OLDCLAIM','CLM_AMT']:
X_train[col] = X_train[col].astype('float')
X_test[col] = X_test[col].astype('float')
print(X_train.info())
print(X_test.info())
# Code ends here
# --------------
# Code starts here
#Drop the rows from columns
X_train.dropna(subset= ['YOJ','OCCUPATION'],inplace=True)
X_test.dropna(subset= ['YOJ','OCCUPATION'],inplace=True)
#Update the index of y_train
y_train = y_train[X_train.index]
y_test = y_test[X_test.index]
#fill the missing values of these columns with the training-set mean
for col in ['AGE','CAR_AGE','INCOME','HOME_VAL']:
    X_train[col].fillna(X_train[col].mean(), inplace=True)
    X_test[col].fillna(X_train[col].mean(), inplace=True)
# Code ends here
# --------------
from sklearn.preprocessing import LabelEncoder
columns = ["PARENT1","MSTATUS","GENDER","EDUCATION","OCCUPATION","CAR_USE","CAR_TYPE","RED_CAR","REVOKED"]
# Code starts here
for col in columns:
    le = LabelEncoder()
    X_train[col] = le.fit_transform(X_train[col].astype(str))
    # reuse the encoder fitted on the training data so train/test codes stay consistent
    X_test[col] = le.transform(X_test[col].astype(str))
# Code ends here
# --------------
from sklearn.metrics import precision_score
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
# code starts here
model = LogisticRegression(random_state = 6)
model.fit(X_train,y_train)
y_pred = model.predict(X_test)
score = accuracy_score(y_test,y_pred)
print('The accuracy_score:',score)
# Code ends here
# --------------
from sklearn.preprocessing import StandardScaler
from imblearn.over_sampling import SMOTE
# code starts here
smote = SMOTE(random_state = 9)
# fit_resample replaces the older fit_sample API in recent imbalanced-learn releases
X_train, y_train = smote.fit_resample(X_train, y_train)
scaler = StandardScaler()
X_train=scaler.fit_transform(X_train)
X_test=scaler.transform(X_test)
# Code ends here
# --------------
# Code Starts here
model = LogisticRegression()
model.fit(X_train,y_train)
y_pred=model.predict(X_test)
score = accuracy_score(y_test,y_pred)
print('The Accuracy Score:', score)
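# precision_score is imported above but never used; a minimal, optional sketch of
# reporting it as well (assumes the positive class is labelled 1):
precision = precision_score(y_test, y_pred)
print('The precision_score:', precision)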
# Code ends here
| 21.341085
| 106
| 0.703233
|
db2c3164a482219271ca9e037cc7e46fddcca9f0
| 13,990
|
py
|
Python
|
mako/ast.py
|
gcodebackups/shaderman
|
5472ad7cd6ab1ce85e1c75aabf367d0ea7d485ca
|
[
"BSD-3-Clause"
] | 5
|
2016-08-20T07:13:47.000Z
|
2021-09-28T21:07:59.000Z
|
mako/ast.py
|
gcodebackups/shaderman
|
5472ad7cd6ab1ce85e1c75aabf367d0ea7d485ca
|
[
"BSD-3-Clause"
] | null | null | null |
mako/ast.py
|
gcodebackups/shaderman
|
5472ad7cd6ab1ce85e1c75aabf367d0ea7d485ca
|
[
"BSD-3-Clause"
] | 2
|
2019-02-22T01:58:08.000Z
|
2021-09-28T21:08:45.000Z
|
# ast.py
# Copyright (C) 2006, 2007 Michael Bayer mike_mp@zzzcomputing.com
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""utilities for analyzing expressions and blocks of Python code, as well as generating Python from AST nodes"""
from compiler import ast, visitor
from compiler import parse as compiler_parse
from mako import util, exceptions
from StringIO import StringIO
import re
def parse(code, mode, **exception_kwargs):
try:
return compiler_parse(code, mode)
except SyntaxError, e:
raise exceptions.SyntaxException("(%s) %s (%s)" % (e.__class__.__name__, str(e), repr(code[0:50])), **exception_kwargs)
class PythonCode(object):
"""represents information about a string containing Python code"""
def __init__(self, code, **exception_kwargs):
self.code = code
# represents all identifiers which are assigned to at some point in the code
self.declared_identifiers = util.Set()
# represents all identifiers which are referenced before their assignment, if any
self.undeclared_identifiers = util.Set()
# note that an identifier can be in both the undeclared and declared lists.
# using AST to parse instead of using code.co_varnames, code.co_names has several advantages:
# - we can locate an identifier as "undeclared" even if its declared later in the same block of code
# - AST is less likely to break with version changes (for example, the behavior of co_names changed a little bit
# in python version 2.5)
if isinstance(code, basestring):
expr = parse(code.lstrip(), "exec", **exception_kwargs)
else:
expr = code
class FindIdentifiers(object):
def __init__(self):
self.in_function = False
self.local_ident_stack = {}
def _add_declared(s, name):
if not s.in_function:
self.declared_identifiers.add(name)
def visitClass(self, node, *args):
self._add_declared(node.name)
def visitAssName(self, node, *args):
self._add_declared(node.name)
def visitAssign(self, node, *args):
# flip around the visiting of Assign so the expression gets evaluated first,
# in the case of a clause like "x=x+5" (x is undeclared)
self.visit(node.expr, *args)
for n in node.nodes:
self.visit(n, *args)
def visitFunction(self,node, *args):
self._add_declared(node.name)
# push function state onto stack. dont log any
# more identifiers as "declared" until outside of the function,
# but keep logging identifiers as "undeclared".
# track argument names in each function header so they arent counted as "undeclared"
saved = {}
inf = self.in_function
self.in_function = True
for arg in node.argnames:
if arg in self.local_ident_stack:
saved[arg] = True
else:
self.local_ident_stack[arg] = True
for n in node.getChildNodes():
self.visit(n, *args)
self.in_function = inf
for arg in node.argnames:
if arg not in saved:
del self.local_ident_stack[arg]
def visitFor(self, node, *args):
# flip around visit
self.visit(node.list, *args)
self.visit(node.assign, *args)
self.visit(node.body, *args)
def visitName(s, node, *args):
if node.name not in __builtins__ and node.name not in self.declared_identifiers and node.name not in s.local_ident_stack:
self.undeclared_identifiers.add(node.name)
def visitImport(self, node, *args):
for (mod, alias) in node.names:
if alias is not None:
self._add_declared(alias)
else:
self._add_declared(mod.split('.')[0])
def visitFrom(self, node, *args):
for (mod, alias) in node.names:
if alias is not None:
self._add_declared(alias)
else:
if mod == '*':
raise exceptions.CompileException("'import *' is not supported, since all identifier names must be explicitly declared. Please use the form 'from <modulename> import <name1>, <name2>, ...' instead.", **exception_kwargs)
self._add_declared(mod)
f = FindIdentifiers()
visitor.walk(expr, f) #, walker=walker())
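# Illustrative example (an assumption, not from the original source): PythonCode("x = y + 5")
# walks the AST and reports declared_identifiers == set(['x']) and
# undeclared_identifiers == set(['y']), since visitAssign evaluates the
# right-hand side before visiting the assignment target.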
class ArgumentList(object):
"""parses a fragment of code as a comma-separated list of expressions"""
def __init__(self, code, **exception_kwargs):
self.codeargs = []
self.args = []
self.declared_identifiers = util.Set()
self.undeclared_identifiers = util.Set()
class FindTuple(object):
def visitTuple(s, node, *args):
for n in node.nodes:
p = PythonCode(n, **exception_kwargs)
self.codeargs.append(p)
self.args.append(ExpressionGenerator(n).value())
self.declared_identifiers = self.declared_identifiers.union(p.declared_identifiers)
self.undeclared_identifiers = self.undeclared_identifiers.union(p.undeclared_identifiers)
if isinstance(code, basestring):
if re.match(r"\S", code) and not re.match(r",\s*$", code):
# if theres text and no trailing comma, insure its parsed
# as a tuple by adding a trailing comma
code += ","
expr = parse(code, "exec", **exception_kwargs)
else:
expr = code
f = FindTuple()
visitor.walk(expr, f)
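# Illustrative example (an assumption): ArgumentList("h, x+5") appends a trailing
# comma so the code parses as a tuple; args becomes ['h', '(x + 5)'] and
# undeclared_identifiers contains both 'h' and 'x'.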
class PythonFragment(PythonCode):
"""extends PythonCode to provide identifier lookups in partial control statements
e.g.
for x in 5:
elif y==9:
except (MyException, e):
etc.
"""
def __init__(self, code, **exception_kwargs):
m = re.match(r'^(\w+)(?:\s+(.*?))?:$', code.strip(), re.S)
if not m:
raise exceptions.CompileException("Fragment '%s' is not a partial control statement" % code, **exception_kwargs)
(keyword, expr) = m.group(1,2)
if keyword in ['for','if', 'while']:
code = code + "pass"
elif keyword == 'try':
code = code + "pass\nexcept:pass"
elif keyword == 'elif' or keyword == 'else':
code = "if False:pass\n" + code + "pass"
elif keyword == 'except':
code = "try:pass\n" + code + "pass"
else:
raise exceptions.CompileException("Unsupported control keyword: '%s'" % keyword, **exception_kwargs)
super(PythonFragment, self).__init__(code, **exception_kwargs)
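# Illustrative example (an assumption): PythonFragment("for x in data:") pads the
# fragment to "for x in data:pass" before parsing, so declared_identifiers
# contains 'x' and undeclared_identifiers contains 'data'.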
class walker(visitor.ASTVisitor):
def dispatch(self, node, *args):
print "Node:", str(node)
#print "dir:", dir(node)
return visitor.ASTVisitor.dispatch(self, node, *args)
class FunctionDecl(object):
"""function declaration"""
def __init__(self, code, allow_kwargs=True, **exception_kwargs):
self.code = code
expr = parse(code, "exec", **exception_kwargs)
class ParseFunc(object):
def visitFunction(s, node, *args):
self.funcname = node.name
self.argnames = node.argnames
self.defaults = node.defaults
self.varargs = node.varargs
self.kwargs = node.kwargs
f = ParseFunc()
visitor.walk(expr, f)
if not hasattr(self, 'funcname'):
raise exceptions.CompileException("Code '%s' is not a function declaration" % code, **exception_kwargs)
if not allow_kwargs and self.kwargs:
raise exceptions.CompileException("'**%s' keyword argument not allowed here" % self.argnames[-1], **exception_kwargs)
def get_argument_expressions(self, include_defaults=True):
"""return the argument declarations of this FunctionDecl as a printable list."""
namedecls = []
defaults = [d for d in self.defaults]
kwargs = self.kwargs
varargs = self.varargs
argnames = [f for f in self.argnames]
argnames.reverse()
for arg in argnames:
default = None
if kwargs:
arg = "**" + arg
kwargs = False
elif varargs:
arg = "*" + arg
varargs = False
else:
default = len(defaults) and defaults.pop() or None
if include_defaults and default:
namedecls.insert(0, "%s=%s" % (arg, ExpressionGenerator(default).value()))
else:
namedecls.insert(0, arg)
return namedecls
class FunctionArgs(FunctionDecl):
"""the argument portion of a function declaration"""
def __init__(self, code, **kwargs):
super(FunctionArgs, self).__init__("def ANON(%s):pass" % code, **kwargs)
class ExpressionGenerator(object):
"""given an AST node, generates an equivalent literal Python expression."""
def __init__(self, astnode):
self.buf = StringIO()
visitor.walk(astnode, self) #, walker=walker())
def value(self):
return self.buf.getvalue()
def operator(self, op, node, *args):
self.buf.write("(")
self.visit(node.left, *args)
self.buf.write(" %s " % op)
self.visit(node.right, *args)
self.buf.write(")")
def booleanop(self, op, node, *args):
self.visit(node.nodes[0])
for n in node.nodes[1:]:
self.buf.write(" " + op + " ")
self.visit(n, *args)
def visitConst(self, node, *args):
self.buf.write(repr(node.value))
def visitAssName(self, node, *args):
# TODO: figure out OP_ASSIGN, other OP_s
self.buf.write(node.name)
def visitName(self, node, *args):
self.buf.write(node.name)
def visitMul(self, node, *args):
self.operator("*", node, *args)
def visitAnd(self, node, *args):
self.booleanop("and", node, *args)
def visitOr(self, node, *args):
self.booleanop("or", node, *args)
def visitBitand(self, node, *args):
self.booleanop("&", node, *args)
def visitBitor(self, node, *args):
self.booleanop("|", node, *args)
def visitBitxor(self, node, *args):
self.booleanop("^", node, *args)
def visitAdd(self, node, *args):
self.operator("+", node, *args)
def visitGetattr(self, node, *args):
self.visit(node.expr, *args)
self.buf.write(".%s" % node.attrname)
def visitSub(self, node, *args):
self.operator("-", node, *args)
def visitNot(self, node, *args):
self.buf.write("not ")
self.visit(node.expr)
def visitDiv(self, node, *args):
self.operator("/", node, *args)
def visitFloorDiv(self, node, *args):
self.operator("//", node, *args)
def visitSubscript(self, node, *args):
self.visit(node.expr)
self.buf.write("[")
[self.visit(x) for x in node.subs]
self.buf.write("]")
def visitUnarySub(self, node, *args):
self.buf.write("-")
self.visit(node.expr)
def visitUnaryAdd(self, node, *args):
self.buf.write("-")
self.visit(node.expr)
def visitSlice(self, node, *args):
self.visit(node.expr)
self.buf.write("[")
if node.lower is not None:
self.visit(node.lower)
self.buf.write(":")
if node.upper is not None:
self.visit(node.upper)
self.buf.write("]")
def visitDict(self, node):
self.buf.write("{")
c = node.getChildren()
for i in range(0, len(c), 2):
self.visit(c[i])
self.buf.write(": ")
self.visit(c[i+1])
if i<len(c) -2:
self.buf.write(", ")
self.buf.write("}")
def visitTuple(self, node):
self.buf.write("(")
c = node.getChildren()
for i in range(0, len(c)):
self.visit(c[i])
if i<len(c) - 1:
self.buf.write(", ")
self.buf.write(")")
def visitList(self, node):
self.buf.write("[")
c = node.getChildren()
for i in range(0, len(c)):
self.visit(c[i])
if i<len(c) - 1:
self.buf.write(", ")
self.buf.write("]")
def visitListComp(self, node):
self.buf.write("[")
self.visit(node.expr)
self.buf.write(" ")
for n in node.quals:
self.visit(n)
self.buf.write("]")
def visitListCompFor(self, node):
self.buf.write(" for ")
self.visit(node.assign)
self.buf.write(" in ")
self.visit(node.list)
for n in node.ifs:
self.visit(n)
def visitListCompIf(self, node):
self.buf.write(" if ")
self.visit(node.test)
def visitCompare(self, node):
self.visit(node.expr)
for tup in node.ops:
self.buf.write(tup[0])
self.visit(tup[1])
def visitCallFunc(self, node, *args):
self.visit(node.node)
self.buf.write("(")
if len(node.args):
self.visit(node.args[0])
for a in node.args[1:]:
self.buf.write(", ")
self.visit(a)
self.buf.write(")")
| 40.787172
| 248
| 0.557327
|
c39d2ea8b4954d5b4a40f2a3abb0412bd3b1ca8a
| 5,015
|
py
|
Python
|
py_proto/modules/common/proto/header_pb2.py
|
yujianyi/fusion_localization
|
c0057e29cbf690d6260f021080fd951c1a6b6baa
|
[
"Apache-2.0"
] | 2
|
2019-03-04T02:11:04.000Z
|
2019-04-18T11:19:45.000Z
|
py_proto/modules/common/proto/header_pb2.py
|
yujianyi/fusion_localization
|
c0057e29cbf690d6260f021080fd951c1a6b6baa
|
[
"Apache-2.0"
] | 1
|
2019-03-15T08:37:53.000Z
|
2019-03-15T08:37:53.000Z
|
py_proto/modules/common/proto/header_pb2.py
|
yujianyi/fusion_localization
|
c0057e29cbf690d6260f021080fd951c1a6b6baa
|
[
"Apache-2.0"
] | 1
|
2019-03-04T02:11:09.000Z
|
2019-03-04T02:11:09.000Z
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: modules/common/proto/header.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from modules.common.proto import error_code_pb2 as modules_dot_common_dot_proto_dot_error__code__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='modules/common/proto/header.proto',
package='apollo.common',
syntax='proto2',
serialized_pb=_b('\n!modules/common/proto/header.proto\x12\rapollo.common\x1a%modules/common/proto/error_code.proto\"\xd3\x01\n\x06Header\x12\x15\n\rtimestamp_sec\x18\x01 \x01(\x01\x12\x13\n\x0bmodule_name\x18\x02 \x01(\t\x12\x14\n\x0csequence_num\x18\x03 \x01(\r\x12\x17\n\x0flidar_timestamp\x18\x04 \x01(\x04\x12\x18\n\x10\x63\x61mera_timestamp\x18\x05 \x01(\x04\x12\x17\n\x0fradar_timestamp\x18\x06 \x01(\x04\x12\x12\n\x07version\x18\x07 \x01(\r:\x01\x31\x12\'\n\x06status\x18\x08 \x01(\x0b\x32\x17.apollo.common.StatusPb')
,
dependencies=[modules_dot_common_dot_proto_dot_error__code__pb2.DESCRIPTOR,])
_HEADER = _descriptor.Descriptor(
name='Header',
full_name='apollo.common.Header',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='timestamp_sec', full_name='apollo.common.Header.timestamp_sec', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='module_name', full_name='apollo.common.Header.module_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sequence_num', full_name='apollo.common.Header.sequence_num', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='lidar_timestamp', full_name='apollo.common.Header.lidar_timestamp', index=3,
number=4, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='camera_timestamp', full_name='apollo.common.Header.camera_timestamp', index=4,
number=5, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='radar_timestamp', full_name='apollo.common.Header.radar_timestamp', index=5,
number=6, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='version', full_name='apollo.common.Header.version', index=6,
number=7, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='status', full_name='apollo.common.Header.status', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=92,
serialized_end=303,
)
_HEADER.fields_by_name['status'].message_type = modules_dot_common_dot_proto_dot_error__code__pb2._STATUSPB
DESCRIPTOR.message_types_by_name['Header'] = _HEADER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Header = _reflection.GeneratedProtocolMessageType('Header', (_message.Message,), dict(
DESCRIPTOR = _HEADER,
__module__ = 'modules.common.proto.header_pb2'
# @@protoc_insertion_point(class_scope:apollo.common.Header)
))
_sym_db.RegisterMessage(Header)
# @@protoc_insertion_point(module_scope)
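# Hedged usage sketch (added by the editor, not emitted by protoc): shows how the
# generated Header message is typically filled in; the field values are placeholders
# and the helper is never called by this module.
def _example_header():
    header = Header()
    header.timestamp_sec = 0.0           # placeholder timestamp in seconds
    header.module_name = "localization"  # placeholder module name
    header.sequence_num = 1
    return header.SerializeToString()    # standard protobuf serialization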
| 41.106557
| 528
| 0.746959
|
737831182965b4a13e66805f5d08c33603c262a8
| 310
|
py
|
Python
|
pybart/settings.py
|
ericdwang/pybart
|
87985306ab1e068927ef89b44dd968440026ab01
|
[
"BSD-3-Clause"
] | 20
|
2015-12-14T04:56:11.000Z
|
2020-05-26T04:41:33.000Z
|
pybart/settings.py
|
ericdwang/pybart
|
87985306ab1e068927ef89b44dd968440026ab01
|
[
"BSD-3-Clause"
] | 3
|
2015-12-14T19:11:53.000Z
|
2017-03-13T03:14:33.000Z
|
pybart/settings.py
|
ericdwang/pybart
|
87985306ab1e068927ef89b44dd968440026ab01
|
[
"BSD-3-Clause"
] | 1
|
2018-02-01T03:28:50.000Z
|
2018-02-01T03:28:50.000Z
|
import os
DEFAULT_API_KEY = 'MW9S-E7SL-26DU-VV8V'
REFRESH_INTERVAL = 100 # Milliseconds
TOTAL_COLUMNS = 4
BART_API_KEY = os.environ.get('BART_API_KEY')
try:
BART_STATIONS = os.environ['BART_STATIONS'].split(',')
except KeyError:
BART_STATIONS = []
BART_MAP_URL = 'https://www.bart.gov/stations'
| 18.235294
| 58
| 0.725806
|
d4f3f266b910c407e3685a5cbeac44a6c662aa5d
| 4,515
|
py
|
Python
|
nyoka/Base64.py
|
maxibor/nyoka
|
19f480eee608035aa5fba368c96d4143bc2f5710
|
[
"Apache-2.0"
] | 1
|
2021-04-07T07:55:31.000Z
|
2021-04-07T07:55:31.000Z
|
nyoka/Base64.py
|
maxibor/nyoka
|
19f480eee608035aa5fba368c96d4143bc2f5710
|
[
"Apache-2.0"
] | null | null | null |
nyoka/Base64.py
|
maxibor/nyoka
|
19f480eee608035aa5fba368c96d4143bc2f5710
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright (c) 2004-2016 Zementis, Inc.
Copyright (c) 2016-2020 Software AG, Darmstadt, Germany and/or Software AG USA Inc., Reston, VA, USA, and/or its
SPDX-License-Identifier: Apache-2.0
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import struct
import base64
import sys
from array import array
#https://stackoverflow.com/questions/40914544/python-base64-float
#https://stackoverflow.com/questions/682504/what-is-a-clean-pythonic-way-to-have-multiple-constructors-in-python
# >>> sys.float_info
# sys.float_info(max=1.7976931348623157e+308, max_exp=1024, max_10_exp=308, min=2.2250738585072014e-308, min_exp=-1021, min_10_exp=-307, dig=15, mant_dig=53, epsilon=2.220446049250313e-16, radix=2, rounds=1)
class FloatBase64:
"""provide several conversions into a base64 encoded string"""
def __init__(self, param):
"""initializer"""
if type(param) == str: # type check is a bad idea!
self.string = param
self.float = self.to_float(param)
else:
self.float = param
self.string = self.from_float(param)
@classmethod
def to_float(cls, base64String):
"""convert string base64String into a float"""
return struct.unpack('<d', base64.standard_b64decode(base64String)[:8])[0]
@classmethod
def from_float(cls, number):
"""converts the float number into a base64 string"""
return str(base64.standard_b64encode(struct.pack('<d', number)))
@classmethod
def to_floatArray(cls, base64String):
"""converts the base64String into an array('f')"""
base64String = base64String.replace("\n", "")
base64String = base64String.replace("\t", "")
base64String = base64String.replace(" ", "")
data = base64.standard_b64decode(base64String)
count = len(data) // 4
result = array('f', struct.unpack('<{0}f'.format(count), data)) # one big structure of `count` floats
return result
@classmethod
def to_floatArray_urlsafe(cls, base64String):
"""converts the base64String into an array('f')"""
data = base64.urlsafe_b64decode(base64String)
count = len(data) // 4
result = array('f', struct.unpack('<{0}f'.format(count), data)) # one big structure of `count` floats
return result
@classmethod
def from_floatArray(cls, floatArray, nlPos = 0):
"""converts the floatArray into a base64 string; nlPos: inserts \n after nlPos floats if given """
import sys
if sys.version_info >= (3,0):
if nlPos > 0:
result = ""
nl = nlPos
fArray = array('f')
for i in range(0, len(floatArray)):
fArray.append(floatArray[i])
nl -= 1
if nl <= 0:
result += str(base64.standard_b64encode(fArray), 'utf-8') + "\n"
nl = nlPos
fArray = array('f')
result += str(base64.standard_b64encode(fArray), 'utf-8')
return result
else:
result = ""
fArray = array('f')
for i in range(0, len(floatArray)):
fArray.append(floatArray[i])
result += str(base64.standard_b64encode(fArray), 'utf-8')
return result
else:
if nlPos > 0:
result = ""
nl = nlPos
fArray = array('f')
for i in range(0, len(floatArray)):
fArray.append(floatArray[i])
nl -= 1
if nl <= 0:
result += base64.standard_b64encode(fArray) + "\n"
nl = nlPos
fArray = array('f')
result += base64.standard_b64encode(fArray)
return result
else:
result = ""
fArray = array('f')
for i in range(0, len(floatArray)):
fArray.append(floatArray[i])
result += base64.standard_b64encode(fArray)
return result
@classmethod
def from_floatArray_urlsafe(cls, floatArray, nlPos = 0):
"""converts the floatArray into a base64 string; nlPos: inserts \n after nlPos floats if given """
if nlPos > 0:
result = ""
nl = nlPos
fArray = array('f')
for i in range(0, len(floatArray)):
fArray.append(floatArray[i])
nl -= 1
if nl <= 0:
result += base64.urlsafe_b64encode(fArray) + "\n"
nl = nlPos
fArray = array('f')
result += base64.urlsafe_b64encode(fArray)
return result
else:
return base64.urlsafe_b64encode(floatArray)
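# Hedged standalone sketch (added by the editor, not part of nyoka): the same
# struct/base64 round trip that FloatBase64.from_floatArray/to_floatArray perform,
# relying on the struct, base64 and array imports already at the top of this module.
# The sample values are illustrative only.
def _float_array_roundtrip_example(values=(1.0, 2.5, -3.25)):
    packed = struct.pack('<{0}f'.format(len(values)), *values)  # little-endian 32-bit floats
    encoded = base64.standard_b64encode(packed)                 # base64 text form
    decoded = base64.standard_b64decode(encoded)
    return array('f', struct.unpack('<{0}f'.format(len(decoded) // 4), decoded))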
| 33.198529
| 207
| 0.683942
|
ee2ed7eabd443373907a794bdabf4fab7d1edd9d
| 2,503
|
py
|
Python
|
setup.py
|
SolSpecSolutions/OpenSfM
|
b3f39ad5acbbe05aba14e611d6f2d1b204f35daa
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
SolSpecSolutions/OpenSfM
|
b3f39ad5acbbe05aba14e611d6f2d1b204f35daa
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
SolSpecSolutions/OpenSfM
|
b3f39ad5acbbe05aba14e611d6f2d1b204f35daa
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
from __future__ import print_function
import errno
import os
import setuptools
import subprocess
import sys
from wheel.bdist_wheel import bdist_wheel
class platform_bdist_wheel(bdist_wheel):
"""Patched bdist_well to make sure wheels include platform tag."""
def finalize_options(self):
bdist_wheel.finalize_options(self)
self.root_is_pure = False
def mkdir_p(path):
"""Make a directory including parent directories."""
try:
os.makedirs(path)
except os.error as exc:
if exc.errno != errno.EEXIST or not os.path.isdir(path):
raise
def configure_c_extension():
"""Configure cmake project to C extension."""
print("Configuring for python {}.{}...".format(sys.version_info.major,
sys.version_info.minor))
mkdir_p('cmake_build')
cmake_command = [
'cmake',
'../opensfm/src',
'-DHAS_FLTO=OFF',
'-DPYTHON_EXECUTABLE=' + sys.executable,
]
subprocess.check_call(cmake_command, cwd='cmake_build')
def build_c_extension():
"""Compile C extension."""
print("Compiling extension...")
subprocess.check_call(['make', '-j4'], cwd='cmake_build')
configure_c_extension()
build_c_extension()
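# Note (added by the editor): because the two calls above run at import time of this
# setup.py, any front end that executes it (for example `pip install .` or
# `python setup.py bdist_wheel`) first configures the CMake project in ./cmake_build
# and compiles the extension with make before setuptools packaging begins.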
setuptools.setup(
name='opensfm',
version='0.4.1a7',
description='A Structure from Motion library',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
url='https://github.com/mapillary/OpenSfM',
project_urls={
"Documentation": "https://docs.opensfm.org/",
},
author='Mapillary',
license='BSD',
packages=setuptools.find_packages(),
scripts=[
'bin/opensfm_run_all',
'bin/opensfm',
],
package_data={
'opensfm': [
'csfm.*',
'data/sensor_data.json',
'data/bow/bow_hahog_root_uchar_10000.npz',
'data/bow/bow_hahog_root_uchar_64.npz',
]
},
# install_requires=[
# 'cloudpickle>=0.4.0',
# 'ExifRead>=2.1.2',
# 'gpxpy>=1.1.2',
# 'loky>=1.2.1',
# 'networkx>=1.11',
# 'numpy>=1.13',
# 'pyproj>=1.9.5.1',
# 'pytest>=3.0.7',
# 'python-dateutil>=2.6.0',
# 'PyYAML>=3.12',
# 'repoze.lru>=0.7',
# 'scipy',
# 'six',
# 'xmltodict>=0.10.2',
# 'Pillow>=6.0.0',
# ],
cmdclass={'bdist_wheel': platform_bdist_wheel},
)
| 26.072917
| 75
| 0.588494
|
67387d8d097d981be11ea59eafc5bdaf496ad51c
| 13,499
|
py
|
Python
|
sdk/python/pulumi_azure_native/network/v20180201/get_express_route_circuit_peering.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20180201/get_express_route_circuit_peering.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20180201/get_express_route_circuit_peering.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetExpressRouteCircuitPeeringResult',
'AwaitableGetExpressRouteCircuitPeeringResult',
'get_express_route_circuit_peering',
]
@pulumi.output_type
class GetExpressRouteCircuitPeeringResult:
"""
Peering in an ExpressRouteCircuit resource.
"""
def __init__(__self__, azure_asn=None, connections=None, etag=None, gateway_manager_etag=None, id=None, ipv6_peering_config=None, last_modified_by=None, microsoft_peering_config=None, name=None, peer_asn=None, peering_type=None, primary_azure_port=None, primary_peer_address_prefix=None, provisioning_state=None, route_filter=None, secondary_azure_port=None, secondary_peer_address_prefix=None, shared_key=None, state=None, stats=None, vlan_id=None):
if azure_asn and not isinstance(azure_asn, int):
raise TypeError("Expected argument 'azure_asn' to be a int")
pulumi.set(__self__, "azure_asn", azure_asn)
if connections and not isinstance(connections, list):
raise TypeError("Expected argument 'connections' to be a list")
pulumi.set(__self__, "connections", connections)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if gateway_manager_etag and not isinstance(gateway_manager_etag, str):
raise TypeError("Expected argument 'gateway_manager_etag' to be a str")
pulumi.set(__self__, "gateway_manager_etag", gateway_manager_etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if ipv6_peering_config and not isinstance(ipv6_peering_config, dict):
raise TypeError("Expected argument 'ipv6_peering_config' to be a dict")
pulumi.set(__self__, "ipv6_peering_config", ipv6_peering_config)
if last_modified_by and not isinstance(last_modified_by, str):
raise TypeError("Expected argument 'last_modified_by' to be a str")
pulumi.set(__self__, "last_modified_by", last_modified_by)
if microsoft_peering_config and not isinstance(microsoft_peering_config, dict):
raise TypeError("Expected argument 'microsoft_peering_config' to be a dict")
pulumi.set(__self__, "microsoft_peering_config", microsoft_peering_config)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if peer_asn and not isinstance(peer_asn, float):
raise TypeError("Expected argument 'peer_asn' to be a float")
pulumi.set(__self__, "peer_asn", peer_asn)
if peering_type and not isinstance(peering_type, str):
raise TypeError("Expected argument 'peering_type' to be a str")
pulumi.set(__self__, "peering_type", peering_type)
if primary_azure_port and not isinstance(primary_azure_port, str):
raise TypeError("Expected argument 'primary_azure_port' to be a str")
pulumi.set(__self__, "primary_azure_port", primary_azure_port)
if primary_peer_address_prefix and not isinstance(primary_peer_address_prefix, str):
raise TypeError("Expected argument 'primary_peer_address_prefix' to be a str")
pulumi.set(__self__, "primary_peer_address_prefix", primary_peer_address_prefix)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if route_filter and not isinstance(route_filter, dict):
raise TypeError("Expected argument 'route_filter' to be a dict")
pulumi.set(__self__, "route_filter", route_filter)
if secondary_azure_port and not isinstance(secondary_azure_port, str):
raise TypeError("Expected argument 'secondary_azure_port' to be a str")
pulumi.set(__self__, "secondary_azure_port", secondary_azure_port)
if secondary_peer_address_prefix and not isinstance(secondary_peer_address_prefix, str):
raise TypeError("Expected argument 'secondary_peer_address_prefix' to be a str")
pulumi.set(__self__, "secondary_peer_address_prefix", secondary_peer_address_prefix)
if shared_key and not isinstance(shared_key, str):
raise TypeError("Expected argument 'shared_key' to be a str")
pulumi.set(__self__, "shared_key", shared_key)
if state and not isinstance(state, str):
raise TypeError("Expected argument 'state' to be a str")
pulumi.set(__self__, "state", state)
if stats and not isinstance(stats, dict):
raise TypeError("Expected argument 'stats' to be a dict")
pulumi.set(__self__, "stats", stats)
if vlan_id and not isinstance(vlan_id, int):
raise TypeError("Expected argument 'vlan_id' to be a int")
pulumi.set(__self__, "vlan_id", vlan_id)
@property
@pulumi.getter(name="azureASN")
def azure_asn(self) -> Optional[int]:
"""
The Azure ASN.
"""
return pulumi.get(self, "azure_asn")
@property
@pulumi.getter
def connections(self) -> Optional[Sequence['outputs.ExpressRouteCircuitConnectionResponse']]:
"""
The list of circuit connections associated with Azure Private Peering for this circuit.
"""
return pulumi.get(self, "connections")
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="gatewayManagerEtag")
def gateway_manager_etag(self) -> Optional[str]:
"""
The GatewayManager Etag.
"""
return pulumi.get(self, "gateway_manager_etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="ipv6PeeringConfig")
def ipv6_peering_config(self) -> Optional['outputs.Ipv6ExpressRouteCircuitPeeringConfigResponse']:
"""
The IPv6 peering configuration.
"""
return pulumi.get(self, "ipv6_peering_config")
@property
@pulumi.getter(name="lastModifiedBy")
def last_modified_by(self) -> Optional[str]:
"""
        Indicates whether the provider or the customer last modified the peering.
"""
return pulumi.get(self, "last_modified_by")
@property
@pulumi.getter(name="microsoftPeeringConfig")
def microsoft_peering_config(self) -> Optional['outputs.ExpressRouteCircuitPeeringConfigResponse']:
"""
The Microsoft peering configuration.
"""
return pulumi.get(self, "microsoft_peering_config")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Gets name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="peerASN")
def peer_asn(self) -> Optional[float]:
"""
The peer ASN.
"""
return pulumi.get(self, "peer_asn")
@property
@pulumi.getter(name="peeringType")
def peering_type(self) -> Optional[str]:
"""
The peering type.
"""
return pulumi.get(self, "peering_type")
@property
@pulumi.getter(name="primaryAzurePort")
def primary_azure_port(self) -> Optional[str]:
"""
The primary port.
"""
return pulumi.get(self, "primary_azure_port")
@property
@pulumi.getter(name="primaryPeerAddressPrefix")
def primary_peer_address_prefix(self) -> Optional[str]:
"""
The primary address prefix.
"""
return pulumi.get(self, "primary_peer_address_prefix")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
        Gets the provisioning state of the resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="routeFilter")
def route_filter(self) -> Optional['outputs.RouteFilterResponse']:
"""
The reference of the RouteFilter resource.
"""
return pulumi.get(self, "route_filter")
@property
@pulumi.getter(name="secondaryAzurePort")
def secondary_azure_port(self) -> Optional[str]:
"""
The secondary port.
"""
return pulumi.get(self, "secondary_azure_port")
@property
@pulumi.getter(name="secondaryPeerAddressPrefix")
def secondary_peer_address_prefix(self) -> Optional[str]:
"""
The secondary address prefix.
"""
return pulumi.get(self, "secondary_peer_address_prefix")
@property
@pulumi.getter(name="sharedKey")
def shared_key(self) -> Optional[str]:
"""
The shared key.
"""
return pulumi.get(self, "shared_key")
@property
@pulumi.getter
def state(self) -> Optional[str]:
"""
The peering state.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter
def stats(self) -> Optional['outputs.ExpressRouteCircuitStatsResponse']:
"""
Gets peering stats.
"""
return pulumi.get(self, "stats")
@property
@pulumi.getter(name="vlanId")
def vlan_id(self) -> Optional[int]:
"""
The VLAN ID.
"""
return pulumi.get(self, "vlan_id")
class AwaitableGetExpressRouteCircuitPeeringResult(GetExpressRouteCircuitPeeringResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetExpressRouteCircuitPeeringResult(
azure_asn=self.azure_asn,
connections=self.connections,
etag=self.etag,
gateway_manager_etag=self.gateway_manager_etag,
id=self.id,
ipv6_peering_config=self.ipv6_peering_config,
last_modified_by=self.last_modified_by,
microsoft_peering_config=self.microsoft_peering_config,
name=self.name,
peer_asn=self.peer_asn,
peering_type=self.peering_type,
primary_azure_port=self.primary_azure_port,
primary_peer_address_prefix=self.primary_peer_address_prefix,
provisioning_state=self.provisioning_state,
route_filter=self.route_filter,
secondary_azure_port=self.secondary_azure_port,
secondary_peer_address_prefix=self.secondary_peer_address_prefix,
shared_key=self.shared_key,
state=self.state,
stats=self.stats,
vlan_id=self.vlan_id)
def get_express_route_circuit_peering(circuit_name: Optional[str] = None,
peering_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetExpressRouteCircuitPeeringResult:
"""
Peering in an ExpressRouteCircuit resource.
:param str circuit_name: The name of the express route circuit.
:param str peering_name: The name of the peering.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['circuitName'] = circuit_name
__args__['peeringName'] = peering_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20180201:getExpressRouteCircuitPeering', __args__, opts=opts, typ=GetExpressRouteCircuitPeeringResult).value
return AwaitableGetExpressRouteCircuitPeeringResult(
azure_asn=__ret__.azure_asn,
connections=__ret__.connections,
etag=__ret__.etag,
gateway_manager_etag=__ret__.gateway_manager_etag,
id=__ret__.id,
ipv6_peering_config=__ret__.ipv6_peering_config,
last_modified_by=__ret__.last_modified_by,
microsoft_peering_config=__ret__.microsoft_peering_config,
name=__ret__.name,
peer_asn=__ret__.peer_asn,
peering_type=__ret__.peering_type,
primary_azure_port=__ret__.primary_azure_port,
primary_peer_address_prefix=__ret__.primary_peer_address_prefix,
provisioning_state=__ret__.provisioning_state,
route_filter=__ret__.route_filter,
secondary_azure_port=__ret__.secondary_azure_port,
secondary_peer_address_prefix=__ret__.secondary_peer_address_prefix,
shared_key=__ret__.shared_key,
state=__ret__.state,
stats=__ret__.stats,
vlan_id=__ret__.vlan_id)
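# Hedged usage sketch (added by the editor, not part of the generated SDK). The
# resource names below are placeholders; a real Pulumi program would substitute its
# own circuit, peering and resource-group names. The helper is never invoked here.
def _example_usage():
    peering = get_express_route_circuit_peering(
        circuit_name="my-circuit",               # placeholder
        peering_name="AzurePrivatePeering",      # assumed, a common peering name
        resource_group_name="my-rg")             # placeholder
    return peering.peer_asn, peering.primary_peer_address_prefix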
| 40.906061
| 454
| 0.671679
|
1d64cd256663d289fc105f7cbfa46b9e5281f994
| 2,172
|
py
|
Python
|
var/spack/repos/builtin/packages/r-manipulatewidget/package.py
|
kkauder/spack
|
6ae8d5c380c1f42094b05d38be26b03650aafb39
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2,360
|
2017-11-06T08:47:01.000Z
|
2022-03-31T14:45:33.000Z
|
var/spack/repos/builtin/packages/r-manipulatewidget/package.py
|
kkauder/spack
|
6ae8d5c380c1f42094b05d38be26b03650aafb39
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 13,838
|
2017-11-04T07:49:45.000Z
|
2022-03-31T23:38:39.000Z
|
var/spack/repos/builtin/packages/r-manipulatewidget/package.py
|
kkauder/spack
|
6ae8d5c380c1f42094b05d38be26b03650aafb39
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1,793
|
2017-11-04T07:45:50.000Z
|
2022-03-30T14:31:53.000Z
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RManipulatewidget(RPackage):
"""Add Even More Interactivity to Interactive Charts
Like package 'manipulate' does for static graphics, this package helps to
easily add controls like sliders, pickers, checkboxes, etc. that can be
used to modify the input data or the parameters of an interactive chart
created with package 'htmlwidgets'."""
homepage = "https://github.com/rte-antares-rpackage/manipulateWidget"
url = "https://cloud.r-project.org/src/contrib/manipulateWidget_0.10.0.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/manipulateWidget/"
version('0.10.1', sha256='9d621192121f6b516bc7f1a18305995bfb7838c6683ac701422afc03a50e27ee')
version('0.10.0', sha256='3d61a3d0cedf5c8a850a3e62ed6af38c600dc3f25b44c4ff07a5093bf9ca4ffd')
version('0.9.0', sha256='5bf4bdb702263b0e156f40f3354922a06db7db544e497addcd6c98d9860bf3a3')
version('0.8.0', sha256='e7e6351b1fb8f39b9895e2536fa7c149cbc5d63d7022f67c1b25232cf0706ca7')
version('0.7.0', sha256='160ce5c68658301e00051c60ac5693701c5bc97b7344bacde0f56be4955231f6')
version('0.6.0', sha256='90aa1b30647d7034166b8d6c6185503b6855c70253e36a41742a84faa77ce0db')
version('0.5.1', sha256='5a672c2bd8ba16ec8212cd9fb620072b243e6d18c02dd3ec70bd8c2a1ff1c9c4')
version('0.5.0', sha256='2599e25f78bb0d748705160e1dfe62a673f5bb388ac5f415f3d649d2511737c8')
version('0.4.0', sha256='65cc7d28c2b2efc81fda35da019ac6e6058580cf0fdf5e31458cc96386c0c599')
depends_on('r+X', type=('build', 'run'))
depends_on('r-shiny@1.0.3:', type=('build', 'run'))
depends_on('r-miniui', type=('build', 'run'))
depends_on('r-htmltools', type=('build', 'run'))
depends_on('r-htmlwidgets', type=('build', 'run'))
depends_on('r-knitr', type=('build', 'run'))
depends_on('r-base64enc', type=('build', 'run'))
depends_on('r-codetools', type=('build', 'run'))
depends_on('r-webshot', type=('build', 'run'))
| 54.3
| 96
| 0.745396
|
51c26cd7b8a2ded1122fd7122d5564ad66b08b37
| 11,905
|
py
|
Python
|
attack_check.py
|
Jakb1202/Discord-bot-attack-defence
|
021798e6e6b83481b66c1cda1fec2c4a8329073b
|
[
"BSD-3-Clause"
] | null | null | null |
attack_check.py
|
Jakb1202/Discord-bot-attack-defence
|
021798e6e6b83481b66c1cda1fec2c4a8329073b
|
[
"BSD-3-Clause"
] | null | null | null |
attack_check.py
|
Jakb1202/Discord-bot-attack-defence
|
021798e6e6b83481b66c1cda1fec2c4a8329073b
|
[
"BSD-3-Clause"
] | 1
|
2021-05-31T23:58:28.000Z
|
2021-05-31T23:58:28.000Z
|
import asyncio
import inspect
from io import BytesIO
import typing
import re
import discord
from discord.ext import commands
from datetime import timedelta, datetime
from utils import *
class AttackCheck(commands.Cog):
def __init__(self, client):
self.client = client
self.joined_dic = {}
self.last_member = {}
self.loop = self.client.loop.create_task(self.joined_check())
self.client.ban_exceptions = {} # holds user IDs under a guild ID that will not be banned when +banbytime command is run
@commands.Cog.listener()
async def on_guild_available(self, guild):
print(f"'{guild}' became available")
if guild.id not in self.joined_dic:
self.joined_dic[guild.id] = 0
if guild.id not in self.last_member:
self.last_member[guild.id] = None
@commands.Cog.listener()
async def on_guild_join(self, guild):
if guild.id not in self.joined_dic:
self.joined_dic[guild.id] = 0
if guild.id not in self.last_member:
self.last_member[guild.id] = None
@commands.Cog.listener()
async def on_guild_remove(self, guild):
if guild.id in self.joined_dic:
self.joined_dic.pop(guild.id)
if guild.id in self.last_member:
self.last_member.pop(guild.id)
@commands.Cog.listener()
async def on_member_join(self, member):
self.joined_dic[member.guild.id] += 1
self.last_member[member.guild.id] = member
@commands.command(
usage="+toggle",
description="Toggles whether the attack check alert should trigger"
)
async def toggle(self, ctx):
if self.client.alerts_enabled:
self.client.alerts_enabled = 0
await ctx.send("Disabled attack check alerts")
else:
self.client.alerts_enabled = 1
await ctx.send("Enabled attack check alerts")
async def auto_ban(self, guild_id, ban_time):
log_channel = get_log_channel(self.client)
if not log_channel:
return
msg = await log_channel.send(":warning: Auto-ban criteria satisfied, preparing to ban all members in the "
"above alert\nREACT WITH :x: IN THE NEXT 90 SECONDS TO CANCEL AUTO-BANNING")
def check(reaction, user):
return (reaction.message == msg) and (str(reaction.emoji) == "❌")
try:
reaction, user = await self.client.wait_for("reaction_add", timeout=90, check=check)
except asyncio.TimeoutError: # No one cancelled the auto-ban within 90 seconds
ban_time = ban_time.replace(second=0, microsecond=0)
guild = self.client.get_guild(guild_id)
collected_members = [m for m in guild.members if m.joined_at.replace(second=0, microsecond=0) == ban_time]
await log_channel.send(f"Preparing to ban {len(collected_members)} members...")
for member in collected_members:
try:
await guild.ban(discord.Object(member.id),
reason=f"Auto-banned at {frmtd_utcnow()} during late-night attack")
except Exception:
continue
await log_channel.send(f"{len(collected_members)} members have been auto-banned.")
else:
await log_channel.send(f"{user.mention} banning cancelled!")
async def joined_check(self):
await self.client.wait_until_ready()
while not self.client.is_closed():
for guild_id in self.joined_dic:
# if no member has joined this guild since starting the bot
if self.last_member[guild_id] is None:
self.joined_dic[guild_id] = 0
continue
# CHANGE 11 TO THE THRESHOLD OF MEMBERS TO JOIN IN A SINGLE CLOCK MINUTE FOR THE ALERT TO TRIGGER
if self.joined_dic[guild_id] >= 11 and self.client.alerts_enabled:
bbt = (datetime.utcnow() - timedelta(minutes=1))
await post_log_embed(
client=self.client,
guild_id=guild_id,
title=f"{self.joined_dic[guild_id]} accounts have joined within the last 60 seconds!",
desc=f"`+banbytime {bbt.strftime('%H:%M %d/%m/%y')}`",
color=0xed2140,
author="Potential bot attack!",
author_url="https://media.discordapp.net/attachments/560128634984202258/652220624600563752/wip.gif",
thumbnail="https://cdn.discordapp.com/emojis/588814117305843714.png?v=1",
message="@here"
)
# auto ban accounts joined in the last minute if more than 20 and between 01:00 and 08:00 UTC
if (self.joined_dic[guild_id] >= 20) and (1 < datetime.utcnow().hour < 8):
asyncio.create_task(self.auto_ban(guild_id, bbt))
self.joined_dic[guild_id] = 0
sleep_time = (datetime.utcnow() + timedelta(minutes=1)).replace(second=0)
await discord.utils.sleep_until(sleep_time)
async def ban_base(self, ctx: commands.Context, condition: typing.Callable[[discord.Member], bool]):
"""
        Base function for all mass-ban commands: pass ctx and a boolean predicate over members; matching members are collected, confirmed with the invoker, and then banned.
"""
called_by = inspect.getframeinfo(inspect.currentframe().f_back)[2]
async with ctx.channel.typing():
collected_members = [m for m in ctx.guild.members if condition(m)]
with BytesIO(
str.encode('\n'.join(
[f"{m.name}#{m.discriminator}\tID: {m.id}\tJ: {date_str(m.joined_at)}\tC: {date_str(m.created_at)}"
for m in collected_members]))) as byt_f:
await ctx.send(f"It is currently {frmtd_utcnow()} UTC.\nThe following {len(collected_members)} members"
f" will be banned, do you want to continue, Y/N?",
file=discord.File(fp=byt_f, filename="members_to_ban.txt"))
def check(m):
return m.author.id == ctx.author.id and m.channel.id == ctx.channel.id
try:
msg = await self.client.wait_for("message", check=check, timeout=30)
except asyncio.TimeoutError:
return await ctx.send(f"{ctx.author.mention} Timed out! No banning will take place :(")
if msg.content.lower() not in ("y", "yes"):
return await ctx.send(f"{ctx.author.mention} banning cancelled!")
await ctx.send("Banning continuing, stand by...")
async with ctx.channel.typing():
ban_fails = []
for member in collected_members:
try:
await ctx.guild.ban(
discord.Object(member.id),
reason=f"Banned by {ctx.author.id} using {called_by} at {frmtd_utcnow()} UTC")
except discord.HTTPException:
ban_fails.append(member)
if ban_fails:
with BytesIO(str.encode(
'\n'.join([f"{m.name}#{m.discriminator}\tID: {m.id}" for m in ban_fails]))) as byt_f:
await ctx.send(f"The following {len(ban_fails)} members failed to be banned",
file=discord.File(fp=byt_f, filename="members_to_ban.txt"))
await ctx.send(f"{ctx.author.mention} bans complete!")
@commands.bot_has_permissions(ban_members=True)
@commands.command(
name="banbyname",
usage="+banbyname [name]",
description="Bans all members with that exact name. Case insensitive.",
aliases=("banname",)
)
@commands.has_permissions(ban_members=True)
async def ban_by_name(self, ctx, *, name):
def condition(m: discord.Member):
return m.name.lower() == name.lower()
await self.ban_base(ctx, condition)
@commands.command(
name="bantimeexceptions",
usage="+bantimeexception [ID, ID, ID]",
description="Creates a list of user IDs that won't be banned when +banbytime is run.",
aliases=("bantimeexception", "banexceptions")
)
@commands.has_permissions(ban_members=True)
async def ban_by_time_exceptions(self, ctx, *, exceptions):
        ban_exceptions = [int(uid) for uid in exceptions.split(", ")]  # store as ints so the m.id membership check in ban_by_time works
self.client.ban_exceptions[ctx.guild.id] = ban_exceptions
await ctx.send(f"Exception list created with users: {ban_exceptions}")
@commands.bot_has_permissions(ban_members=True)
@commands.command(
name="banbytime",
usage="+banbytime [HH:MM] <dd/mm/yyyy>",
description="Bans all members that joined at that time. In UTC.",
aliases=("bantime",)
)
@commands.has_permissions(ban_members=True)
async def ban_by_time(self, ctx, *, ban_date: TimeString):
ban_exceptions = [] if ctx.guild.id not in self.client.ban_exceptions \
else self.client.ban_exceptions[ctx.guild.id]
if ban_date > datetime.utcnow():
return await ctx.send("You're trying to ban all join dates in the future, check UTC time...")
def condition(m: discord.Member):
return m.joined_at.replace(second=0, microsecond=0) == ban_date \
and m.id not in ban_exceptions # self.client.ban_exceptions[ctx.guild.id]
await self.ban_base(ctx, condition)
@commands.bot_has_permissions(ban_members=True)
@commands.command(
name="banbyregex",
usage="+banbyregex [pattern string]",
description="Bans all members that joined at that time. In UTC.",
aliases=("banregex",)
)
# THIS IS RESTRICTED TO ADMINISTRATORS AS IT CAN BE DANGEROUS - POTENTIAL TO BAN ALL MEMBERS WITH +banregex .*
@commands.has_permissions(administrator=True)
async def ban_by_regex(self, ctx, *, regex_pattern: str):
compiled = re.compile(regex_pattern)
def condition(m: discord.Member):
return compiled.match(m.name) is not None
await self.ban_base(ctx, condition)
@commands.bot_has_permissions(ban_members=True)
@commands.command(
name="banbypfp",
usage="+banbypfp [member/user/hash]",
description="Bans all members that have the same avatar hash.",
aliases=("banpfp", "banbyavatar")
)
@commands.has_permissions(ban_members=True)
async def ban_by_pfp(self, ctx, item: typing.Union[BMC, UserID, str]):
pfp_hash = item.avatar if not isinstance(item, str) else item
def condition(m: discord.Member):
return m.avatar == pfp_hash
await self.ban_base(ctx, condition)
@commands.bot_has_permissions(ban_members=True)
@commands.command(
name="banbycreation",
usage="+banbycreation [member/user]",
description="Bans all members that have the same creation date.",
aliases=("bancreation",)
)
@commands.has_permissions(ban_members=True)
async def ban_by_creation(self, ctx, *, item: typing.Union[BMC, UserID, TimeString]):
member_creation = item if isinstance(item, datetime) else item.created_at.replace(second=0, microsecond=0)
def condition(m: discord.Member):
return m.created_at.replace(second=0, microsecond=0) == member_creation
await self.ban_base(ctx, condition)
def cog_unload(self):
self.loop.cancel()
def setup(client):
client.add_cog(AttackCheck(client))
| 44.755639
| 130
| 0.601428
|
5821bb4bedb914076accc5bb19450d90206793c9
| 252
|
py
|
Python
|
utils/image_12_coe_converter.py
|
HowyoungZhou/cyber-melody-2
|
b96fff16f4bc57b47867389f8d6dc297fae58387
|
[
"MIT"
] | null | null | null |
utils/image_12_coe_converter.py
|
HowyoungZhou/cyber-melody-2
|
b96fff16f4bc57b47867389f8d6dc297fae58387
|
[
"MIT"
] | null | null | null |
utils/image_12_coe_converter.py
|
HowyoungZhou/cyber-melody-2
|
b96fff16f4bc57b47867389f8d6dc297fae58387
|
[
"MIT"
] | null | null | null |
from sys import argv
from PIL import Image
from coe_writer import write_coe
from image import rgb12_pixel_generator
def main():
im = Image.open(argv[1])
write_coe(argv[2], 16, rgb12_pixel_generator(im))
if __name__ == "__main__":
main()
| 19.384615
| 53
| 0.730159
|
e50e0419a7a51cd65804a1019954c264594c88cd
| 118
|
py
|
Python
|
wsgi.py
|
Baez2222/TaqueriaPointOfSale
|
9c60b15609754945d822ef3e9c5587eac959412a
|
[
"MIT"
] | null | null | null |
wsgi.py
|
Baez2222/TaqueriaPointOfSale
|
9c60b15609754945d822ef3e9c5587eac959412a
|
[
"MIT"
] | null | null | null |
wsgi.py
|
Baez2222/TaqueriaPointOfSale
|
9c60b15609754945d822ef3e9c5587eac959412a
|
[
"MIT"
] | 1
|
2020-12-10T02:56:04.000Z
|
2020-12-10T02:56:04.000Z
|
from taqueriaposapp import create_app
application = create_app()
if __name__ == "__main__":
application.run()
| 13.111111
| 37
| 0.737288
|
73fcf94c3f8a69c1c067ba43acda0f217145b603
| 5,086
|
py
|
Python
|
app/user/tests/test_users_api.py
|
MForte101/recipe-app-api
|
58cf88354b73a00303a722ff0ef04756800384bb
|
[
"MIT"
] | null | null | null |
app/user/tests/test_users_api.py
|
MForte101/recipe-app-api
|
58cf88354b73a00303a722ff0ef04756800384bb
|
[
"MIT"
] | null | null | null |
app/user/tests/test_users_api.py
|
MForte101/recipe-app-api
|
58cf88354b73a00303a722ff0ef04756800384bb
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
CREATE_USER_URL = reverse('user:create')
TOKEN_URL = reverse('user:token')
ME_URL = reverse('user:me')
def create_user(**params):
"""Helper function to create new user"""
return get_user_model().objects.create_user(**params)
class PublicUserApiTests(TestCase):
"""Test the users API (public)"""
def setUp(self):
self.client = APIClient()
def test_create_valid_user_success(self):
"""Test creating using with a valid payload is successful"""
payload = {
'email': 'test@londonappdev.com',
'password': 'testpass',
'name': 'name',
}
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
user = get_user_model().objects.get(**res.data)
self.assertTrue(
user.check_password(payload['password'])
)
self.assertNotIn('password', res.data)
def test_user_exists(self):
"""Test creating a user that already exists fails"""
payload = {'email': 'test@londonappdev.com',
'password': 'testpass',
'name': 'Test',
}
create_user(**payload)
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_password_too_short(self):
"""Test that password must be more than 5 characters"""
payload = {'email': 'test@londonappdev.com',
'password': 'pw',
'name': 'Test',
}
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
user_exists = get_user_model().objects.filter(
email=payload['email']
).exists()
self.assertFalse(user_exists)
def test_create_token_for_user(self):
"""Test that a token is created for a user"""
payload = {'email': 'mark@phillydev.com', 'password': 'testpass123'}
create_user(**payload)
res = self.client.post(TOKEN_URL, payload)
self.assertIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_200_OK)
    def test_create_token_invalid_cred(self):
"""Token is not created if invalid cred is provided"""
create_user(email='mark@phillydev.com', password='testpass')
payload = {'email': 'mark@phillydev.com', 'password': 'testwrong'}
res = self.client.post(TOKEN_URL, payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_no_user(self):
"""Test that token is not created if no user"""
payload = {'email': 'test@phillydev.com', 'password': 'testpass'}
res = self.client.post(TOKEN_URL, payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_missing_field(self):
"""Test that email and password are required"""
res = self.client.post(TOKEN_URL, {'email': 'test@phillydev.com',
'password': ''})
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_user_unauthorized(self):
"""Test that auth is required for users"""
res = self.client.post(ME_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateUserApiTests(TestCase):
"""Test API requests that require authenticate"""
def setUp(self):
self.user = create_user(
email="test@phillydev.com",
password="testpassword",
name="name"
)
self.client = APIClient()
self.client.force_authenticate(user=self.user)
def test_retrieve_profile_success(self):
"""Test retrieving profile for logged in user"""
res = self.client.get(ME_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, {
'name': self.user.name,
'email': self.user.email
})
def test_post_me_not_allowed(self):
"""Test that post is not allowed on Me url"""
res = self.client.post(ME_URL, {})
self.assertEqual(res.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_update_user_profile(self):
"""Test updating the user profile for authenticated user"""
payload = {'name': 'new name', 'password': 'testpass123'}
res = self.client.patch(ME_URL, payload)
self.user.refresh_from_db()
self.assertEqual(self.user.name, payload['name'])
self.assertTrue(self.user.check_password(payload['password']))
self.assertEqual(res.status_code, status.HTTP_200_OK)
| 36.328571
| 77
| 0.63665
|
514d7451dc6cd222175f0539bbd658eb7b3250ee
| 69
|
py
|
Python
|
app/config.py
|
wplam107/babynames
|
f5a7707449b6c8c9224ed8102235e3882c76d9bc
|
[
"MIT"
] | null | null | null |
app/config.py
|
wplam107/babynames
|
f5a7707449b6c8c9224ed8102235e3882c76d9bc
|
[
"MIT"
] | null | null | null |
app/config.py
|
wplam107/babynames
|
f5a7707449b6c8c9224ed8102235e3882c76d9bc
|
[
"MIT"
] | null | null | null |
DATABASE_URI = 'postgres+psycopg2://postgres:@localhost:5432/name_db'
| 69
| 69
| 0.811594
|
ca9aa48a46422d4557b482033361194933e32d33
| 136,161
|
py
|
Python
|
sympy/tensor/tensor.py
|
iamabhishek0/sympy
|
c461bd1ff9d178d1012b04fd0bf37ee3abb02cdd
|
[
"BSD-3-Clause"
] | 2
|
2019-12-16T16:02:58.000Z
|
2020-01-20T04:07:18.000Z
|
sympy/tensor/tensor.py
|
iamabhishek0/sympy
|
c461bd1ff9d178d1012b04fd0bf37ee3abb02cdd
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/tensor/tensor.py
|
iamabhishek0/sympy
|
c461bd1ff9d178d1012b04fd0bf37ee3abb02cdd
|
[
"BSD-3-Clause"
] | null | null | null |
"""
This module defines tensors with abstract index notation.
The abstract index notation was first formalized by Penrose.
Tensor indices are formal objects, with a tensor type; there is no
notion of index range, it is only possible to assign the dimension,
used to trace the Kronecker delta; the dimension can be a Symbol.
The Einstein summation convention is used.
The covariant indices are indicated with a minus sign in front of the index.
For instance the tensor ``t = p(a)*A(b,c)*q(-c)`` has the index ``c``
contracted.
A tensor expression ``t`` can be called; called with its
indices in sorted order it is equal to itself:
in the above example ``t(a, b) == t``;
one can call ``t`` with different indices; ``t(c, d) == p(c)*A(d,a)*q(-a)``.
The contracted indices are dummy indices: internally they have no name,
since the indices are represented by a graph-like structure.
Tensors are put in canonical form using ``canon_bp``, which uses
the Butler-Portugal algorithm for canonicalization using the monoterm
symmetries of the tensors.
If there is a (anti)symmetric metric, the indices can be raised and
lowered when the tensor is put in canonical form.
"""
from __future__ import print_function, division
from collections import defaultdict
import operator
import itertools
from sympy import Rational, prod, Integer
from sympy.combinatorics import Permutation
from sympy.combinatorics.tensor_can import get_symmetric_group_sgs, \
bsgs_direct_product, canonicalize, riemann_bsgs
from sympy.core import Basic, Expr, sympify, Add, Mul, S
from sympy.core.compatibility import string_types, reduce, range, SYMPY_INTS
from sympy.core.containers import Tuple, Dict
from sympy.core.decorators import deprecated
from sympy.core.symbol import Symbol, symbols
from sympy.core.sympify import CantSympify, _sympify
from sympy.core.operations import AssocOp
from sympy.matrices import eye
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.utilities.decorator import memoize_property
import warnings
@deprecated(useinstead=".replace_with_arrays", issue=15276, deprecated_since_version="1.4")
def deprecate_data():
pass
@deprecated(useinstead=".substitute_indices()", issue=17515,
deprecated_since_version="1.5")
def deprecate_fun_eval():
pass
@deprecated(useinstead="tensor_heads()", issue=17108,
deprecated_since_version="1.5")
def deprecate_TensorType():
pass
class _IndexStructure(CantSympify):
"""
    This class handles the indices (free and dummy ones). It contains the
    algorithms to manage dummy-index replacements and contractions of
    free indices under multiplication of tensor expressions, as well as the
    logic related to canonicalization sorting, getting the permutation of the
    expression and so on. It also includes tools to get the ``TensorIndex``
    objects corresponding to the given index structure.
"""
def __init__(self, free, dum, index_types, indices, canon_bp=False):
self.free = free
self.dum = dum
self.index_types = index_types
self.indices = indices
self._ext_rank = len(self.free) + 2*len(self.dum)
self.dum.sort(key=lambda x: x[0])
@staticmethod
def from_indices(*indices):
"""
Create a new ``_IndexStructure`` object from a list of ``indices``
``indices`` ``TensorIndex`` objects, the indices. Contractions are
detected upon construction.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, _IndexStructure
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> m0, m1, m2, m3 = tensor_indices('m0,m1,m2,m3', Lorentz)
>>> _IndexStructure.from_indices(m0, m1, -m1, m3)
_IndexStructure([(m0, 0), (m3, 3)], [(1, 2)], [Lorentz, Lorentz, Lorentz, Lorentz])
"""
free, dum = _IndexStructure._free_dum_from_indices(*indices)
index_types = [i.tensor_index_type for i in indices]
indices = _IndexStructure._replace_dummy_names(indices, free, dum)
return _IndexStructure(free, dum, index_types, indices)
@staticmethod
def from_components_free_dum(components, free, dum):
index_types = []
for component in components:
index_types.extend(component.index_types)
indices = _IndexStructure.generate_indices_from_free_dum_index_types(free, dum, index_types)
return _IndexStructure(free, dum, index_types, indices)
@staticmethod
def _free_dum_from_indices(*indices):
"""
Convert ``indices`` into ``free``, ``dum`` for single component tensor
        ``free`` list of tuples ``(index, pos)``,
where ``pos`` is the position of index in
the list of indices formed by the component tensors
        ``dum`` list of tuples ``(pos_contr, pos_cov)``
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, \
_IndexStructure
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> m0, m1, m2, m3 = tensor_indices('m0,m1,m2,m3', Lorentz)
>>> _IndexStructure._free_dum_from_indices(m0, m1, -m1, m3)
([(m0, 0), (m3, 3)], [(1, 2)])
"""
n = len(indices)
if n == 1:
return [(indices[0], 0)], []
# find the positions of the free indices and of the dummy indices
free = [True]*len(indices)
index_dict = {}
dum = []
for i, index in enumerate(indices):
name = index.name
typ = index.tensor_index_type
contr = index.is_up
if (name, typ) in index_dict:
# found a pair of dummy indices
is_contr, pos = index_dict[(name, typ)]
# check consistency and update free
if is_contr:
if contr:
raise ValueError('two equal contravariant indices in slots %d and %d' %(pos, i))
else:
free[pos] = False
free[i] = False
else:
if contr:
free[pos] = False
free[i] = False
else:
raise ValueError('two equal covariant indices in slots %d and %d' %(pos, i))
if contr:
dum.append((i, pos))
else:
dum.append((pos, i))
else:
index_dict[(name, typ)] = index.is_up, i
free = [(index, i) for i, index in enumerate(indices) if free[i]]
free.sort()
return free, dum
def get_indices(self):
"""
Get a list of indices, creating new tensor indices to complete dummy indices.
"""
return self.indices[:]
@staticmethod
def generate_indices_from_free_dum_index_types(free, dum, index_types):
indices = [None]*(len(free)+2*len(dum))
for idx, pos in free:
indices[pos] = idx
generate_dummy_name = _IndexStructure._get_generator_for_dummy_indices(free)
for pos1, pos2 in dum:
typ1 = index_types[pos1]
indname = generate_dummy_name(typ1)
indices[pos1] = TensorIndex(indname, typ1, True)
indices[pos2] = TensorIndex(indname, typ1, False)
return _IndexStructure._replace_dummy_names(indices, free, dum)
@staticmethod
def _get_generator_for_dummy_indices(free):
cdt = defaultdict(int)
# if the free indices have names with dummy_name, start with an
# index higher than those for the dummy indices
# to avoid name collisions
for indx, ipos in free:
if indx.name.split('_')[0] == indx.tensor_index_type.dummy_name:
cdt[indx.tensor_index_type] = max(cdt[indx.tensor_index_type], int(indx.name.split('_')[1]) + 1)
def dummy_name_gen(tensor_index_type):
nd = str(cdt[tensor_index_type])
cdt[tensor_index_type] += 1
return tensor_index_type.dummy_name + '_' + nd
return dummy_name_gen
@staticmethod
def _replace_dummy_names(indices, free, dum):
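# Rename every contracted pair with a fresh dummy name (e.g. ``L_0``),
# leaving the free indices and the contravariant/covariant pairing intact.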
dum.sort(key=lambda x: x[0])
new_indices = [ind for ind in indices]
assert len(indices) == len(free) + 2*len(dum)
generate_dummy_name = _IndexStructure._get_generator_for_dummy_indices(free)
for ipos1, ipos2 in dum:
typ1 = new_indices[ipos1].tensor_index_type
indname = generate_dummy_name(typ1)
new_indices[ipos1] = TensorIndex(indname, typ1, True)
new_indices[ipos2] = TensorIndex(indname, typ1, False)
return new_indices
def get_free_indices(self):
"""
Get a list of free indices.
"""
# get sorted indices according to their position:
free = sorted(self.free, key=lambda x: x[1])
return [i[0] for i in free]
def __str__(self):
return "_IndexStructure({0}, {1}, {2})".format(self.free, self.dum, self.index_types)
def __repr__(self):
return self.__str__()
def _get_sorted_free_indices_for_canon(self):
sorted_free = self.free[:]
sorted_free.sort(key=lambda x: x[0])
return sorted_free
def _get_sorted_dum_indices_for_canon(self):
return sorted(self.dum, key=lambda x: x[0])
def _get_lexicographically_sorted_index_types(self):
permutation = self.indices_canon_args()[0]
index_types = [None]*self._ext_rank
for i, it in enumerate(self.index_types):
index_types[permutation(i)] = it
return index_types
def _get_lexicographically_sorted_indices(self):
permutation = self.indices_canon_args()[0]
indices = [None]*self._ext_rank
for i, it in enumerate(self.indices):
indices[permutation(i)] = it
return indices
def perm2tensor(self, g, is_canon_bp=False):
"""
Returns a ``_IndexStructure`` instance corresponding to the permutation ``g``
``g`` permutation corresponding to the tensor in the representation
used in canonicalization
``is_canon_bp`` if True, then ``g`` is the permutation
corresponding to the canonical form of the tensor
"""
sorted_free = [i[0] for i in self._get_sorted_free_indices_for_canon()]
lex_index_types = self._get_lexicographically_sorted_index_types()
lex_indices = self._get_lexicographically_sorted_indices()
nfree = len(sorted_free)
rank = self._ext_rank
dum = [[None]*2 for i in range((rank - nfree)//2)]
free = []
index_types = [None]*rank
indices = [None]*rank
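# Rebuild the index structure from the permutation: images below ``nfree``
# are free indices; the rest come in dummy pairs, with an even offset giving
# the contravariant slot and an odd offset the covariant one.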
for i in range(rank):
gi = g[i]
index_types[i] = lex_index_types[gi]
indices[i] = lex_indices[gi]
if gi < nfree:
ind = sorted_free[gi]
assert index_types[i] == sorted_free[gi].tensor_index_type
free.append((ind, i))
else:
j = gi - nfree
idum, cov = divmod(j, 2)
if cov:
dum[idum][1] = i
else:
dum[idum][0] = i
dum = [tuple(x) for x in dum]
return _IndexStructure(free, dum, index_types, indices)
def indices_canon_args(self):
"""
Returns ``(g, dummies, msym, v)``, the entries of ``canonicalize``
see ``canonicalize`` in ``tensor_can.py`` in combinatorics module
"""
# to be called after sorted_components
from sympy.combinatorics.permutations import _af_new
n = self._ext_rank
g = [None]*n + [n, n+1]
# Converts the symmetry of the metric into msym from .canonicalize()
# method in the combinatorics module
def metric_symmetry_to_msym(metric):
if metric is None:
return None
sym = metric.symmetry
if sym == TensorSymmetry.fully_symmetric(2):
return 0
if sym == TensorSymmetry.fully_symmetric(-2):
return 1
return None
# ordered indices: first the free indices, ordered by types
# then the dummy indices, ordered by types and contravariant before
# covariant
# g[position in tensor] = position in ordered indices
for i, (indx, ipos) in enumerate(self._get_sorted_free_indices_for_canon()):
g[ipos] = i
pos = len(self.free)
j = len(self.free)
dummies = []
prev = None
a = []
msym = []
for ipos1, ipos2 in self._get_sorted_dum_indices_for_canon():
g[ipos1] = j
g[ipos2] = j + 1
j += 2
typ = self.index_types[ipos1]
if typ != prev:
if a:
dummies.append(a)
a = [pos, pos + 1]
prev = typ
msym.append(metric_symmetry_to_msym(typ.metric))
else:
a.extend([pos, pos + 1])
pos += 2
if a:
dummies.append(a)
return _af_new(g), dummies, msym
def components_canon_args(components):
numtyp = []
prev = None
for t in components:
if t == prev:
numtyp[-1][1] += 1
else:
prev = t
numtyp.append([prev, 1])
v = []
for h, n in numtyp:
if h.comm == 0 or h.comm == 1:
comm = h.comm
else:
comm = TensorManager.get_comm(h.comm, h.comm)
v.append((h.symmetry.base, h.symmetry.generators, n, comm))
return v
class _TensorDataLazyEvaluator(CantSympify):
"""
EXPERIMENTAL: do not rely on this class, it may change without deprecation
warnings in future versions of SymPy.
This object contains the logic to associate components data to a tensor
expression. Components data are set via the ``.data`` property of tensor
expressions and are stored inside this class as a mapping between the
tensor expression and the ``ndarray``.
Computations are executed lazily: whereas the tensor expressions can have
contractions, tensor products, and additions, components data are not
computed until they are accessed by reading the ``.data`` property
associated to the tensor expression.
"""
_substitutions_dict = dict()
_substitutions_dict_tensmul = dict()
def __getitem__(self, key):
dat = self._get(key)
if dat is None:
return None
from .array import NDimArray
if not isinstance(dat, NDimArray):
return dat
if dat.rank() == 0:
return dat[()]
elif dat.rank() == 1 and len(dat) == 1:
return dat[0]
return dat
def _get(self, key):
"""
Retrieve ``data`` associated with ``key``.
This algorithm looks into ``self._substitutions_dict`` for all
``TensorHead`` in the ``TensExpr`` (or just ``TensorHead`` if key is a
TensorHead instance). It reconstructs the components data that the
tensor expression should have by performing on components data the
operations that correspond to the abstract tensor operations applied.
Metric tensor is handled in a different manner: it is pre-computed in
``self._substitutions_dict_tensmul``.
"""
if key in self._substitutions_dict:
return self._substitutions_dict[key]
if isinstance(key, TensorHead):
return None
if isinstance(key, Tensor):
# special case to handle metrics. Metric tensors cannot be
# constructed through contraction with the metric; their
# components indicate whether they represent the matrix or its inverse.
signature = tuple([i.is_up for i in key.get_indices()])
srch = (key.component,) + signature
if srch in self._substitutions_dict_tensmul:
return self._substitutions_dict_tensmul[srch]
array_list = [self.data_from_tensor(key)]
return self.data_contract_dum(array_list, key.dum, key.ext_rank)
if isinstance(key, TensMul):
tensmul_args = key.args
if len(tensmul_args) == 1 and len(tensmul_args[0].components) == 1:
# special case to handle metrics. Metric tensors cannot be
# constructed through contraction with the metric; their
# components indicate whether they represent the matrix or its inverse.
signature = tuple([i.is_up for i in tensmul_args[0].get_indices()])
srch = (tensmul_args[0].components[0],) + signature
if srch in self._substitutions_dict_tensmul:
return self._substitutions_dict_tensmul[srch]
#data_list = [self.data_from_tensor(i) for i in tensmul_args if isinstance(i, TensExpr)]
data_list = [self.data_from_tensor(i) if isinstance(i, Tensor) else i.data for i in tensmul_args if isinstance(i, TensExpr)]
coeff = prod([i for i in tensmul_args if not isinstance(i, TensExpr)])
if all([i is None for i in data_list]):
return None
if any([i is None for i in data_list]):
raise ValueError("Mixing tensors with associated components "\
"data with tensors without components data")
data_result = self.data_contract_dum(data_list, key.dum, key.ext_rank)
return coeff*data_result
if isinstance(key, TensAdd):
data_list = []
free_args_list = []
for arg in key.args:
if isinstance(arg, TensExpr):
data_list.append(arg.data)
free_args_list.append([x[0] for x in arg.free])
else:
data_list.append(arg)
free_args_list.append([])
if all([i is None for i in data_list]):
return None
if any([i is None for i in data_list]):
raise ValueError("Mixing tensors with associated components "\
"data with tensors without components data")
sum_list = []
from .array import permutedims
for data, free_args in zip(data_list, free_args_list):
if len(free_args) < 2:
sum_list.append(data)
else:
free_args_pos = {y: x for x, y in enumerate(free_args)}
axes = [free_args_pos[arg] for arg in key.free_args]
sum_list.append(permutedims(data, axes))
return reduce(lambda x, y: x+y, sum_list)
return None
@staticmethod
def data_contract_dum(ndarray_list, dum, ext_rank):
from .array import tensorproduct, tensorcontraction, MutableDenseNDimArray
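# Take the tensor product of all component arrays, then contract the
# (contravariant, covariant) position pairs listed in ``dum``.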
arrays = list(map(MutableDenseNDimArray, ndarray_list))
prodarr = tensorproduct(*arrays)
return tensorcontraction(prodarr, *dum)
def data_tensorhead_from_tensmul(self, data, tensmul, tensorhead):
"""
This method is used when assigning components data to a ``TensMul``
object, it converts components data to a fully contravariant ndarray,
which is then stored according to the ``TensorHead`` key.
"""
if data is None:
return None
return self._correct_signature_from_indices(
data,
tensmul.get_indices(),
tensmul.free,
tensmul.dum,
True)
def data_from_tensor(self, tensor):
"""
This method corrects the components data to the right signature
(covariant/contravariant) using the metric associated with each
``TensorIndexType``.
"""
tensorhead = tensor.component
if tensorhead.data is None:
return None
return self._correct_signature_from_indices(
tensorhead.data,
tensor.get_indices(),
tensor.free,
tensor.dum)
def _assign_data_to_tensor_expr(self, key, data):
if isinstance(key, TensAdd):
raise ValueError('cannot assign data to TensAdd')
# here it is assumed that `key` is a `TensMul` instance.
if len(key.components) != 1:
raise ValueError('cannot assign data to TensMul with multiple components')
tensorhead = key.components[0]
newdata = self.data_tensorhead_from_tensmul(data, key, tensorhead)
return tensorhead, newdata
def _check_permutations_on_data(self, tens, data):
from .array import permutedims
from .array.arrayop import Flatten
if isinstance(tens, TensorHead):
rank = tens.rank
generators = tens.symmetry.generators
elif isinstance(tens, Tensor):
rank = tens.rank
generators = tens.components[0].symmetry.generators
elif isinstance(tens, TensorIndexType):
rank = tens.metric.rank
generators = tens.metric.symmetry.generators
# Every generator is a permutation, check that by permuting the array
# by that permutation, the array will be the same, except for a
# possible sign change if the permutation admits it.
for gener in generators:
sign_change = +1 if (gener(rank) == rank) else -1
data_swapped = data
last_data = data
permute_axes = list(map(gener, list(range(rank))))
# the order of a permutation is the number of times to get the
# identity by applying that permutation.
for i in range(gener.order()-1):
data_swapped = permutedims(data_swapped, permute_axes)
# if any value in the difference array is non-zero, raise an error:
if any(Flatten(last_data - sign_change*data_swapped)):
raise ValueError("Component data symmetry structure error")
last_data = data_swapped
def __setitem__(self, key, value):
"""
Set the components data of a tensor object/expression.
Components data are transformed to the all-contravariant form and stored
with the corresponding ``TensorHead`` object. If a ``TensorHead`` object
cannot be uniquely identified, it will raise an error.
"""
data = _TensorDataLazyEvaluator.parse_data(value)
self._check_permutations_on_data(key, data)
# TensorHead and TensorIndexType can be assigned data directly, while
# TensMul must first convert data to a fully contravariant form, and
# assign it to its corresponding TensorHead single component.
if not isinstance(key, (TensorHead, TensorIndexType)):
key, data = self._assign_data_to_tensor_expr(key, data)
if isinstance(key, TensorHead):
for dim, indextype in zip(data.shape, key.index_types):
if indextype.data is None:
raise ValueError("index type {} has no components data"\
" associated (needed to raise/lower index)".format(indextype))
if not indextype.dim.is_number:
continue
if dim != indextype.dim:
raise ValueError("wrong dimension of ndarray")
self._substitutions_dict[key] = data
def __delitem__(self, key):
del self._substitutions_dict[key]
def __contains__(self, key):
return key in self._substitutions_dict
def add_metric_data(self, metric, data):
"""
Assign data to the ``metric`` tensor. The metric tensor behaves in an
anomalous way when raising and lowering indices.
A fully covariant metric is the inverse transpose of the fully
contravariant metric (inverse in the matrix sense). If the metric is
symmetric, the transpose is not necessary and the mixed
covariant/contravariant metrics are Kronecker deltas.
"""
# hard assignment, data should not be added to `TensorHead` for metric:
# the problem with `TensorHead` is that the metric is anomalous, i.e.
# raising and lowering the index means considering the metric or its
# inverse, this is not the case for other tensors.
self._substitutions_dict_tensmul[metric, True, True] = data
inverse_transpose = self.inverse_transpose_matrix(data)
# in symmetric spaces, the transpose is the same as the original matrix,
# the full covariant metric tensor is the inverse transpose, so this
# code will be able to handle non-symmetric metrics.
self._substitutions_dict_tensmul[metric, False, False] = inverse_transpose
# now mixed cases, these are identical to the unit matrix if the metric
# is symmetric.
m = data.tomatrix()
invt = inverse_transpose.tomatrix()
self._substitutions_dict_tensmul[metric, True, False] = m * invt
self._substitutions_dict_tensmul[metric, False, True] = invt * m
@staticmethod
def _flip_index_by_metric(data, metric, pos):
from .array import tensorproduct, tensorcontraction
mdim = metric.rank()
ddim = data.rank()
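# Use the metric of the corresponding index type to flip (raise/lower)
# the index of ``data`` at position ``pos``.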
if pos == 0:
data = tensorcontraction(
tensorproduct(
metric,
data
),
(1, mdim+pos)
)
else:
data = tensorcontraction(
tensorproduct(
data,
metric
),
(pos, ddim)
)
return data
@staticmethod
def inverse_matrix(ndarray):
m = ndarray.tomatrix().inv()
return _TensorDataLazyEvaluator.parse_data(m)
@staticmethod
def inverse_transpose_matrix(ndarray):
m = ndarray.tomatrix().inv().T
return _TensorDataLazyEvaluator.parse_data(m)
@staticmethod
def _correct_signature_from_indices(data, indices, free, dum, inverse=False):
"""
Utility function to correct the values inside the components data
ndarray according to whether indices are covariant or contravariant.
It uses the metric matrix to lower values of covariant indices.
"""
# change the ndarray values according covariantness/contravariantness of the indices
# use the metric
for i, indx in enumerate(indices):
if not indx.is_up and not inverse:
data = _TensorDataLazyEvaluator._flip_index_by_metric(data, indx.tensor_index_type.data, i)
elif not indx.is_up and inverse:
data = _TensorDataLazyEvaluator._flip_index_by_metric(
data,
_TensorDataLazyEvaluator.inverse_matrix(indx.tensor_index_type.data),
i
)
return data
@staticmethod
def _sort_data_axes(old, new):
from .array import permutedims
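# Permute the axes of ``old``'s data so that its free indices appear in the
# same order as the free indices of ``new``.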
new_data = old.data.copy()
old_free = [i[0] for i in old.free]
new_free = [i[0] for i in new.free]
for i in range(len(new_free)):
for j in range(i, len(old_free)):
if old_free[j] == new_free[i]:
old_free[i], old_free[j] = old_free[j], old_free[i]
new_data = permutedims(new_data, (i, j))
break
return new_data
@staticmethod
def add_rearrange_tensmul_parts(new_tensmul, old_tensmul):
def sorted_compo():
return _TensorDataLazyEvaluator._sort_data_axes(old_tensmul, new_tensmul)
_TensorDataLazyEvaluator._substitutions_dict[new_tensmul] = sorted_compo()
@staticmethod
def parse_data(data):
"""
Transform ``data`` to array. The parameter ``data`` may
contain data in various formats, e.g. nested lists, sympy ``Matrix``,
and so on.
Examples
========
>>> from sympy.tensor.tensor import _TensorDataLazyEvaluator
>>> _TensorDataLazyEvaluator.parse_data([1, 3, -6, 12])
[1, 3, -6, 12]
>>> _TensorDataLazyEvaluator.parse_data([[1, 2], [4, 7]])
[[1, 2], [4, 7]]
"""
from .array import MutableDenseNDimArray
if not isinstance(data, MutableDenseNDimArray):
if len(data) == 2 and hasattr(data[0], '__call__'):
data = MutableDenseNDimArray(data[0], data[1])
else:
data = MutableDenseNDimArray(data)
return data
_tensor_data_substitution_dict = _TensorDataLazyEvaluator()
class _TensorManager(object):
"""
Class to manage tensor properties.
Notes
=====
Tensors belong to tensor commutation groups; each group has a label
``comm``; there are predefined labels:
``0`` tensors commuting with any other tensor
``1`` tensors anticommuting among themselves
``2`` tensors not commuting, apart from those with ``comm=0``
Other groups can be defined using ``set_comm``; tensors in those
groups commute with those with ``comm=0``; by default they
do not commute with any other group.
"""
def __init__(self):
self._comm_init()
def _comm_init(self):
self._comm = [{} for i in range(3)]
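# self._comm[i][j] stores how commutation groups ``i`` and ``j`` relate:
# 0 -> commute, 1 -> anticommute, None -> no commutation property.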
for i in range(3):
self._comm[0][i] = 0
self._comm[i][0] = 0
self._comm[1][1] = 1
self._comm[2][1] = None
self._comm[1][2] = None
self._comm_symbols2i = {0:0, 1:1, 2:2}
self._comm_i2symbol = {0:0, 1:1, 2:2}
@property
def comm(self):
return self._comm
def comm_symbols2i(self, i):
"""
Get the commutation group number corresponding to ``i``.
``i`` can be a symbol, a number or a string.
If ``i`` is not already defined, its commutation group number
is set.
"""
if i not in self._comm_symbols2i:
n = len(self._comm)
self._comm.append({})
self._comm[n][0] = 0
self._comm[0][n] = 0
self._comm_symbols2i[i] = n
self._comm_i2symbol[n] = i
return n
return self._comm_symbols2i[i]
def comm_i2symbol(self, i):
"""
Returns the symbol corresponding to the commutation group number.
"""
return self._comm_i2symbol[i]
def set_comm(self, i, j, c):
"""
set the commutation parameter ``c`` for commutation groups ``i, j``
Parameters
==========
i, j : symbols representing commutation groups
c : group commutation number
Notes
=====
``i, j`` can be symbols, strings or numbers,
apart from ``0``, ``1`` and ``2``, which are reserved respectively
for commuting tensors, anticommuting tensors and tensors not commuting
with any other group apart from the commuting tensors.
For the remaining cases, use this method to set the commutation rules;
by default ``c=None``.
The group commutation number ``c`` is assigned in correspondence
to the group commutation symbols; it can be
0 commuting
1 anticommuting
None no commutation property
Examples
========
``G`` and ``GH`` do not commute with themselves and commute with
each other; A is commuting.
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorHead, TensorManager, TensorSymmetry
>>> Lorentz = TensorIndexType('Lorentz')
>>> i0,i1,i2,i3,i4 = tensor_indices('i0:5', Lorentz)
>>> A = TensorHead('A', [Lorentz])
>>> G = TensorHead('G', [Lorentz], TensorSymmetry.no_symmetry(1), 'Gcomm')
>>> GH = TensorHead('GH', [Lorentz], TensorSymmetry.no_symmetry(1), 'GHcomm')
>>> TensorManager.set_comm('Gcomm', 'GHcomm', 0)
>>> (GH(i1)*G(i0)).canon_bp()
G(i0)*GH(i1)
>>> (G(i1)*G(i0)).canon_bp()
G(i1)*G(i0)
>>> (G(i1)*A(i0)).canon_bp()
A(i0)*G(i1)
"""
if c not in (0, 1, None):
raise ValueError('`c` can assume only the values 0, 1 or None')
if i not in self._comm_symbols2i:
n = len(self._comm)
self._comm.append({})
self._comm[n][0] = 0
self._comm[0][n] = 0
self._comm_symbols2i[i] = n
self._comm_i2symbol[n] = i
if j not in self._comm_symbols2i:
n = len(self._comm)
self._comm.append({})
self._comm[0][n] = 0
self._comm[n][0] = 0
self._comm_symbols2i[j] = n
self._comm_i2symbol[n] = j
ni = self._comm_symbols2i[i]
nj = self._comm_symbols2i[j]
self._comm[ni][nj] = c
self._comm[nj][ni] = c
def set_comms(self, *args):
"""
set the commutation group numbers ``c`` for symbols ``i, j``
Parameters
==========
args : sequence of ``(i, j, c)``
"""
for i, j, c in args:
self.set_comm(i, j, c)
def get_comm(self, i, j):
"""
Return the commutation parameter for commutation group numbers ``i, j``
see ``_TensorManager.set_comm``
"""
return self._comm[i].get(j, 0 if i == 0 or j == 0 else None)
def clear(self):
"""
Clear the TensorManager.
"""
self._comm_init()
TensorManager = _TensorManager()
class TensorIndexType(Basic):
"""
A TensorIndexType is characterized by its name and its metric.
Parameters
==========
name : name of the tensor type
dummy_name : name of the head of dummy indices
dim : dimension, it can be a symbol or an integer or ``None``
eps_dim : dimension of the epsilon tensor
metric_symmetry : integer that denotes metric symmetry or `None` for no metric
metric_name : string with the name of the metric tensor
Attributes
==========
``metric`` : the metric tensor
``delta`` : ``Kronecker delta``
``epsilon`` : the ``Levi-Civita epsilon`` tensor
``data`` : (deprecated) a property to add ``ndarray`` values, to work in a specified basis.
Notes
=====
The possible values of the `metric_symmetry` parameter are:
``1`` : metric tensor is fully symmetric
``0`` : metric tensor possesses no index symmetry
``-1`` : metric tensor is fully antisymmetric
``None``: there is no metric tensor (metric equals to `None`)
The metric is assumed to be symmetric by default. It can also be set
to a custom tensor by the `.set_metric()` method.
If there is a metric the metric is used to raise and lower indices.
In the case of non-symmetric metric, the following raising and
lowering conventions will be adopted:
``psi(a) = g(a, b)*psi(-b); chi(-a) = chi(b)*g(-b, -a)``
From these it is easy to find:
``g(-a, b) = delta(-a, b)``
where ``delta(-a, b) = delta(b, -a)`` is the ``Kronecker delta``
(see ``TensorIndex`` for the conventions on indices).
For antisymmetric metrics there is also the following equality:
``g(a, -b) = -delta(a, -b)``
If there is no metric it is not possible to raise or lower indices;
e.g. the index of the defining representation of ``SU(N)``
is 'covariant' and the conjugate representation is
'contravariant'; for ``N > 2`` they are linearly independent.
``eps_dim`` is by default equal to ``dim``, if the latter is an integer;
else it can be assigned (for use in naive dimensional regularization);
if ``eps_dim`` is not an integer ``epsilon`` is ``None``.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> Lorentz.metric
metric(Lorentz,Lorentz)
"""
def __new__(cls, name, dummy_name=None, dim=None, eps_dim=None,
metric_symmetry=1, metric_name='metric', **kwargs):
if 'dummy_fmt' in kwargs:
SymPyDeprecationWarning(useinstead="dummy_name",
feature="dummy_fmt", issue=17517,
deprecated_since_version="1.5").warn()
dummy_name = kwargs.get('dummy_fmt')
if isinstance(name, string_types):
name = Symbol(name)
if dummy_name is None:
dummy_name = str(name)[0]
if isinstance(dummy_name, string_types):
dummy_name = Symbol(dummy_name)
if dim is None:
dim = Symbol("dim_" + dummy_name.name)
else:
dim = sympify(dim)
if eps_dim is None:
eps_dim = dim
else:
eps_dim = sympify(eps_dim)
metric_symmetry = sympify(metric_symmetry)
if isinstance(metric_name, string_types):
metric_name = Symbol(metric_name)
if 'metric' in kwargs:
SymPyDeprecationWarning(useinstead="metric_symmetry or .set_metric()",
feature="metric argument", issue=17517,
deprecated_since_version="1.5").warn()
metric = kwargs.get('metric')
if metric is not None:
if metric in (True, False, 0, 1):
metric_name = 'metric'
metric_antisym = metric
else:
metric_name = metric.name
metric_antisym = metric.antisym
if metric:
metric_symmetry = -1
else:
metric_symmetry = 1
obj = Basic.__new__(cls, name, dummy_name, dim, eps_dim,
metric_symmetry, metric_name)
obj._autogenerated = []
return obj
@property
def name(self):
return self.args[0].name
@property
def dummy_name(self):
return self.args[1].name
@property
def dim(self):
return self.args[2]
@property
def eps_dim(self):
return self.args[3]
@memoize_property
def metric(self):
metric_symmetry = self.args[4]
metric_name = self.args[5]
if metric_symmetry is None:
return None
if metric_symmetry == 0:
symmetry = TensorSymmetry.no_symmetry(2)
elif metric_symmetry == 1:
symmetry = TensorSymmetry.fully_symmetric(2)
elif metric_symmetry == -1:
symmetry = TensorSymmetry.fully_symmetric(-2)
return TensorHead(metric_name, [self]*2, symmetry)
@memoize_property
def delta(self):
return TensorHead('KD', [self]*2, TensorSymmetry.fully_symmetric(2))
@memoize_property
def epsilon(self):
if not isinstance(self.eps_dim, (SYMPY_INTS, Integer)):
return None
symmetry = TensorSymmetry.fully_symmetric(-self.eps_dim)
return TensorHead('Eps', [self]*self.eps_dim, symmetry)
def set_metric(self, tensor):
self._metric = tensor
def __lt__(self, other):
return self.name < other.name
def __str__(self):
return self.name
__repr__ = __str__
# Everything below this line is deprecated
@property
def data(self):
deprecate_data()
return _tensor_data_substitution_dict[self]
@data.setter
def data(self, data):
deprecate_data()
# This assignment is a bit controversial, should metric components be assigned
# to the metric only or also to the TensorIndexType object? The advantage here
# is the ability to assign a 1D array and transform it to a 2D diagonal array.
from .array import MutableDenseNDimArray
data = _TensorDataLazyEvaluator.parse_data(data)
if data.rank() > 2:
raise ValueError("data have to be of rank 1 (diagonal metric) or 2.")
if data.rank() == 1:
if self.dim.is_number:
nda_dim = data.shape[0]
if nda_dim != self.dim:
raise ValueError("Dimension mismatch")
dim = data.shape[0]
newndarray = MutableDenseNDimArray.zeros(dim, dim)
for i, val in enumerate(data):
newndarray[i, i] = val
data = newndarray
dim1, dim2 = data.shape
if dim1 != dim2:
raise ValueError("Non-square matrix tensor.")
if self.dim.is_number:
if self.dim != dim1:
raise ValueError("Dimension mismatch")
_tensor_data_substitution_dict[self] = data
_tensor_data_substitution_dict.add_metric_data(self.metric, data)
delta = self.get_kronecker_delta()
i1 = TensorIndex('i1', self)
i2 = TensorIndex('i2', self)
delta(i1, -i2).data = _TensorDataLazyEvaluator.parse_data(eye(dim1))
@data.deleter
def data(self):
deprecate_data()
if self in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self]
if self.metric in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self.metric]
@deprecated(useinstead=".delta", issue=17517,
deprecated_since_version="1.5")
def get_kronecker_delta(self):
sym2 = TensorSymmetry(get_symmetric_group_sgs(2))
delta = TensorHead('KD', [self]*2, sym2)
return delta
@deprecated(useinstead=".delta", issue=17517,
deprecated_since_version="1.5")
def get_epsilon(self):
if not isinstance(self._eps_dim, (SYMPY_INTS, Integer)):
return None
sym = TensorSymmetry(get_symmetric_group_sgs(self._eps_dim, 1))
epsilon = TensorHead('Eps', [self]*self._eps_dim, sym)
return epsilon
def _components_data_full_destroy(self):
"""
EXPERIMENTAL: do not rely on this API method.
This destroys components data associated to the ``TensorIndexType``, if
any, specifically:
* metric tensor data
* Kronecker tensor data
"""
if self in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self]
def delete_tensmul_data(key):
if key in _tensor_data_substitution_dict._substitutions_dict_tensmul:
del _tensor_data_substitution_dict._substitutions_dict_tensmul[key]
# delete metric data:
delete_tensmul_data((self.metric, True, True))
delete_tensmul_data((self.metric, True, False))
delete_tensmul_data((self.metric, False, True))
delete_tensmul_data((self.metric, False, False))
# delete delta tensor data:
delta = self.get_kronecker_delta()
if delta in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[delta]
class TensorIndex(Basic):
"""
Represents a tensor index
Parameters
==========
name : name of the index, or ``True`` if you want it to be automatically assigned
tensor_index_type : ``TensorIndexType`` of the index
is_up : flag for contravariant index (is_up=True by default)
Attributes
==========
``name``
``tensor_index_type``
``is_up``
Notes
=====
Tensor indices are contracted with the Einstein summation convention.
An index can be in contravariant or in covariant form; in the latter
case it is represented prepending a ``-`` to the index name. Adding
``-`` to a covariant (is_up=False) index makes it contravariant.
Dummy indices have a name with head given by
``tensor_index_type.dummy_name`` with underscore and a number.
Similar to ``symbols`` multiple contravariant indices can be created
at once using ``tensor_indices(s, typ)``, where ``s`` is a string
of names.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, TensorIndex, TensorHead, tensor_indices
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> mu = TensorIndex('mu', Lorentz, is_up=False)
>>> nu, rho = tensor_indices('nu, rho', Lorentz)
>>> A = TensorHead('A', [Lorentz, Lorentz])
>>> A(mu, nu)
A(-mu, nu)
>>> A(-mu, -rho)
A(mu, -rho)
>>> A(mu, -mu)
A(-L_0, L_0)
"""
def __new__(cls, name, tensor_index_type, is_up=True):
if isinstance(name, string_types):
name_symbol = Symbol(name)
elif isinstance(name, Symbol):
name_symbol = name
elif name is True:
name = "_i{0}".format(len(tensor_index_type._autogenerated))
name_symbol = Symbol(name)
tensor_index_type._autogenerated.append(name_symbol)
else:
raise ValueError("invalid name")
is_up = sympify(is_up)
return Basic.__new__(cls, name_symbol, tensor_index_type, is_up)
@property
def name(self):
return self.args[0].name
@property
def tensor_index_type(self):
return self.args[1]
@property
def is_up(self):
return self.args[2]
def _print(self):
s = self.name
if not self.is_up:
s = '-%s' % s
return s
def __lt__(self, other):
return ((self.tensor_index_type, self.name) <
(other.tensor_index_type, other.name))
def __neg__(self):
t1 = TensorIndex(self.name, self.tensor_index_type,
(not self.is_up))
return t1
def tensor_indices(s, typ):
"""
Returns list of tensor indices given their names and their types
Parameters
==========
s : string of comma separated names of indices
typ : ``TensorIndexType`` of the indices
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> a, b, c, d = tensor_indices('a,b,c,d', Lorentz)
"""
if isinstance(s, string_types):
a = [x.name for x in symbols(s, seq=True)]
else:
raise ValueError('expecting a string')
tilist = [TensorIndex(i, typ) for i in a]
if len(tilist) == 1:
return tilist[0]
return tilist
class TensorSymmetry(Basic):
"""
Monoterm symmetry of a tensor (i.e. any symmetric or anti-symmetric
index permutation). For the relevant terminology see ``tensor_can.py``
section of the combinatorics module.
Parameters
==========
bsgs : tuple ``(base, sgs)`` BSGS of the symmetry of the tensor
Attributes
==========
``base`` : base of the BSGS
``generators`` : generators of the BSGS
``rank`` : rank of the tensor
Notes
=====
A tensor can have an arbitrary monoterm symmetry provided by its BSGS.
Multiterm symmetries, like the cyclic symmetry of the Riemann tensor
(i.e., Bianchi identity), are not covered. See combinatorics module for
information on how to generate BSGS for a general index permutation group.
Simple symmetries can be generated using built-in methods.
See Also
========
sympy.combinatorics.tensor_can.get_symmetric_group_sgs
Examples
========
Define a symmetric tensor of rank 2
>>> from sympy.tensor.tensor import TensorIndexType, TensorSymmetry, get_symmetric_group_sgs, TensorHead
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> sym = TensorSymmetry(get_symmetric_group_sgs(2))
>>> T = TensorHead('T', [Lorentz]*2, sym)
Note that the same can also be done using built-in TensorSymmetry methods
>>> sym2 = TensorSymmetry.fully_symmetric(2)
>>> sym == sym2
True
"""
def __new__(cls, *args, **kw_args):
if len(args) == 1:
base, generators = args[0]
elif len(args) == 2:
base, generators = args
else:
raise TypeError("bsgs required, either two separate parameters or one tuple")
if not isinstance(base, Tuple):
base = Tuple(*base)
if not isinstance(generators, Tuple):
generators = Tuple(*generators)
return Basic.__new__(cls, base, generators, **kw_args)
@property
def base(self):
return self.args[0]
@property
def generators(self):
return self.args[1]
@property
def rank(self):
return self.generators[0].size - 2
@classmethod
def fully_symmetric(cls, rank):
"""
Returns a fully symmetric (antisymmetric if ``rank``<0)
TensorSymmetry object for ``abs(rank)`` indices.
"""
if rank > 0:
bsgs = get_symmetric_group_sgs(rank, False)
elif rank < 0:
bsgs = get_symmetric_group_sgs(-rank, True)
elif rank == 0:
bsgs = ([], [Permutation(1)])
return TensorSymmetry(bsgs)
@classmethod
def direct_product(cls, *args):
"""
Returns a TensorSymmetry object that is the direct product of
fully (anti-)symmetric index permutation groups.
Notes
=====
Some examples for different values of ``(*args)``:
``(1)`` vector, equivalent to ``TensorSymmetry.fully_symmetric(1)``
``(2)`` tensor with 2 symmetric indices, equivalent to ``.fully_symmetric(2)``
``(-2)`` tensor with 2 antisymmetric indices, equivalent to ``.fully_symmetric(-2)``
``(2, -2)`` tensor with the first 2 indices commuting and the last 2 anticommuting
``(1, 1, 1)`` tensor with 3 indices without any symmetry
"""
base, sgs = [], [Permutation(1)]
for arg in args:
if arg > 0:
bsgs2 = get_symmetric_group_sgs(arg, False)
elif arg < 0:
bsgs2 = get_symmetric_group_sgs(-arg, True)
else:
continue
base, sgs = bsgs_direct_product(base, sgs, *bsgs2)
return TensorSymmetry(base, sgs)
@classmethod
def riemann(cls):
"""
Returns a monoterm symmetry of the Riemann tensor
"""
return TensorSymmetry(riemann_bsgs)
@classmethod
def no_symmetry(cls, rank):
"""
TensorSymmetry object for ``rank`` indices with no symmetry
"""
return TensorSymmetry([], [Permutation(rank+1)])
@deprecated(useinstead="TensorSymmetry class constructor and methods", issue=17108,
deprecated_since_version="1.5")
def tensorsymmetry(*args):
"""
Returns a ``TensorSymmetry`` object. This method is deprecated, use
``TensorSymmetry.direct_product()`` or ``.riemann()`` instead.
One can represent a tensor with any monoterm slot symmetry group
using a BSGS.
``args`` can be a BSGS
``args[0]`` base
``args[1]`` sgs
Usually tensors are in (direct products of) representations
of the symmetric group;
``args`` can be a list of lists representing the shapes of Young tableaux
Notes
=====
For instance:
``[[1]]`` vector
``[[1]*n]`` symmetric tensor of rank ``n``
``[[n]]`` antisymmetric tensor of rank ``n``
``[[2, 2]]`` monoterm slot symmetry of the Riemann tensor
``[[1],[1]]`` vector*vector
``[[2],[1],[1]]`` (antisymmetric tensor)*vector*vector
Notice that with the shape ``[2, 2]`` we associate only the monoterm
symmetries of the Riemann tensor; this is an abuse of notation,
since the shape ``[2, 2]`` corresponds usually to the irreducible
representation characterized by the monoterm symmetries and by the
cyclic symmetry.
"""
from sympy.combinatorics import Permutation
def tableau2bsgs(a):
if len(a) == 1:
# antisymmetric vector
n = a[0]
bsgs = get_symmetric_group_sgs(n, 1)
else:
if all(x == 1 for x in a):
# symmetric vector
n = len(a)
bsgs = get_symmetric_group_sgs(n)
elif a == [2, 2]:
bsgs = riemann_bsgs
else:
raise NotImplementedError
return bsgs
if not args:
return TensorSymmetry(Tuple(), Tuple(Permutation(1)))
if len(args) == 2 and isinstance(args[1][0], Permutation):
return TensorSymmetry(args)
base, sgs = tableau2bsgs(args[0])
for a in args[1:]:
basex, sgsx = tableau2bsgs(a)
base, sgs = bsgs_direct_product(base, sgs, basex, sgsx)
return TensorSymmetry(Tuple(base, sgs))
class TensorType(Basic):
"""
Class of tensor types. Deprecated, use tensor_heads() instead.
Parameters
==========
index_types : list of ``TensorIndexType`` of the tensor indices
symmetry : ``TensorSymmetry`` of the tensor
Attributes
==========
``index_types``
``symmetry``
``types`` : list of ``TensorIndexType`` without repetitions
"""
is_commutative = False
def __new__(cls, index_types, symmetry, **kw_args):
deprecate_TensorType()
assert symmetry.rank == len(index_types)
obj = Basic.__new__(cls, Tuple(*index_types), symmetry, **kw_args)
return obj
@property
def index_types(self):
return self.args[0]
@property
def symmetry(self):
return self.args[1]
@property
def types(self):
return sorted(set(self.index_types), key=lambda x: x.name)
def __str__(self):
return 'TensorType(%s)' % ([str(x) for x in self.index_types])
def __call__(self, s, comm=0):
"""
Return a TensorHead object or a list of TensorHead objects.
``s`` name or string of names
``comm``: commutation group number
see ``_TensorManager.set_comm``
"""
if isinstance(s, string_types):
names = [x.name for x in symbols(s, seq=True)]
else:
raise ValueError('expecting a string')
if len(names) == 1:
return TensorHead(names[0], self.index_types, self.symmetry, comm)
else:
return [TensorHead(name, self.index_types, self.symmetry, comm) for name in names]
@deprecated(useinstead="TensorHead class constructor or tensor_heads()",
issue=17108, deprecated_since_version="1.5")
def tensorhead(name, typ, sym=None, comm=0):
"""
Function generating tensorhead(s). This method is deprecated,
use TensorHead constructor or tensor_heads() instead.
Parameters
==========
name : name or sequence of names (as in ``symbols``)
typ : index types
sym : same as ``*args`` in ``tensorsymmetry``
comm : commutation group number
see ``_TensorManager.set_comm``
"""
if sym is None:
sym = [[1] for i in range(len(typ))]
sym = tensorsymmetry(*sym)
return TensorHead(name, typ, sym, comm)
class TensorHead(Basic):
"""
Tensor head of the tensor
Parameters
==========
name : name of the tensor
index_types : list of TensorIndexType
symmetry : TensorSymmetry of the tensor
comm : commutation group number
Attributes
==========
``name``
``index_types``
``rank`` : total number of indices
``symmetry``
``comm`` : commutation group
Notes
=====
Similar to ``symbols``, multiple TensorHeads can be created using the
``tensorhead(s, typ, sym=None, comm=0)`` function, where ``s``
is the string of names and ``sym`` is the monoterm tensor symmetry
(see ``tensorsymmetry``).
A ``TensorHead`` belongs to a commutation group, defined by a
symbol on number ``comm`` (see ``_TensorManager.set_comm``);
tensors in a commutation group have the same commutation properties;
by default ``comm`` is ``0``, the group of the commuting tensors.
Examples
========
Define a fully antisymmetric tensor of rank 2:
>>> from sympy.tensor.tensor import TensorIndexType, TensorHead, TensorSymmetry
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> asym2 = TensorSymmetry.fully_symmetric(-2)
>>> A = TensorHead('A', [Lorentz, Lorentz], asym2)
Examples with ndarray values, the components data assigned to the
``TensorHead`` object are assumed to be in a fully-contravariant
representation. In case it is necessary to assign components data which
represents the values of a non-fully covariant tensor, see the other
examples.
>>> from sympy.tensor.tensor import tensor_indices
>>> from sympy import diag
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> i0, i1 = tensor_indices('i0:2', Lorentz)
Specify a replacement dictionary to keep track of the arrays to use for
replacements in the tensorial expression. The ``TensorIndexType`` is
associated to the metric used for contractions (in fully covariant form):
>>> repl = {Lorentz: diag(1, -1, -1, -1)}
Let's see some examples of working with components with the electromagnetic
tensor:
>>> from sympy import symbols
>>> Ex, Ey, Ez, Bx, By, Bz = symbols('E_x E_y E_z B_x B_y B_z')
>>> c = symbols('c', positive=True)
Let's define `F`, an antisymmetric tensor:
>>> F = TensorHead('F', [Lorentz, Lorentz], asym2)
Let's update the dictionary to contain the matrix to use in the
replacements:
>>> repl.update({F(-i0, -i1): [
... [0, Ex/c, Ey/c, Ez/c],
... [-Ex/c, 0, -Bz, By],
... [-Ey/c, Bz, 0, -Bx],
... [-Ez/c, -By, Bx, 0]]})
Now it is possible to retrieve the contravariant form of the Electromagnetic
tensor:
>>> F(i0, i1).replace_with_arrays(repl, [i0, i1])
[[0, -E_x/c, -E_y/c, -E_z/c], [E_x/c, 0, -B_z, B_y], [E_y/c, B_z, 0, -B_x], [E_z/c, -B_y, B_x, 0]]
and the mixed contravariant-covariant form:
>>> F(i0, -i1).replace_with_arrays(repl, [i0, -i1])
[[0, E_x/c, E_y/c, E_z/c], [E_x/c, 0, B_z, -B_y], [E_y/c, -B_z, 0, B_x], [E_z/c, B_y, -B_x, 0]]
Energy-momentum of a particle may be represented as:
>>> from sympy import symbols
>>> P = TensorHead('P', [Lorentz], TensorSymmetry.no_symmetry(1))
>>> E, px, py, pz = symbols('E p_x p_y p_z', positive=True)
>>> repl.update({P(i0): [E, px, py, pz]})
The contravariant and covariant components are, respectively:
>>> P(i0).replace_with_arrays(repl, [i0])
[E, p_x, p_y, p_z]
>>> P(-i0).replace_with_arrays(repl, [-i0])
[E, -p_x, -p_y, -p_z]
The contraction of a 1-index tensor by itself:
>>> expr = P(i0)*P(-i0)
>>> expr.replace_with_arrays(repl, [])
E**2 - p_x**2 - p_y**2 - p_z**2
"""
is_commutative = False
def __new__(cls, name, index_types, symmetry=None, comm=0):
if isinstance(name, string_types):
name_symbol = Symbol(name)
elif isinstance(name, Symbol):
name_symbol = name
else:
raise ValueError("invalid name")
if symmetry is None:
symmetry = TensorSymmetry.no_symmetry(len(index_types))
else:
assert symmetry.rank == len(index_types)
obj = Basic.__new__(cls, name_symbol, Tuple(*index_types), symmetry)
obj.comm = TensorManager.comm_symbols2i(comm)
return obj
@property
def name(self):
return self.args[0].name
@property
def index_types(self):
return list(self.args[1])
@property
def symmetry(self):
return self.args[2]
@property
def rank(self):
return len(self.index_types)
def __lt__(self, other):
return (self.name, self.index_types) < (other.name, other.index_types)
def commutes_with(self, other):
"""
Returns ``0`` if ``self`` and ``other`` commute, ``1`` if they anticommute.
Returns ``None`` if ``self`` and ``other`` neither commute nor anticommute.
"""
r = TensorManager.get_comm(self.comm, other.comm)
return r
def _print(self):
return '%s(%s)' %(self.name, ','.join([str(x) for x in self.index_types]))
def __call__(self, *indices, **kw_args):
"""
Returns a tensor with indices.
There is a special behavior in case of indices denoted by ``True``,
they are considered auto-matrix indices, their slots are automatically
filled, and confer to the tensor the behavior of a matrix or vector
upon multiplication with another tensor containing auto-matrix indices
of the same ``TensorIndexType``. This means indices get summed over the
same way as in matrix multiplication. For matrix behavior, define two
auto-matrix indices, for vector behavior define just one.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorSymmetry, TensorHead
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> a, b = tensor_indices('a,b', Lorentz)
>>> A = TensorHead('A', [Lorentz]*2, TensorSymmetry.no_symmetry(2))
>>> t = A(a, -b)
>>> t
A(a, -b)
"""
tensor = Tensor(self, indices, **kw_args)
return tensor.doit()
# Everything below this line is deprecated
def __pow__(self, other):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=SymPyDeprecationWarning)
if self.data is None:
raise ValueError("No power on abstract tensors.")
deprecate_data()
from .array import tensorproduct, tensorcontraction
metrics = [_.data for _ in self.index_types]
marray = self.data
marraydim = marray.rank()
for metric in metrics:
marray = tensorproduct(marray, metric, marray)
marray = tensorcontraction(marray, (0, marraydim), (marraydim+1, marraydim+2))
return marray ** (other * S.Half)
@property
def data(self):
deprecate_data()
return _tensor_data_substitution_dict[self]
@data.setter
def data(self, data):
deprecate_data()
_tensor_data_substitution_dict[self] = data
@data.deleter
def data(self):
deprecate_data()
if self in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self]
def __iter__(self):
deprecate_data()
return self.data.__iter__()
def _components_data_full_destroy(self):
"""
EXPERIMENTAL: do not rely on this API method.
Destroy components data associated to the ``TensorHead`` object, this
checks for attached components data, and destroys components data too.
"""
# do not garbage collect Kronecker tensor (it should be done by
# ``TensorIndexType`` garbage collection)
deprecate_data()
if self.name == "KD":
return
# the data attached to a tensor must be deleted only by the TensorHead
# destructor. If the TensorHead is deleted, it means that there are no
# more instances of that tensor anywhere.
if self in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self]
def tensor_heads(s, index_types, symmetry=None, comm=0):
"""
Returns a sequence of TensorHeads from a string `s`
"""
if isinstance(s, string_types):
names = [x.name for x in symbols(s, seq=True)]
else:
raise ValueError('expecting a string')
thlist = [TensorHead(name, index_types, symmetry, comm) for name in names]
if len(thlist) == 1:
return thlist[0]
return thlist
class TensExpr(Expr):
"""
Abstract base class for tensor expressions
Notes
=====
A tensor expression is an expression formed by tensors;
currently the sums of tensors are distributed.
A ``TensExpr`` can be a ``TensAdd`` or a ``TensMul``.
``TensMul`` objects are formed by products of component tensors,
and include a coefficient, which is a SymPy expression.
In the internal representation contracted indices are represented
by ``(ipos1, ipos2, icomp1, icomp2)``, where ``icomp1`` is the position
of the component tensor with contravariant index, ``ipos1`` is the
slot which the index occupies in that component tensor.
Contracted indices are therefore nameless in the internal representation.
"""
_op_priority = 12.0
is_commutative = False
def __neg__(self):
return self*S.NegativeOne
def __abs__(self):
raise NotImplementedError
def __add__(self, other):
return TensAdd(self, other).doit()
def __radd__(self, other):
return TensAdd(other, self).doit()
def __sub__(self, other):
return TensAdd(self, -other).doit()
def __rsub__(self, other):
return TensAdd(other, -self).doit()
def __mul__(self, other):
"""
Multiply two tensors using Einstein summation convention.
If the two tensors have an index in common, one contravariant
and the other covariant, that index is summed over in their product.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz)
>>> g = Lorentz.metric
>>> p, q = tensor_heads('p,q', [Lorentz])
>>> t1 = p(m0)
>>> t2 = q(-m0)
>>> t1*t2
p(L_0)*q(-L_0)
"""
return TensMul(self, other).doit()
def __rmul__(self, other):
return TensMul(other, self).doit()
def __div__(self, other):
other = _sympify(other)
if isinstance(other, TensExpr):
raise ValueError('cannot divide by a tensor')
return TensMul(self, S.One/other).doit()
def __rdiv__(self, other):
raise ValueError('cannot divide by a tensor')
def __pow__(self, other):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=SymPyDeprecationWarning)
if self.data is None:
raise ValueError("No power without ndarray data.")
deprecate_data()
from .array import tensorproduct, tensorcontraction
free = self.free
marray = self.data
mdim = marray.rank()
for metric in free:
marray = tensorcontraction(
tensorproduct(
marray,
metric[0].tensor_index_type.data,
marray),
(0, mdim), (mdim+1, mdim+2)
)
return marray ** (other * S.Half)
def __rpow__(self, other):
raise NotImplementedError
__truediv__ = __div__
__rtruediv__ = __rdiv__
def fun_eval(self, *index_tuples):
deprecate_fun_eval()
return self.substitute_indices(*index_tuples)
def get_matrix(self):
"""
DEPRECATED: do not use.
Returns ndarray components data as a matrix, if components data are
available and ndarray dimension does not exceed 2.
"""
from sympy import Matrix
deprecate_data()
if 0 < self.rank <= 2:
rows = self.data.shape[0]
columns = self.data.shape[1] if self.rank == 2 else 1
if self.rank == 2:
mat_list = []
for i in range(rows):
mat_list.append([])
for j in range(columns):
mat_list[i].append(self[i, j])
else:
mat_list = [None] * rows
for i in range(rows):
mat_list[i] = self[i]
return Matrix(mat_list)
else:
raise NotImplementedError(
"missing multidimensional reduction to matrix.")
@staticmethod
def _get_indices_permutation(indices1, indices2):
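# For each index in ``indices2``, return its position in ``indices1``;
# used with ``permutedims`` to reorder array axes between index orders.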
return [indices1.index(i) for i in indices2]
def expand(self, **hints):
return _expand(self, **hints).doit()
def _expand(self, **kwargs):
return self
def _get_free_indices_set(self):
indset = set([])
for arg in self.args:
if isinstance(arg, TensExpr):
indset.update(arg._get_free_indices_set())
return indset
def _get_dummy_indices_set(self):
indset = set([])
for arg in self.args:
if isinstance(arg, TensExpr):
indset.update(arg._get_dummy_indices_set())
return indset
def _get_indices_set(self):
indset = set([])
for arg in self.args:
if isinstance(arg, TensExpr):
indset.update(arg._get_indices_set())
return indset
@property
def _iterate_dummy_indices(self):
dummy_set = self._get_dummy_indices_set()
def recursor(expr, pos):
if isinstance(expr, TensorIndex):
if expr in dummy_set:
yield (expr, pos)
elif isinstance(expr, (Tuple, TensExpr)):
for p, arg in enumerate(expr.args):
for i in recursor(arg, pos+(p,)):
yield i
return recursor(self, ())
@property
def _iterate_free_indices(self):
free_set = self._get_free_indices_set()
def recursor(expr, pos):
if isinstance(expr, TensorIndex):
if expr in free_set:
yield (expr, pos)
elif isinstance(expr, (Tuple, TensExpr)):
for p, arg in enumerate(expr.args):
for i in recursor(arg, pos+(p,)):
yield i
return recursor(self, ())
@property
def _iterate_indices(self):
def recursor(expr, pos):
if isinstance(expr, TensorIndex):
yield (expr, pos)
elif isinstance(expr, (Tuple, TensExpr)):
for p, arg in enumerate(expr.args):
for i in recursor(arg, pos+(p,)):
yield i
return recursor(self, ())
@staticmethod
def _match_indices_with_other_tensor(array, free_ind1, free_ind2, replacement_dict):
from .array import tensorcontraction, tensorproduct, permutedims
index_types1 = [i.tensor_index_type for i in free_ind1]
# Check if variance of indices needs to be fixed:
pos2up = []
pos2down = []
free2remaining = free_ind2[:]
for pos1, index1 in enumerate(free_ind1):
if index1 in free2remaining:
pos2 = free2remaining.index(index1)
free2remaining[pos2] = None
continue
if -index1 in free2remaining:
pos2 = free2remaining.index(-index1)
free2remaining[pos2] = None
free_ind2[pos2] = index1
if index1.is_up:
pos2up.append(pos2)
else:
pos2down.append(pos2)
else:
index2 = free2remaining[pos1]
if index2 is None:
raise ValueError("incompatible indices: %s and %s" % (free_ind1, free_ind2))
free2remaining[pos1] = None
free_ind2[pos1] = index1
if index1.is_up ^ index2.is_up:
if index1.is_up:
pos2up.append(pos1)
else:
pos2down.append(pos1)
if len(set(free_ind1) & set(free_ind2)) < len(free_ind1):
raise ValueError("incompatible indices: %s and %s" % (free_ind1, free_ind2))
# TODO: add possibility of metric after (spinors)
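# Helper: contract ``metric`` with the index of ``array`` at slot ``pos``,
# then permute the axes so the new index returns to slot ``pos``.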
def contract_and_permute(metric, array, pos):
array = tensorcontraction(tensorproduct(metric, array), (1, 2+pos))
permu = list(range(len(free_ind1)))
permu[0], permu[pos] = permu[pos], permu[0]
return permutedims(array, permu)
# Raise indices:
for pos in pos2up:
metric = replacement_dict[index_types1[pos]]
metric_inverse = _TensorDataLazyEvaluator.inverse_matrix(metric)
array = contract_and_permute(metric_inverse, array, pos)
# Lower indices:
for pos in pos2down:
metric = replacement_dict[index_types1[pos]]
array = contract_and_permute(metric, array, pos)
if free_ind1:
permutation = TensExpr._get_indices_permutation(free_ind2, free_ind1)
array = permutedims(array, permutation)
if hasattr(array, "rank") and array.rank() == 0:
array = array[()]
return free_ind2, array
def replace_with_arrays(self, replacement_dict, indices=None):
"""
Replace the tensorial expressions with arrays. The final array will
correspond to the N-dimensional array with indices arranged according
to ``indices``.
Parameters
==========
replacement_dict
dictionary containing the replacement rules for tensors.
indices
the index order with respect to which the array is read. The
original index order will be used if no value is passed.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices
>>> from sympy.tensor.tensor import TensorHead
>>> from sympy import symbols, diag
>>> L = TensorIndexType("L")
>>> i, j = tensor_indices("i j", L)
>>> A = TensorHead("A", [L])
>>> A(i).replace_with_arrays({A(i): [1, 2]}, [i])
[1, 2]
Since ``indices`` is optional, ``replace_with_arrays`` can also be called
this way if no specific index order is needed:
>>> A(i).replace_with_arrays({A(i): [1, 2]})
[1, 2]
>>> expr = A(i)*A(j)
>>> expr.replace_with_arrays({A(i): [1, 2]})
[[1, 2], [2, 4]]
For contractions, specify the metric of the ``TensorIndexType``, which
in this case is ``L``, in its covariant form:
>>> expr = A(i)*A(-i)
>>> expr.replace_with_arrays({A(i): [1, 2], L: diag(1, -1)})
-3
Symmetrization of an array:
>>> H = TensorHead("H", [L, L])
>>> a, b, c, d = symbols("a b c d")
>>> expr = H(i, j)/2 + H(j, i)/2
>>> expr.replace_with_arrays({H(i, j): [[a, b], [c, d]]})
[[a, b/2 + c/2], [b/2 + c/2, d]]
Anti-symmetrization of an array:
>>> expr = H(i, j)/2 - H(j, i)/2
>>> repl = {H(i, j): [[a, b], [c, d]]}
>>> expr.replace_with_arrays(repl)
[[0, b/2 - c/2], [-b/2 + c/2, 0]]
The same expression can be read as the transpose by inverting ``i`` and
``j``:
>>> expr.replace_with_arrays(repl, [j, i])
[[0, -b/2 + c/2], [b/2 - c/2, 0]]
"""
from .array import Array
indices = indices or []
replacement_dict = {tensor: Array(array) for tensor, array in replacement_dict.items()}
# Check dimensions of replaced arrays:
for tensor, array in replacement_dict.items():
if isinstance(tensor, TensorIndexType):
expected_shape = [tensor.dim for i in range(2)]
else:
expected_shape = [index_type.dim for index_type in tensor.index_types]
if len(expected_shape) != array.rank() or (not all([dim1 == dim2 if
dim1.is_number else True for dim1, dim2 in zip(expected_shape,
array.shape)])):
raise ValueError("shapes for tensor %s expected to be %s, "\
"replacement array shape is %s" % (tensor, expected_shape,
array.shape))
ret_indices, array = self._extract_data(replacement_dict)
last_indices, array = self._match_indices_with_other_tensor(array, indices, ret_indices, replacement_dict)
#permutation = self._get_indices_permutation(indices, ret_indices)
#if not hasattr(array, "rank"):
#return array
#if array.rank() == 0:
#array = array[()]
#return array
#array = permutedims(array, permutation)
return array
def _check_add_Sum(self, expr, index_symbols):
from sympy import Sum
indices = self.get_indices()
dum = self.dum
sum_indices = [ (index_symbols[i], 0,
indices[i].tensor_index_type.dim-1) for i, j in dum]
if sum_indices:
expr = Sum(expr, *sum_indices)
return expr
class TensAdd(TensExpr, AssocOp):
"""
Sum of tensors
Parameters
==========
free_args : list of the free indices
Attributes
==========
``args`` : tuple of addends
``rank`` : rank of the tensor
``free_args`` : list of the free indices in sorted order
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_heads, tensor_indices
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> a, b = tensor_indices('a,b', Lorentz)
>>> p, q = tensor_heads('p,q', [Lorentz])
>>> t = p(a) + q(a); t
p(a) + q(a)
Examples with components data added to the tensor expression:
>>> from sympy import symbols, diag
>>> x, y, z, t = symbols("x y z t")
>>> repl = {}
>>> repl[Lorentz] = diag(1, -1, -1, -1)
>>> repl[p(a)] = [1, 2, 3, 4]
>>> repl[q(a)] = [x, y, z, t]
The replacement arrays are added componentwise:
>>> expr = p(a) + q(a)
>>> expr.replace_with_arrays(repl, [a])
[x + 1, y + 2, z + 3, t + 4]
"""
def __new__(cls, *args, **kw_args):
args = [_sympify(x) for x in args if x]
args = TensAdd._tensAdd_flatten(args)
if not args:
return S.Zero
if len(args) == 1:
return args[0]
return Basic.__new__(cls, *args, **kw_args)
@memoize_property
def rank(self):
if isinstance(self.args[0], TensExpr):
return self.args[0].rank
else:
return 0
@memoize_property
def free_args(self):
if isinstance(self.args[0], TensExpr):
return self.args[0].free_args
else:
return []
@memoize_property
def free_indices(self):
if isinstance(self.args[0], TensExpr):
return self.args[0].free_indices
else:
return set()
def doit(self, **kwargs):
deep = kwargs.get('deep', True)
if deep:
args = [arg.doit(**kwargs) for arg in self.args]
else:
args = self.args
if not args:
return S.Zero
if len(args) == 1 and not isinstance(args[0], TensExpr):
return args[0]
# now check that all addends have the same indices:
TensAdd._tensAdd_check(args)
# if TensAdd has only 1 element in its `args`:
if len(args) == 1: # and isinstance(args[0], TensMul):
return args[0]
# Remove zeros:
args = [x for x in args if x]
# if there are no more args (i.e. have cancelled out),
# just return zero:
if not args:
return S.Zero
if len(args) == 1:
return args[0]
# Collect terms appearing more than once, differing by their coefficients:
args = TensAdd._tensAdd_collect_terms(args)
# collect canonicalized terms
def sort_key(t):
x = get_index_structure(t)
if not isinstance(t, TensExpr):
return ([], [], [])
return (t.components, x.free, x.dum)
args.sort(key=sort_key)
if not args:
return S.Zero
        # if there is only one component tensor, return it
if len(args) == 1:
return args[0]
obj = self.func(*args)
return obj
@staticmethod
def _tensAdd_flatten(args):
# flatten TensAdd, coerce terms which are not tensors to tensors
a = []
for x in args:
if isinstance(x, (Add, TensAdd)):
a.extend(list(x.args))
else:
a.append(x)
args = [x for x in a if x.coeff]
return args
@staticmethod
def _tensAdd_check(args):
# check that all addends have the same free indices
indices0 = set([x[0] for x in get_index_structure(args[0]).free])
list_indices = [set([y[0] for y in get_index_structure(x).free]) for x in args[1:]]
if not all(x == indices0 for x in list_indices):
raise ValueError('all tensors must have the same indices')
@staticmethod
def _tensAdd_collect_terms(args):
# collect TensMul terms differing at most by their coefficient
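        # e.g. the list [2*p(a), 3*p(a), q(a)] is collected into [5*p(a), q(a)]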
terms_dict = defaultdict(list)
scalars = S.Zero
if isinstance(args[0], TensExpr):
free_indices = set(args[0].get_free_indices())
else:
free_indices = set([])
for arg in args:
if not isinstance(arg, TensExpr):
if free_indices != set([]):
raise ValueError("wrong valence")
scalars += arg
continue
if free_indices != set(arg.get_free_indices()):
raise ValueError("wrong valence")
# TODO: what is the part which is not a coeff?
# needs an implementation similar to .as_coeff_Mul()
terms_dict[arg.nocoeff].append(arg.coeff)
new_args = [TensMul(Add(*coeff), t).doit() for t, coeff in terms_dict.items() if Add(*coeff) != 0]
if isinstance(scalars, Add):
new_args = list(scalars.args) + new_args
elif scalars != 0:
new_args = [scalars] + new_args
return new_args
def get_indices(self):
indices = []
for arg in self.args:
indices.extend([i for i in get_indices(arg) if i not in indices])
return indices
def _expand(self, **hints):
return TensAdd(*[_expand(i, **hints) for i in self.args])
def __call__(self, *indices):
deprecate_fun_eval()
free_args = self.free_args
indices = list(indices)
if [x.tensor_index_type for x in indices] != [x.tensor_index_type for x in free_args]:
raise ValueError('incompatible types')
if indices == free_args:
return self
index_tuples = list(zip(free_args, indices))
a = [x.func(*x.substitute_indices(*index_tuples).args) for x in self.args]
res = TensAdd(*a).doit()
return res
def canon_bp(self):
"""
Canonicalize using the Butler-Portugal algorithm for canonicalization
under monoterm symmetries.
"""
expr = self.expand()
args = [canon_bp(x) for x in expr.args]
res = TensAdd(*args).doit()
return res
def equals(self, other):
other = _sympify(other)
if isinstance(other, TensMul) and other.coeff == 0:
return all(x.coeff == 0 for x in self.args)
if isinstance(other, TensExpr):
if self.rank != other.rank:
return False
if isinstance(other, TensAdd):
if set(self.args) != set(other.args):
return False
else:
return True
t = self - other
if not isinstance(t, TensExpr):
return t == 0
else:
if isinstance(t, TensMul):
return t.coeff == 0
else:
return all(x.coeff == 0 for x in t.args)
def __getitem__(self, item):
deprecate_data()
return self.data[item]
def contract_delta(self, delta):
args = [x.contract_delta(delta) for x in self.args]
t = TensAdd(*args).doit()
return canon_bp(t)
def contract_metric(self, g):
"""
Raise or lower indices with the metric ``g``
Parameters
==========
g : metric
Notes
=====
see the ``TensorIndexType`` docstring for the contraction conventions
"""
args = [contract_metric(x, g) for x in self.args]
t = TensAdd(*args).doit()
return canon_bp(t)
def substitute_indices(self, *index_tuples):
new_args = []
for arg in self.args:
if isinstance(arg, TensExpr):
arg = arg.substitute_indices(*index_tuples)
new_args.append(arg)
return TensAdd(*new_args).doit()
def _print(self):
a = []
args = self.args
for x in args:
a.append(str(x))
a.sort()
s = ' + '.join(a)
s = s.replace('+ -', '- ')
return s
def _extract_data(self, replacement_dict):
from sympy.tensor.array import Array, permutedims
args_indices, arrays = zip(*[
arg._extract_data(replacement_dict) if
isinstance(arg, TensExpr) else ([], arg) for arg in self.args
])
arrays = [Array(i) for i in arrays]
ref_indices = args_indices[0]
for i in range(1, len(args_indices)):
indices = args_indices[i]
array = arrays[i]
permutation = TensMul._get_indices_permutation(indices, ref_indices)
arrays[i] = permutedims(array, permutation)
return ref_indices, sum(arrays, Array.zeros(*array.shape))
@property
def data(self):
deprecate_data()
return _tensor_data_substitution_dict[self.expand()]
@data.setter
def data(self, data):
deprecate_data()
_tensor_data_substitution_dict[self] = data
@data.deleter
def data(self):
deprecate_data()
if self in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self]
def __iter__(self):
deprecate_data()
if not self.data:
raise ValueError("No iteration on abstract tensors")
return self.data.flatten().__iter__()
def _eval_rewrite_as_Indexed(self, *args):
return Add.fromiter(args)
class Tensor(TensExpr):
"""
Base tensor class, i.e. this represents a tensor, the single unit to be
put into an expression.
This object is usually created from a ``TensorHead``, by attaching indices
    to it. Indices preceded by a minus sign are considered covariant,
    otherwise contravariant.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorHead
>>> Lorentz = TensorIndexType("Lorentz", dummy_name="L")
>>> mu, nu = tensor_indices('mu nu', Lorentz)
>>> A = TensorHead("A", [Lorentz, Lorentz])
>>> A(mu, -nu)
A(mu, -nu)
>>> A(mu, -mu)
A(L_0, -L_0)
    It is also possible to use symbols instead of indices (appropriate indices
are then generated automatically).
>>> from sympy import Symbol
>>> x = Symbol('x')
>>> A(x, mu)
A(x, mu)
>>> A(x, -x)
A(L_0, -L_0)
"""
is_commutative = False
def __new__(cls, tensor_head, indices, **kw_args):
is_canon_bp = kw_args.pop('is_canon_bp', False)
indices = cls._parse_indices(tensor_head, indices)
obj = Basic.__new__(cls, tensor_head, Tuple(*indices), **kw_args)
obj._index_structure = _IndexStructure.from_indices(*indices)
obj.free = obj._index_structure.free[:]
obj.dum = obj._index_structure.dum[:]
obj.ext_rank = obj._index_structure._ext_rank
obj.coeff = S.One
obj.nocoeff = obj
obj.component = tensor_head
obj.components = [tensor_head]
if tensor_head.rank != len(indices):
raise ValueError("wrong number of indices")
obj.is_canon_bp = is_canon_bp
obj._index_map = Tensor._build_index_map(indices, obj._index_structure)
return obj
@property
def head(self):
return self.args[0]
@property
def indices(self):
return self.args[1]
@property
def free_indices(self):
return set(self._index_structure.get_free_indices())
@property
def index_types(self):
return self.head.index_types
@property
def rank(self):
return len(self.free_indices)
@staticmethod
def _build_index_map(indices, index_structure):
index_map = {}
for idx in indices:
index_map[idx] = (indices.index(idx),)
return index_map
def doit(self, **kwargs):
args, indices, free, dum = TensMul._tensMul_contract_indices([self])
return args[0]
@staticmethod
def _parse_indices(tensor_head, indices):
if not isinstance(indices, (tuple, list, Tuple)):
raise TypeError("indices should be an array, got %s" % type(indices))
indices = list(indices)
for i, index in enumerate(indices):
if isinstance(index, Symbol):
indices[i] = TensorIndex(index, tensor_head.index_types[i], True)
elif isinstance(index, Mul):
c, e = index.as_coeff_Mul()
if c == -1 and isinstance(e, Symbol):
indices[i] = TensorIndex(e, tensor_head.index_types[i], False)
else:
raise ValueError("index not understood: %s" % index)
elif not isinstance(index, TensorIndex):
raise TypeError("wrong type for index: %s is %s" % (index, type(index)))
return indices
def _set_new_index_structure(self, im, is_canon_bp=False):
indices = im.get_indices()
return self._set_indices(*indices, is_canon_bp=is_canon_bp)
def _set_indices(self, *indices, **kw_args):
if len(indices) != self.ext_rank:
raise ValueError("indices length mismatch")
return self.func(self.args[0], indices, is_canon_bp=kw_args.pop('is_canon_bp', False)).doit()
def _get_free_indices_set(self):
return set([i[0] for i in self._index_structure.free])
def _get_dummy_indices_set(self):
dummy_pos = set(itertools.chain(*self._index_structure.dum))
return set(idx for i, idx in enumerate(self.args[1]) if i in dummy_pos)
def _get_indices_set(self):
return set(self.args[1].args)
@property
def free_in_args(self):
return [(ind, pos, 0) for ind, pos in self.free]
@property
def dum_in_args(self):
return [(p1, p2, 0, 0) for p1, p2 in self.dum]
@property
def free_args(self):
return sorted([x[0] for x in self.free])
def commutes_with(self, other):
"""
:param other:
:return:
0 commute
1 anticommute
None neither commute nor anticommute
"""
if not isinstance(other, TensExpr):
return 0
elif isinstance(other, Tensor):
return self.component.commutes_with(other.component)
return NotImplementedError
def perm2tensor(self, g, is_canon_bp=False):
"""
Returns the tensor corresponding to the permutation ``g``
For further details, see the method in ``TIDS`` with the same name.
"""
return perm2tensor(self, g, is_canon_bp)
def canon_bp(self):
if self.is_canon_bp:
return self
expr = self.expand()
g, dummies, msym = expr._index_structure.indices_canon_args()
v = components_canon_args([expr.component])
can = canonicalize(g, dummies, msym, *v)
if can == 0:
return S.Zero
tensor = self.perm2tensor(can, True)
return tensor
def split(self):
return [self]
def _expand(self, **kwargs):
return self
def sorted_components(self):
return self
def get_indices(self):
"""
Get a list of indices, corresponding to those of the tensor.
"""
return list(self.args[1])
def get_free_indices(self):
"""
Get a list of free indices, corresponding to those of the tensor.
"""
return self._index_structure.get_free_indices()
def as_base_exp(self):
return self, S.One
def substitute_indices(self, *index_tuples):
"""
Return a tensor with free indices substituted according to ``index_tuples``
``index_types`` list of tuples ``(old_index, new_index)``
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads, TensorSymmetry
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> i, j, k, l = tensor_indices('i,j,k,l', Lorentz)
>>> A, B = tensor_heads('A,B', [Lorentz]*2, TensorSymmetry.fully_symmetric(2))
>>> t = A(i, k)*B(-k, -j); t
A(i, L_0)*B(-L_0, -j)
>>> t.substitute_indices((i, k),(-j, l))
A(k, L_0)*B(-L_0, l)
"""
indices = []
for index in self.indices:
for ind_old, ind_new in index_tuples:
if (index.name == ind_old.name and index.tensor_index_type ==
ind_old.tensor_index_type):
if index.is_up == ind_old.is_up:
indices.append(ind_new)
else:
indices.append(-ind_new)
break
else:
indices.append(index)
return self.head(*indices)
def __call__(self, *indices):
deprecate_fun_eval()
free_args = self.free_args
indices = list(indices)
if [x.tensor_index_type for x in indices] != [x.tensor_index_type for x in free_args]:
raise ValueError('incompatible types')
if indices == free_args:
return self
t = self.substitute_indices(*list(zip(free_args, indices)))
# object is rebuilt in order to make sure that all contracted indices
# get recognized as dummies, but only if there are contracted indices.
if len(set(i if i.is_up else -i for i in indices)) != len(indices):
return t.func(*t.args)
return t
# TODO: put this into TensExpr?
def __iter__(self):
deprecate_data()
return self.data.__iter__()
# TODO: put this into TensExpr?
def __getitem__(self, item):
deprecate_data()
return self.data[item]
def _extract_data(self, replacement_dict):
from .array import Array
for k, v in replacement_dict.items():
if isinstance(k, Tensor) and k.args[0] == self.args[0]:
other = k
array = v
break
else:
raise ValueError("%s not found in %s" % (self, replacement_dict))
# TODO: inefficient, this should be done at root level only:
replacement_dict = {k: Array(v) for k, v in replacement_dict.items()}
array = Array(array)
dum1 = self.dum
dum2 = other.dum
if len(dum2) > 0:
for pair in dum2:
# allow `dum2` if the contained values are also in `dum1`.
if pair not in dum1:
raise NotImplementedError("%s with contractions is not implemented" % other)
# Remove elements in `dum2` from `dum1`:
dum1 = [pair for pair in dum1 if pair not in dum2]
if len(dum1) > 0:
indices2 = other.get_indices()
repl = {}
for p1, p2 in dum1:
repl[indices2[p2]] = -indices2[p1]
other = other.xreplace(repl).doit()
array = _TensorDataLazyEvaluator.data_contract_dum([array], dum1, len(indices2))
free_ind1 = self.get_free_indices()
free_ind2 = other.get_free_indices()
return self._match_indices_with_other_tensor(array, free_ind1, free_ind2, replacement_dict)
@property
def data(self):
deprecate_data()
return _tensor_data_substitution_dict[self]
@data.setter
def data(self, data):
deprecate_data()
# TODO: check data compatibility with properties of tensor.
_tensor_data_substitution_dict[self] = data
@data.deleter
def data(self):
deprecate_data()
if self in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self]
if self.metric in _tensor_data_substitution_dict:
del _tensor_data_substitution_dict[self.metric]
def _print(self):
indices = [str(ind) for ind in self.indices]
component = self.component
if component.rank > 0:
return ('%s(%s)' % (component.name, ', '.join(indices)))
else:
return ('%s' % component.name)
def equals(self, other):
if other == 0:
return self.coeff == 0
other = _sympify(other)
if not isinstance(other, TensExpr):
assert not self.components
return S.One == other
def _get_compar_comp(self):
t = self.canon_bp()
r = (t.coeff, tuple(t.components), \
tuple(sorted(t.free)), tuple(sorted(t.dum)))
return r
return _get_compar_comp(self) == _get_compar_comp(other)
def contract_metric(self, g):
# if metric is not the same, ignore this step:
if self.component != g:
return self
# in case there are free components, do not perform anything:
if len(self.free) != 0:
return self
#antisym = g.index_types[0].metric_antisym
if g.symmetry == TensorSymmetry.fully_symmetric(-2):
antisym = 1
elif g.symmetry == TensorSymmetry.fully_symmetric(2):
antisym = 0
elif g.symmetry == TensorSymmetry.no_symmetry(2):
antisym = None
else:
raise NotImplementedError
sign = S.One
typ = g.index_types[0]
if not antisym:
# g(i, -i)
sign = sign*typ.dim
else:
# g(i, -i)
sign = sign*typ.dim
dp0, dp1 = self.dum[0]
if dp0 < dp1:
# g(i, -i) = -D with antisymmetric metric
sign = -sign
return sign
def contract_delta(self, metric):
return self.contract_metric(metric)
def _eval_rewrite_as_Indexed(self, tens, indices):
from sympy import Indexed
# TODO: replace .args[0] with .name:
index_symbols = [i.args[0] for i in self.get_indices()]
expr = Indexed(tens.args[0], *index_symbols)
return self._check_add_Sum(expr, index_symbols)
class TensMul(TensExpr, AssocOp):
"""
Product of tensors
Parameters
==========
coeff : SymPy coefficient of the tensor
args
Attributes
==========
``components`` : list of ``TensorHead`` of the component tensors
``types`` : list of nonrepeated ``TensorIndexType``
``free`` : list of ``(ind, ipos, icomp)``, see Notes
``dum`` : list of ``(ipos1, ipos2, icomp1, icomp2)``, see Notes
``ext_rank`` : rank of the tensor counting the dummy indices
``rank`` : rank of the tensor
``coeff`` : SymPy coefficient of the tensor
``free_args`` : list of the free indices in sorted order
    ``is_canon_bp`` : ``True`` if the tensor is in canonical form
Notes
=====
``args[0]`` list of ``TensorHead`` of the component tensors.
``args[1]`` list of ``(ind, ipos, icomp)``
where ``ind`` is a free index, ``ipos`` is the slot position
of ``ind`` in the ``icomp``-th component tensor.
``args[2]`` list of tuples representing dummy indices.
``(ipos1, ipos2, icomp1, icomp2)`` indicates that the contravariant
dummy index is the ``ipos1``-th slot position in the ``icomp1``-th
component tensor; the corresponding covariant index is
in the ``ipos2`` slot position in the ``icomp2``-th component tensor.
"""
identity = S.One
def __new__(cls, *args, **kw_args):
is_canon_bp = kw_args.get('is_canon_bp', False)
args = list(map(_sympify, args))
# Flatten:
args = [i for arg in args for i in (arg.args if isinstance(arg, (TensMul, Mul)) else [arg])]
args, indices, free, dum = TensMul._tensMul_contract_indices(args, replace_indices=False)
# Data for indices:
index_types = [i.tensor_index_type for i in indices]
index_structure = _IndexStructure(free, dum, index_types, indices, canon_bp=is_canon_bp)
obj = TensExpr.__new__(cls, *args)
obj._indices = indices
obj.index_types = index_types[:]
obj._index_structure = index_structure
obj.free = index_structure.free[:]
obj.dum = index_structure.dum[:]
obj.free_indices = set([x[0] for x in obj.free])
obj.rank = len(obj.free)
obj.ext_rank = len(obj._index_structure.free) + 2*len(obj._index_structure.dum)
obj.coeff = S.One
obj._is_canon_bp = is_canon_bp
return obj
@staticmethod
def _indices_to_free_dum(args_indices):
free2pos1 = {}
free2pos2 = {}
dummy_data = []
indices = []
# Notation for positions (to better understand the code):
# `pos1`: position in the `args`.
# `pos2`: position in the indices.
# Example:
# A(i, j)*B(k, m, n)*C(p)
# `pos1` of `n` is 1 because it's in `B` (second `args` of TensMul).
# `pos2` of `n` is 4 because it's the fifth overall index.
# Counter for the index position wrt the whole expression:
pos2 = 0
for pos1, arg_indices in enumerate(args_indices):
for index_pos, index in enumerate(arg_indices):
if not isinstance(index, TensorIndex):
raise TypeError("expected TensorIndex")
if -index in free2pos1:
# Dummy index detected:
other_pos1 = free2pos1.pop(-index)
other_pos2 = free2pos2.pop(-index)
if index.is_up:
dummy_data.append((index, pos1, other_pos1, pos2, other_pos2))
else:
dummy_data.append((-index, other_pos1, pos1, other_pos2, pos2))
indices.append(index)
elif index in free2pos1:
raise ValueError("Repeated index: %s" % index)
else:
free2pos1[index] = pos1
free2pos2[index] = pos2
indices.append(index)
pos2 += 1
free = [(i, p) for (i, p) in free2pos2.items()]
free_names = [i.name for i in free2pos2.keys()]
dummy_data.sort(key=lambda x: x[3])
return indices, free, free_names, dummy_data
@staticmethod
def _dummy_data_to_dum(dummy_data):
return [(p2a, p2b) for (i, p1a, p1b, p2a, p2b) in dummy_data]
@staticmethod
def _tensMul_contract_indices(args, replace_indices=True):
replacements = [{} for _ in args]
#_index_order = all([_has_index_order(arg) for arg in args])
args_indices = [get_indices(arg) for arg in args]
indices, free, free_names, dummy_data = TensMul._indices_to_free_dum(args_indices)
cdt = defaultdict(int)
def dummy_name_gen(tensor_index_type):
nd = str(cdt[tensor_index_type])
cdt[tensor_index_type] += 1
return tensor_index_type.dummy_name + '_' + nd
if replace_indices:
for old_index, pos1cov, pos1contra, pos2cov, pos2contra in dummy_data:
index_type = old_index.tensor_index_type
while True:
dummy_name = dummy_name_gen(index_type)
if dummy_name not in free_names:
break
dummy = TensorIndex(dummy_name, index_type, True)
replacements[pos1cov][old_index] = dummy
replacements[pos1contra][-old_index] = -dummy
indices[pos2cov] = dummy
indices[pos2contra] = -dummy
args = [arg.xreplace(repl) for arg, repl in zip(args, replacements)]
dum = TensMul._dummy_data_to_dum(dummy_data)
return args, indices, free, dum
@staticmethod
def _get_components_from_args(args):
"""
        Get the list of ``TensorHead`` components of the tensors in ``args``.
"""
components = []
for arg in args:
if not isinstance(arg, TensExpr):
continue
if isinstance(arg, TensAdd):
continue
components.extend(arg.components)
return components
@staticmethod
def _rebuild_tensors_list(args, index_structure):
indices = index_structure.get_indices()
#tensors = [None for i in components] # pre-allocate list
ind_pos = 0
for i, arg in enumerate(args):
if not isinstance(arg, TensExpr):
continue
prev_pos = ind_pos
ind_pos += arg.ext_rank
args[i] = Tensor(arg.component, indices[prev_pos:ind_pos])
def doit(self, **kwargs):
is_canon_bp = self._is_canon_bp
deep = kwargs.get('deep', True)
if deep:
args = [arg.doit(**kwargs) for arg in self.args]
else:
args = self.args
args = [arg for arg in args if arg != self.identity]
# Extract non-tensor coefficients:
coeff = reduce(lambda a, b: a*b, [arg for arg in args if not isinstance(arg, TensExpr)], S.One)
args = [arg for arg in args if isinstance(arg, TensExpr)]
if len(args) == 0:
return coeff
if coeff != self.identity:
args = [coeff] + args
if coeff == 0:
return S.Zero
if len(args) == 1:
return args[0]
args, indices, free, dum = TensMul._tensMul_contract_indices(args)
# Data for indices:
index_types = [i.tensor_index_type for i in indices]
index_structure = _IndexStructure(free, dum, index_types, indices, canon_bp=is_canon_bp)
obj = self.func(*args)
obj._index_types = index_types
obj._index_structure = index_structure
obj.ext_rank = len(obj._index_structure.free) + 2*len(obj._index_structure.dum)
obj.coeff = coeff
obj._is_canon_bp = is_canon_bp
return obj
# TODO: this method should be private
# TODO: should this method be renamed _from_components_free_dum ?
@staticmethod
def from_data(coeff, components, free, dum, **kw_args):
return TensMul(coeff, *TensMul._get_tensors_from_components_free_dum(components, free, dum), **kw_args).doit()
@staticmethod
def _get_tensors_from_components_free_dum(components, free, dum):
"""
Get a list of ``Tensor`` objects by distributing ``free`` and ``dum`` indices on the ``components``.
"""
index_structure = _IndexStructure.from_components_free_dum(components, free, dum)
indices = index_structure.get_indices()
tensors = [None for i in components] # pre-allocate list
# distribute indices on components to build a list of tensors:
ind_pos = 0
for i, component in enumerate(components):
prev_pos = ind_pos
ind_pos += component.rank
tensors[i] = Tensor(component, indices[prev_pos:ind_pos])
return tensors
def _get_free_indices_set(self):
return set([i[0] for i in self.free])
def _get_dummy_indices_set(self):
dummy_pos = set(itertools.chain(*self.dum))
return set(idx for i, idx in enumerate(self._index_structure.get_indices()) if i in dummy_pos)
def _get_position_offset_for_indices(self):
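        # For every index slot of the product, record the slot at which the
        # owning argument starts; ``pos - arg_offset[pos]`` then gives the
        # position of the index inside that argument (see ``free_in_args``).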
arg_offset = [None for i in range(self.ext_rank)]
counter = 0
for i, arg in enumerate(self.args):
if not isinstance(arg, TensExpr):
continue
for j in range(arg.ext_rank):
arg_offset[j + counter] = counter
counter += arg.ext_rank
return arg_offset
@property
def free_args(self):
return sorted([x[0] for x in self.free])
@property
def components(self):
return self._get_components_from_args(self.args)
@property
def free_in_args(self):
arg_offset = self._get_position_offset_for_indices()
argpos = self._get_indices_to_args_pos()
return [(ind, pos-arg_offset[pos], argpos[pos]) for (ind, pos) in self.free]
@property
def nocoeff(self):
return self.func(*[t for t in self.args if isinstance(t, TensExpr)]).doit()
@property
def dum_in_args(self):
arg_offset = self._get_position_offset_for_indices()
argpos = self._get_indices_to_args_pos()
return [(p1-arg_offset[p1], p2-arg_offset[p2], argpos[p1], argpos[p2]) for p1, p2 in self.dum]
def equals(self, other):
if other == 0:
return self.coeff == 0
other = _sympify(other)
if not isinstance(other, TensExpr):
assert not self.components
return self.coeff == other
return self.canon_bp() == other.canon_bp()
def get_indices(self):
"""
Returns the list of indices of the tensor
The indices are listed in the order in which they appear in the
component tensors.
The dummy indices are given a name which does not collide with
the names of the free indices.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz)
>>> g = Lorentz.metric
>>> p, q = tensor_heads('p,q', [Lorentz])
>>> t = p(m1)*g(m0,m2)
>>> t.get_indices()
[m1, m0, m2]
>>> t2 = p(m1)*g(-m1, m2)
>>> t2.get_indices()
[L_0, -L_0, m2]
"""
return self._indices
def get_free_indices(self):
"""
Returns the list of free indices of the tensor
The indices are listed in the order in which they appear in the
component tensors.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz)
>>> g = Lorentz.metric
>>> p, q = tensor_heads('p,q', [Lorentz])
>>> t = p(m1)*g(m0,m2)
>>> t.get_free_indices()
[m1, m0, m2]
>>> t2 = p(m1)*g(-m1, m2)
>>> t2.get_free_indices()
[m2]
"""
return self._index_structure.get_free_indices()
def split(self):
"""
Returns a list of tensors, whose product is ``self``
Dummy indices contracted among different tensor components
become free indices with the same name as the one used to
represent the dummy indices.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads, TensorSymmetry
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> a, b, c, d = tensor_indices('a,b,c,d', Lorentz)
>>> A, B = tensor_heads('A,B', [Lorentz]*2, TensorSymmetry.fully_symmetric(2))
>>> t = A(a,b)*B(-b,c)
>>> t
A(a, L_0)*B(-L_0, c)
>>> t.split()
[A(a, L_0), B(-L_0, c)]
"""
if self.args == ():
return [self]
splitp = []
res = 1
for arg in self.args:
if isinstance(arg, Tensor):
splitp.append(res*arg)
res = 1
else:
res *= arg
return splitp
def _expand(self, **hints):
# TODO: temporary solution, in the future this should be linked to
# `Expr.expand`.
args = [_expand(arg, **hints) for arg in self.args]
args1 = [arg.args if isinstance(arg, (Add, TensAdd)) else (arg,) for arg in args]
return TensAdd(*[
TensMul(*i) for i in itertools.product(*args1)]
)
def __neg__(self):
return TensMul(S.NegativeOne, self, is_canon_bp=self._is_canon_bp).doit()
def __getitem__(self, item):
deprecate_data()
return self.data[item]
def _get_args_for_traditional_printer(self):
args = list(self.args)
if (self.coeff < 0) == True:
# expressions like "-A(a)"
sign = "-"
if self.coeff == S.NegativeOne:
args = args[1:]
else:
args[0] = -args[0]
else:
sign = ""
return sign, args
def _sort_args_for_sorted_components(self):
"""
Returns the ``args`` sorted according to the components commutation
properties.
The sorting is done taking into account the commutation group
of the component tensors.
"""
cv = [arg for arg in self.args if isinstance(arg, TensExpr)]
sign = 1
n = len(cv) - 1
for i in range(n):
for j in range(n, i, -1):
c = cv[j-1].commutes_with(cv[j])
                # if `c` is `None`, the factors neither commute nor anticommute, skip:
if c not in [0, 1]:
continue
typ1 = sorted(set(cv[j-1].component.index_types), key=lambda x: x.name)
typ2 = sorted(set(cv[j].component.index_types), key=lambda x: x.name)
if (typ1, cv[j-1].component.name) > (typ2, cv[j].component.name):
cv[j-1], cv[j] = cv[j], cv[j-1]
                # if `c` is 1, they anticommute, so change the sign:
if c:
sign = -sign
coeff = sign * self.coeff
if coeff != 1:
return [coeff] + cv
return cv
def sorted_components(self):
"""
Returns a tensor product with sorted components.
"""
return TensMul(*self._sort_args_for_sorted_components()).doit()
def perm2tensor(self, g, is_canon_bp=False):
"""
Returns the tensor corresponding to the permutation ``g``
For further details, see the method in ``TIDS`` with the same name.
"""
return perm2tensor(self, g, is_canon_bp=is_canon_bp)
def canon_bp(self):
"""
Canonicalize using the Butler-Portugal algorithm for canonicalization
under monoterm symmetries.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorHead, TensorSymmetry
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz)
>>> A = TensorHead('A', [Lorentz]*2, TensorSymmetry.fully_symmetric(-2))
>>> t = A(m0,-m1)*A(m1,-m0)
>>> t.canon_bp()
-A(L_0, L_1)*A(-L_0, -L_1)
>>> t = A(m0,-m1)*A(m1,-m2)*A(m2,-m0)
>>> t.canon_bp()
0
"""
if self._is_canon_bp:
return self
expr = self.expand()
if isinstance(expr, TensAdd):
return expr.canon_bp()
if not expr.components:
return expr
t = expr.sorted_components()
g, dummies, msym = t._index_structure.indices_canon_args()
v = components_canon_args(t.components)
can = canonicalize(g, dummies, msym, *v)
if can == 0:
return S.Zero
tmul = t.perm2tensor(can, True)
return tmul
def contract_delta(self, delta):
t = self.contract_metric(delta)
return t
def _get_indices_to_args_pos(self):
"""
Get a dict mapping the index position to TensMul's argument number.
"""
pos_map = dict()
pos_counter = 0
for arg_i, arg in enumerate(self.args):
if not isinstance(arg, TensExpr):
continue
assert isinstance(arg, Tensor)
for i in range(arg.ext_rank):
pos_map[pos_counter] = arg_i
pos_counter += 1
return pos_map
def contract_metric(self, g):
"""
Raise or lower indices with the metric ``g``
Parameters
==========
g : metric
Notes
=====
see the ``TensorIndexType`` docstring for the contraction conventions
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz)
>>> g = Lorentz.metric
>>> p, q = tensor_heads('p,q', [Lorentz])
>>> t = p(m0)*q(m1)*g(-m0, -m1)
>>> t.canon_bp()
metric(L_0, L_1)*p(-L_0)*q(-L_1)
>>> t.contract_metric(g).canon_bp()
p(L_0)*q(-L_0)
"""
expr = self.expand()
if self != expr:
expr = expr.canon_bp()
return expr.contract_metric(g)
pos_map = self._get_indices_to_args_pos()
args = list(self.args)
#antisym = g.index_types[0].metric_antisym
if g.symmetry == TensorSymmetry.fully_symmetric(-2):
antisym = 1
elif g.symmetry == TensorSymmetry.fully_symmetric(2):
antisym = 0
elif g.symmetry == TensorSymmetry.no_symmetry(2):
antisym = None
else:
raise NotImplementedError
# list of positions of the metric ``g`` inside ``args``
gpos = [i for i, x in enumerate(self.args) if isinstance(x, Tensor) and x.component == g]
if not gpos:
return self
# Sign is either 1 or -1, to correct the sign after metric contraction
# (for spinor indices).
sign = 1
dum = self.dum[:]
free = self.free[:]
elim = set()
for gposx in gpos:
if gposx in elim:
continue
free1 = [x for x in free if pos_map[x[1]] == gposx]
dum1 = [x for x in dum if pos_map[x[0]] == gposx or pos_map[x[1]] == gposx]
if not dum1:
continue
elim.add(gposx)
# subs with the multiplication neutral element, that is, remove it:
args[gposx] = 1
if len(dum1) == 2:
if not antisym:
dum10, dum11 = dum1
if pos_map[dum10[1]] == gposx:
                        # the index with pos p0 is contravariant
p0 = dum10[0]
else:
# the index with pos p0 is covariant
p0 = dum10[1]
if pos_map[dum11[1]] == gposx:
# the index with pos p1 is contravariant
p1 = dum11[0]
else:
# the index with pos p1 is covariant
p1 = dum11[1]
dum.append((p0, p1))
else:
dum10, dum11 = dum1
# change the sign to bring the indices of the metric to contravariant
# form; change the sign if dum10 has the metric index in position 0
if pos_map[dum10[1]] == gposx:
# the index with pos p0 is contravariant
p0 = dum10[0]
if dum10[1] == 1:
sign = -sign
else:
# the index with pos p0 is covariant
p0 = dum10[1]
if dum10[0] == 0:
sign = -sign
if pos_map[dum11[1]] == gposx:
# the index with pos p1 is contravariant
p1 = dum11[0]
sign = -sign
else:
# the index with pos p1 is covariant
p1 = dum11[1]
dum.append((p0, p1))
elif len(dum1) == 1:
if not antisym:
dp0, dp1 = dum1[0]
if pos_map[dp0] == pos_map[dp1]:
# g(i, -i)
typ = g.index_types[0]
sign = sign*typ.dim
else:
# g(i0, i1)*p(-i1)
if pos_map[dp0] == gposx:
p1 = dp1
else:
p1 = dp0
ind, p = free1[0]
free.append((ind, p1))
else:
dp0, dp1 = dum1[0]
if pos_map[dp0] == pos_map[dp1]:
# g(i, -i)
typ = g.index_types[0]
sign = sign*typ.dim
if dp0 < dp1:
# g(i, -i) = -D with antisymmetric metric
sign = -sign
else:
# g(i0, i1)*p(-i1)
if pos_map[dp0] == gposx:
p1 = dp1
if dp0 == 0:
sign = -sign
else:
p1 = dp0
ind, p = free1[0]
free.append((ind, p1))
dum = [x for x in dum if x not in dum1]
free = [x for x in free if x not in free1]
# shift positions:
shift = 0
shifts = [0]*len(args)
for i in range(len(args)):
if i in elim:
shift += 2
continue
shifts[i] = shift
free = [(ind, p - shifts[pos_map[p]]) for (ind, p) in free if pos_map[p] not in elim]
dum = [(p0 - shifts[pos_map[p0]], p1 - shifts[pos_map[p1]]) for i, (p0, p1) in enumerate(dum) if pos_map[p0] not in elim and pos_map[p1] not in elim]
res = sign*TensMul(*args).doit()
if not isinstance(res, TensExpr):
return res
im = _IndexStructure.from_components_free_dum(res.components, free, dum)
return res._set_new_index_structure(im)
def _set_new_index_structure(self, im, is_canon_bp=False):
indices = im.get_indices()
return self._set_indices(*indices, is_canon_bp=is_canon_bp)
def _set_indices(self, *indices, **kw_args):
if len(indices) != self.ext_rank:
raise ValueError("indices length mismatch")
args = list(self.args)[:]
pos = 0
is_canon_bp = kw_args.pop('is_canon_bp', False)
for i, arg in enumerate(args):
if not isinstance(arg, TensExpr):
continue
assert isinstance(arg, Tensor)
ext_rank = arg.ext_rank
args[i] = arg._set_indices(*indices[pos:pos+ext_rank])
pos += ext_rank
return TensMul(*args, is_canon_bp=is_canon_bp).doit()
@staticmethod
def _index_replacement_for_contract_metric(args, free, dum):
for arg in args:
if not isinstance(arg, TensExpr):
continue
assert isinstance(arg, Tensor)
def substitute_indices(self, *index_tuples):
new_args = []
for arg in self.args:
if isinstance(arg, TensExpr):
arg = arg.substitute_indices(*index_tuples)
new_args.append(arg)
return TensMul(*new_args).doit()
def __call__(self, *indices):
deprecate_fun_eval()
free_args = self.free_args
indices = list(indices)
if [x.tensor_index_type for x in indices] != [x.tensor_index_type for x in free_args]:
raise ValueError('incompatible types')
if indices == free_args:
return self
t = self.substitute_indices(*list(zip(free_args, indices)))
# object is rebuilt in order to make sure that all contracted indices
# get recognized as dummies, but only if there are contracted indices.
if len(set(i if i.is_up else -i for i in indices)) != len(indices):
return t.func(*t.args)
return t
def _extract_data(self, replacement_dict):
args_indices, arrays = zip(*[arg._extract_data(replacement_dict) for arg in self.args if isinstance(arg, TensExpr)])
coeff = reduce(operator.mul, [a for a in self.args if not isinstance(a, TensExpr)], S.One)
indices, free, free_names, dummy_data = TensMul._indices_to_free_dum(args_indices)
dum = TensMul._dummy_data_to_dum(dummy_data)
ext_rank = self.ext_rank
free.sort(key=lambda x: x[1])
free_indices = [i[0] for i in free]
return free_indices, coeff*_TensorDataLazyEvaluator.data_contract_dum(arrays, dum, ext_rank)
@property
def data(self):
deprecate_data()
dat = _tensor_data_substitution_dict[self.expand()]
return dat
@data.setter
def data(self, data):
deprecate_data()
raise ValueError("Not possible to set component data to a tensor expression")
@data.deleter
def data(self):
deprecate_data()
raise ValueError("Not possible to delete component data to a tensor expression")
def __iter__(self):
deprecate_data()
if self.data is None:
raise ValueError("No iteration on abstract tensors")
return self.data.__iter__()
def _eval_rewrite_as_Indexed(self, *args):
from sympy import Sum
index_symbols = [i.args[0] for i in self.get_indices()]
args = [arg.args[0] if isinstance(arg, Sum) else arg for arg in args]
expr = Mul.fromiter(args)
return self._check_add_Sum(expr, index_symbols)
class TensorElement(TensExpr):
"""
Tensor with evaluated components.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, TensorHead, TensorSymmetry
>>> from sympy import symbols
>>> L = TensorIndexType("L")
>>> i, j, k = symbols("i j k")
>>> A = TensorHead("A", [L, L], TensorSymmetry.fully_symmetric(2))
>>> A(i, j).get_free_indices()
[i, j]
If we want to set component ``i`` to a specific value, use the
``TensorElement`` class:
>>> from sympy.tensor.tensor import TensorElement
>>> te = TensorElement(A(i, j), {i: 2})
As index ``i`` has been accessed (``{i: 2}`` is the evaluation of its 3rd
element), the free indices will only contain ``j``:
>>> te.get_free_indices()
[j]
"""
def __new__(cls, expr, index_map):
if not isinstance(expr, Tensor):
# remap
if not isinstance(expr, TensExpr):
raise TypeError("%s is not a tensor expression" % expr)
return expr.func(*[TensorElement(arg, index_map) for arg in expr.args])
expr_free_indices = expr.get_free_indices()
name_translation = {i.args[0]: i for i in expr_free_indices}
index_map = {name_translation.get(index, index): value for index, value in index_map.items()}
index_map = {index: value for index, value in index_map.items() if index in expr_free_indices}
if len(index_map) == 0:
return expr
free_indices = [i for i in expr_free_indices if i not in index_map.keys()]
index_map = Dict(index_map)
obj = TensExpr.__new__(cls, expr, index_map)
obj._free_indices = free_indices
return obj
@property
def free(self):
return [(index, i) for i, index in enumerate(self.get_free_indices())]
@property
def dum(self):
# TODO: inherit dummies from expr
return []
@property
def expr(self):
return self._args[0]
@property
def index_map(self):
return self._args[1]
def get_free_indices(self):
return self._free_indices
def get_indices(self):
return self.get_free_indices()
def _extract_data(self, replacement_dict):
ret_indices, array = self.expr._extract_data(replacement_dict)
index_map = self.index_map
slice_tuple = tuple(index_map.get(i, slice(None)) for i in ret_indices)
ret_indices = [i for i in ret_indices if i not in index_map]
array = array.__getitem__(slice_tuple)
return ret_indices, array
def canon_bp(p):
"""
Butler-Portugal canonicalization. See ``tensor_can.py`` from the
combinatorics module for the details.
"""
if isinstance(p, TensExpr):
return p.canon_bp()
return p
def tensor_mul(*a):
"""
product of tensors
"""
if not a:
return TensMul.from_data(S.One, [], [], [])
t = a[0]
for tx in a[1:]:
t = t*tx
return t
def riemann_cyclic_replace(t_r):
"""
replace Riemann tensor with an equivalent expression
``R(m,n,p,q) -> 2/3*R(m,n,p,q) - 1/3*R(m,q,n,p) + 1/3*R(m,p,n,q)``
"""
free = sorted(t_r.free, key=lambda x: x[1])
m, n, p, q = [x[0] for x in free]
t0 = t_r*Rational(2, 3)
t1 = -t_r.substitute_indices((m,m),(n,q),(p,n),(q,p))*Rational(1, 3)
t2 = t_r.substitute_indices((m,m),(n,p),(p,n),(q,q))*Rational(1, 3)
t3 = t0 + t1 + t2
return t3
def riemann_cyclic(t2):
"""
replace each Riemann tensor with an equivalent expression
satisfying the cyclic identity.
This trick is discussed in the reference guide to Cadabra.
Examples
========
>>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorHead, riemann_cyclic, TensorSymmetry
>>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
>>> i, j, k, l = tensor_indices('i,j,k,l', Lorentz)
>>> R = TensorHead('R', [Lorentz]*4, TensorSymmetry.riemann())
>>> t = R(i,j,k,l)*(R(-i,-j,-k,-l) - 2*R(-i,-k,-j,-l))
>>> riemann_cyclic(t)
0
"""
t2 = t2.expand()
if isinstance(t2, (TensMul, Tensor)):
args = [t2]
else:
args = t2.args
a1 = [x.split() for x in args]
a2 = [[riemann_cyclic_replace(tx) for tx in y] for y in a1]
a3 = [tensor_mul(*v) for v in a2]
t3 = TensAdd(*a3).doit()
if not t3:
return t3
else:
return canon_bp(t3)
def get_lines(ex, index_type):
"""
returns ``(lines, traces, rest)`` for an index type,
    where ``lines`` is a list of lists of positions forming matrix lines,
    ``traces`` is a list of lists of positions of traced matrix lines,
    ``rest`` is the list of positions of the remaining elements of the tensor.
"""
def _join_lines(a):
i = 0
while i < len(a):
x = a[i]
xend = x[-1]
xstart = x[0]
hit = True
while hit:
hit = False
for j in range(i + 1, len(a)):
if j >= len(a):
break
if a[j][0] == xend:
hit = True
x.extend(a[j][1:])
xend = x[-1]
a.pop(j)
continue
if a[j][0] == xstart:
hit = True
                        a[i] = list(reversed(a[j][1:])) + x
x = a[i]
xstart = a[i][0]
a.pop(j)
continue
if a[j][-1] == xend:
hit = True
x.extend(reversed(a[j][:-1]))
xend = x[-1]
a.pop(j)
continue
if a[j][-1] == xstart:
hit = True
a[i] = a[j][:-1] + x
x = a[i]
xstart = x[0]
a.pop(j)
continue
i += 1
return a
arguments = ex.args
dt = {}
for c in ex.args:
if not isinstance(c, TensExpr):
continue
if c in dt:
continue
index_types = c.index_types
a = []
for i in range(len(index_types)):
if index_types[i] is index_type:
a.append(i)
if len(a) > 2:
raise ValueError('at most two indices of type %s allowed' % index_type)
if len(a) == 2:
dt[c] = a
#dum = ex.dum
lines = []
traces = []
traces1 = []
#indices_to_args_pos = ex._get_indices_to_args_pos()
# TODO: add a dum_to_components_map ?
for p0, p1, c0, c1 in ex.dum_in_args:
if arguments[c0] not in dt:
continue
if c0 == c1:
traces.append([c0])
continue
ta0 = dt[arguments[c0]]
ta1 = dt[arguments[c1]]
if p0 not in ta0:
continue
if ta0.index(p0) == ta1.index(p1):
# case gamma(i,s0,-s1) in c0, gamma(j,-s0,s2) in c1;
# to deal with this case one could add to the position
# a flag for transposition;
# one could write [(c0, False), (c1, True)]
raise NotImplementedError
# if p0 == ta0[1] then G in pos c0 is mult on the right by G in c1
# if p0 == ta0[0] then G in pos c1 is mult on the right by G in c0
ta0 = dt[arguments[c0]]
b0, b1 = (c0, c1) if p0 == ta0[1] else (c1, c0)
lines1 = lines[:]
for line in lines:
if line[-1] == b0:
if line[0] == b1:
n = line.index(min(line))
traces1.append(line)
traces.append(line[n:] + line[:n])
else:
line.append(b1)
break
elif line[0] == b1:
line.insert(0, b0)
break
else:
lines1.append([b0, b1])
lines = [x for x in lines1 if x not in traces1]
lines = _join_lines(lines)
rest = []
for line in lines:
for y in line:
rest.append(y)
for line in traces:
for y in line:
rest.append(y)
rest = [x for x in range(len(arguments)) if x not in rest]
return lines, traces, rest
def get_free_indices(t):
if not isinstance(t, TensExpr):
return ()
return t.get_free_indices()
def get_indices(t):
if not isinstance(t, TensExpr):
return ()
return t.get_indices()
def get_index_structure(t):
if isinstance(t, TensExpr):
return t._index_structure
return _IndexStructure([], [], [], [])
def get_coeff(t):
if isinstance(t, Tensor):
return S.One
if isinstance(t, TensMul):
return t.coeff
if isinstance(t, TensExpr):
raise ValueError("no coefficient associated to this tensor expression")
return t
def contract_metric(t, g):
if isinstance(t, TensExpr):
return t.contract_metric(g)
return t
def perm2tensor(t, g, is_canon_bp=False):
"""
Returns the tensor corresponding to the permutation ``g``
For further details, see the method in ``TIDS`` with the same name.
"""
if not isinstance(t, TensExpr):
return t
elif isinstance(t, (Tensor, TensMul)):
nim = get_index_structure(t).perm2tensor(g, is_canon_bp=is_canon_bp)
res = t._set_new_index_structure(nim, is_canon_bp=is_canon_bp)
if g[-1] != len(g) - 1:
return -res
return res
raise NotImplementedError()
def substitute_indices(t, *index_tuples):
if not isinstance(t, TensExpr):
return t
return t.substitute_indices(*index_tuples)
def _expand(expr, **kwargs):
if isinstance(expr, TensExpr):
return expr._expand(**kwargs)
else:
return expr.expand(**kwargs)
| 34.031742
| 157
| 0.580254
|
3bd9891efbedd6a59cd40abe7a6667ddca9bdd20
| 1,181
|
py
|
Python
|
nodes/input_logx3dpro.py
|
WisconsinRobotics/wreadinput
|
8d63d130dd2e6a8e440b9ca1cee571c01d4091ea
|
[
"MIT"
] | null | null | null |
nodes/input_logx3dpro.py
|
WisconsinRobotics/wreadinput
|
8d63d130dd2e6a8e440b9ca1cee571c01d4091ea
|
[
"MIT"
] | 1
|
2022-03-16T06:49:21.000Z
|
2022-03-16T06:49:21.000Z
|
nodes/input_logx3dpro.py
|
WisconsinRobotics/wreadinput
|
8d63d130dd2e6a8e440b9ca1cee571c01d4091ea
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from wreadinput import DeviceAxis, DeviceShape, DeviceKey, default_node
# TODO check to make sure these are what actually get mapped by evdev
SHAPE_LOGX3DPRO = DeviceShape()\
.with_axis(DeviceAxis.ABS_X, 'stick_x', deadband = 0.1)\
.with_axis(DeviceAxis.ABS_Y, 'stick_y', 1, -1, deadband = 0.1)\
.with_axis(DeviceAxis.ABS_RZ, 'stick_twist')\
.with_axis(DeviceAxis.ABS_THROTTLE, 'throttle', 0, 1)\
.with_axis(DeviceAxis.ABS_HAT0X, 'pov_x')\
.with_axis(DeviceAxis.ABS_HAT0Y, 'pov_y')\
.with_key(DeviceKey.BTN_TRIGGER, 'trigger')\
.with_key(DeviceKey.BTN_THUMB, 'thumb')\
.with_key(DeviceKey.BTN_THUMB2, '3')\
.with_key(DeviceKey.BTN_TOP, '4')\
.with_key(DeviceKey.BTN_TOP2, '5')\
.with_key(DeviceKey.BTN_PINKIE, '6')\
.with_key(DeviceKey.BTN_BASE, '7')\
.with_key(DeviceKey.BTN_BASE2, '8')\
.with_key(DeviceKey.BTN_BASE3, '9')\
.with_key(DeviceKey.BTN_BASE4, '10')\
.with_key(DeviceKey.BTN_BASE5, '11')\
.with_key(DeviceKey.BTN_BASE6, '12')
# see `wreadinput.default_node` for implementation details
if __name__ == '__main__':
default_node.main('input_logx3dpro', SHAPE_LOGX3DPRO)
| 40.724138
| 71
| 0.707875
|
b9fede06c12e550f23f845691e9b72397575c184
| 597
|
py
|
Python
|
data/AVA/extract_flow.py
|
pedro-abreu/twostream-attention
|
60a47c50b8f2427911e5e30fd6c6f933dbf08a4e
|
[
"MIT"
] | 10
|
2018-10-26T05:19:15.000Z
|
2019-02-02T03:39:29.000Z
|
data/AVA/extract_flow.py
|
pedro-abreu/twostream-attention
|
60a47c50b8f2427911e5e30fd6c6f933dbf08a4e
|
[
"MIT"
] | 2
|
2018-11-14T05:03:56.000Z
|
2019-02-19T09:06:52.000Z
|
data/AVA/extract_flow.py
|
pedro-abreu/twostream-attention
|
60a47c50b8f2427911e5e30fd6c6f933dbf08a4e
|
[
"MIT"
] | 3
|
2018-11-14T05:12:24.000Z
|
2019-03-02T15:01:15.000Z
|
import os
set_type = 'test'
OUT_DIR = '/media/pedro/actv-ssd/flow_' + set_type + "_warp/"
DATA_DIR = "/media/pedro/actv-ssd/segments_" + set_type + "/"
GPU_FLOW_DIR = '../../arch/tvl1_flow/build/'
def _process_dataset():
os.system(GPU_FLOW_DIR + "./compute_flow_si_warp --gpuID=0 --type=1 --vid_path=" +
DATA_DIR + " --out_path=" + OUT_DIR + " --skip=" + str(2))
def main():
# Create directories for the classes
if not os.path.exists(OUT_DIR):
os.makedirs(OUT_DIR + "/")
# Process dataset
_process_dataset()
if __name__ == '__main__':
main()
| 22.961538
| 86
| 0.624791
|
83802fedda5bae7f834072b922480bab18cf1730
| 6,992
|
py
|
Python
|
day22/day22.py
|
mpirnat/adventofcode
|
f63ce466b91d5be2687be95fd4ab0b442f1ea5c8
|
[
"MIT"
] | 12
|
2015-12-02T05:56:36.000Z
|
2017-11-29T18:58:47.000Z
|
day22/day22.py
|
mpirnat/adventofcode
|
f63ce466b91d5be2687be95fd4ab0b442f1ea5c8
|
[
"MIT"
] | 1
|
2015-12-06T03:36:18.000Z
|
2015-12-06T20:52:29.000Z
|
day22/day22.py
|
mpirnat/adventofcode
|
f63ce466b91d5be2687be95fd4ab0b442f1ea5c8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
Solve day 22 of Advent of Code.
http://adventofcode.com/day/22
Right now this implementation is somewhat flaky and doesn't reliably
find the right winning scenarios but it did produce the right output
a couple of times. It should probably be rewritten if I can muster
up the energy to care.
"""
import copy
import random
class Character:
def __init__(self, name='', hit_points=1,
natural_damage=0, natural_defense=0,
weapon=None, armor=None, rings=None):
self.name = name
self.hit_points = hit_points
self.natural_damage = natural_damage
self.natural_defense = natural_defense
self.weapon = weapon
self.armor = armor
self.rings = rings or []
@property
def damage(self):
return (self.natural_damage +
(self.weapon.damage if self.weapon else 0) +
(self.armor.damage if self.armor else 0) +
sum([x.damage for x in self.rings]))
@property
def defense(self):
return (self.natural_defense +
(self.weapon.defense if self.weapon else 0) +
(self.armor.defense if self.armor else 0) +
sum([x.defense for x in self.rings]))
@property
def inventory_cost(self):
return ((self.weapon.cost if self.weapon else 0) +
(self.armor.cost if self.armor else 0) +
sum([x.cost for x in self.rings]))
@property
def rings(self):
return self._rings
@rings.setter
def rings(self, rings):
self._rings = [x for x in rings if x]
class MagicUser(Character):
def __init__(self, name='', hit_points=1, mana=0,
natural_damage=0, natural_defense=0,
weapon=None, armor=None, rings=None):
super(MagicUser, self).__init__(name=name, hit_points=hit_points)
self.mana = mana
self.mana_spent = 0
self.temporary_defense = 0
@property
def defense(self):
return (self.natural_defense + self.temporary_defense)
class Item:
def __init__(self, name, cost, damage, defense):
self.name = name
self.cost = cost
self.damage = damage
self.defense = defense
class Spell:
def __init__(self, name, cost, effect):
self.name = name
self.cost = cost
self.effect = effect
class Effect:
def __init__(self, damage=0, heal=0, restore=0, defense=0, duration=1):
self.damage = damage
self.heal = heal
self.restore = restore
self.defense = defense
self.duration = duration
self.duration_remaining = duration
SPELLS = [
Spell('Magic Missile', 53, Effect(damage=4)),
Spell('Drain', 73, Effect(damage=2, heal=2)),
Spell('Shield', 113, Effect(defense=7, duration=6)),
Spell('Poison', 173, Effect(damage=3, duration=6)),
Spell('Recharge', 229, Effect(restore=101, duration=5))]
class GameOver(Exception):
pass
class Combat:
def __init__(self, character1, character2):
self.character1 = character1
self.character2 = character2
self.effects = []
def do_turn(self):
# do player turn
try:
self.process_effects(self.character1, self.character2)
self.cast_spell(self.character1)
except GameOver:
return self.character2
# do enemy turn
self.process_effects(self.character1, self.character2)
self.attack(self.character2, self.character1)
#print("Player HP:", self.character1.hit_points,
# "Mana:", self.character1.mana)
#print("Boss HP:", self.character2.hit_points)
return (
self.winner(self.character1, self.character2) or
self.winner(self.character2, self.character1))
def cast_spell(self, caster):
#print("Player mana:", caster.mana)
# Rule out spells already in effect & spells that cost too much
available_spells = [spell for spell in SPELLS
if spell.effect not in self.effects and
caster.mana >= spell.cost]
#print("Available spells:", [x.name for x in available_spells])
# If no spells available, game over!
if not available_spells:
#print("Could not cast anything!")
raise GameOver()
# Choose a spell at random
spell = random.choice(available_spells)
#print("Player cast:", spell.name)
# Decrement caster's mana
caster.mana -= spell.cost
# Track caster's total expenditure
caster.mana_spent += spell.cost
# Add the effect to effects
spell.effect.duration_remaining = spell.effect.duration
self.effects.append(spell.effect)
def process_effects(self, caster, defender):
#print("Active effects:")
for effect in self.effects:
if effect.duration == effect.duration_remaining:
caster.temporary_defense += effect.defense
defender.hit_points -= effect.damage
caster.hit_points += effect.heal
caster.mana += effect.restore
effect.duration_remaining -= 1
if not effect.duration_remaining:
caster.temporary_defense -= effect.defense
self.effects = [x for x in self.effects if x.duration_remaining]
if caster.hit_points <= 0:
raise GameOver()
def attack(self, attacker, defender):
if attacker.hit_points > 0:
defender.hit_points -= self.calculate_damage(attacker, defender)
def calculate_damage(self, attacker, defender):
return max([1, attacker.damage - defender.defense])
def winner(self, candidate, adversary):
return (candidate.hit_points > 0 and adversary.hit_points <= 0 and
candidate) or None
def do_full_combat(self):
winner = None
while not winner:
winner = self.do_turn()
return winner
def find_least_mana_to_win(hard=False):
least_mana = float('inf')
for player, enemy in generate_scenarios(1000000):
        combat = Combat(player, enemy)
        # set up the hard-mode drain effect before the combat runs,
        # otherwise it cannot influence the outcome
        combat.effects = []
        if hard:
            combat.effects = [Effect(heal=-1, duration=float('inf'))]
        winner = combat.do_full_combat()
if winner == player and player.mana_spent < least_mana:
least_mana = player.mana_spent
return least_mana
def generate_scenarios(iterations):
for i in range(iterations):
#print("*****")
player = MagicUser(name='player', hit_points=50, mana=500)
enemy = Character(name='boss', hit_points=71,
natural_damage=10, natural_defense=0)
yield player, enemy
if __name__ == '__main__':
least_mana = find_least_mana_to_win()
print("Part 1:", least_mana)
least_mana = find_least_mana_to_win(hard=True)
print("Part 2:", least_mana)
| 29.133333
| 76
| 0.616848
|
ab563e0af5bc6a0c402b6bcb67814b1e5305b814
| 6,909
|
py
|
Python
|
FWD/server.py
|
Nasfame/Assignments-Masai
|
0dc95c3fb58849637a7aad4914b92970c9196eca
|
[
"MIT"
] | 1
|
2020-05-29T09:00:44.000Z
|
2020-05-29T09:00:44.000Z
|
FWD/server.py
|
Nasfame/Assignments-Masai
|
0dc95c3fb58849637a7aad4914b92970c9196eca
|
[
"MIT"
] | null | null | null |
FWD/server.py
|
Nasfame/Assignments-Masai
|
0dc95c3fb58849637a7aad4914b92970c9196eca
|
[
"MIT"
] | null | null | null |
import csv
import json
from flask import Flask, request
flag = False
app = Flask(__name__)
@app.route('/')
def hello_world() :
return json.dumps('Hello Guys!')#jwt.encode("jsjd","sdjs")
@app.route('/users/details')
def listing() :
with open('data/users.csv', 'r') as f1 :
f1 = csv.DictReader(f1)
li = list(f1)
return json.dumps(li)
@app.route('/users/register', methods=['POST'])
def create() :
if flag == False :
return json.dumps("Authentication error")
with open('data/users.csv', 'a') as f1 :
f1 = csv.DictWriter(f1,fieldnames=['id','name','contact_number','address','password'])
cnt = json.loads(listing())
values = request.json
print(values)
values['id']=len(cnt)+1
f1.writerow(values)
return json.dumps("Success")
@app.route('/users/login',methods=['POST'])
def login():
login_data = list(request.json.values())
db = json.loads(listing())
values=[]
for i in db:
values.append([i['name'],i['password']])
if login_data in values:
global flag
flag=True
return json.dumps("Login Successful")
else:
return json.dumps("Login Failed")
@app.route('/users/modify/<id>', methods=['PATCH'])
def edit(id) :
if flag == False :
return json.dumps("Authentication error")
id = int(id)
cnt = json.loads(listing())
if id>len(cnt):
return json.dumps("User not in the DB")
cnt[id - 1]['password'] = request.json['password']
with open('data/users.csv', 'w') as f1 :
f1 = csv.DictWriter(f1, fieldnames=['id', 'name', 'contact_number', 'address', 'password'])
f1.writeheader()
f1.writerows(cnt)
return json.dumps("Modified password successfully")
@app.route('/users/delete/<int:id>', methods=['DELETE'])
def delete(id) :
if flag==True:
cnt = json.loads(listing())
cnt.pop(id - 1)
for i in range(len(cnt)) :
cnt[i]['id'] = str(i + 1)
with open('data/users.csv', 'w') as f1 :
            f1 = csv.DictWriter(f1, fieldnames=['id', 'name', 'contact_number', 'address', 'password'])
f1.writeheader()
f1.writerows(cnt)
return json.dumps("Deleted")
else:
return json.dumps("Authentication error")
################Start of the bus
@app.route('/buses',methods=['POST'])
def listingb() :
if flag==False:
return json.dumps("Authentication error")
with open('data/buses.csv', 'r') as f1 :
f1 = csv.DictReader(f1)
li = list(f1)
return json.dumps(li)
@app.route('/buses/register', methods=['POST'])
def createb() :
if flag == False :
return json.dumps("Authentication error")
with open('data/buses.csv', 'a') as f1 :
f1 = csv.DictWriter(f1,fieldnames=['id', 'bus_number', 'departure_loc', 'arrival_loc', 'journey_duration','fare'])
cnt = json.loads(listingb())
values = request.json
print(values)
values['id']=len(cnt)+1
f1.writerow(values)
return json.dumps("Success")
@app.route('/buses/search',methods=['POST'])
def show() :
if flag == False :
return json.dumps("Authentication error")
number = request.json['bus_number']
cnt = json.loads(listingb())
for i in cnt:
if i['bus_number']==number:
flags = "Found in the DB"
break
else:
flags = "Not in the DB"
return json.dumps(flags)
@app.route('/buses/modify/<id>', methods=['PATCH'])
def editbbb(id) :
if flag == False :
return json.dumps("Authentication error")
id = int(id)
cnt = json.loads(listingb())
if id>len(cnt):
return "Bus not in the DB"
cnt[id - 1] = request.json
cnt[id-1]['id']=str(id)
with open('data/buses.csv', 'w') as f1 :
f1 = csv.DictWriter(f1, fieldnames=['id', 'bus_number', 'departure_loc', 'arrival_loc', 'journey_duration','fare'])
f1.writeheader()
f1.writerows(cnt)
return json.dumps("Modified successfully")
@app.route('/buses/delete/<int:id>', methods=['DELETE'])
def delss(id):
if flag == False :
return json.dumps("Authentication error")
cnt = json.loads(listingb())
cnt.pop(id - 1)
for i in range(len(cnt)) :
cnt[i]['id'] = str(i + 1)
with open('data/buses.csv', 'w') as f1 :
f1 = csv.DictWriter(f1, fieldnames=['id', 'bus_number', 'departure_loc', 'arrival_loc', 'journey_duration','fare'])
f1.writeheader()
f1.writerows(cnt)
return json.dumps("deletebd")
################Start of the train
@app.route('/trains',methods=['POST'])
def listingt() :
if flag == False :
return json.dumps("Authentication error")
with open('data/trains.csv', 'r') as f1 :
f1 = csv.DictReader(f1)
li = list(f1)
return json.dumps(li)
@app.route('/trains/register', methods=['POST'])
def createt() :
if flag == False :
return json.dumps("Authentication error")
with open('data/trains.csv', 'a') as f1 :
f1 = csv.DictWriter(f1,fieldnames=['id', 'train_number', 'departure_loc', 'arrival_loc', 'journey_duration','fare'])
cnt = json.loads(listingt())
values = request.json
print(values)
values['id']=len(cnt)+1
f1.writerow(values)
return json.dumps("Success")
@app.route('/trains/search',methods=['POST'])
def searcht() :
if flag == False :
return json.dumps("Authentication error")
number = request.json['train_number']
cnt = json.loads(listingt())
for i in cnt:
if i['train_number']==number:
flags = "Found in the DB"
break
else:
flags = "Not in the DB"
    return json.dumps(flags)
@app.route('/trains/modify/<id>', methods=['PATCH'])
def editt(id) :
if flag == False :
return json.dumps("Authentication error")
id = int(id)
cnt = json.loads(listingt())
if id>len(cnt):
return "train not in the DB"
cnt[id - 1] = request.json
cnt[id-1]['id']=str(id)
with open('data/trains.csv', 'w') as f1 :
f1 = csv.DictWriter(f1, fieldnames=['id', 'train_number', 'departure_loc', 'arrival_loc', 'journey_duration','fare'])
f1.writeheader()
f1.writerows(cnt)
return json.dumps("Modified successfully")
@app.route('/trains/delete/<int:id>', methods=['DELETE'])
def deletet(id) :
if flag == False :
return json.dumps("Authentication error")
cnt = json.loads(listingt())
cnt.pop(id - 1)
for i in range(len(cnt)) :
cnt[i]['id'] = str(i + 1)
with open('data/trains.csv', 'w') as f1 :
f1 = csv.DictWriter(f1, fieldnames=['id','train_number', 'departure_loc', 'arrival_loc', 'journey_duration','fare'])
f1.writeheader()
f1.writerows(cnt)
return json.dumps("deletebd")
if __name__ == '__main__' :
app.run(debug=True)
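# Example usage once the server is running (illustrative; JSON field names
# follow the CSV headers used above, and most routes require a prior login):
#   curl -X POST -H 'Content-Type: application/json' \
#        -d '{"name": "alice", "password": "secret"}' http://127.0.0.1:5000/users/login
#   curl http://127.0.0.1:5000/users/details
#   curl -X POST -H 'Content-Type: application/json' \
#        -d '{"name": "bob", "contact_number": "123", "address": "x", "password": "pw"}' \
#        http://127.0.0.1:5000/users/register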
| 29.275424
| 125
| 0.586916
|